repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
CVPR19_Incremental_Learning | CVPR19_Incremental_Learning-master/cifar100-class-incremental/modified_linear.py | import math
import torch
from torch.nn.parameter import Parameter
from torch.nn import functional as F
from torch.nn import Module
class CosineLinear(Module):
    """Linear layer that outputs cosine similarity between the (L2-normalized)
    input and each (L2-normalized) class-weight row, optionally scaled by a
    single learnable temperature parameter ``sigma``.

    Used for class-incremental learning so that logit magnitudes of old and
    new classes stay comparable.
    """
    def __init__(self, in_features, out_features, sigma=True):
        super(CosineLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        if sigma:
            self.sigma = Parameter(torch.Tensor(1))
        else:
            # Registering None keeps ``self.sigma`` readable while adding no parameter.
            self.register_parameter('sigma', None)
        self.reset_parameters()
    def reset_parameters(self):
        """Uniform init in [-1/sqrt(fan_in), 1/sqrt(fan_in)]; sigma restarts at 1."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.sigma is not None:
            self.sigma.data.fill_(1) #for initializaiton of sigma
    def forward(self, input):
        # Cosine similarity == dot product of L2-normalized rows.
        normalized_input = F.normalize(input, p=2, dim=1)
        normalized_weight = F.normalize(self.weight, p=2, dim=1)
        out = F.linear(normalized_input, normalized_weight)
        return out if self.sigma is None else self.sigma * out
class SplitCosineLinear(Module):
    """Two cosine-similarity heads (e.g. old vs. new classes) whose outputs are
    concatenated and scaled by one shared learnable ``sigma``.

    Each sub-head has its own sigma disabled so the concatenated logits use a
    single common scale.
    """
    def __init__(self, in_features, out_features1, out_features2, sigma=True):
        super(SplitCosineLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features1 + out_features2
        # Per-head sigma disabled; scaling happens once on the concatenation.
        self.fc1 = CosineLinear(in_features, out_features1, False)
        self.fc2 = CosineLinear(in_features, out_features2, False)
        if sigma:
            self.sigma = Parameter(torch.Tensor(1))
            self.sigma.data.fill_(1)
        else:
            self.register_parameter('sigma', None)
    def forward(self, x):
        # Concatenate the two heads along the class (channel) dimension.
        scores = torch.cat((self.fc1(x), self.fc2(x)), dim=1)
        return scores if self.sigma is None else self.sigma * scores
TRSSL | TRSSL-main/train.py | import argparse
import os
import shutil
import time
import random
import math
import numpy as np
from datetime import datetime
from tqdm import tqdm
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torch.nn.functional as F
from utils.utils import Bar, Logger, AverageMeter, accuracy, interleave, save_checkpoint
from tensorboardX import SummaryWriter
from datasets.datasets import get_dataset_class
from utils.evaluate_utils import hungarian_evaluate
from models.build_model import build_model
from utils.uncr_util import uncr_generator
from utils.sinkhorn_knopp import SinkhornKnopp
# ---------------------------------------------------------------------------
# Command-line interface and module-level run configuration.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='TRSSL Training')
# Optimization options
parser.add_argument('--epochs', default=200, type=int, metavar='N',help='number of total epochs to run')
parser.add_argument('--batch-size', default=256, type=int, metavar='N', help='train batchsize')
parser.add_argument('--num-workers', default=4, type=int, help='number of dataloader workers')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--wdecay', default=1e-4, type=float, help='weight decay')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--warmup-epochs', default=10, type=int, help='number of warmup epochs')
# Checkpoints
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
# Miscs
parser.add_argument('--manualSeed', type=int, default=0, help='manual seed')
#Method options
parser.add_argument('--lbl-percent', type=int, default=10, help='Percentage of labeled data')
parser.add_argument('--novel-percent', default=50, type=int, help='Percentage of novel classes, default 50')
parser.add_argument('--train-iteration', type=int, default=1024, help='Number of iteration per epoch')
parser.add_argument('--out', default='outputs', help='Directory to output the result')
parser.add_argument('--alpha', default=0.75, type=float)
parser.add_argument('--ema-decay', default=0.999, type=float)
parser.add_argument('--dataset', default='cifar10', type=str,
                    choices=['cifar10', 'cifar100', 'tinyimagenet', 'oxfordpets', 'aircraft', 'stanfordcars', 'imagenet100'], help='dataset name')
parser.add_argument('--data-root', default=f'data', help='directory to store data')
parser.add_argument('--arch', default='resnet18', type=str, choices=['resnet18', 'resnet50'], help='model architecure')
parser.add_argument("--num_iters_sk", default=3, type=int, help="number of iters for Sinkhorn")
parser.add_argument("--epsilon_sk", default=0.05, type=float, help="epsilon for the Sinkhorn")
parser.add_argument("--temperature", default=0.1, type=float, help="softmax temperature")
parser.add_argument("--imagenet-classes", default=100, type=int, help="number of ImageNet classes")
parser.add_argument('--description', default="default_run", type=str, help='description of the experiment')
parser.add_argument('--no-progress', action='store_true', help="don't use progress bar")
parser.add_argument("--uncr-freq", default=1, type=int, help="frequency of generating uncertainty scores")
parser.add_argument("--threshold", default=0.5, type=float, help="threshold for hard pseudo-labeling")
parser.add_argument("--imb-factor", default=1, type=float, help="imbalance factor of the data, default 1")
args = parser.parse_args()
# Snapshot of the parsed arguments (used e.g. to print the LR each epoch).
state = {k: v for k, v in args._get_kwargs()}
# Use CUDA
# FIX: the original bare expression ``os.environ['CUDA_VISIBLE_DEVICES']``
# had no effect when the variable was set and raised a KeyError when it was
# not; GPU selection is left to the caller's environment instead.
use_cuda = torch.cuda.is_available()
args.data_root = os.path.join(args.data_root, args.dataset)
os.makedirs(args.data_root, exist_ok=True)
# Random seed (``--manualSeed`` defaults to 0, so the None branch only fires
# if a caller explicitly passes ``--manualSeed=None``-like overrides).
if args.manualSeed is None:
    args.manualSeed = random.randint(1, 10000)
# Seed every RNG the training loop draws from, not just numpy, so that the
# ``--manualSeed`` flag actually makes runs reproducible.
random.seed(args.manualSeed)
np.random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
best_acc = 0  # best test accuracy
# Map dataset name -> total number of classes.
if args.dataset == "cifar10":
    args.no_class = 10
elif args.dataset == "cifar100":
    args.no_class = 100
elif args.dataset == "tinyimagenet":
    args.no_class = 200
elif args.dataset == "stanfordcars":
    args.no_class = 196
elif args.dataset == "aircraft":
    args.no_class = 100
elif args.dataset == "oxfordpets":
    args.no_class = 37
elif args.dataset == "imagenet100":
    args.no_class = 100
def main():
    """Entry point: build loaders and models, then run the train/eval loop.

    Reads the module-level ``args``/``state`` parsed above and updates the
    global ``best_acc`` with the best all-classes clustering accuracy so far.
    Checkpoints, tensorboard logs and a text log are written under
    ``args.out/<experiment-name>``.
    """
    global best_acc
    run_started = datetime.today().strftime('%d-%m-%y_%H%M%S')
    args.exp_name = f'dataset_{args.dataset}_arch_{args.arch}_lbl_percent_{args.lbl_percent}_novel_percent_{args.novel_percent}_{args.description}_{run_started}'
    args.out = os.path.join(args.out, args.exp_name)
    os.makedirs(args.out, exist_ok=True)
    # Persist the full CLI configuration next to the run outputs.
    with open(f'{args.out}/parameters.txt', 'a+') as ofile:
        ofile.write(' | '.join(f'{k}={v}' for k, v in vars(args).items()))
    # load dataset
    # no_seen = number of "seen" (labeled) classes; the remainder are novel.
    args.no_seen = args.no_class - int((args.novel_percent*args.no_class)/100)
    dataset_class = get_dataset_class(args)
    train_labeled_dataset, train_unlabeled_dataset, uncr_dataset, test_dataset_all, test_dataset_seen, test_dataset_novel = dataset_class.get_dataset()
    # create dataloaders
    labeled_trainloader = data.DataLoader(train_labeled_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, drop_last=True)
    unlabeled_trainloader = data.DataLoader(train_unlabeled_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, drop_last=True)
    uncr_loader = data.DataLoader(uncr_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
    test_loader_all = data.DataLoader(test_dataset_all, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
    test_loader_seen = data.DataLoader(test_dataset_seen, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
    test_loader_novel = data.DataLoader(test_dataset_novel, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
    # build models (the EMA copy is updated by WeightEMA, not by the optimizer)
    model = build_model(args)
    ema_model = build_model(args, ema=True)
    # Sinkorn-Knopp
    sinkhorn = SinkhornKnopp(args)
    cudnn.benchmark = True
    print('    Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
    optimizer = torch.optim.SGD(model.parameters(),lr=args.lr, momentum=args.momentum, weight_decay=args.wdecay)
    ema_optimizer= WeightEMA(model, ema_model, alpha=args.ema_decay)
    start_epoch = 0
    # Resume
    title = f'ood-{args.dataset}'
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
        args.out = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        ema_model.load_state_dict(checkpoint['ema_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.out, 'log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(args.out, 'log.txt'), title=title)
        logger.set_names(['-Train Loss-', '-Test Acc. Seen-', '-Test Acc. Novel-', '-Test NMI Novel-', '-Test Acc. All-', '-Test NMI All-'])
    writer = SummaryWriter(args.out)
    test_accs = []
    # Train and val
    for epoch in range(start_epoch, args.epochs):
        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
        train_loss = train(args, labeled_trainloader, unlabeled_trainloader, model, optimizer, ema_optimizer, sinkhorn, epoch, use_cuda)
        # Evaluate the EMA model: Hungarian-matched clustering metrics on the
        # all-classes and novel-only splits, plain accuracy on seen classes.
        all_cluster_results = test_cluster(args, test_loader_all, ema_model, epoch)
        novel_cluster_results = test_cluster(args, test_loader_novel, ema_model, epoch, offset=args.no_seen)
        test_acc_seen = test_seen(args, test_loader_seen, ema_model, epoch)
        # Every ``uncr_freq`` epochs, regenerate per-sample scores via
        # uncr_generator and rebuild the training loaders with them
        # (presumably uncertainty-based temperatures — see utils.uncr_util).
        if args.uncr_freq > 0:
            if (epoch+1)%args.uncr_freq == 0:
                temp_uncr = uncr_generator(args, uncr_loader, ema_model)
                train_labeled_dataset, train_unlabeled_dataset = dataset_class.get_dataset(temp_uncr=temp_uncr)
                labeled_trainloader = data.DataLoader(train_labeled_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, drop_last=True)
                unlabeled_trainloader = data.DataLoader(train_unlabeled_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, drop_last=True)
        # "best" is defined by the all-classes clustering accuracy.
        test_acc = all_cluster_results["acc"]
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        print(f'epoch: {epoch}, acc-seen: {test_acc_seen}')
        print(f'epoch: {epoch}, acc-novel: {novel_cluster_results["acc"]}, nmi-novel: {novel_cluster_results["nmi"]}')
        print(f'epoch: {epoch}, acc-all: {all_cluster_results["acc"]}, nmi-all: {all_cluster_results["nmi"]}, best-acc: {best_acc}')
        writer.add_scalar('train/1.train_loss', train_loss, epoch)
        writer.add_scalar('test/1.acc_seen', test_acc_seen, epoch)
        writer.add_scalar('test/2.acc_novel', novel_cluster_results['acc'], epoch)
        writer.add_scalar('test/3.nmi_novel', novel_cluster_results['nmi'], epoch)
        writer.add_scalar('test/4.acc_all', all_cluster_results['acc'], epoch)
        writer.add_scalar('test/5.nmi_all', all_cluster_results['nmi'], epoch)
        # append logger file
        logger.append([train_loss, test_acc_seen, novel_cluster_results['acc'], novel_cluster_results['nmi'], all_cluster_results['acc'], all_cluster_results['nmi']])
        # save model (unwrap DataParallel so checkpoint keys are stable)
        model_to_save = model.module if hasattr(model, "module") else model
        ema_model_to_save = ema_model.module if hasattr(ema_model, "module") else ema_model
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model_to_save.state_dict(),
            'ema_state_dict': ema_model_to_save.state_dict(),
            'acc': test_acc,
            'best_acc': best_acc,
            'optimizer' : optimizer.state_dict(),
        }, is_best, args.out)
        test_accs.append(test_acc)
    logger.close()
    writer.close()
    print('Best acc:')
    print(best_acc)
    print('Mean acc:')
    print(np.mean(test_accs[-20:]))
def train(args, labeled_trainloader, unlabeled_trainloader, model, optimizer, ema_optimizer, sinkhorn, epoch, use_cuda):
    """Run one training epoch of TRSSL.

    Pseudo-labels unlabeled samples with Sinkhorn-Knopp (cross-labeling the
    two augmented views), hardens confident novel-class pseudo-labels, mixes
    labeled and unlabeled batches with mixup, and minimizes a soft
    cross-entropy with per-sample temperatures.

    Returns:
        float: average training loss over the epoch.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    end = time.time()
    bar = Bar('Training', max=args.train_iteration)
    labeled_train_iter = iter(labeled_trainloader)
    unlabeled_train_iter = iter(unlabeled_trainloader)
    model.train()
    for batch_idx in range(args.train_iteration):
        # The epoch length is fixed by args.train_iteration, so exhausted
        # loaders are simply restarted.
        # FIX: use the builtin next(); `iterator.next()` is the Python-2 /
        # pre-1.0-PyTorch API and raises AttributeError on modern PyTorch.
        # FIX: catch only StopIteration instead of a bare `except:` that also
        # swallowed KeyboardInterrupt and real errors.
        try:
            inputs_x, targets_x, _, temp_x = next(labeled_train_iter)
        except StopIteration:
            labeled_train_iter = iter(labeled_trainloader)
            inputs_x, targets_x, _, temp_x = next(labeled_train_iter)
        try:
            (inputs_u, inputs_u2), _, _, temp_u = next(unlabeled_train_iter)
        except StopIteration:
            unlabeled_train_iter = iter(unlabeled_trainloader)
            (inputs_u, inputs_u2), _, _, temp_u = next(unlabeled_train_iter)
        # measure data loading time
        data_time.update(time.time() - end)
        batch_size = inputs_x.size(0)
        # Transform label to one-hot
        targets_x = torch.zeros(batch_size, args.no_class).scatter_(1, targets_x.view(-1,1).long(), 1)
        if use_cuda:
            inputs_x, targets_x = inputs_x.cuda(), targets_x.cuda(non_blocking=True)
            inputs_u, inputs_u2 = inputs_u.cuda(), inputs_u2.cuda()
            temp_x, temp_u = temp_x.cuda(), temp_u.cuda()
        # Normalize classifier weights so logits are cosine similarities
        # (matches the L2-normalized features produced by the backbone).
        with torch.no_grad():
            if torch.cuda.device_count() > 1:
                w = model.module.fc.weight.data.clone()
                w = F.normalize(w, dim=1, p=2)
                model.module.fc.weight.copy_(w)
            else:
                w = model.fc.weight.data.clone()
                w = F.normalize(w, dim=1, p=2)
                model.fc.weight.copy_(w)
        with torch.no_grad():
            # compute guessed labels of unlabel samples
            outputs_u = model(inputs_u)
            outputs_u2 = model(inputs_u2)
            # cross pseudo-labeling: each view is labeled by the other one
            targets_u = sinkhorn(outputs_u2)
            targets_u2 = sinkhorn(outputs_u)
            # generate hard pseudo-labels for confident novel class samples
            targets_u_novel = targets_u[:, args.no_seen:]
            max_pred_novel, _ = torch.max(targets_u_novel, dim=-1)
            hard_novel_idx1 = torch.where(max_pred_novel>=args.threshold)[0]
            targets_u2_novel = targets_u2[:,args.no_seen:]
            max_pred2_novel, _ = torch.max(targets_u2_novel, dim=-1)
            hard_novel_idx2 = torch.where(max_pred2_novel>=args.threshold)[0]
            targets_u[hard_novel_idx1] = targets_u[hard_novel_idx1].ge(args.threshold).float()
            targets_u2[hard_novel_idx2] = targets_u2[hard_novel_idx2].ge(args.threshold).float()
        # mixup over the concatenation of labeled and both unlabeled views;
        # per-sample temperatures are mixed with the same coefficient
        all_inputs = torch.cat([inputs_x, inputs_u, inputs_u2], dim=0)
        all_targets = torch.cat([targets_x, targets_u, targets_u2], dim=0)
        all_temp = torch.cat([temp_x, temp_u, temp_u], dim=0)
        l = np.random.beta(args.alpha, args.alpha)
        idx = torch.randperm(all_inputs.size(0))
        input_a, input_b = all_inputs, all_inputs[idx]
        target_a, target_b = all_targets, all_targets[idx]
        temp_a, temp_b = all_temp, all_temp[idx]
        mixed_input = l * input_a + (1 - l) * input_b
        mixed_target = l * target_a + (1 - l) * target_b
        mixed_temp = l * temp_a + (1 - l) * temp_b
        # interleave labeled and unlabed samples between batches to get correct batchnorm calculation
        mixed_input = list(torch.split(mixed_input, batch_size))
        mixed_input = interleave(mixed_input, batch_size)
        logits = [model(mixed_input[0])]
        for input in mixed_input[1:]:
            logits.append(model(input))
        # put interleaved samples back
        logits = interleave(logits, batch_size)
        logits_x = logits[0]
        logits_u = torch.cat(logits[1:], dim=0)
        logits = torch.cat((logits_x, logits_u), 0)
        # soft cross-entropy with per-sample temperature scaling
        preds = F.log_softmax(logits / mixed_temp.unsqueeze(1), dim=1)
        loss = -torch.mean(torch.sum(mixed_target * preds, dim=1))
        # record loss
        losses.update(loss.item(), inputs_x.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ema_optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # plot progress
        bar.suffix  = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f}'.format(
                    batch=batch_idx + 1,
                    size=args.train_iteration,
                    data=data_time.avg,
                    bt=batch_time.avg,
                    total=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg,
                    )
        bar.next()
    bar.finish()
    return losses.avg
def test_seen(args, test_loader, model, epoch):
    """Plain supervised evaluation on the seen-class test split.

    Returns the average top-1 accuracy (float). Assumes CUDA is available.
    """
    batch_timer = AverageMeter()
    loss_meter = AverageMeter()
    top1_meter = AverageMeter()
    top5_meter = AverageMeter()
    tick = time.time()
    model.eval()
    if not args.no_progress:
        test_loader = tqdm(test_loader)
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(test_loader):
            inputs, targets = inputs.cuda(), targets.cuda()
            outputs = model(inputs)
            loss = F.cross_entropy(outputs, targets)
            prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
            n_samples = inputs.shape[0]
            loss_meter.update(loss.item(), n_samples)
            top1_meter.update(prec1.item(), n_samples)
            top5_meter.update(prec5.item(), n_samples)
            batch_timer.update(time.time() - tick)
            tick = time.time()
            if not args.no_progress:
                test_loader.set_description("test epoch: {epoch}/{epochs:4}. itr: {batch:4}/{iter:4}. btime: {bt:.3f}s. loss: {loss:.4f}. top1: {top1:.2f}. top5: {top5:.2f}. ".format(
                    epoch=epoch + 1,
                    epochs=args.epochs,
                    batch=batch_idx + 1,
                    iter=len(test_loader),
                    bt=batch_timer.avg,
                    loss=loss_meter.avg,
                    top1=top1_meter.avg,
                    top5=top5_meter.avg,
                ))
    if not args.no_progress:
        test_loader.close()
    return top1_meter.avg
def test_cluster(args, test_loader, model, epoch, offset=0):
    """Clustering-style evaluation: collect arg-max predictions over a split
    and score them against ground truth via Hungarian matching.

    ``offset`` shifts the label space (used for the novel-only split).
    Returns the dict produced by ``hungarian_evaluate`` (contains at least
    'acc' and 'nmi').
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    tick = time.time()
    collected_targets = []
    collected_preds = []
    model.eval()
    if not args.no_progress:
        test_loader = tqdm(test_loader)
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(test_loader):
            data_time.update(time.time() - tick)
            inputs, targets = inputs.cuda(), targets.cuda()
            outputs = model(inputs)
            _, max_idx = torch.max(outputs, dim=1)
            collected_preds.extend(max_idx.cpu().numpy().tolist())
            collected_targets.extend(targets.cpu().numpy().tolist())
            batch_time.update(time.time() - tick)
            tick = time.time()
            if not args.no_progress:
                test_loader.set_description("test epoch: {epoch}/{epochs:4}. itr: {batch:4}/{iter:4}. btime: {bt:.3f}s.".format(
                    epoch=epoch + 1,
                    epochs=args.epochs,
                    batch=batch_idx + 1,
                    iter=len(test_loader),
                    bt=batch_time.avg,
                ))
    if not args.no_progress:
        test_loader.close()
    predictions = torch.from_numpy(np.array(collected_preds))
    gt_targets = torch.from_numpy(np.array(collected_targets))
    return hungarian_evaluate(predictions, gt_targets, offset)
class WeightEMA(object):
    """Keeps ``ema_model`` as an exponential moving average of ``model``.

    On construction the raw model is reset to the EMA weights so both copies
    start identical. ``step()`` blends the raw weights into the EMA ones and
    applies a small multiplicative weight decay to the raw float32 tensors.
    """
    def __init__(self, model, ema_model, alpha=0.999):
        self.model = model
        self.ema_model = ema_model
        self.alpha = alpha
        # state_dict values include buffers (e.g. BN running stats), not just
        # trainable parameters — both are averaged.
        self.params = list(model.state_dict().values())
        self.ema_params = list(ema_model.state_dict().values())
        self.wd = 2e-5
        # Start both copies from the EMA weights so they agree at step 0.
        for raw, avg in zip(self.params, self.ema_params):
            raw.data.copy_(avg.data)
    def step(self):
        blend = 1.0 - self.alpha
        for raw, avg in zip(self.params, self.ema_params):
            # Integer buffers (e.g. num_batches_tracked) are left untouched.
            if avg.dtype == torch.float32:
                avg.mul_(self.alpha)
                avg.add_(raw * blend)
                # customized weight decay on the raw weights
                raw.mul_(1 - self.wd)
# Script entry point: run TRSSL training with the CLI arguments parsed above.
if __name__ == '__main__':
    main()
| 19,121 | 40.934211 | 183 | py |
TRSSL | TRSSL-main/models/build_model.py | import torch
def build_model(args, ema=False):
    """Build the ResNet backbone for the configured dataset/architecture.

    Selects a dataset-specific ResNet variant (CIFAR / TinyImageNet / full
    ImageNet-style), wraps it in DataParallel when multiple GPUs are visible,
    and moves it to CUDA.

    Args:
        args: parsed CLI namespace; uses ``dataset``, ``arch`` and ``no_class``.
        ema: if True, detach all parameters — the EMA copy is updated manually
            (by WeightEMA), never by backprop.

    Returns:
        torch.nn.Module: the (possibly DataParallel-wrapped) CUDA model.
    """
    if args.dataset in ['cifar10', 'cifar100']:
        from . import resnet_cifar as models
    elif args.dataset == 'tinyimagenet':
        from . import resnet_tinyimagenet as models
    else:
        from . import resnet as models
    if args.arch == 'resnet18':
        model = models.resnet18(no_class=args.no_class)
    if args.arch == 'resnet50':
        model = models.resnet50(no_class=args.no_class)
    # use dataparallel
    # FIX: the original called ``nn.DataParallel`` but only ``torch`` was
    # imported, raising NameError on any multi-GPU host.
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    model = model.cuda()
    if ema:
        for param in model.parameters():
            param.detach_()
    return model
TRSSL | TRSSL-main/models/resnet.py | import torch
from torch import Tensor
import torch.nn as nn
# from .._internally_replaced_utils import load_state_dict_from_url
from typing import Type, Any, Callable, Union, List, Optional
import torch.nn.functional as F
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-f37072fd.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-b627a593.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-0676ba61.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-63fe2227.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-394f9c45.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """3x3 convolution; padding equals dilation so spatial size is preserved
    at stride 1. Bias-free (a norm layer follows in every ResNet block)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """Pointwise (1x1) convolution, bias-free — used for channel projection
    and downsampling shortcuts."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-3x3-conv residual block used by ResNet-18/34 (torchvision layout)."""
    # Output channels = planes * expansion; BasicBlock does not widen.
    expansion: int = 1
    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # groups/base_width/dilation exist only for signature parity with
        # Bottleneck; BasicBlock supports the defaults only.
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x: Tensor) -> Tensor:
        """conv-bn-relu -> conv-bn, add the (possibly projected) skip, relu."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        # Project the identity when channel count / stride changed.
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual block used by ResNet-50/101/152."""
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    # Output channels = planes * 4.
    expansion: int = 4
    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Inner width scales with base_width/groups (ResNeXt / wide variants).
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x: Tensor) -> Tensor:
        """Three conv-bn stages (relu after the first two), skip add, relu."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        # Project the identity when channel count / stride changed.
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """Torchvision-style ResNet backbone, modified for TRSSL: the classifier
    head is a bias-free linear layer applied to L2-normalized features, so
    logits behave like (scaled) cosine similarities.

    Note: ``no_class`` replaces torchvision's ``num_classes`` argument.
    """
    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        no_class: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7/2 conv + maxpool (full ImageNet-style input resolution).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Bias-free head: logits = fc(normalized features), see _forward_impl.
        self.fc = nn.Linear(512 * block.expansion, no_class, bias=False)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]
    def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
                    stride: int = 1, dilate: bool = False) -> nn.Sequential:
        """Stack ``blocks`` residual blocks; only the first one may stride /
        project (the 1x1 downsample) — the rest keep shape."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade the stride for dilation (keeps spatial resolution).
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        # L2-normalize the pooled features before the (bias-free) classifier —
        # TRSSL-specific deviation from stock torchvision.
        x = F.normalize(x)
        x = self.fc(x)
        return x
    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
def _resnet(
    arch: str,
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    pretrained: bool,
    progress: bool,
    **kwargs: Any
) -> ResNet:
    """Shared factory behind the resnetXX/resnextXX/wide_resnetXX helpers.

    ``arch``, ``pretrained`` and ``progress`` are currently unused: the
    pretrained-weight download below is commented out, so the model is always
    built from scratch.
    """
    # if pretrained:
    #     state_dict = load_state_dict_from_url(model_urls[arch],
    #                                           progress=progress)
    #     model.load_state_dict(state_dict)
    return ResNet(block, layers, **kwargs)
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-18 (`Deep Residual Learning for Image Recognition
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_depths = [2, 2, 2, 2]
    return _resnet('resnet18', BasicBlock, stage_depths, pretrained, progress, **kwargs)
def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-34 (`Deep Residual Learning for Image Recognition
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_depths = [3, 4, 6, 3]
    return _resnet('resnet34', BasicBlock, stage_depths, pretrained, progress, **kwargs)
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-50 (`Deep Residual Learning for Image Recognition
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_depths = [3, 4, 6, 3]
    return _resnet('resnet50', Bottleneck, stage_depths, pretrained, progress, **kwargs)
def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-101 (`Deep Residual Learning for Image Recognition
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_depths = [3, 4, 23, 3]
    return _resnet('resnet101', Bottleneck, stage_depths, pretrained, progress, **kwargs)
def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-152 (`Deep Residual Learning for Image Recognition
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_depths = [3, 8, 36, 3]
    return _resnet('resnet152', Bottleneck, stage_depths, pretrained, progress, **kwargs)
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNeXt-50 32x4d (`Aggregated Residual Transformation for Deep Neural
    Networks <https://arxiv.org/pdf/1611.05431.pdf>`_).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # 32 groups of width 4 inside each bottleneck.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNeXt-101 32x8d (`Aggregated Residual Transformation for Deep Neural
    Networks <https://arxiv.org/pdf/1611.05431.pdf>`_).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # 32 groups of width 8 inside each bottleneck.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Wide ResNet-50-2 (`Wide Residual Networks
    <https://arxiv.org/pdf/1605.07146.pdf>`_).

    Same as ResNet-50 except the bottleneck's inner (3x3) channel count is
    doubled; the outer 1x1 channel counts are unchanged (e.g. the last block
    is 2048-1024-2048 instead of 2048-512-2048).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Doubling width_per_group (64 -> 128) widens every bottleneck's 3x3 conv.
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs) | 15,539 | 38.846154 | 111 | py |
TRSSL | TRSSL-main/models/resnet_tinyimagenet.py | """
This code is based on the Torchvision repository, which was licensed under the BSD 3-Clause.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two-convolution residual block. When ``is_last`` is set, the
    pre-activation sum is returned alongside the activated output."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        # Two 3x3 convolutions; the first performs the (optional) downsampling.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection shortcut only when spatial size or channel count changes.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out)) + residual
        preact = out
        out = F.relu(out)
        return (out, preact) if self.is_last else out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block; output width is
    ``planes * expansion``. When ``is_last`` is set, the pre-activation
    sum is returned alongside the activated output."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # The 3x3 convolution carries the (optional) spatial downsampling.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        # Projection shortcut only when spatial size or channel count changes.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out)) + residual
        preact = out
        out = F.relu(out)
        return (out, preact) if self.is_last else out
class ResNet(nn.Module):
    """ResNet backbone (Tiny-ImageNet variant: 3x3 stem followed by a
    3x3/stride-2 max-pool). ``forward`` returns the logits of a bias-free
    linear classifier applied to L2-normalised pooled features.

    Args:
        block: residual block class (``BasicBlock`` or ``Bottleneck``).
        num_blocks: blocks per stage, e.g. ``[2, 2, 2, 2]`` for ResNet-18.
        no_class: number of output classes.
        in_channel: number of input image channels.
        zero_init_residual: zero the last BN of each residual branch so every
            block starts as identity (see https://arxiv.org/abs/1706.02677).
    """

    def __init__(self, block, num_blocks, no_class=10, in_channel=3, zero_init_residual=False):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(in_channel, 64, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, no_class, bias=False)
        # Kaiming init for convs, constant init for normalisation layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest use stride 1.
        layers = []
        for block_stride in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, block_stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        feat = F.relu(self.bn1(self.conv1(x)))
        feat = F.max_pool2d(feat, kernel_size=3, stride=2, padding=1)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            feat = stage(feat)
        feat = torch.flatten(self.avgpool(feat), 1)
        return self.fc(F.normalize(feat))
def resnet18(**kwargs):
    # ResNet-18: BasicBlock with (2, 2, 2, 2) blocks per stage.
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet50(**kwargs):
    # ResNet-50: Bottleneck with (3, 4, 6, 3) blocks per stage.
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
| 5,141 | 37.088889 | 104 | py |
TRSSL | TRSSL-main/models/resnet_cifar.py | """
This code is based on the Torchvision repository, which was licensed under the BSD 3-Clause.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two-convolution residual block. When ``is_last`` is set, the
    pre-activation sum is returned alongside the activated output."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        # Two 3x3 convolutions; the first performs the (optional) downsampling.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection shortcut only when spatial size or channel count changes.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out)) + residual
        preact = out
        out = F.relu(out)
        return (out, preact) if self.is_last else out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block; output width is
    ``planes * expansion``. When ``is_last`` is set, the pre-activation
    sum is returned alongside the activated output."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # The 3x3 convolution carries the (optional) spatial downsampling.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        # Projection shortcut only when spatial size or channel count changes.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out)) + residual
        preact = out
        out = F.relu(out)
        return (out, preact) if self.is_last else out
class ResNet(nn.Module):
    """ResNet backbone for small (CIFAR-style) inputs: 3x3 stem and no
    stem max-pool. ``forward`` returns the logits of a bias-free linear
    classifier applied to L2-normalised pooled features.

    Args:
        block: residual block class (``BasicBlock`` or ``Bottleneck``).
        num_blocks: blocks per stage, e.g. ``[2, 2, 2, 2]`` for ResNet-18.
        no_class: number of output classes.
        in_channel: number of input image channels.
        zero_init_residual: zero the last BN of each residual branch so every
            block starts as identity (see https://arxiv.org/abs/1706.02677).
    """

    def __init__(self, block, num_blocks, no_class=10, in_channel=3, zero_init_residual=False):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(in_channel, 64, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, no_class, bias=False)
        # Kaiming init for convs, constant init for normalisation layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest use stride 1.
        layers = []
        for block_stride in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, block_stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        feat = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            feat = stage(feat)
        feat = torch.flatten(self.avgpool(feat), 1)
        return self.fc(F.normalize(feat))
def resnet18(**kwargs):
    # ResNet-18: BasicBlock with (2, 2, 2, 2) blocks per stage.
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet50(**kwargs):
    # ResNet-50: Bottleneck with (3, 4, 6, 3) blocks per stage.
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
| 5,073 | 36.865672 | 104 | py |
TRSSL | TRSSL-main/datasets/datasets.py | import numpy as np
from PIL import Image, ImageFilter, ImageOps
import random
from torchvision import datasets, transforms
import torch
import pickle
import os
import math
# normalization parameters
# Per-channel (mean, std) statistics consumed by transforms.Normalize below.
cifar10_mean, cifar10_std = (0.4914, 0.4822, 0.4465), (0.2471, 0.2435, 0.2616)
cifar100_mean, cifar100_std = (0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)
tinyimagenet_mean, tinyimagenet_std = (0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262)
imgnet_mean, imgnet_std = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
def get_dataset_class(args):
    """Return the dataset wrapper matching ``args.dataset``.

    Args:
        args: parsed argument namespace; ``args.dataset`` selects the wrapper
            and the remaining attributes configure it.

    Raises:
        ValueError: if ``args.dataset`` names an unsupported dataset
            (previously this fell through and silently returned ``None``).
    """
    if args.dataset == 'cifar10':
        return cifar10_dataset(args)
    elif args.dataset == 'cifar100':
        return cifar100_dataset(args)
    elif args.dataset == 'tinyimagenet':
        return tinyimagenet_dataset(args)
    elif args.dataset in ['aircraft', 'stanfordcars', 'oxfordpets']:
        return generic224_dataset(args)
    elif args.dataset == 'imagenet100':
        return imagenet100_dataset(args)
    raise ValueError(f"unknown dataset: {args.dataset!r}")
def x_u_split_seen_novel(labels, lbl_percent, num_classes, lbl_set, unlbl_set, imb_factor):
    """Split sample indices into labeled/unlabeled pools over seen/novel classes.

    Each class keeps an exponentially decaying share of its samples
    (long-tailed imbalance controlled by ``imb_factor``); seen classes
    (``lbl_set``) contribute ``lbl_percent`` percent of their kept samples to
    the labeled pool and the rest to the unlabeled pool, while novel classes
    (``unlbl_set``) are entirely unlabeled. Returns ``(labeled_idx,
    unlabeled_idx)`` as lists of indices into ``labels``.
    """
    labels = np.array(labels)
    labeled_idx, unlabeled_idx = [], []
    for cls in range(num_classes):
        cls_idx = np.where(labels == cls)[0]
        np.random.shuffle(cls_idx)
        # Exponential per-class budget: class 0 keeps everything, the last
        # class keeps a 1/imb_factor fraction.
        budget = len(cls_idx) * ((1 / imb_factor) ** (cls / (num_classes - 1.0)))
        cls_idx = cls_idx[:int(budget)]
        n_lbl = math.ceil(len(cls_idx) * (lbl_percent / 100))
        if cls in lbl_set:
            labeled_idx.extend(cls_idx[:n_lbl])
            unlabeled_idx.extend(cls_idx[n_lbl:])
        elif cls in unlbl_set:
            unlabeled_idx.extend(cls_idx)
    return labeled_idx, unlabeled_idx
class cifar10_dataset():
    """CIFAR-10 split builder for open-world semi-supervised learning.

    ``__init__`` builds labeled/unlabeled index splits over the seen/novel
    class partition (classes [0, no_seen) are seen, the rest novel), with
    optional class imbalance; :meth:`get_dataset` materialises the datasets.
    """
    def __init__(self, args):
        # augmentations
        self.transform_train = transforms.Compose([
            transforms.RandomChoice([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomResizedCrop(32, (0.5, 1.0)),
            ]),
            transforms.RandomHorizontalFlip(),
            transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.6),
            Solarize(p=0.1),
            Equalize(p=0.1),
            transforms.ToTensor(),
            transforms.Normalize(cifar10_mean, cifar10_std),
        ])
        self.transform_val = transforms.Compose([
            transforms.CenterCrop(32),
            transforms.ToTensor(),
            transforms.Normalize(mean=cifar10_mean, std=cifar10_std)
        ])
        base_dataset = datasets.CIFAR10(args.data_root, train=True, download=True)
        train_labeled_idxs, train_unlabeled_idxs = x_u_split_seen_novel(base_dataset.targets, args.lbl_percent, args.no_class, list(range(0,args.no_seen)), list(range(args.no_seen, args.no_class)), args.imb_factor)
        self.train_labeled_idxs = train_labeled_idxs
        self.train_unlabeled_idxs = train_unlabeled_idxs
        self.temperature = args.temperature
        self.data_root = args.data_root
        self.no_seen = args.no_seen
        self.no_class = args.no_class
    def get_dataset(self, temp_uncr=None):
        """Build the train/eval datasets.

        With ``temp_uncr`` given, only the two training datasets (with the
        per-sample temperature overrides applied) are returned; otherwise
        also the ten-view uncertainty dataset and the all/seen/novel test
        datasets.
        """
        train_labeled_idxs = self.train_labeled_idxs.copy()
        train_unlabeled_idxs = self.train_unlabeled_idxs.copy()
        train_labeled_dataset = CIFAR10SSL(self.data_root, train_labeled_idxs, train=True, transform=self.transform_train, temperature=self.temperature)
        train_unlabeled_dataset = CIFAR10SSL(self.data_root, train_unlabeled_idxs, train=True, transform=TransformTwice(self.transform_train), temperature=self.temperature, temp_uncr=temp_uncr)
        if temp_uncr is not None:
            return train_labeled_dataset, train_unlabeled_dataset
        train_uncr_dataset = CIFAR10SSL_UNCR(self.data_root, train_unlabeled_idxs, train=True, transform=self.transform_train)
        test_dataset_seen = CIFAR10SSL_TEST(self.data_root, train=False, transform=self.transform_val, download=False, labeled_set=list(range(0,self.no_seen)))
        test_dataset_novel = CIFAR10SSL_TEST(self.data_root, train=False, transform=self.transform_val, download=False, labeled_set=list(range(self.no_seen, self.no_class)))
        test_dataset_all = CIFAR10SSL_TEST(self.data_root, train=False, transform=self.transform_val, download=False)
        return train_labeled_dataset, train_unlabeled_dataset, train_uncr_dataset, test_dataset_all, test_dataset_seen, test_dataset_novel
class cifar100_dataset():
    """CIFAR-100 split builder for open-world semi-supervised learning.

    Same structure as ``cifar10_dataset`` but backed by torchvision's
    CIFAR100 and the CIFAR-100 normalization statistics.
    """
    def __init__(self, args):
        # augmentations
        self.transform_train = transforms.Compose([
            transforms.RandomChoice([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomResizedCrop(32, (0.5, 1.0)),
            ]),
            transforms.RandomHorizontalFlip(),
            transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.6),
            Solarize(p=0.1),
            Equalize(p=0.1),
            transforms.ToTensor(),
            transforms.Normalize(cifar100_mean, cifar100_std),
        ])
        self.transform_val = transforms.Compose([
            transforms.CenterCrop(32),
            transforms.ToTensor(),
            transforms.Normalize(mean=cifar100_mean, std=cifar100_std)
        ])
        base_dataset = datasets.CIFAR100(args.data_root, train=True, download=True)
        train_labeled_idxs, train_unlabeled_idxs = x_u_split_seen_novel(base_dataset.targets, args.lbl_percent, args.no_class, list(range(0,args.no_seen)), list(range(args.no_seen, args.no_class)), args.imb_factor)
        self.train_labeled_idxs = train_labeled_idxs
        self.train_unlabeled_idxs = train_unlabeled_idxs
        self.temperature = args.temperature
        self.data_root = args.data_root
        self.no_seen = args.no_seen
        self.no_class = args.no_class
    def get_dataset(self, temp_uncr=None):
        """Build the train/eval datasets; with ``temp_uncr`` only the two
        training datasets (with temperature overrides applied) are built."""
        train_labeled_idxs = self.train_labeled_idxs.copy()
        train_unlabeled_idxs = self.train_unlabeled_idxs.copy()
        train_labeled_dataset = CIFAR100SSL(self.data_root, train_labeled_idxs, train=True, transform=self.transform_train, temperature=self.temperature)
        train_unlabeled_dataset = CIFAR100SSL(self.data_root, train_unlabeled_idxs, train=True, transform=TransformTwice(self.transform_train), temperature=self.temperature, temp_uncr=temp_uncr)
        if temp_uncr is not None:
            return train_labeled_dataset, train_unlabeled_dataset
        train_uncr_dataset = CIFAR100SSL_UNCR(self.data_root, train_unlabeled_idxs, train=True, transform=self.transform_train)
        test_dataset_seen = CIFAR100SSL_TEST(self.data_root, train=False, transform=self.transform_val, download=False, labeled_set=list(range(0,self.no_seen)))
        test_dataset_novel = CIFAR100SSL_TEST(self.data_root, train=False, transform=self.transform_val, download=False, labeled_set=list(range(self.no_seen, self.no_class)))
        test_dataset_all = CIFAR100SSL_TEST(self.data_root, train=False, transform=self.transform_val, download=False)
        return train_labeled_dataset, train_unlabeled_dataset, train_uncr_dataset, test_dataset_all, test_dataset_seen, test_dataset_novel
class tinyimagenet_dataset():
    """Tiny-ImageNet split builder for open-world semi-supervised learning.

    Fix: ``get_dataset`` previously referenced ``args``, which is only a
    parameter of ``__init__``, so calling it raised ``NameError``. It now
    uses the values stored on ``self``.
    """
    def __init__(self, args):
        # augmentations
        self.transform_train = transforms.Compose([
            transforms.RandomChoice([
                transforms.RandomCrop(64, padding=8),
                transforms.RandomResizedCrop(64, (0.5, 1.0)),
            ]),
            transforms.RandomHorizontalFlip(),
            transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.5),
            transforms.RandomGrayscale(p=0.2),
            transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.2),
            transforms.ToTensor(),
            transforms.Normalize(tinyimagenet_mean, tinyimagenet_std),
        ])
        self.transform_val = transforms.Compose([
            transforms.CenterCrop(64),
            transforms.ToTensor(),
            transforms.Normalize(mean=tinyimagenet_mean, std=tinyimagenet_std)
        ])
        base_dataset = datasets.ImageFolder(os.path.join(args.data_root, 'train'))
        # ImageFolder.imgs is a list of (path, class-id); pull out the labels.
        base_dataset_targets = np.array(base_dataset.imgs)
        base_dataset_targets = base_dataset_targets[:,1]
        base_dataset_targets= list(map(int, base_dataset_targets.tolist()))
        train_labeled_idxs, train_unlabeled_idxs = x_u_split_seen_novel(base_dataset_targets, args.lbl_percent, args.no_class, list(range(0,args.no_seen)), list(range(args.no_seen, args.no_class)), args.imb_factor)
        self.train_labeled_idxs = train_labeled_idxs
        self.train_unlabeled_idxs = train_unlabeled_idxs
        self.temperature = args.temperature
        self.data_root = args.data_root
        self.no_seen = args.no_seen
        self.no_class = args.no_class
    def get_dataset(self, temp_uncr=None):
        """Build the train/eval datasets; with ``temp_uncr`` only the two
        training datasets (with temperature overrides applied) are built."""
        train_labeled_idxs = self.train_labeled_idxs.copy()
        train_unlabeled_idxs = self.train_unlabeled_idxs.copy()
        train_root = os.path.join(self.data_root, 'train')
        test_root = os.path.join(self.data_root, 'test')
        train_labeled_dataset = GenericSSL(train_root, train_labeled_idxs, transform=self.transform_train, temperature=self.temperature)
        train_unlabeled_dataset = GenericSSL(train_root, train_unlabeled_idxs, transform=TransformTwice(self.transform_train), temperature=self.temperature, temp_uncr=temp_uncr)
        if temp_uncr is not None:
            return train_labeled_dataset, train_unlabeled_dataset
        train_uncr_dataset = GenericUNCR(train_root, train_unlabeled_idxs, transform=self.transform_train)
        test_dataset_seen = GenericTEST(test_root, no_class=self.no_class, transform=self.transform_val, labeled_set=list(range(0, self.no_seen)))
        test_dataset_novel = GenericTEST(test_root, no_class=self.no_class, transform=self.transform_val, labeled_set=list(range(self.no_seen, self.no_class)))
        test_dataset_all = GenericTEST(test_root, no_class=self.no_class, transform=self.transform_val)
        return train_labeled_dataset, train_unlabeled_dataset, train_uncr_dataset, test_dataset_all, test_dataset_seen, test_dataset_novel
class generic224_dataset():
    """Split builder for 224x224 ImageFolder datasets (aircraft,
    stanfordcars, oxfordpets).

    Fix: ``get_dataset`` previously referenced ``args``, which is only a
    parameter of ``__init__``, so calling it raised ``NameError``. It now
    uses the values stored on ``self``.
    """
    def __init__(self, args):
        # augmentations
        self.transform_train = transforms.Compose([
            transforms.RandomResizedCrop(224, (0.5, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.5),
            transforms.RandomGrayscale(p=0.2),
            transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.2),
            transforms.ToTensor(),
            transforms.Normalize(imgnet_mean, imgnet_std),
        ])
        self.transform_val = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=imgnet_mean, std=imgnet_std)
        ])
        base_dataset = datasets.ImageFolder(os.path.join(args.data_root, 'train'))
        # ImageFolder.imgs is a list of (path, class-id); pull out the labels.
        base_dataset_targets = np.array(base_dataset.imgs)
        base_dataset_targets = base_dataset_targets[:,1]
        base_dataset_targets= list(map(int, base_dataset_targets.tolist()))
        train_labeled_idxs, train_unlabeled_idxs = x_u_split_seen_novel(base_dataset_targets, args.lbl_percent, args.no_class, list(range(0,args.no_seen)), list(range(args.no_seen, args.no_class)), args.imb_factor)
        self.train_labeled_idxs = train_labeled_idxs
        self.train_unlabeled_idxs = train_unlabeled_idxs
        self.temperature = args.temperature
        self.data_root = args.data_root
        self.no_seen = args.no_seen
        self.no_class = args.no_class
    def get_dataset(self, temp_uncr=None):
        """Build the train/eval datasets; with ``temp_uncr`` only the two
        training datasets (with temperature overrides applied) are built."""
        train_labeled_idxs = self.train_labeled_idxs.copy()
        train_unlabeled_idxs = self.train_unlabeled_idxs.copy()
        train_root = os.path.join(self.data_root, 'train')
        test_root = os.path.join(self.data_root, 'test')
        train_labeled_dataset = GenericSSL(train_root, train_labeled_idxs, transform=self.transform_train, temperature=self.temperature)
        train_unlabeled_dataset = GenericSSL(train_root, train_unlabeled_idxs, transform=TransformTwice(self.transform_train), temperature=self.temperature, temp_uncr=temp_uncr)
        if temp_uncr is not None:
            return train_labeled_dataset, train_unlabeled_dataset
        train_uncr_dataset = GenericUNCR(train_root, train_unlabeled_idxs, transform=self.transform_train)
        test_dataset_seen = GenericTEST(test_root, no_class=self.no_class, transform=self.transform_val, labeled_set=list(range(0, self.no_seen)))
        test_dataset_novel = GenericTEST(test_root, no_class=self.no_class, transform=self.transform_val, labeled_set=list(range(self.no_seen, self.no_class)))
        test_dataset_all = GenericTEST(test_root, no_class=self.no_class, transform=self.transform_val)
        return train_labeled_dataset, train_unlabeled_dataset, train_uncr_dataset, test_dataset_all, test_dataset_seen, test_dataset_novel
class imagenet100_dataset():
    """ImageNet-100 split builder (stronger crop augmentation, scale 0.2-1.0).

    Fix: ``get_dataset`` previously referenced ``args``, which is only a
    parameter of ``__init__``, so calling it raised ``NameError``. It now
    uses the values stored on ``self``.
    """
    def __init__(self, args):
        # augmentations
        self.transform_train = transforms.Compose([
            transforms.RandomResizedCrop(224, (0.2, 1.0)), #stronger augmnetation
            transforms.RandomHorizontalFlip(),
            transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.5),
            transforms.RandomGrayscale(p=0.2),
            transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.2),
            transforms.ToTensor(),
            transforms.Normalize(imgnet_mean, imgnet_std),
        ])
        self.transform_val = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=imgnet_mean, std=imgnet_std)
        ])
        base_dataset = datasets.ImageFolder(os.path.join(args.data_root, 'train'))
        # ImageFolder.imgs is a list of (path, class-id); pull out the labels.
        base_dataset_targets = np.array(base_dataset.imgs)
        base_dataset_targets = base_dataset_targets[:,1]
        base_dataset_targets= list(map(int, base_dataset_targets.tolist()))
        train_labeled_idxs, train_unlabeled_idxs = x_u_split_seen_novel(base_dataset_targets, args.lbl_percent, args.no_class, list(range(0,args.no_seen)), list(range(args.no_seen, args.no_class)), args.imb_factor)
        self.train_labeled_idxs = train_labeled_idxs
        self.train_unlabeled_idxs = train_unlabeled_idxs
        self.temperature = args.temperature
        self.data_root = args.data_root
        self.no_seen = args.no_seen
        self.no_class = args.no_class
    def get_dataset(self, temp_uncr=None):
        """Build the train/eval datasets; with ``temp_uncr`` only the two
        training datasets (with temperature overrides applied) are built."""
        train_labeled_idxs = self.train_labeled_idxs.copy()
        train_unlabeled_idxs = self.train_unlabeled_idxs.copy()
        train_root = os.path.join(self.data_root, 'train')
        test_root = os.path.join(self.data_root, 'test')
        train_labeled_dataset = GenericSSL(train_root, train_labeled_idxs, transform=self.transform_train, temperature=self.temperature)
        train_unlabeled_dataset = GenericSSL(train_root, train_unlabeled_idxs, transform=TransformTwice(self.transform_train), temperature=self.temperature, temp_uncr=temp_uncr)
        if temp_uncr is not None:
            return train_labeled_dataset, train_unlabeled_dataset
        train_uncr_dataset = GenericUNCR(train_root, train_unlabeled_idxs, transform=self.transform_train)
        test_dataset_seen = GenericTEST(test_root, no_class=self.no_class, transform=self.transform_val, labeled_set=list(range(0, self.no_seen)))
        test_dataset_novel = GenericTEST(test_root, no_class=self.no_class, transform=self.transform_val, labeled_set=list(range(self.no_seen, self.no_class)))
        test_dataset_all = GenericTEST(test_root, no_class=self.no_class, transform=self.transform_val)
        return train_labeled_dataset, train_unlabeled_dataset, train_uncr_dataset, test_dataset_all, test_dataset_seen, test_dataset_novel
class TransformTwice:
    """Wrap a transform so each call yields two independent outputs
    (two stochastic views of the same input)."""

    def __init__(self, transform):
        self.transform = transform

    def __call__(self, inp):
        # The wrapped transform is applied twice so random augmentations differ.
        return self.transform(inp), self.transform(inp)
class CIFAR10SSL(datasets.CIFAR10):
    """CIFAR-10 train split restricted to ``indexs``; each item also carries
    its original dataset index and a per-sample temperature.

    Args:
        root: dataset root passed to torchvision's CIFAR10.
        indexs: indices of the samples to keep (None keeps all).
        temperature: initial temperature assigned to every sample.
        temp_uncr: optional dict with 'index' and 'uncr' entries overriding
            the temperature of selected samples before subsetting.
    """
    def __init__(self, root, indexs, temperature=None, temp_uncr=None, train=True,
                 transform=None, target_transform=None,
                 download=True):
        super().__init__(root, train=train,
                         transform=transform,
                         target_transform=target_transform,
                         download=download)
        self.targets = np.array(self.targets)
        # One temperature per sample; defaults to 1 when none is given.
        if temperature is not None:
            self.temp = temperature*np.ones(len(self.targets))
        else:
            self.temp = np.ones(len(self.targets))
        # Overrides are applied BEFORE subsetting, so temp_uncr indices refer
        # to the full training set.
        if temp_uncr is not None:
            self.temp[temp_uncr['index']] = temp_uncr['uncr']
        if indexs is not None:
            indexs = np.array(indexs)
            self.data = self.data[indexs]
            self.targets = np.array(self.targets)[indexs]
            self.temp = self.temp[indexs]
            self.indexs = indexs
        else:
            self.indexs = np.arange(len(self.targets))
    def __getitem__(self, index):
        # Returns (image, target, original-index, temperature).
        img, target = self.data[index], self.targets[index]
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target, self.indexs[index], self.temp[index]
class CIFAR10SSL_TEST(datasets.CIFAR10):
    """CIFAR-10 test split, optionally restricted to the classes listed in
    ``labeled_set`` (used to evaluate seen/novel/all subsets separately)."""
    def __init__(self, root, train=False,
                 transform=None, target_transform=None,
                 download=True, labeled_set=None):
        super().__init__(root, train=train,
                         transform=transform,
                         target_transform=target_transform,
                         download=download)
        self.targets = np.array(self.targets)
        indexs = []
        # Keep only samples whose class is in labeled_set (None keeps all).
        if labeled_set is not None:
            for i in range(10):
                idx = np.where(self.targets == i)[0]
                if i in labeled_set:
                    indexs.extend(idx)
            indexs = np.array(indexs)
            self.data = self.data[indexs]
            self.targets = np.array(self.targets)[indexs]
    def __getitem__(self, index):
        # Returns (image, target) like a plain torchvision dataset.
        img, target = self.data[index], self.targets[index]
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
class CIFAR10SSL_UNCR(datasets.CIFAR10):
    """CIFAR-10 subset that yields ten independent augmentations per image
    (plus target and original index), used for uncertainty estimation over
    unlabeled samples. ``transform`` must be provided.
    """
    def __init__(self, root, indexs, train=True,
                 transform=None, target_transform=None,
                 download=True):
        super().__init__(root, train=train,
                         transform=transform,
                         target_transform=target_transform,
                         download=download)
        self.targets = np.array(self.targets)
        if indexs is not None:
            indexs = np.array(indexs)
            self.data = self.data[indexs]
            self.targets = self.targets[indexs]
            self.indexs = indexs
        else:
            self.indexs = np.arange(len(self.targets))
    def __getitem__(self, index):
        img, target = self.data[index], self.targets[index]
        img = Image.fromarray(img)
        # Ten stochastic views of the same image (replaces ten copy-pasted
        # self.transform(img) lines); the 12-tuple layout is unchanged.
        views = tuple(self.transform(img) for _ in range(10))
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (*views, target, self.indexs[index])
class CIFAR100SSL(datasets.CIFAR100):
    """CIFAR-100 train split restricted to ``indexs``; each item also carries
    its original dataset index and a per-sample temperature.

    Args:
        root: dataset root passed to torchvision's CIFAR100.
        indexs: indices of the samples to keep (None keeps all).
        temperature: initial temperature assigned to every sample.
        temp_uncr: optional dict with 'index' and 'uncr' entries overriding
            the temperature of selected samples before subsetting.
    """
    def __init__(self, root, indexs, temperature=None, temp_uncr=None, train=True,
                 transform=None, target_transform=None,
                 download=False):
        super().__init__(root, train=train,
                         transform=transform,
                         target_transform=target_transform,
                         download=download)
        self.targets = np.array(self.targets)
        # One temperature per sample; defaults to 1 when none is given.
        if temperature is not None:
            self.temp = temperature*np.ones(len(self.targets))
        else:
            self.temp = np.ones(len(self.targets))
        # Overrides are applied BEFORE subsetting, so temp_uncr indices refer
        # to the full training set.
        if temp_uncr is not None:
            self.temp[temp_uncr['index']] = temp_uncr['uncr']
        if indexs is not None:
            indexs = np.array(indexs)
            self.data = self.data[indexs]
            self.targets = np.array(self.targets)[indexs]
            self.temp = self.temp[indexs]
            self.indexs = indexs
        else:
            self.indexs = np.arange(len(self.targets))
    def __getitem__(self, index):
        # Returns (image, target, original-index, temperature).
        img, target = self.data[index], self.targets[index]
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target, self.indexs[index], self.temp[index]
class CIFAR100SSL_TEST(datasets.CIFAR100):
    """CIFAR-100 test split, optionally restricted to the classes listed in
    ``labeled_set`` (used to evaluate seen/novel/all subsets separately)."""
    def __init__(self, root, train=False,
                 transform=None, target_transform=None,
                 download=False, labeled_set=None):
        super().__init__(root, train=train,
                         transform=transform,
                         target_transform=target_transform,
                         download=download)
        self.targets = np.array(self.targets)
        indexs = []
        # Keep only samples whose class is in labeled_set (None keeps all).
        if labeled_set is not None:
            for i in range(100):
                idx = np.where(self.targets == i)[0]
                if i in labeled_set:
                    indexs.extend(idx)
            indexs = np.array(indexs)
            self.data = self.data[indexs]
            self.targets = np.array(self.targets)[indexs]
    def __getitem__(self, index):
        # Returns (image, target) like a plain torchvision dataset.
        img, target = self.data[index], self.targets[index]
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
class CIFAR100SSL_UNCR(datasets.CIFAR100):
    """CIFAR-100 subset that yields ten independent augmentations per image
    (plus target and original index), used for uncertainty estimation over
    unlabeled samples. ``transform`` must be provided.
    """
    def __init__(self, root, indexs, train=True,
                 transform=None, target_transform=None,
                 download=True):
        super().__init__(root, train=train,
                         transform=transform,
                         target_transform=target_transform,
                         download=download)
        self.targets = np.array(self.targets)
        if indexs is not None:
            indexs = np.array(indexs)
            self.data = self.data[indexs]
            self.targets = self.targets[indexs]
            self.indexs = indexs
        else:
            self.indexs = np.arange(len(self.targets))
    def __getitem__(self, index):
        img, target = self.data[index], self.targets[index]
        img = Image.fromarray(img)
        # Ten stochastic views of the same image (replaces ten copy-pasted
        # self.transform(img) lines); the 12-tuple layout is unchanged.
        views = tuple(self.transform(img) for _ in range(10))
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (*views, target, self.indexs[index])
class GenericSSL(datasets.ImageFolder):
    """ImageFolder-backed train split restricted to ``indexs``; each item also
    carries its original dataset index and a per-sample temperature.

    Args:
        root: ImageFolder root directory.
        indexs: indices of the samples to keep (None keeps all).
        temperature: initial temperature assigned to every sample.
        temp_uncr: optional dict with 'index' and 'uncr' entries overriding
            the temperature of selected samples before subsetting.
    """
    def __init__(self, root, indexs, temperature=None, temp_uncr=None,
                 transform=None, target_transform=None):
        super().__init__(root, transform=transform, target_transform=target_transform)
        # ImageFolder.imgs is a list of (path, class-id); split into
        # file paths (self.data) and integer labels (self.targets).
        self.imgs = np.array(self.imgs)
        self.targets = self.imgs[:, 1]
        self.targets= list(map(int, self.targets.tolist()))
        self.data = np.array(self.imgs[:, 0])
        self.targets = np.array(self.targets)
        # One temperature per sample; defaults to 1 when none is given.
        if temperature is not None:
            self.temp = temperature*np.ones(len(self.targets))
        else:
            self.temp = np.ones(len(self.targets))
        # Overrides are applied BEFORE subsetting, so temp_uncr indices refer
        # to the full training set.
        if temp_uncr is not None:
            self.temp[temp_uncr['index']] = temp_uncr['uncr']
        if indexs is not None:
            indexs = np.array(indexs)
            self.data = self.data[indexs]
            self.targets = np.array(self.targets)[indexs]
            self.temp = self.temp[indexs]
            self.indexs = indexs
        else:
            self.indexs = np.arange(len(self.targets))
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        # Returns (image, target, original-index, temperature).
        img, target = self.data[index], self.targets[index]
        img = self.loader(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target, self.indexs[index], self.temp[index]
class GenericTEST(datasets.ImageFolder):
    """ImageFolder-backed test split, optionally restricted to the classes
    listed in ``labeled_set`` (used for seen/novel/all evaluation)."""
    def __init__(self, root, transform=None, target_transform=None, labeled_set=None, no_class=200):
        super().__init__(root, transform=transform, target_transform=target_transform)
        # ImageFolder.imgs is a list of (path, class-id); split into
        # file paths (self.data) and integer labels (self.targets).
        self.imgs = np.array(self.imgs)
        self.targets = self.imgs[:, 1]
        self.targets= list(map(int, self.targets.tolist()))
        self.data = np.array(self.imgs[:, 0])
        self.targets = np.array(self.targets)
        indexs = []
        # Keep only samples whose class is in labeled_set (None keeps all).
        if labeled_set is not None:
            for i in range(no_class):
                idx = np.where(self.targets == i)[0]
                if i in labeled_set:
                    indexs.extend(idx)
            indexs = np.array(indexs)
            self.data = self.data[indexs]
            self.targets = np.array(self.targets)[indexs]
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        # Returns (image, target) like a plain torchvision dataset.
        img, target = self.data[index], self.targets[index]
        img = self.loader(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
class GenericUNCR(datasets.ImageFolder):
    """ImageFolder subset that yields ten independent augmentations per image
    (plus target and original index), used for uncertainty estimation over
    unlabeled samples. ``transform`` must be provided.
    """
    def __init__(self, root, indexs,
                 transform=None, target_transform=None):
        super().__init__(root,
                         transform=transform,
                         target_transform=target_transform)
        # ImageFolder.imgs is a list of (path, class-id); split into
        # file paths (self.data) and integer labels (self.targets).
        self.imgs = np.array(self.imgs)
        self.targets = self.imgs[:, 1]
        self.targets= list(map(int, self.targets.tolist()))
        self.data = np.array(self.imgs[:, 0])
        self.targets = np.array(self.targets)
        if indexs is not None:
            indexs = np.array(indexs)
            self.data = self.data[indexs]
            self.targets = self.targets[indexs]
            self.indexs = indexs
        else:
            self.indexs = np.arange(len(self.targets))
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        img, target = self.data[index], self.targets[index]
        img = self.loader(img)
        # Ten stochastic views of the same image (replaces ten copy-pasted
        # self.transform(img) lines); the 12-tuple layout is unchanged.
        views = tuple(self.transform(img) for _ in range(10))
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (*views, target, self.indexs[index])
class GaussianBlur(object):
    """Random-radius Gaussian blur, as used in SimCLR
    (https://arxiv.org/abs/2002.05709)."""

    def __init__(self, sigma=[0.1, 2.0]):
        # sigma is a [low, high] range for the blur radius.
        self.sigma = sigma

    def __call__(self, x):
        low, high = self.sigma[0], self.sigma[1]
        radius = random.uniform(low, high)
        return x.filter(ImageFilter.GaussianBlur(radius=radius))
class Solarize(object):
    """Randomly solarize a PIL image with probability *p*.

    With probability ``1 - p`` the image is returned unchanged; otherwise
    ``ImageOps.solarize`` inverts all pixel values above a threshold drawn
    uniformly from [0, 256).
    """

    def __init__(self, p=0.2):
        self.prob = p

    def __call__(self, img):
        if torch.bernoulli(torch.tensor(self.prob)) == 0:
            return img
        # .item() converts the 1-element tensor to a Python float: PIL builds
        # its solarize LUT by comparing each level ``i >= threshold``, and an
        # int-vs-tensor comparison yields a tensor rather than a bool, so the
        # original tensor-valued threshold was incorrect.
        threshold = (torch.rand(1) * 256).item()
        return ImageOps.solarize(img, threshold)
class Equalize(object):
    """Randomly apply histogram equalization with probability *p*."""

    def __init__(self, p=0.2):
        self.prob = p

    def __call__(self, img):
        keep_unchanged = torch.bernoulli(torch.tensor(self.prob)) == 0
        if keep_unchanged:
            return img
        return ImageOps.equalize(img)
| 29,457 | 41.203438 | 214 | py |
TRSSL | TRSSL-main/utils/utils.py | import os
import torch
import numpy as np
import random
from progress.bar import Bar as Bar
import torch.nn.functional as F
import shutil
import matplotlib.pyplot as plt
def accuracy(output, target, topk=(1,)):
    """Return precision@k (in percent) for each k in *topk*."""
    k_max = max(topk)
    n = target.size(0)
    # [n, k_max] indices of the highest-scoring classes, transposed to [k_max, n].
    _, ranked = output.topk(k_max, 1, True, True)
    ranked = ranked.t()
    # hits[i, j]: whether the (i+1)-th ranked prediction of sample j is correct.
    hits = ranked.eq(target.reshape(1, -1).expand_as(ranked))
    return [hits[:k].reshape(-1).float().sum(0).mul_(100.0 / n) for k in topk]
class AverageMeter(object):
    """Tracks the most recent value plus a running sum/count/average.

    Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def save_checkpoint(state, is_best, save_path):
    """Serialize *state* to <save_path>/checkpoint.pth.tar; when *is_best*,
    also duplicate it as model_best.pth.tar."""
    ckpt_file = os.path.join(save_path, 'checkpoint.pth.tar')
    torch.save(state, ckpt_file)
    if is_best:
        shutil.copyfile(ckpt_file, os.path.join(save_path, 'model_best.pth.tar'))
class WeightEMA(object):
    """Exponential-moving-average tracker over a model's state_dict tensors.

    On construction the live model is overwritten with the EMA weights; every
    step() then folds the live weights into the EMA buffers with decay
    ``args.ema_decay`` (float32 tensors only).
    """

    def __init__(self, args, model, ema_model):
        self.model = model
        self.ema_model = ema_model
        self.alpha = args.ema_decay
        self.params = list(model.state_dict().values())
        self.ema_params = list(ema_model.state_dict().values())
        # self.wd = 0.02 * args.lr
        # Start the live model from the EMA weights.
        for live, avg in zip(self.params, self.ema_params):
            live.data.copy_(avg.data)

    def step(self):
        """In place: ema = alpha * ema + (1 - alpha) * live (float32 only)."""
        blend = 1.0 - self.alpha
        for live, avg in zip(self.params, self.ema_params):
            if avg.dtype == torch.float32:
                avg.mul_(self.alpha)
                avg.add_(live * blend)
                # customized weight decay
                # param.mul_(1 - self.wd)
def interleave_offsets(batch, nu):
    """Split *batch* into nu+1 near-equal groups and return cumulative offsets.

    The remainder is spread over the trailing groups, so the result has
    nu+2 entries with offsets[-1] == batch.
    """
    base, extra = divmod(batch, nu + 1)
    groups = [base] * (nu + 1)
    for i in range(extra):
        groups[-i - 1] += 1
    offsets = [0]
    for size in groups:
        offsets.append(offsets[-1] + size)
    assert offsets[-1] == batch
    return offsets
def interleave(xy, batch):
    """Interleave labeled/unlabeled batches (MixMatch trick) so BatchNorm
    statistics are computed on mixed groups.

    *xy* is a list of nu+1 tensors of length *batch*; the diagonal slices are
    swapped with the first tensor's slices, then each group is re-concatenated.
    """
    nu = len(xy) - 1
    offsets = interleave_offsets(batch, nu)
    chunks = [[t[offsets[p]:offsets[p + 1]] for p in range(nu + 1)] for t in xy]
    for i in range(1, nu + 1):
        chunks[0][i], chunks[i][i] = chunks[i][i], chunks[0][i]
    return [torch.cat(group, dim=0) for group in chunks]
class Logger(object):
    '''Save training process to log file with simple plot function.'''
    def __init__(self, fpath, title=None, resume=False):
        """Open *fpath* for logging; with resume=True, first parse the existing
        file's header and rows, then reopen it in append mode."""
        self.file = None
        self.resume = resume
        self.title = '' if title == None else title
        if fpath is not None:
            if resume:
                # re-read previous header (tab-separated metric names) and rows
                self.file = open(fpath, 'r')
                name = self.file.readline()
                self.names = name.rstrip().split('\t')
                self.numbers = {}
                for _, name in enumerate(self.names):
                    self.numbers[name] = []
                for numbers in self.file:
                    numbers = numbers.rstrip().split('\t')
                    for i in range(0, len(numbers)):
                        self.numbers[self.names[i]].append(numbers[i])
                self.file.close()
                self.file = open(fpath, 'a')
            else:
                self.file = open(fpath, 'w')
    def set_names(self, names):
        """Write the tab-separated header row and reset the stored series."""
        if self.resume:
            pass
            # NOTE(review): on resume this still falls through, rewriting the
            # header and clearing the history parsed in __init__ -- looks
            # unintended; confirm before relying on resume.
        # initialize numbers as empty list
        self.numbers = {}
        self.names = names
        for _, name in enumerate(self.names):
            self.file.write(name)
            self.file.write('\t')
            self.numbers[name] = []
        self.file.write('\n')
        self.file.flush()
    def append(self, numbers):
        """Append one row of metric values (same order as the header)."""
        assert len(self.names) == len(numbers), 'Numbers do not match names'
        for index, num in enumerate(numbers):
            self.file.write("{0:.6f}".format(num))
            self.file.write('\t')
            self.numbers[self.names[index]].append(num)
        self.file.write('\n')
        self.file.flush()
    def plot(self, names=None):
        """Plot each logged series with matplotlib (caller shows/saves the figure)."""
        names = self.names if names == None else names
        numbers = self.numbers
        for _, name in enumerate(names):
            x = np.arange(len(numbers[name]))
            plt.plot(x, np.asarray(numbers[name]))
        plt.legend([self.title + '(' + name + ')' for name in names])
        plt.grid(True)
    def close(self):
        """Close the underlying log file, if any."""
        if self.file is not None:
            self.file.close()
| 4,886 | 30.127389 | 95 | py |
TRSSL | TRSSL-main/utils/evaluate_utils.py | import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
from scipy.optimize import linear_sum_assignment
@torch.no_grad()
def hungarian_evaluate(predictions, targets, offset=0):
    """Cluster-vs-label evaluation: Hungarian-match predicted cluster ids to
    ground-truth classes, then report ACC / NMI / ARI.

    After subtracting *offset*, negative predictions are treated as invalid
    and count as errors in the accuracy denominator.
    """
    # Hungarian matching
    targets = targets - offset
    predictions = predictions - offset
    predictions_np = predictions.numpy()
    num_elems = targets.size(0)
    # only consider the valid predicts. rest are treated as misclassification
    valid_idx = np.where(predictions_np>=0)[0]
    predictions_sel = predictions[valid_idx]
    targets_sel = targets[valid_idx]
    num_classes = torch.unique(targets).numel()
    # allow the prediction side to use more clusters than there are classes
    num_classes_pred = max(torch.unique(predictions_sel).numel(), num_classes)
    match = _hungarian_match(predictions_sel, targets_sel, preds_k=num_classes_pred, targets_k=num_classes) # match is data dependent
    # relabel predictions with their matched ground-truth class ids
    reordered_preds = torch.zeros(predictions_sel.size(0), dtype=predictions_sel.dtype)
    for pred_i, target_i in match:
        reordered_preds[predictions_sel == int(pred_i)] = int(target_i)
    # Gather performance metrics
    reordered_preds = reordered_preds.numpy()
    acc = int((reordered_preds == targets_sel.numpy()).sum()) / float(num_elems) #accuracy is normalized with the total number of samples not only the valid ones
    # NMI/ARI are permutation-invariant, so they use the raw predictions
    nmi = metrics.normalized_mutual_info_score(targets.numpy(), predictions.numpy())
    ari = metrics.adjusted_rand_score(targets.numpy(), predictions.numpy())
    return {'acc': acc*100, 'ari': ari, 'nmi': nmi, 'hungarian_match': match}
@torch.no_grad()
def _hungarian_match(flat_preds, flat_targets, preds_k, targets_k):
    """Find the prediction->target cluster permutation maximizing agreement.

    Based on the implementation from IIC.  Returns a list of
    (pred_cluster, target_cluster) tuples.
    """
    num_samples = flat_targets.shape[0]
    num_k = preds_k
    # vote matrix: votes[c1, c2] = #samples predicted c1 whose label is c2
    votes = np.zeros((num_k, num_k))
    for c1 in range(num_k):
        for c2 in range(num_k):
            # elementwise product, so each sample contributes exactly once
            votes[c1, c2] = int(((flat_preds == c1) * (flat_targets == c2)).sum())
    # Minimizing (num_samples - votes) maximizes the total agreement;
    # the vote matrix is small, so this is cheap.
    rows, cols = linear_sum_assignment(num_samples - votes)
    # return as a list of (out_c, gt_c) tuples
    return [(out_c, gt_c) for out_c, gt_c in zip(rows, cols)]
| 2,269 | 36.213115 | 161 | py |
TRSSL | TRSSL-main/utils/uncr_util.py | import random
import time
import pickle
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from .utils import AverageMeter
def uncr_generator(args, data_loader, model):
    """Estimate per-sample prediction uncertainty from 10 augmented views.

    Each batch from *data_loader* carries 10 stochastic views per sample
    (see GenericUNCR).  The model is evaluated on every view; the std-dev of
    the softmax probability at the mean-argmax class is the uncertainty
    score.  Scores are normalized to (0, 1] and clipped below at
    ``args.temperature``.

    Returns a dict {'index': [...], 'uncr': [...]} aligned by position.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    end = time.time()
    pseudo_idx = []
    pseudo_maxstd = []
    model.eval()
    data_loader = tqdm(data_loader)
    with torch.no_grad():
        for batch_idx, batch in enumerate(data_loader):
            # batch = (view_1, ..., view_10, targets, indexs); the loop below
            # replaces ten copy-pasted per-view inference blocks.
            *views, targets, indexs = batch
            data_time.update(time.time() - end)
            targets = targets.cuda()
            out_prob = []
            for view in views:
                outputs = model(view.cuda())
                out_prob.append(F.softmax(outputs, dim=1))
            # compute uncertainty scores
            out_prob = torch.stack(out_prob)        # [views, B, C]
            out_std = torch.std(out_prob, dim=0)    # [B, C]
            out_prob = torch.mean(out_prob, dim=0)  # [B, C]
            _, max_idx = torch.max(out_prob, dim=1)
            # std-dev of the winning class's probability across the views
            max_std = out_std.gather(1, max_idx.view(-1, 1))
            pseudo_maxstd.extend(max_std.squeeze(1).cpu().numpy().tolist())
            pseudo_idx.extend(indexs.numpy().tolist())
            batch_time.update(time.time() - end)
            end = time.time()
            data_loader.set_description("UncrGen Iter: {batch:4}/{iter:4}. Data: {data:.3f}s. Batch: {bt:.3f}s.".format(
                batch=batch_idx + 1,
                iter=len(data_loader),
                data=data_time.avg,
                bt=batch_time.avg,
            ))
        data_loader.close()
    pseudo_maxstd = np.array(pseudo_maxstd)
    pseudo_idx = np.array(pseudo_idx)
    # normalizing the uncertainty values
    pseudo_maxstd = pseudo_maxstd/max(pseudo_maxstd)
    pseudo_maxstd = np.clip(pseudo_maxstd, args.temperature, 1.0)
    uncr_temp = {'index': pseudo_idx.tolist(), 'uncr':pseudo_maxstd.tolist()}
    return uncr_temp
TRSSL | TRSSL-main/utils/sinkhorn_knopp.py | import torch
import numpy as np
def shoot_infs(inp_tensor):
    """Replace every +/-inf entry, in place, by the max over the remaining values.

    Infs are first zeroed, then overwritten with the resulting maximum;
    the (modified) input tensor is returned.  Handles 1-D and 2-D tensors.
    """
    inf_locs = torch.nonzero(torch.isinf(inp_tensor))
    if len(inf_locs) == 0:
        return inp_tensor

    def _assign(loc, value):
        # loc is a coordinate row from torch.nonzero (length 1 or 2).
        if len(loc) == 2:
            inp_tensor[loc[0], loc[1]] = value
        elif len(loc) == 1:
            inp_tensor[loc[0]] = value

    for loc in inf_locs:
        _assign(loc, 0)
    peak = torch.max(inp_tensor)
    for loc in inf_locs:
        _assign(loc, peak)
    return inp_tensor
class SinkhornKnopp(torch.nn.Module):
    """Sinkhorn-Knopp optimal-transport normalization of logits.

    Produces soft assignments whose column marginal is uniform over samples
    and whose row marginal follows either a uniform class prior or an
    exponentially imbalanced one (controlled by ``imb_factor``).
    """
    def __init__(self, args):
        super().__init__()
        self.num_iters = args.num_iters_sk  # Sinkhorn iterations
        self.epsilon = args.epsilon_sk  # temperature of the exp in forward()
        self.imb_factor = args.imb_factor  # head/tail class-imbalance ratio
    @torch.no_grad()
    def iterate(self, Q):
        # Q: [classes, samples] unnormalized transport matrix; modified in place.
        Q = shoot_infs(Q)
        sum_Q = torch.sum(Q)
        Q /= sum_Q
        # column (per-sample) target marginal: uniform
        c = torch.ones(Q.shape[1]).cuda(non_blocking=True) / Q.shape[1] # Samples
        if self.imb_factor > 1:
            # obtain permutation/order from the marginals
            marginals_argsort = torch.argsort(Q.sum(1))
            marginals_argsort = marginals_argsort.detach()
            r = []
            for i in range(Q.shape[0]): # Classes
                r.append((1/self.imb_factor)**(i / (Q.shape[0] - 1.0)))
            r = np.array(r)
            r = r * (Q.shape[1]/Q.shape[0]) # Per-class distribution in the mini-batch
            r = torch.from_numpy(r).cuda(non_blocking=True)
            r[marginals_argsort] = torch.sort(r)[0] # Sort/permute based on the data order
            r = torch.clamp(r, min=1) # Clamp the min to have a balance distribution for the tail classes
            r /= r.sum() # Scaling to make it prob
        else:
            # balanced prior: uniform over classes
            r = torch.ones(Q.shape[0]).cuda(non_blocking=True) / Q.shape[0]
        for it in range(self.num_iters):
            # alternate row (class) and column (sample) normalization
            u = torch.sum(Q, dim=1)
            u = r / u
            u = shoot_infs(u)
            Q *= u.unsqueeze(1)
            Q *= (c / torch.sum(Q, dim=0)).unsqueeze(0)
        # final column normalization; returned as [samples, classes]
        return (Q / torch.sum(Q, dim=0, keepdim=True)).t().float()
    @torch.no_grad()
    def forward(self, logits):
        # get assignments
        q = logits / self.epsilon
        M = torch.max(q)
        q -= M  # subtract the max so exp() cannot overflow
        q = torch.exp(q).t()
        return self.iterate(q)
| 2,410 | 32.957746 | 105 | py |
BiRTE | BiRTE-main/main.py | from transformers import WEIGHTS_NAME,AdamW, get_linear_schedule_with_warmup
from bert4keras.tokenizers import Tokenizer
from model import BiRTE
from util import *
from tqdm import tqdm
import random
import os
import torch.nn as nn
import torch
from transformers.modeling_bert import BertConfig
import json
def search(pattern, sequence):
    """Return the index of the first occurrence of *pattern* (a contiguous
    subsequence) in *sequence*, or -1 when it is absent."""
    width = len(pattern)
    for start in range(len(sequence)):
        if sequence[start:start + width] == pattern:
            return start
    return -1
def judge(ex):
    """Return False when any triple has an empty subject/object or one that
    does not literally occur in the sample's text; True otherwise."""
    text = ex["text"]
    for subj, _, obj in ex["triple_list"]:
        if subj == '' or obj == '' or subj not in text or obj not in text:
            return False
    return True
class data_generator(DataGenerator):
    """Batch generator for BiRTE training.

    For every sample it encodes the text, locates all (subject, predicate,
    object) spans in token space, and builds the label/mask arrays for the
    three tagging stages: independent s/o extraction, extraction of one side
    conditioned on a sampled entity of the other side, and relation
    classification for a sampled (s, o) pair.
    """
    def __init__(self, args, train_data, tokenizer, predicate2id, id2predicate):
        super(data_generator, self).__init__(train_data, args.batch_size)
        self.max_len=args.max_len
        self.tokenizer=tokenizer
        self.predicate2id=predicate2id
        self.id2predicate=id2predicate
    def __iter__(self, is_random=True):
        batch_token_ids, batch_mask = [], []
        batch_s1_labels, batch_o1_labels,\
        batch_s2_mask, batch_o2_mask, batch_s2_labels, batch_o2_labels,\
        batch_s3_mask, batch_o3_mask, batch_r = [],[],[],[],[],[],[],[],[]
        for is_end, d in self.sample(is_random):
            if judge(d)==False:
                continue  # skip samples with empty/out-of-text entities
            token_ids, _ ,mask = self.tokenizer.encode(
                d['text'], max_length=self.max_len
            )
            # organize triples as {subject span: [(object span, predicate id)]}
            # plus the symmetric map keyed by object span
            spoes_s = {}
            spoes_o = {}
            for s, p, o in d['triple_list']:
                s = self.tokenizer.encode(s)[0][1:-1]
                p = self.predicate2id[p]
                o = self.tokenizer.encode(o)[0][1:-1]
                s_idx = search(s, token_ids)
                o_idx = search(o, token_ids)
                if s_idx != -1 and o_idx != -1:
                    s_loc = (s_idx, s_idx + len(s) - 1)
                    o_loc = (o_idx, o_idx + len(o) - 1)
                    if s_loc not in spoes_s:
                        spoes_s[s_loc] = []
                    spoes_s[s_loc].append((o_loc,p))
                    if o_loc not in spoes_o:
                        spoes_o[o_loc] = []
                    spoes_o[o_loc].append((s_loc,p))
            if spoes_s and spoes_o:
                # stage-1 labels: start/end tags of every subject (resp. object)
                def get_entity1_labels(item,l):
                    res=np.zeros([l,2])
                    for start,end in item:
                        res[start][0]=1
                        res[end][1]=1
                    return res
                s1_labels = get_entity1_labels(spoes_s, len(token_ids))
                o1_labels = get_entity1_labels(spoes_o, len(token_ids))
                # stage-2: sample one entity; the mask marks its boundaries and
                # the labels tag all entities paired with it
                def get_entity2_labels_mask(item,l):
                    start, end = random.choice(list(item.keys()))
                    # build labels
                    labels = np.zeros((l, 2))
                    if (start,end) in item:
                        for loc,_ in item[(start,end)]:
                            labels[loc[0], 0] = 1
                            labels[loc[1], 1] = 1
                    # build mask
                    mask=np.zeros(l)
                    mask[start]=1
                    mask[end]=1
                    return labels,mask
                o2_labels,s2_mask=get_entity2_labels_mask(spoes_s,len(token_ids))
                s2_labels,o2_mask=get_entity2_labels_mask(spoes_o,len(token_ids))
                # stage-3: sample one (subject, object) pair and build the
                # multi-hot relation target plus the boundary masks
                s_loc=random.choice(list(spoes_s.keys()))
                o_loc,_=random.choice(spoes_s[s_loc])
                r=np.zeros(len(self.id2predicate))
                if s_loc in spoes_s:
                    for loc,the_r in spoes_s[s_loc]:
                        if loc==o_loc:
                            r[the_r]=1
                s3_mask=np.zeros(len(token_ids))
                o3_mask=np.zeros(len(token_ids))
                s3_mask[s_loc[0]]=1
                s3_mask[s_loc[1]]=1
                o3_mask[o_loc[0]]=1
                o3_mask[o_loc[1]]=1
                # append the sample to the current batch
                batch_token_ids.append(token_ids)
                batch_mask.append(mask)
                batch_s1_labels.append(s1_labels)
                batch_o1_labels.append(o1_labels)
                batch_s2_mask.append(s2_mask)
                batch_o2_mask.append(o2_mask)
                batch_s2_labels.append(s2_labels)
                batch_o2_labels.append(o2_labels)
                batch_s3_mask.append(s3_mask)
                batch_o3_mask.append(o3_mask)
                batch_r.append(r)
                if len(batch_token_ids) == self.batch_size or is_end:  # emit a batch
                    # np.int was deprecated in NumPy 1.20 and removed in 1.24;
                    # the builtin int is the equivalent dtype alias.
                    batch_token_ids,batch_mask,\
                    batch_s1_labels,batch_o1_labels,\
                    batch_s2_mask,batch_o2_mask,batch_s2_labels,batch_o2_labels,\
                    batch_s3_mask,batch_o3_mask=\
                    [sequence_padding(i).astype(int)
                     for i in [batch_token_ids,batch_mask,
                              batch_s1_labels,batch_o1_labels,
                              batch_s2_mask,batch_o2_mask,batch_s2_labels,batch_o2_labels,
                              batch_s3_mask,batch_o3_mask]]
                    batch_r = np.array(batch_r).astype(int)
                    yield [
                        batch_token_ids, batch_mask,
                        batch_s1_labels, batch_o1_labels,
                        batch_s2_mask, batch_o2_mask, batch_s2_labels, batch_o2_labels,
                        batch_s3_mask, batch_o3_mask,batch_r
                    ]
                    batch_token_ids, batch_mask = [], []
                    batch_s1_labels, batch_o1_labels, \
                    batch_s2_mask, batch_o2_mask, batch_s2_labels, batch_o2_labels, \
                    batch_s3_mask, batch_o3_mask, batch_r = [], [], [], [], [], [], [], [], []
class CE():
    """Element-wise binary cross-entropy, computed in a numerically stable way."""
    def __call__(self,args,targets, pred, from_logist=False):
        '''
        Compute element-wise binary cross-entropy.
        :param targets: [batch,seq,2]
        :param pred: [batch,seq,2]
        :param from_logist: whether pred is raw logits (i.e. has NOT been
            through sigmoid/softmax yet)
        :return: loss.shape==targets.shape==pred.shape
        '''
        if not from_logist:
            # Recover logits from probabilities: clamp pred into
            # [min_num, 1-min_num] to avoid log(0), then invert the sigmoid.
            pred = torch.where(pred < 1 - args.min_num, pred, torch.ones(pred.shape).to("cuda") * 1 - args.min_num).to("cuda")
            pred = torch.where(pred > args.min_num, pred, torch.ones(pred.shape).to("cuda") * args.min_num).to("cuda")
            pred = torch.log(pred / (1 - pred))
        relu = nn.ReLU()
        # Stable BCE-with-logits: relu(x) - x*y + log(1 + exp(-|x|))
        loss = relu(pred) - pred * targets + torch.log(1 + torch.exp(-1 * torch.abs(pred).to("cuda"))).to("cuda")
        return loss
def train(args):
    """End-to-end training: build the data pipeline, optimize BiRTE, keep the
    best dev-F1 checkpoint, then evaluate that checkpoint on the test set."""
    output_path = os.path.join(args.base_path, args.dataset, "output", args.file_id)
    train_path=os.path.join(args.base_path,args.dataset,"train.json")
    dev_path=os.path.join(args.base_path,args.dataset,"dev.json")
    test_path=os.path.join(args.base_path,args.dataset,"test.json")
    rel2id_path=os.path.join(args.base_path,args.dataset,"rel2id.json")
    test_pred_path=os.path.join(output_path,"test_pred.json")
    dev_pred_path=os.path.join(output_path,"dev_pred.json")
    log_path=os.path.join(output_path,"log.txt")
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    print_config(args)
    # load the datasets
    train_data = json.load(open(train_path))
    valid_data = json.load(open(dev_path))
    test_data = json.load(open(test_path))
    id2predicate, predicate2id = json.load(open(rel2id_path))
    tokenizer = Tokenizer(args.bert_vocab_path)  # NB: adjust vocab path if needed
    config = BertConfig.from_pretrained(args.bert_config_path)
    config.num_p=len(id2predicate)
    torch.cuda.set_device(int(args.cuda_id))
    train_model = BiRTE.from_pretrained(pretrained_model_name_or_path=args.bert_model_path,config=config)
    train_model.to("cuda")
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    dataloader = data_generator(args, train_data, tokenizer, predicate2id, id2predicate)
    t_total = len(dataloader) * args.num_train_epochs
    """ Optimizer setup: BERT weights use bert_learning_rate, task heads use
    other_learning_rate """
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in train_model.named_parameters() if "bert." in n],
            "weight_decay": args.weight_decay,
            "lr": args.bert_learning_rate,
        },
        {
            "params": [p for n, p in train_model.named_parameters() if "bert." not in n],
            "weight_decay": args.weight_decay,
            "lr": args.other_learning_rate,
        }
    ]
    optimizer = AdamW(optimizer_grouped_parameters, eps=args.min_num)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup * t_total, num_training_steps=t_total
    )
    best_f1 = -1.0  # global best dev F1 so far
    step = 0
    binary_crossentropy=CE()
    no_change=0
    for epoch in range(args.num_train_epochs):
        train_model.train()
        epoch_loss = 0
        with tqdm(total=dataloader.__len__(), desc="train", ncols=80) as t:
            for i, batch in enumerate(dataloader):
                batch = [torch.tensor(d).to("cuda") for d in batch]
                batch_token_ids, batch_mask,\
                batch_s1_labels, batch_o1_labels,\
                batch_s2_mask, batch_o2_mask, batch_s2_labels, batch_o2_labels,\
                batch_s3_mask, batch_o3_mask, batch_r = batch
                s1_pred,o1_pred,s2_pred,o2_pred,p_pred = train_model(batch_token_ids, batch_mask,
                                                                     batch_s2_mask, batch_o2_mask,
                                                                     batch_s3_mask, batch_o3_mask)
                # compute the five losses (token losses are mask-averaged)
                def get_loss(target,pred,mask):
                    loss = binary_crossentropy(args, targets=target, pred=pred)  # BL2
                    loss = torch.mean(loss, dim=2).to("cuda")  # BL
                    loss = torch.sum(loss * mask).to("cuda") / torch.sum(mask).to("cuda")
                    return loss
                s1_loss=get_loss(target=batch_s1_labels,pred=s1_pred,mask=batch_mask)
                o1_loss=get_loss(target=batch_o1_labels,pred=o1_pred,mask=batch_mask)
                s2_loss=get_loss(target=batch_s2_labels,pred=s2_pred,mask=batch_mask)
                o2_loss=get_loss(target=batch_o2_labels,pred=o2_pred,mask=batch_mask)
                r_loss=binary_crossentropy(args,targets=batch_r,pred=p_pred)
                r_loss=r_loss.mean()
                loss=s1_loss+o1_loss+s2_loss+o2_loss+r_loss
                loss.backward()
                step += 1
                epoch_loss += loss.item()
                torch.nn.utils.clip_grad_norm_(train_model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                train_model.zero_grad()
                t.set_postfix(loss="%.4lf"%(loss.cpu().item()))
                t.update(1)
        f1, precision, recall = evaluate(args,tokenizer,id2predicate,train_model,valid_data,dev_pred_path)
        if f1 > best_f1:
            # Save model checkpoint
            best_f1 = f1
            torch.save(train_model.state_dict(), os.path.join(output_path, WEIGHTS_NAME))  # keep the best weights
        epoch_loss = epoch_loss / dataloader.__len__()
        with open(log_path, "a", encoding="utf-8") as f:
            print("epoch:%d\tloss:%f\tf1:%f\tprecision:%f\trecall:%f\tbest_f1:%f" % (
                int(epoch), epoch_loss, f1, precision, recall, best_f1), file=f)
    # predict on the test set
    # reload the best checkpoint first
    train_model.load_state_dict(torch.load(os.path.join(output_path, WEIGHTS_NAME), map_location="cuda"))
    f1, precision, recall = evaluate(args,tokenizer,id2predicate,train_model, test_data, test_pred_path)
    with open(log_path, "a", encoding="utf-8") as f:
        print("test: f1:%f\tprecision:%f\trecall:%f" % (f1, precision, recall), file=f)
def extract_spoes(args,tokenizer,id2predicate,model,text,entity_start=0.5,entity_end=0.5,p_num=0.5):
    """Extract the (subject, predicate, object) triples contained in *text*.

    Pipeline: stage-1 tags candidate subjects/objects independently; stage-2
    re-tags each side conditioned on the other; relation scores are then
    computed for the union of the two directions' candidate pairs.
    *entity_start*/*entity_end* threshold span boundaries, *p_num* thresholds
    relation probabilities.  Returns a list of string triples.
    """
    #sigmoid=nn.Sigmoid()
    if isinstance(model,torch.nn.DataParallel):
        model=model.module
    model.to("cuda")
    tokens = tokenizer.tokenize(text, max_length=args.max_len)
    mapping = tokenizer.rematch(text, tokens)
    token_ids, _ ,mask = tokenizer.encode(text, max_length=args.max_len)
    # compute the shared BERT-based representations once
    model.eval()
    with torch.no_grad():
        head,tail,rel,cls = model.get_embed(torch.tensor([token_ids]).to("cuda"), torch.tensor([mask]).to("cuda"))
    head = head.cpu().detach().numpy() #[1,L,H]
    tail = tail.cpu().detach().numpy()
    rel = rel.cpu().detach().numpy()
    cls = cls.cpu().detach().numpy()
    def get_entity(entity_pred):
        # Decode spans: pair every start above threshold with the nearest
        # end position at or after it.
        start = np.where(entity_pred[0, :, 0] > entity_start)[0]
        end = np.where(entity_pred[0, :, 1] > entity_end)[0]
        entity = []
        for i in start:
            j = end[end >= i]
            if len(j) > 0:
                j = j[0]
                entity.append((i, j))
        return entity
    # stage 1: extract candidate subjects (s1) and objects (o1)
    model.eval()
    with torch.no_grad():
        s1_preds = model.s_pred(torch.tensor(head).to("cuda"),torch.tensor(cls).to("cuda"))
        o1_preds = model.o_pred(torch.tensor(tail).to("cuda"),torch.tensor(cls).to("cuda"))
    s1_preds = s1_preds.cpu().detach().numpy() #[1,L,2]
    o1_preds = o1_preds.cpu().detach().numpy() #[1,L,2]
    # never select the [CLS]/[SEP] positions
    s1_preds[:,0,:],s1_preds[:,-1,:]=0.0,0.0
    o1_preds[:,0,:],o1_preds[:,-1,:]=0.0,0.0
    s1=get_entity(s1_preds)
    o1=get_entity(o1_preds)
    # stage 2, s->o direction: complete each candidate subject with objects.
    # (The original also built an unused cartesian pairs_0 list; removed.)
    # np.int was removed in NumPy 1.24 -- the builtin int is the equivalent.
    pairs_1=[]
    for s in s1:
        #s:(start,end)
        s2_mask=np.zeros(len(token_ids)).astype(int)
        s2_mask[s[0]] = 1
        s2_mask[s[1]] = 1
        model.eval()
        with torch.no_grad():
            o2_pred=model.o_pred_from_s(torch.tensor(head).to("cuda"),torch.tensor(tail).to("cuda"),
                                      torch.tensor([s2_mask]).to("cuda"),cls=torch.tensor(cls).to("cuda"))
        o2_pred = o2_pred.cpu().detach().numpy() # [1,L,2]
        o2_pred[:, 0, :], o2_pred[:, -1, :] = 0.0, 0.0
        objects2 = get_entity(o2_pred)
        if objects2:
            for o in objects2:
                pairs_1.append((s[0],s[1],o[0],o[1]))
    # stage 2, o->s direction: complete each candidate object with subjects
    pairs_2=[]
    for o in o1:
        #o:(start,end)
        o2_mask=np.zeros(len(token_ids)).astype(int)
        o2_mask[o[0]] = 1
        o2_mask[o[1]] = 1
        model.eval()
        with torch.no_grad():
            s2_pred=model.s_pred_from_o(torch.tensor(head).to("cuda"),torch.tensor(tail).to("cuda"),
                                      torch.tensor([o2_mask]).to("cuda"),cls=torch.tensor(cls).to("cuda"))
        s2_pred = s2_pred.cpu().detach().numpy() # [1,L,2]
        s2_pred[:, 0, :], s2_pred[:, -1, :] = 0.0, 0.0
        subjects2 = get_entity(s2_pred)
        if subjects2:
            for s in subjects2:
                pairs_2.append((s[0],s[1],o[0],o[1]))
    # keep the union of both directions' (s, o) pairs
    pairs_1=set(pairs_1)
    pairs_2=set(pairs_2)
    pairs=list(pairs_1|pairs_2)
    if pairs: # m * 4
        s_mask=np.zeros([len(pairs),len(token_ids)]).astype(int)
        o_mask=np.zeros([len(pairs),len(token_ids)]).astype(int)
        for i,pair in enumerate(pairs):
            s1, s2, o1, o2=pair
            s_mask[i,s1]=1
            s_mask[i,s2]=1
            o_mask[i,o1]=1
            o_mask[i,o2]=1
        spoes = []
        rel=np.repeat(rel,len(pairs),0)
        # stage 3: score all relations for every (subject, object) pair
        model.eval()
        with torch.no_grad():
            p_pred = model.p_pred(
                rel=torch.tensor(rel).to("cuda"),
                s_mask=torch.tensor(s_mask).to("cuda"),
                o_mask=torch.tensor(o_mask).to("cuda"),
            )
        p_pred = p_pred.cpu().detach().numpy() #BR
        index,p_index=np.where(p_pred>p_num)
        for i,p in zip(index,p_index):
            s1,s2,o1,o2=pairs[i]
            spoes.append(
                (
                    (mapping[s1][0],mapping[s2][-1]),
                    p,
                    (mapping[o1][0], mapping[o2][-1])
                )
            )
        # map token-level spans back to character offsets in the original text
        return [(text[s[0]:s[1] + 1], id2predicate[str(p)], text[o[0]:o[1] + 1])
                for s, p, o, in spoes]
    else:
        return []
def evaluate(args,tokenizer,id2predicate,model,evl_data,evl_path):
    """Compute micro f1/precision/recall over *evl_data* and dump per-sample
    predictions (including spurious and missing triples) to *evl_path*."""
    # X = #correct, Y = #predicted, Z = #gold; 1e-10 avoids division by zero
    X, Y, Z = 1e-10, 1e-10, 1e-10
    f = open(evl_path, 'w', encoding='utf-8')
    pbar = tqdm()
    for d in evl_data:
        R = set(extract_spoes(args,tokenizer,id2predicate,model,d['text']))
        T = set([(i[0],i[1],i[2]) for i in d['triple_list']])
        X += len(R & T)
        Y += len(R)
        Z += len(T)
        # running metrics shown in the progress bar
        f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z
        pbar.update()
        pbar.set_description(
            'f1: %.5f, precision: %.5f, recall: %.5f' % (f1, precision, recall)
        )
        # per-sample record: gold, predicted, spurious ('new'), missing ('lack')
        s = json.dumps({
            'text': d['text'],
            'triple_list': list(T),
            'triple_list_pred': list(R),
            'new': list(R - T),
            'lack': list(T - R),
        },ensure_ascii=False,indent=4)
        f.write(s + '\n')
    pbar.close()
    f.close()
    return f1, precision, recall
def test(args):
    """Load the best checkpoint saved under args.dataset/output/args.file_id
    and report its f1/precision/recall on the test split."""
    torch.cuda.set_device(int(args.cuda_id))
    test_path = os.path.join(args.base_path, args.dataset, "test.json")
    output_path=os.path.join(args.base_path,args.dataset,"output",args.file_id)
    test_pred_path = os.path.join(output_path, "test_pred.json")
    rel2id_path=os.path.join(args.base_path,args.dataset,"rel2id.json")
    test_data = json.load(open(test_path))
    id2predicate, predicate2id = json.load(open(rel2id_path))
    config = BertConfig.from_pretrained(args.bert_config_path)
    tokenizer = Tokenizer(args.bert_vocab_path)
    config.num_p=len(id2predicate)
    train_model = BiRTE.from_pretrained(pretrained_model_name_or_path=args.bert_model_path,config=config)
    train_model.to("cuda")
    # overwrite the pretrained weights with the fine-tuned checkpoint
    train_model.load_state_dict(torch.load(os.path.join(output_path, WEIGHTS_NAME), map_location="cuda"))
    f1, precision, recall = evaluate(args,tokenizer,id2predicate,train_model, test_data, test_pred_path)
    print("f1:%f, precision:%f, recall:%f"%(f1, precision, recall))
BiRTE | BiRTE-main/model.py | from transformers.modeling_bert import BertModel,BertPreTrainedModel
import torch.nn as nn
import torch
from torch.autograd import Variable
import numpy as np
class Biaffine(nn.Module):
    '''
    Args:
        in1_features: size of each first input sample
        in2_features: size of each second input sample
        out_features: size of each output sample
        bias: If set to False, the layer will not learn an additive bias. Default: ``[True, True]``
        bias[0, 1]: the bias of U_m
        bias[2]: the b_m
    '''
    def __init__(self, in1_features, in2_features, out_features, bias=(True, True, True)):
        super(Biaffine, self).__init__()
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        self.bias = bias
        # bias[0]/bias[1] append a constant-1 feature to input1/input2
        self.linear_input_size = in1_features + int(bias[0])
        self.linear_output_size = out_features * (in2_features + int(bias[1])) # 3-dim -> 2-dim
        self.linear = nn.Linear(in_features=self.linear_input_size,
                                out_features=self.linear_output_size,
                                bias=False)
        # linear_1/linear_2 belong to an additive term that is currently
        # commented out in forward()
        self.linear_1 = nn.Linear(in_features=2*self.in1_features+1,
                                out_features=self.out_features,
                                bias=False)
        self.linear_2 = nn.Linear(in_features=2*self.in1_features+1,
                                out_features=self.out_features,
                                bias=False)
        self.reset_parameters()
    def reset_parameters(self):
        # NOTE(review): all weights start at zero, so initial relation logits
        # are 0 (sigmoid 0.5) -- presumably intentional, but confirm this is
        # not a leftover placeholder init.
        U = np.zeros((self.linear_output_size, self.linear_input_size), dtype=np.float32)
        W1 = np.zeros((self.out_features, 1+2*self.in1_features), dtype=np.float32)
        W2 = np.zeros((self.out_features, 1+2*self.in1_features), dtype=np.float32)
        self.linear.weight.data.copy_(torch.from_numpy(U))
        self.linear_1.weight.data.copy_(torch.from_numpy(W1))
        self.linear_2.weight.data.copy_(torch.from_numpy(W2))
    def forward(self, input1, input2):
        """Bilinear score of two entity embeddings; returns [batch, out_features]."""
        # add a length-1 sequence axis so bmm can be used below
        input1=input1.unsqueeze(dim=1)
        input2=input2.unsqueeze(dim=1)
        input3=torch.cat([input1, input2],dim=-1)
        # batch_size, len1, dim1 = input1.size()
        # batch_size, len2, dim2 = input2.size()
        batch_size,_, dim1 = input1.size()
        batch_size,_, dim2 = input2.size()
        if self.bias[0]:
            ones = input1.data.new(batch_size, 1,1).zero_().fill_(1)
            input1 = torch.cat((input1, Variable(ones)), dim=2)
            dim1 += 1
        if self.bias[1]:
            ones = input2.data.new(batch_size, 1,1).zero_().fill_(1)
            input2 = torch.cat((input2, Variable(ones)), dim=2)
            dim2 += 1
        if self.bias[2]:
            ones = input3.data.new(batch_size, 1,1).zero_().fill_(1)
            input3 = torch.cat((input3, Variable(ones)), dim=2)
        # bilinear term: (input1 @ U) reshaped, then batched matmul with input2
        affine = self.linear(input1)
        affine = affine.view(batch_size, self.out_features, dim2)
        input2 = torch.transpose(input2, 1, 2)
        biaffine = torch.transpose(torch.bmm(affine, input2), 1, 2)
        biaffine = biaffine.contiguous().view(batch_size, 1, 1, self.out_features)
        # affine_1 = self.linear_1(input3)
        # affine_1 = affine_1.view(batch_size, 1, 1, self.out_features)
        # biaffine = biaffine + affine_1
        return biaffine.squeeze(dim=1).squeeze(dim=1)
    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + 'in1_features=' + str(self.in1_features) \
               + ', in2_features=' + str(self.in2_features) \
               + ', out_features=' + str(self.out_features) + ')'
class BiRTE(BertPreTrainedModel):
    """Bidirectional relational triple extractor: BERT encoder, bidirectional
    subject/object taggers, and a biaffine relation classifier."""
    def __init__(self, config):
        super(BiRTE, self).__init__(config)
        self.bert=BertModel(config=config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # three projections of the BERT output: head (subject view),
        # tail (object view), rel (relation view)
        self.w1=nn.Linear(config.hidden_size,config.hidden_size)
        self.w2=nn.Linear(config.hidden_size,config.hidden_size)
        self.w3=nn.Linear(config.hidden_size,config.hidden_size)
        # subject taggers (stage 1, and stage 2 conditioned on an object)
        self.s_classier=nn.Linear(config.hidden_size,2)
        self.s_classier_from_o=nn.Linear(config.hidden_size,2)
        # object taggers (stage 1, and stage 2 conditioned on a subject)
        self.o_classier=nn.Linear(config.hidden_size,2)
        self.o_classier_from_s=nn.Linear(config.hidden_size,2)
        # relation classifier over (subject, object) entity embeddings
        self.biaffine=Biaffine(config.hidden_size,config.hidden_size,config.num_p)
        self.sigmoid=nn.Sigmoid()
        self.init_weights()
    def forward(self, token_ids, mask_token_ids,s2_mask,o2_mask,s3_mask,o3_mask):
        '''
        Full training-time forward pass.
        :param token_ids: [batch, seq] input ids
        :param mask_token_ids: [batch, seq] attention mask
        :param s2_mask/o2_mask: boundary masks of the sampled entity that
            conditions the stage-2 taggers
        :param s3_mask/o3_mask: boundary masks of the sampled pair used for
            relation classification
        :return: s1_pred/o1_pred/s2_pred/o2_pred: [batch,seq,2]
                 p_pred: [batch,num_p]
        '''
        # shared encodings
        head,tail,rel,cls=self.get_embed(token_ids, mask_token_ids)
        # stage 1: tag subjects and objects independently
        s1_pred=self.s_pred(head,cls=cls)
        o1_pred=self.o_pred(tail,cls=cls)
        # stage 2: tag each side conditioned on the other
        o2_pred=self.o_pred_from_s(head,tail,s2_mask,cls)
        s2_pred=self.s_pred_from_o(head,tail,o2_mask,cls)
        # stage 3: relation probabilities for the sampled pair
        p_pred=self.p_pred(rel,s3_mask,o3_mask)
        return s1_pred,o1_pred,s2_pred,o2_pred,p_pred
    def get_embed(self,token_ids, mask_token_ids):
        """Run BERT and return the (head, tail, rel, cls) dropout-regularized views."""
        bert_out = self.bert(input_ids=token_ids.long(), attention_mask=mask_token_ids.long())
        embed=bert_out[0]
        head=self.w1(embed)
        tail=self.w2(embed)
        rel=self.w3(embed)
        cls=bert_out[1]
        # cross-inject each view's first-token summary into the other view
        head=head+tail[:,0,:].unsqueeze(dim=1)
        tail=tail+head[:,0,:].unsqueeze(dim=1)
        head, tail,rel,cls=self.dropout(head),self.dropout(tail),self.dropout(rel),self.dropout(cls)
        return head, tail,rel,cls
    def extract_entity(self, input, mask):
        '''
        Average the embeddings at the masked (start/end) positions.
        :param input: BLH
        :param mask: BL
        :return: BH
        '''
        _,_,dim=input.shape
        entity=input*mask.unsqueeze(dim=-1) #BLH
        entity=entity.sum(dim=1)/mask.sum(dim=-1,keepdim=True) #BH/B1
        return entity
    def s_pred(self,head,cls):
        """Stage-1 subject start/end probabilities."""
        s_logist=self.s_classier(head+cls.unsqueeze(dim=1)) #BL,2
        s_pred=self.sigmoid(s_logist)
        return s_pred
    def o_pred(self,tail,cls):
        """Stage-1 object start/end probabilities."""
        o_logist=self.o_classier(tail+cls.unsqueeze(dim=1)) #BL,2
        o_pred=self.sigmoid(o_logist)
        return o_pred
    def o_pred_from_s(self,head,tail,s_mask,cls):
        """Stage-2 object probabilities, conditioned on the subject marked by s_mask."""
        s_entity=self.extract_entity(head,s_mask)
        s2o_embed=tail*s_entity.unsqueeze(dim=1) #BLH
        o_logist=self.o_classier_from_s(s2o_embed+cls.unsqueeze(dim=1)) #BL2
        o_pred=self.sigmoid(o_logist)
        return o_pred #BL2
    def s_pred_from_o(self,head,tail,o_mask,cls):
        """Stage-2 subject probabilities, conditioned on the object marked by o_mask."""
        o_entity=self.extract_entity(tail,o_mask)
        o2s_embed=head*o_entity.unsqueeze(dim=1) #BLH
        s_logist=self.s_classier_from_o(o2s_embed+cls.unsqueeze(dim=1)) #BL2
        s_pred=self.sigmoid(s_logist)
        return s_pred #BL2
    def p_pred(self, rel, s_mask, o_mask):
        """Relation probabilities for the (subject, object) pair marked by the masks."""
        s_entity=self.extract_entity(rel,s_mask) #BH
        o_entity=self.extract_entity(rel,o_mask) #BH
        logist=self.biaffine(s_entity,o_entity) #bc
        r_pred=self.sigmoid(logist)
        return r_pred #BR
BiRTE | BiRTE-main/run.py | import argparse
from main import *
import torch
# Command-line entry point: parse hyper-parameters, then train or evaluate.
parser = argparse.ArgumentParser(description='Model Controller')
# hardware / data locations
parser.add_argument('--cuda_id', default="0", type=str)
parser.add_argument('--base_path', default="./dataset", type=str)
parser.add_argument('--dataset', default='WebNLG', type=str)
parser.add_argument('--train', default="train", type=str)
# optimization hyper-parameters (other modules use 5x the BERT lr)
parser.add_argument('--bert_learning_rate', default=3e-5, type=float)
parser.add_argument('--other_learning_rate', default=(3e-5)*5, type=float)
parser.add_argument('--num_train_epochs', default=100, type=int)
parser.add_argument('--file_id', default='999', type=str)
parser.add_argument('--batch_size', default=6, type=int)
parser.add_argument('--max_len', default=100, type=int)
parser.add_argument('--warmup', default=0.0, type=float)
parser.add_argument('--weight_decay', default=0.0, type=float)
parser.add_argument('--max_grad_norm', default=1.0, type=float)
parser.add_argument('--min_num', default=1e-7, type=float)
# pretrained BERT checkpoint files
parser.add_argument('--bert_vocab_path', default="./pretrained/bert-base-cased/vocab.txt", type=str)
parser.add_argument('--bert_config_path', default="./pretrained/bert-base-cased/config.json", type=str)
parser.add_argument('--bert_model_path', default="./pretrained/bert-base-cased/pytorch_model.bin", type=str)
args = parser.parse_args()
# --train train -> run training; any other value -> evaluation only
if args.train=="train":
    train(args)
else:
    test(args) | 1,383 | 45.133333 | 108 | py |
BiRTE | BiRTE-main/util.py | #! -*- coding:utf-8 -*-
import numpy as np
import random
from copy import deepcopy
import os
import pickle
import torch
import json
def get_more_data(all_data):
    """Split examples by which entity role is more diverse.

    ``s_more`` collects examples with at least as many distinct subjects as
    objects, ``o_more`` the reverse; ties land in both lists.
    """
    s_more, o_more = [], []
    for example in all_data:
        subjects = {s for s, p, o in example["triple_list"]}
        objects = {o for s, p, o in example["triple_list"]}
        if len(subjects) >= len(objects):
            s_more.append(example)
        if len(objects) >= len(subjects):
            o_more.append(example)
    return s_more, o_more
def get_over_lap(all_data):
    """Partition examples into the standard triple-overlap categories.

    Returns [normal, epo, seo]: Normal, EntityPairOverlap and
    SingleEntityOverlap examples; a single example may appear in several
    lists.
    """
    normal, epo, seo = [], [], []
    for example in all_data:
        unique = {(s, p, o) for s, p, o in example["triple_list"]}
        flat = []
        for s, p, o in unique:
            flat.extend([s, o, p])  # flattened (s, o, p) layout expected below
        if is_normal_triple(flat):
            normal.append(example)
        if is_multi_label(flat):
            epo.append(example)
        if is_over_lapping(flat):
            seo.append(example)
    return [normal, epo, seo]
def is_normal_triple(triples, is_relation_first=False):
    """Return True when no entity is shared between any two triples.

    ``triples`` is a flat list, three items per triple; the relation sits at
    offset 0 within each triple when ``is_relation_first``, else at offset 2.
    """
    relation_offset = 0 if is_relation_first else 2
    entities = {e for i, e in enumerate(triples) if i % 3 != relation_offset}
    # with no sharing there are exactly two distinct entities per triple
    return len(entities) == 2 * (len(triples) // 3)
def is_multi_label(triples, is_relation_first=False):
    """True when some entity pair carries more than one relation (EPO case)."""
    if is_normal_triple(triples, is_relation_first):
        return False
    n = len(triples) // 3
    if is_relation_first:
        pairs = [tuple(triples[3 * i + 1:3 * i + 3]) for i in range(n)]
    else:
        pairs = [tuple(triples[3 * i:3 * i + 2]) for i in range(n)]
    # a duplicated pair means one entity pair holds multiple relations
    return len(pairs) != len(set(pairs))
def is_over_lapping(triples, is_relation_first=False):
    """True when distinct entity pairs share an entity (SEO case)."""
    if is_normal_triple(triples, is_relation_first):
        return False
    n = len(triples) // 3
    if is_relation_first:
        pairs = {tuple(triples[3 * i + 1:3 * i + 3]) for i in range(n)}
    else:
        pairs = {tuple(triples[3 * i:3 * i + 2]) for i in range(n)}
    entities = {e for pair in pairs for e in pair}
    # if all the (deduplicated) pairs were disjoint we would count exactly
    # two entities per pair
    return len(entities) != 2 * len(pairs)
def to_set(item):
    """Convert a list of [s, p, o] triples into a set of (s, p, o) tuples."""
    return {(s, p, o) for s, p, o in item}
def get_pred_data(path):
    """Parse a file of concatenated pretty-printed JSON objects.

    Each object is assumed to end on a line that is exactly "}"; those
    closers are turned into "}," so the whole file can be wrapped in a JSON
    array and parsed in one pass.
    """
    pieces = []
    with open(path, "r") as handle:
        for raw in handle:
            line = raw.rsplit("\n")[0]
            if line == "}":
                line = "},"
            pieces.append(line)
    joined = "".join(pieces)
    # drop the trailing comma added after the last object
    return json.loads("[" + joined[:-1] + "]")
def get_json_data(path):
    """Load and return the JSON document stored at ``path``."""
    with open(path, "r") as handle:
        return json.load(handle)
def save_json_data(data,path):
    """Write ``data`` to ``path`` as pretty-printed, non-ASCII-escaped JSON."""
    with open(path, "w") as handle:
        json.dump(data, handle, indent=4, ensure_ascii=False)
def set_seed(seed=1):
    """Seed the Python, NumPy and PyTorch RNGs for reproducible runs.

    Generalized from the original hard-coded seed: callers may now pass any
    seed; the default of 1 preserves the original behaviour exactly.

    :param seed: integer seed applied to all three generators
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
def print_config(args):
    """Dump all parsed arguments, sorted by name, to the run's config.txt."""
    out_path = os.path.join(args.base_path, args.dataset, "output", args.file_id, "config.txt")
    with open(out_path, "w", encoding="utf-8") as handle:
        for key, value in sorted(vars(args).items()):
            print(key, '=', value, file=handle)
def is_number(s):
    """True if ``s`` parses via float() or is a single Unicode numeric
    character (e.g. '½', circled digits)."""
    import unicodedata
    try:
        float(s)
        return True
    except ValueError:
        # not float-parseable: fall back to the Unicode numeric-value table
        try:
            unicodedata.numeric(s)
            return True
        except (TypeError, ValueError):
            return False
def mat_padding(inputs,dim=0, length=None, padding=0):
    """Pad a batch of square matrices to a common size (NumPy).

    Both the row and column dimensions of every matrix are padded up to
    ``length`` (default: the largest ``shape[dim]`` in the batch) with
    ``padding``, then the batch is stacked into one ndarray.
    """
    if not type(inputs[0]) is np.ndarray:
        inputs = [np.array(m) for m in inputs]
    if length is None:
        length = max(m.shape[dim] for m in inputs)
    outputs = []
    for m in inputs:
        widths = [(0, 0) for _ in np.shape(inputs[0])]
        # pad the first two axes to the same target size
        widths[0] = (0, length - m.shape[dim])
        widths[1] = (0, length - m.shape[dim])
        outputs.append(np.pad(m, widths, 'constant', constant_values=padding))
    return np.array(outputs)
def tuple_mat_padding(inputs,dim=1, length=None, padding=0):
    """Pad axes 1 and 2 of each array to a common ``length`` and stack.

    ``length`` defaults to the largest ``shape[dim]`` in the batch.
    """
    if not type(inputs[0]) is np.ndarray:
        inputs = [np.array(m) for m in inputs]
    if length is None:
        length = max(m.shape[dim] for m in inputs)
    outputs = []
    for m in inputs:
        widths = [(0, 0) for _ in np.shape(inputs[0])]
        # pad axes 1 and 2 (axis 0 is left untouched)
        widths[1] = (0, length - m.shape[dim])
        widths[2] = (0, length - m.shape[dim])
        outputs.append(np.pad(m, widths, 'constant', constant_values=padding))
    return np.array(outputs)
def sequence_padding(inputs,dim=0, length=None, padding=0):
    """Pad dimension ``dim`` of every array to a common ``length`` and stack.

    ``length`` defaults to the largest ``shape[dim]`` across the batch.
    """
    if not type(inputs[0]) is np.ndarray:
        inputs = [np.array(m) for m in inputs]
    if length is None:
        length = max(m.shape[dim] for m in inputs)
    outputs = []
    for m in inputs:
        widths = [(0, 0) for _ in np.shape(inputs[0])]
        widths[dim] = (0, length - m.shape[dim])
        outputs.append(np.pad(m, widths, 'constant', constant_values=padding))
    return np.array(outputs)
def data_augmentation(example,ex2):
    '''Data augmentation: returns a new example built from ``example`` (and
    possibly ``ex2``).  With probability 3/7 one of three augmentations is
    applied (sentence splicing, random word insertion, random word deletion);
    otherwise the example is returned unchanged.  Any failure falls back to a
    deep copy of the original example.'''
    same_example=deepcopy(example)
    try:
        a=random.randint(0,6)
        if a==0:# splice two sentences at a random position
            text1,text2=example["text"],ex2["text"]
            tokens1=text1.split()
            tokens2=text2.split()
            loc=random.randint(0,len(tokens1))
            # NOTE(review): the tail comes from tokens2, so tokens1[loc:] is
            # dropped -- likely intended tokens1[:loc]+tokens2+tokens1[loc:];
            # confirm before relying on this branch.
            tokens=tokens1[:loc]+tokens2+tokens2[loc:]
            spo_list=[]
            text=" ".join(tokens)
            for s,p,o in example['triple_list']+ex2["triple_list"]:
                if s in text and o in text:
                    spo_list.append([s,p,o])
            res={"text":text,"triple_list":spo_list}
        elif a==1:# randomly insert words
            text=example["text"]
            all_tokens=text.split()
            num_token=len(all_tokens)//10+1 # one insertion per 10 words
            for i in range(num_token):
                token=random.choice(all_tokens) # word to insert
                loc = random.randint(0, len(all_tokens)) # insertion position
                all_tokens.insert(loc,token)
            text=" ".join(all_tokens)
            spo_list=[]
            for s,p,o in example["triple_list"]:
                if s in text and o in text:
                    spo_list.append([s,p,o])
                else:
                    return same_example # keep the original triples intact
            res={"text":text,"triple_list":spo_list}
        elif a==2:# randomly delete words
            text=example["text"]
            all_tokens=text.split()
            # NOTE(review): uses len(text) (characters), unlike branch a==1
            # which uses the token count -- confirm whether intentional.
            num_token=len(text)//10+1 # one deletion per 10 units
            for i in range(num_token):
                loc = random.randint(0, len(all_tokens)-1) # deletion position
                all_tokens.pop(loc)
            text=" ".join(all_tokens)
            spo_list=[]
            for s,p,o in example["triple_list"]:
                if s in text and o in text:
                    spo_list.append([s,p,o])
                else:
                    return same_example # keep the original triples intact
            res={"text":text,"triple_list":spo_list}
        else: # no augmentation
            res=example
        if len(res["triple_list"])==0: # guard against losing every annotation
            res=same_example
        return res
    except:
        # NOTE(review): bare except silently swallows all errors (including
        # KeyError/IndexError) and returns the untouched copy.
        return same_example
def judge(ex):
    """Return True when every triple's subject and object are non-empty and
    literally occur in the example text."""
    return all(
        s != '' and o != '' and s in ex["text"] and o in ex["text"]
        for s, p, o in ex["triple_list"]
    )
class DataGenerator(object):
    """Template base class for data generators.

    Subclasses implement __iter__ to yield batches; ``sample`` provides the
    shuffled/sequential per-example iteration with an end-of-epoch flag.
    """
    def __init__(self, data, batch_size=32, buffer_size=None):
        self.data = data
        self.batch_size = batch_size
        if hasattr(self.data, '__len__'):
            # number of batches per epoch, counting a final partial batch
            self.steps = len(self.data) // self.batch_size
            if len(self.data) % self.batch_size != 0:
                self.steps += 1
        else:
            # unknown-length (streaming) data source
            self.steps = None
        self.buffer_size = buffer_size or batch_size * 1000
    def __len__(self):
        return self.steps
    def sample(self, random=False):
        """Iterate over samples; each is yielded as (is_end, sample) where
        is_end marks the final sample of the epoch.
        """
        if random: # shuffled order
            if self.steps is None:
                # streaming source: shuffle via a bounded reservoir buffer
                def generator():
                    caches, isfull = [], False
                    for d in self.data:
                        caches.append(d)
                        if isfull:
                            i = np.random.randint(len(caches))
                            yield caches.pop(i)
                        elif len(caches) == self.buffer_size:
                            isfull = True
                    while caches:
                        i = np.random.randint(len(caches))
                        yield caches.pop(i)
            else:
                # in-memory source: shuffle the index list
                def generator():
                    indices = list(range(len(self.data)))
                    np.random.shuffle(indices)
                    for i in indices:
                        yield self.data[i] # yield the i-th sample
            data = generator()
        else: # sequential order
            data = iter(self.data)
        # look-ahead by one element so the last sample can be flagged
        d_current = next(data)
        for d_next in data:
            yield False, d_current
            d_current = d_next
        yield True, d_current
    def __iter__(self, random=False):
        raise NotImplementedError
    def forfit(self):
        # endless shuffled stream of batches for Keras' fit_generator
        for d in self.__iter__(True):
            yield d
class Vocab(object):
    """Word<->id vocabulary, built from a word counter or loaded from a
    pickle file.

    By construction index 0 is the padding token '**PAD**' and index 1 the
    unknown token '**UNK**'.

    Fix: ``map`` referenced ``constant.VOCAB_UNK_ID`` but no ``constant``
    module is imported anywhere in this file, so mapping any out-of-vocab
    token raised NameError.  The UNK id is 1 by construction, exposed here
    as ``UNK_ID``.
    """
    PAD_ID = 0
    UNK_ID = 1
    def __init__(self, filename, load=False, word_counter=None, threshold=0):
        if load:
            assert os.path.exists(filename), "Vocab file does not exist at " + filename
            # load from file and ignore all other params
            # (assumes the file was produced by save(), i.e. PAD/UNK at 0/1)
            self.id2word, self.word2id = self.load(filename)
            self.size = len(self.id2word)
            print("Vocab size {} loaded from file".format(self.size))
        else:
            print("Creating vocab from scratch...")
            assert word_counter is not None, "word_counter is not provided for vocab creation."
            self.word_counter = word_counter
            if threshold > 1:
                # remove words that occur less than `threshold` times
                self.word_counter = dict([(k, v) for k, v in self.word_counter.items() if v >= threshold])
            # most frequent words get the smallest ids
            self.id2word = sorted(self.word_counter, key=lambda k: self.word_counter[k], reverse=True)
            # special tokens occupy the first two ids
            self.id2word = ['**PAD**', '**UNK**'] + self.id2word
            self.word2id = dict([(self.id2word[idx], idx) for idx in range(len(self.id2word))])
            self.size = len(self.id2word)
            self.save(filename)
            print("Vocab size {} saved to file {}".format(self.size, filename))
    def load(self, filename):
        """Unpickle the id2word list and rebuild the inverse mapping."""
        with open(filename, 'rb') as infile:
            id2word = pickle.load(infile)
        word2id = dict([(id2word[idx], idx) for idx in range(len(id2word))])
        return id2word, word2id
    def save(self, filename):
        """Pickle id2word to ``filename``, overwriting any existing file."""
        if os.path.exists(filename):
            print("Overwriting old vocab file at " + filename)
            os.remove(filename)
        with open(filename, 'wb') as outfile:
            pickle.dump(self.id2word, outfile)
        return
    def map(self, token_list):
        """
        Map a list of tokens to their ids; unknown tokens map to UNK_ID.
        """
        # fixed: original used constant.VOCAB_UNK_ID with no `constant` import
        return [self.word2id[w] if w in self.word2id else self.UNK_ID for w in token_list]
    def unmap(self, idx_list):
        """
        Unmap ids back to tokens.
        """
        return [self.id2word[idx] for idx in idx_list]
    def get_embeddings(self, word_vectors=None, dim=100):
        """Build a (size, dim) embedding matrix, zero-initialized, filling in
        rows from ``word_vectors`` (a word -> vector mapping) when given."""
        self.embeddings = np.zeros((self.size, dim))
        if word_vectors is not None:
            assert len(list(word_vectors.values())[0]) == dim, \
                "Word vectors does not have required dimension {}.".format(dim)
            for w, idx in self.word2id.items():
                if w in word_vectors:
                    self.embeddings[idx] = np.asarray(word_vectors[w])
        return self.embeddings
| 12,507 | 32.354667 | 114 | py |
BiRTE | BiRTE-main/bert4keras/optimizers.py | # -*- coding: utf-8 -*-
# 优化相关
import numpy as np
import tensorflow as tf
from bert4keras.backend import keras, K, is_tf_keras
from bert4keras.snippets import is_string, string_matching
from bert4keras.snippets import is_one_of, insert_arguments
from bert4keras.backend import piecewise_linear
import re
class Adam(keras.optimizers.Optimizer):
    """Re-implementation of Adam, intended as a base class for derived
    optimizers (tensorflow optimizer_v2 style).

    Fix: the epsilon fallback called the misspelt ``K.epislon()``, which
    would raise AttributeError whenever ``epsilon`` was passed as a falsy
    value (e.g. 0 or None).  Also drops an unused ``epsilon_t`` local.
    """
    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-6,
        bias_correction=True,
        **kwargs
    ):
        kwargs['name'] = kwargs.get('name') or 'Adam'
        super(Adam, self).__init__(**kwargs)
        self._set_hyper('learning_rate', learning_rate)
        self._set_hyper('beta_1', beta_1)
        self._set_hyper('beta_2', beta_2)
        self.epsilon = epsilon or K.epsilon()  # fixed typo: was K.epislon()
        self.bias_correction = bias_correction
    def _create_slots(self, var_list):
        # one first-moment (m) and one second-moment (v) slot per variable
        for var in var_list:
            self.add_slot(var, 'm')
            self.add_slot(var, 'v')
    def _resource_apply(self, grad, var, indices=None):
        """Shared dense/sparse Adam step; sparse when ``indices`` is given."""
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        m = self.get_slot(var, 'm')
        v = self.get_slot(var, 'v')
        beta_1_t = self._get_hyper('beta_1', var_dtype)
        beta_2_t = self._get_hyper('beta_2', var_dtype)
        local_step = K.cast(self.iterations + 1, var_dtype)
        beta_1_t_power = K.pow(beta_1_t, local_step)
        beta_2_t_power = K.pow(beta_2_t, local_step)
        # moment updates
        if indices is None:
            m_t = K.update(m, beta_1_t * m + (1 - beta_1_t) * grad)
            v_t = K.update(v, beta_2_t * v + (1 - beta_2_t) * grad**2)
        else:
            # sparse: decay first, then scatter-add the gradient slices
            mv_ops = [K.update(m, beta_1_t * m), K.update(v, beta_2_t * v)]
            with tf.control_dependencies(mv_ops):
                m_t = self._resource_scatter_add(
                    m, indices, (1 - beta_1_t) * grad
                )
                v_t = self._resource_scatter_add(
                    v, indices, (1 - beta_2_t) * grad**2
                )
        # apply the (optionally bias-corrected) step
        with tf.control_dependencies([m_t, v_t]):
            if self.bias_correction:
                m_t = m_t / (1.0 - beta_1_t_power)
                v_t = v_t / (1.0 - beta_2_t_power)
            var_t = var - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
            return K.update(var, var_t)
    def _resource_apply_dense(self, grad, var):
        return self._resource_apply(grad, var)
    def _resource_apply_sparse(self, grad, var, indices):
        return self._resource_apply(grad, var, indices)
    def get_config(self):
        config = {
            'learning_rate': self._serialize_hyperparameter('learning_rate'),
            'beta_1': self._serialize_hyperparameter('beta_1'),
            'beta_2': self._serialize_hyperparameter('beta_2'),
            'epsilon': self.epsilon,
        }
        base_config = super(Adam, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class AdaFactorBase(keras.optimizers.Optimizer):
    """AdaFactor optimizer (base class).

    Paper: https://arxiv.org/abs/1804.04235
    Reference: https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/optimize.py
    """
    def __init__(
        self,
        learning_rate=1e-3,  # may be None: the paper's decay schedule is then used
        beta1=0.0,
        beta2=None,
        epsilon1=1e-30,
        epsilon2=1e-3,
        multiply_by_parameter_scale=True,
        clipping_threshold=1.0,
        min_dim_size_to_factor=128,
        **kwargs
    ):
        super(AdaFactorBase, self).__init__(**kwargs)
        self._learning_rate = learning_rate
        self.beta1 = beta1
        self._beta2 = beta2
        self.epsilon1 = epsilon1
        self.epsilon2 = epsilon2
        self.multiply_by_parameter_scale = multiply_by_parameter_scale
        self.clipping_threshold = clipping_threshold
        self.min_dim_size_to_factor = min_dim_size_to_factor
    @property
    def learning_rate(self):
        # With no explicit lr, use min(1/sqrt(step), 0.01); scale it down
        # further when parameter-scale multiplication is disabled.
        if self._learning_rate is None:
            iterations = K.cast(self.iterations + 1, K.floatx())
            learning_rate = K.minimum(1.0 / K.sqrt(iterations), 0.01)
            if self.multiply_by_parameter_scale:
                return learning_rate
            else:
                return learning_rate * 0.05
        else:
            # lazily wrap the fixed value in a Keras variable
            if not hasattr(self, '__learning_rate'):
                with K.name_scope(self.__class__.__name__):
                    self.__learning_rate = K.variable(
                        self._learning_rate, name='learning_rate'
                    )
            return self.__learning_rate
    @property
    def beta2(self):
        # default beta2 schedule from the paper: 1 - step^(-0.8)
        if self._beta2 is None:
            iterations = K.cast(self.iterations + 1, K.floatx())
            return 1.0 - K.pow(iterations, -0.8)
        else:
            return self._beta2
    def factored_shape(self, shape):
        # Decide whether this weight's second moment can be stored in
        # factored (row/column) form.  Returns None when not factorable,
        # else (shape1, axis1, shape2, axis2) collapsing the two largest axes.
        if len(shape) < 2:
            return None
        shape = np.array(shape)
        indices = shape.argpartition(-2)
        # NOTE(review): this compares the *axis index* against
        # min_dim_size_to_factor, not the dimension size -- upstream
        # mesh-tensorflow compares sizes; confirm before relying on it.
        if indices[-2] < self.min_dim_size_to_factor:
            return None
        shape1, shape2 = np.array(shape), np.array(shape)
        shape1[indices[-1]] = 1
        shape2[indices[-2]] = 1
        return shape1, indices[-1], shape2, indices[-2]
    def get_config(self):
        config = {
            'learning_rate': self._learning_rate,
            'beta1': self.beta1,
            'beta2': self._beta2,
            'epsilon1': self.epsilon1,
            'epsilon2': self.epsilon2,
            'multiply_by_parameter_scale': self.multiply_by_parameter_scale,
            'clipping_threshold': self.clipping_threshold,
            'min_dim_size_to_factor': self.min_dim_size_to_factor,
        }
        base_config = super(AdaFactorBase, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class AdaFactorV1(AdaFactorBase):
    """AdaFactor optimizer (pure-Keras version).

    Paper: https://arxiv.org/abs/1804.04235
    Reference: https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/optimize.py
    """
    def __init__(self, *args, **kwargs):
        super(AdaFactorV1, self).__init__(*args, **kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
    @K.symbolic
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]
        self.weights = [self.iterations]
        lr = self.learning_rate
        for i, (p, g) in enumerate(zip(params, grads)):
            g2 = K.square(g) + self.epsilon1
            shape, dtype = K.int_shape(p), K.dtype(p)
            factored_shape = self.factored_shape(shape)
            if factored_shape is None:
                # unfactored second moment: one full-shape accumulator
                v = K.zeros(shape, dtype=dtype, name='v_' + str(i))
                self.weights.append(v)
                # moving average of the squared gradient
                v_t = self.beta2 * v + (1.0 - self.beta2) * g2
                self.updates.append(K.update(v, v_t))
            else:
                # factored second moment: row and column accumulators
                shape1, axis1, shape2, axis2 = factored_shape
                vr = K.zeros(shape1, dtype=dtype, name='vr_' + str(i))
                vc = K.zeros(shape2, dtype=dtype, name='vc_' + str(i))
                self.weights.extend([vr, vc])
                # NOTE(review): no (1 - beta2) factor on the new term here,
                # unlike the unfactored branch -- confirm against upstream.
                vr_t = self.beta2 * vr + K.mean(g2, axis=axis1, keepdims=True)
                vc_t = self.beta2 * vc + K.mean(g2, axis=axis2, keepdims=True)
                self.updates.extend([K.update(vr, vr_t), K.update(vc, vc_t)])
                # reconstruct the full second-moment estimate from the factors
                v_t = vr_t * vc_t / K.mean(vr_t, axis=axis2, keepdims=True)
            # raw update direction
            u = g / K.sqrt(v_t)
            # update clipping
            if self.clipping_threshold is not None:
                # NOTE(review): this is a sum of squares, not the RMS
                # (sqrt of mean) used in the paper -- confirm intended.
                u_rms = K.mean(K.sum(K.square(u)))
                d = self.clipping_threshold
                u = u / K.maximum(1.0, u_rms / d)
            # momentum on the update
            if self.beta1 > 0.0:
                m = K.zeros(shape, dtype=dtype, name='m_' + str(i))
                self.weights.append(m)
                m_t = self.beta1 * m + (1.0 - self.beta1) * u
                self.updates.append(K.update(m, m_t))
                u = m_t
            # scale the update by the parameter's own magnitude
            if self.multiply_by_parameter_scale:
                u = u * K.maximum(K.mean(K.sum(K.square(p))), self.epsilon2)
            # apply the step
            self.updates.append(K.update(p, p - lr * u))
        return self.updates
class AdaFactorV2(AdaFactorBase):
    """AdaFactor optimizer (tf.keras optimizer_v2 version).

    Paper: https://arxiv.org/abs/1804.04235
    Reference: https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/optimize.py
    """
    def __init__(self, *args, **kwargs):
        kwargs['name'] = kwargs.get('name') or 'AdaFactor'
        super(AdaFactorV2, self).__init__(*args, **kwargs)
    def _create_slots(self, var_list):
        # momentum slot only when beta1 > 0; second moment is either a full
        # slot 'v' or a factored pair 'vr'/'vc' depending on the shape
        for var in var_list:
            if self.beta1 > 0.0:
                self.add_slot(var, 'm')
            shape = K.int_shape(var)
            factored_shape = self.factored_shape(shape)
            if factored_shape is None:
                self.add_slot(var, 'v')
            else:
                shape1, axis1, shape2, axis2 = factored_shape
                value1, value2 = np.zeros(shape1), np.zeros(shape2)
                self.add_slot(var, 'vr', value1)
                self.add_slot(var, 'vc', value2)
    def _resource_apply(self, grad, var, indices=None):
        lr = self.learning_rate
        g2 = K.square(grad) + self.epsilon1
        shape = K.int_shape(var)
        factored_shape = self.factored_shape(shape)
        if factored_shape is None:
            v = self.get_slot(var, 'v')
            # moving average of the squared gradient
            v_t = self.beta2 * v + (1.0 - self.beta2) * g2
            v_t = K.update(v, v_t)
        else:
            shape1, axis1, shape2, axis2 = factored_shape
            vr = self.get_slot(var, 'vr')
            vc = self.get_slot(var, 'vc')
            # row/column accumulators (see V1 note about the missing
            # (1 - beta2) factor)
            vr_t = self.beta2 * vr + K.mean(g2, axis=axis1, keepdims=True)
            vc_t = self.beta2 * vc + K.mean(g2, axis=axis2, keepdims=True)
            vr_t, vc_t = K.update(vr, vr_t), K.update(vc, vc_t)
            # reconstruct the full second-moment estimate from the factors
            v_t = vr_t * vc_t / K.mean(vr_t, axis=axis2, keepdims=True)
        # raw update direction
        u = grad / K.sqrt(v_t)
        # update clipping (see V1 note: sum of squares, not RMS)
        if self.clipping_threshold is not None:
            u_rms = K.mean(K.sum(K.square(u)))
            d = self.clipping_threshold
            u = u / K.maximum(1.0, u_rms / d)
        # momentum on the update
        if self.beta1 > 0.0:
            m = self.get_slot(var, 'm')
            m_t = self.beta1 * m + (1.0 - self.beta1) * u
            u = K.update(m, m_t)
        # scale the update by the parameter's own magnitude
        if self.multiply_by_parameter_scale:
            u = u * K.maximum(K.mean(K.sum(K.square(var))), self.epsilon2)
        # apply the step
        return K.update(var, var - lr * u)
    def _resource_apply_dense(self, grad, var):
        return self._resource_apply(grad, var)
    def _resource_apply_sparse(self, grad, var, indices):
        # densify the sparse gradient and reuse the dense path
        grad = tf.IndexedSlices(grad, indices, K.shape(var))
        grad = tf.convert_to_tensor(grad)
        return self._resource_apply_dense(grad, var)
def export_to_custom_objects(base_extend_with):
    """Decorator: register the optimizer class produced by an
    ``extend_with_*`` factory in Keras' custom objects, optionally under a
    caller-supplied name."""
    def new_extend_with(BaseOptimizer, name=None):
        optimizer_cls = base_extend_with(BaseOptimizer)
        if is_string(name):
            optimizer_cls.__name__ = name
        registry = keras.utils.get_custom_objects()
        registry[optimizer_cls.__name__] = optimizer_cls
        return optimizer_cls
    return new_extend_with
@export_to_custom_objects
def extend_with_weight_decay(BaseOptimizer):
    """Return a new optimizer class with weight decay added.
    """
    class NewOptimizer(BaseOptimizer):
        """Optimizer with (decoupled) weight decay.
        """
        @insert_arguments(weight_decay_rate=0.01, exclude_from_weight_decay=[])
        def __init__(self, *args, **kwargs):
            super(NewOptimizer, self).__init__(*args, **kwargs)
            if not hasattr(self, 'learning_rate'):
                self.learning_rate = self.lr
        @K.symbolic
        def get_updates(self, loss, params):
            # Temporarily monkey-patch K.update so every parameter update
            # emitted by the base optimizer also subtracts the decay term.
            old_update = K.update
            def new_update(x, new_x):
                if is_one_of(x, params) and self._do_weight_decay(x):
                    new_x = new_x - self.learning_rate * self.weight_decay_rate * x
                return old_update(x, new_x)
            K.update = new_update
            updates = super(NewOptimizer, self).get_updates(loss, params)
            K.update = old_update
            return updates
        def _do_weight_decay(self, w):
            # decay every weight whose name is not explicitly excluded
            return (not string_matching(w.name, self.exclude_from_weight_decay))
        def get_config(self):
            config = {
                'weight_decay_rate': self.weight_decay_rate,
                'exclude_from_weight_decay': self.exclude_from_weight_decay,
            }
            base_config = super(NewOptimizer, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))
    return NewOptimizer
@export_to_custom_objects
def extend_with_weight_decay_v2(BaseOptimizer):
    """Return a new optimizer class with weight decay added (optimizer_v2).
    """
    class NewOptimizer(BaseOptimizer):
        """Optimizer with (decoupled) weight decay.
        """
        @insert_arguments(weight_decay_rate=0.01, exclude_from_weight_decay=[])
        def __init__(self, *args, **kwargs):
            super(NewOptimizer, self).__init__(*args, **kwargs)
        def _resource_apply(self, grad, var, indices=None):
            # Temporarily monkey-patch K.update so the base optimizer's apply
            # op also subtracts the decay term for this variable.
            old_update = K.update
            def new_update(x, new_x):
                if x is var and self._do_weight_decay(x):
                    lr_t = self._decayed_lr(x.dtype.base_dtype)
                    new_x = new_x - lr_t * self.weight_decay_rate * x
                return old_update(x, new_x)
            K.update = new_update
            op = super(NewOptimizer, self)._resource_apply(grad, var, indices)
            K.update = old_update
            return op
        def _do_weight_decay(self, w):
            # decay every weight whose name is not explicitly excluded
            return (not string_matching(w.name, self.exclude_from_weight_decay))
        def get_config(self):
            config = {
                'weight_decay_rate': self.weight_decay_rate,
                'exclude_from_weight_decay': self.exclude_from_weight_decay,
            }
            base_config = super(NewOptimizer, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))
    return NewOptimizer
@export_to_custom_objects
def extend_with_layer_adaptation(BaseOptimizer):
    """Return a new optimizer class with layer-wise adaptive learning rates.
    """
    class NewOptimizer(BaseOptimizer):
        """Optimizer with layer-wise adaptive learning rates (LAMB-style):
        each layer's update is rescaled by the ratio of its parameter norm
        to its update norm.
        https://arxiv.org/abs/1904.00962
        """
        @insert_arguments(exclude_from_layer_adaptation=[])
        def __init__(self, *args, **kwargs):
            super(NewOptimizer, self).__init__(*args, **kwargs)
            if not hasattr(self, 'learning_rate'):
                self.learning_rate = self.lr
        @K.symbolic
        def get_updates(self, loss, params):
            # Monkey-patch K.update to rescale each parameter's raw update dx
            # by ||x|| / ||dx / lr|| before it is applied.
            old_update = K.update
            def new_update(x, new_x):
                if is_one_of(x, params) and self._do_layer_adaptation(x):
                    dx = new_x - x
                    lr_t = K.clip(self.learning_rate, K.epsilon(), 1e10)
                    x_norm = tf.norm(x)
                    g_norm = tf.norm(dx / lr_t)
                    # guard both norms against zero
                    ratio = K.switch(
                        x_norm > 0.0,
                        K.switch(g_norm > K.epsilon(), x_norm / g_norm, 1.0),
                        1.0
                    )
                    new_x = x + dx * ratio
                return old_update(x, new_x)
            K.update = new_update
            updates = super(NewOptimizer, self).get_updates(loss, params)
            K.update = old_update
            return updates
        def _do_layer_adaptation(self, w):
            # adapt every weight whose name is not explicitly excluded
            return (
                not string_matching(w.name, self.exclude_from_layer_adaptation)
            )
        def get_config(self):
            config = {
                'exclude_from_layer_adaptation':
                    self.exclude_from_layer_adaptation,
            }
            base_config = super(NewOptimizer, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))
    return NewOptimizer
@export_to_custom_objects
def extend_with_layer_adaptation_v2(BaseOptimizer):
    """Return a new optimizer class with layer-wise adaptive learning rates
    (optimizer_v2).
    """
    class NewOptimizer(BaseOptimizer):
        """Optimizer with layer-wise adaptive learning rates (LAMB-style):
        each layer's update is rescaled by the ratio of its parameter norm
        to its update norm.
        https://arxiv.org/abs/1904.00962
        """
        @insert_arguments(exclude_from_layer_adaptation=[])
        def __init__(self, *args, **kwargs):
            super(NewOptimizer, self).__init__(*args, **kwargs)
        def _resource_apply(self, grad, var, indices=None):
            # Monkey-patch K.update to rescale this variable's raw update dx
            # by ||x|| / ||dx / lr|| before it is applied.
            old_update = K.update
            def new_update(x, new_x):
                if x is var and self._do_layer_adaptation(x):
                    dx = new_x - x
                    lr_t = self._decayed_lr(x.dtype.base_dtype)
                    lr_t = K.clip(lr_t, K.epsilon(), 1e10)
                    x_norm = tf.norm(x)
                    g_norm = tf.norm(dx / lr_t)
                    # guard both norms against zero
                    ratio = K.switch(
                        x_norm > 0.0,
                        K.switch(g_norm > K.epsilon(), x_norm / g_norm, 1.0),
                        1.0
                    )
                    new_x = x + dx * ratio
                return old_update(x, new_x)
            K.update = new_update
            op = super(NewOptimizer, self)._resource_apply(grad, var, indices)
            K.update = old_update
            return op
        def _do_layer_adaptation(self, w):
            # adapt every weight whose name is not explicitly excluded
            return (
                not string_matching(w.name, self.exclude_from_layer_adaptation)
            )
        def get_config(self):
            config = {
                'exclude_from_layer_adaptation':
                    self.exclude_from_layer_adaptation,
            }
            base_config = super(NewOptimizer, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))
    return NewOptimizer
@export_to_custom_objects
def extend_with_piecewise_linear_lr(BaseOptimizer):
    """Return a new optimizer class with a piecewise-linear lr schedule.
    """
    class NewOptimizer(BaseOptimizer):
        """Optimizer with a piecewise-linear learning-rate multiplier.
        ``schedule`` is a dict such as {1000: 1, 2000: 0.1}: the multiplier
        rises linearly from 0 to 100% over steps 0-1000, then falls linearly
        to 10% by step 2000 and stays at 10% afterwards.
        """
        @insert_arguments(lr_schedule={0: 1})
        def __init__(self, *args, **kwargs):
            super(NewOptimizer, self).__init__(*args, **kwargs)
            self.lr_schedule = {int(i): j for i, j in self.lr_schedule.items()}
        @K.symbolic
        def get_updates(self, loss, params):
            # Monkey-patch K.update so every parameter's raw update is scaled
            # by the schedule's current multiplier.
            lr_multiplier = piecewise_linear(self.iterations, self.lr_schedule)
            old_update = K.update
            def new_update(x, new_x):
                if is_one_of(x, params):
                    new_x = x + (new_x - x) * lr_multiplier
                return old_update(x, new_x)
            K.update = new_update
            updates = super(NewOptimizer, self).get_updates(loss, params)
            K.update = old_update
            return updates
        def get_config(self):
            config = {
                'lr_schedule': self.lr_schedule,
            }
            base_config = super(NewOptimizer, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))
    return NewOptimizer
@export_to_custom_objects
def extend_with_piecewise_linear_lr_v2(BaseOptimizer):
    """Return a new optimizer class with a piecewise-linear lr schedule
    (optimizer_v2).
    """
    class NewOptimizer(BaseOptimizer):
        """Optimizer with a piecewise-linear learning-rate multiplier.
        ``schedule`` is a dict such as {1000: 1, 2000: 0.1}: the multiplier
        rises linearly from 0 to 100% over steps 0-1000, then falls linearly
        to 10% by step 2000 and stays at 10% afterwards.
        """
        @insert_arguments(lr_schedule={0: 1})
        def __init__(self, *args, **kwargs):
            super(NewOptimizer, self).__init__(*args, **kwargs)
            self.lr_schedule = {int(i): j for i, j in self.lr_schedule.items()}
        def _decayed_lr(self, var_dtype):
            # fold the schedule multiplier into the base decayed lr
            lr_multiplier = piecewise_linear(self.iterations, self.lr_schedule)
            lr_t = super(NewOptimizer, self)._decayed_lr(var_dtype)
            return lr_t * K.cast(lr_multiplier, var_dtype)
        def get_config(self):
            config = {
                'lr_schedule': self.lr_schedule,
            }
            base_config = super(NewOptimizer, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))
    return NewOptimizer
@export_to_custom_objects
def extend_with_gradient_accumulation(BaseOptimizer):
    """Return a new optimizer class with gradient accumulation added.
    """
    class NewOptimizer(BaseOptimizer):
        """Optimizer with gradient accumulation: parameters are only really
        updated once every ``grad_accum_steps`` steps, using the accumulated
        (averaged) gradient.
        """
        @insert_arguments(grad_accum_steps=2)
        def __init__(self, *args, **kwargs):
            super(NewOptimizer, self).__init__(*args, **kwargs)
            self._first_get_gradients = True
        def get_gradients(self, loss, params):
            # The first call (from our own get_updates) computes real
            # gradients; later calls return the averaged accumulators.
            if self._first_get_gradients:
                self._first_get_gradients = False
                return super(NewOptimizer, self).get_gradients(loss, params)
            else:
                return [ag / self.grad_accum_steps for ag in self.accum_grads]
        @K.symbolic
        def get_updates(self, loss, params):
            # apply condition: true once every grad_accum_steps iterations
            cond = K.equal(self.iterations % self.grad_accum_steps, 0)
            cond = K.cast(cond, K.floatx())
            # real gradients of this step
            grads = self.get_gradients(loss, params)
            self.accum_grads = [
                K.zeros(
                    K.int_shape(p), dtype=K.dtype(p), name='accum_grad_%s' % i
                ) for i, p in enumerate(params)
            ]
            old_update = K.update
            def new_update(x, new_x):
                # freeze every variable except on "apply" steps
                new_x = cond * new_x + (1 - cond) * x
                return old_update(x, new_x)
            K.update = new_update
            updates = super(NewOptimizer, self).get_updates(loss, params)
            K.update = old_update
            # accumulate gradients (the accumulator resets on apply steps)
            with tf.control_dependencies(updates):
                accum_updates = [
                    K.update(ag, g + (1 - cond) * ag)
                    for g, ag in zip(grads, self.accum_grads)
                ]
            return accum_updates
        def get_config(self):
            config = {
                'grad_accum_steps': self.grad_accum_steps,
            }
            base_config = super(NewOptimizer, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))
    return NewOptimizer
@export_to_custom_objects
def extend_with_gradient_accumulation_v2(BaseOptimizer):
    """Return a new optimizer class with gradient accumulation added
    (optimizer_v2).
    """
    class NewOptimizer(BaseOptimizer):
        """Optimizer with gradient accumulation: parameters are only really
        updated once every ``grad_accum_steps`` steps, using the accumulated
        (averaged) gradient stored in the 'ag' slot.
        """
        @insert_arguments(grad_accum_steps=2)
        def __init__(self, *args, **kwargs):
            super(NewOptimizer, self).__init__(*args, **kwargs)
        def _create_slots(self, var_list):
            super(NewOptimizer, self)._create_slots(var_list)
            for var in var_list:
                self.add_slot(var, 'ag')
        def _resource_apply(self, grad, var, indices=None):
            # apply condition: true once every grad_accum_steps iterations
            cond = K.equal(self.iterations % self.grad_accum_steps, 0)
            # accumulated gradient slot
            ag = self.get_slot(var, 'ag')
            old_update = K.update
            def new_update(x, new_x):
                # freeze the variable except on "apply" steps
                new_x = K.switch(cond, new_x, x)
                return old_update(x, new_x)
            K.update = new_update
            ag_t = ag / self.grad_accum_steps
            op = super(NewOptimizer, self)._resource_apply(ag_t, var)
            K.update = old_update
            # after applying, reset the accumulator (on apply steps) and add
            # this step's gradient (dense or sparse)
            with tf.control_dependencies([op]):
                ag_t = K.switch(cond, K.zeros_like(ag), ag)
                with tf.control_dependencies([K.update(ag, ag_t)]):
                    if indices is None:
                        ag_t = K.update(ag, ag + grad)
                    else:
                        ag_t = self._resource_scatter_add(ag, indices, grad)
            return ag_t
        def get_config(self):
            config = {
                'grad_accum_steps': self.grad_accum_steps,
            }
            base_config = super(NewOptimizer, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))
    return NewOptimizer
@export_to_custom_objects
def extend_with_lookahead(BaseOptimizer):
    """Return a new optimizer class with Lookahead added.
    """
    class NewOptimizer(BaseOptimizer):
        """Optimizer with Lookahead.
        https://arxiv.org/abs/1907.08610
        steps_per_slow_update: k in the paper;
        slow_step_size: alpha in the paper.
        """
        @insert_arguments(steps_per_slow_update=5, slow_step_size=0.5)
        def __init__(self, *args, **kwargs):
            super(NewOptimizer, self).__init__(*args, **kwargs)
        @K.symbolic
        def get_updates(self, loss, params):
            updates = super(NewOptimizer, self).get_updates(loss, params)
            k, alpha = self.steps_per_slow_update, self.slow_step_size
            cond = K.equal(self.iterations % k, 0)
            # slow weights, interpolated towards the fast weights every k steps
            slow_vars = [
                K.zeros(
                    K.int_shape(p), dtype=K.dtype(p), name='slow_var_%s' % i
                ) for i, p in enumerate(params)
            ]
            with tf.control_dependencies(updates):
                slow_updates = [
                    K.update(q, K.switch(cond, q + alpha * (p - q), q))
                    for p, q in zip(params, slow_vars)
                ]
                # after interpolating, copy the slow weights back into the
                # fast weights (only on sync steps)
                with tf.control_dependencies(slow_updates):
                    copy_updates = [
                        K.update(p, K.switch(cond, q, p))
                        for p, q in zip(params, slow_vars)
                    ]
            return copy_updates
        def get_config(self):
            config = {
                'steps_per_slow_update': self.steps_per_slow_update,
                'slow_step_size': self.slow_step_size,
            }
            base_config = super(NewOptimizer, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))
    return NewOptimizer
@export_to_custom_objects
def extend_with_lookahead_v2(BaseOptimizer):
    """Return a new optimizer class with Lookahead added (optimizer_v2).
    """
    class NewOptimizer(BaseOptimizer):
        """Optimizer with Lookahead.
        https://arxiv.org/abs/1907.08610
        steps_per_slow_update: k in the paper;
        slow_step_size: alpha in the paper.
        """
        @insert_arguments(steps_per_slow_update=5, slow_step_size=0.5)
        def __init__(self, *args, **kwargs):
            super(NewOptimizer, self).__init__(*args, **kwargs)
        def _create_slots(self, var_list):
            super(NewOptimizer, self)._create_slots(var_list)
            for var in var_list:
                self.add_slot(var, 'slow_var')
        def _resource_apply(self, grad, var, indices=None):
            op = super(NewOptimizer, self)._resource_apply(grad, var, indices)
            k, alpha = self.steps_per_slow_update, self.slow_step_size
            cond = K.equal(self.iterations % k, 0)
            slow_var = self.get_slot(var, 'slow_var')
            # interpolate the slow weight towards the fast weight every k
            # steps, then copy it back into the fast weight
            slow_var_t = slow_var + alpha * (var - slow_var)
            with tf.control_dependencies([op]):
                slow_update = K.update(
                    slow_var, K.switch(cond, slow_var_t, slow_var)
                )
                with tf.control_dependencies([slow_update]):
                    copy_update = K.update(var, K.switch(cond, slow_var, var))
            return copy_update
        def get_config(self):
            config = {
                'steps_per_slow_update': self.steps_per_slow_update,
                'slow_step_size': self.slow_step_size,
            }
            base_config = super(NewOptimizer, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))
    return NewOptimizer
@export_to_custom_objects
def extend_with_lazy_optimization(BaseOptimizer):
    """Return a new optimizer class with lazy updates added.
    """
    class NewOptimizer(BaseOptimizer):
        """Optimizer with lazy updates: selected weights (typically
        embeddings) are only updated where their gradient is non-zero.
        """
        @insert_arguments(include_in_lazy_optimization=[])
        def __init__(self, *args, **kwargs):
            super(NewOptimizer, self).__init__(*args, **kwargs)
            self._first_get_gradients = True
        def get_gradients(self, loss, params):
            # Gradients are computed once; subsequent calls are served from
            # the cache that get_updates stores in self.grads.
            if self._first_get_gradients:
                self._first_get_gradients = False
                return super(NewOptimizer, self).get_gradients(loss, params)
            else:
                return [self.grads[p] for p in params]
        @K.symbolic
        def get_updates(self, loss, params):
            self.grads = dict(zip(params, self.get_gradients(loss, params)))
            # Temporarily monkey-patch K.update so any write the base class
            # makes to a lazily-optimized weight is masked to the rows whose
            # gradient is non-zero.  The patch is restored before returning.
            old_update = K.update
            def new_update(x, new_x):
                if is_one_of(x, params) and self._do_lazy_optimization(x):
                    g = self.grads[x]
                    r = K.any(K.not_equal(g, 0.0), axis=-1, keepdims=True)
                    new_x = x + (new_x - x) * K.cast(r, K.floatx())
                return old_update(x, new_x)
            K.update = new_update
            updates = super(NewOptimizer, self).get_updates(loss, params)
            K.update = old_update
            return updates
        def _do_lazy_optimization(self, w):
            # A weight is lazily optimized iff its name matches one of the
            # configured name patterns.
            return string_matching(w.name, self.include_in_lazy_optimization)
        def get_config(self):
            config = {
                'include_in_lazy_optimization':
                    self.include_in_lazy_optimization,
            }
            base_config = super(NewOptimizer, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))
    return NewOptimizer
@export_to_custom_objects
def extend_with_lazy_optimization_v2(BaseOptimizer):
    """Return a new optimizer class with lazy updates added (tf.keras variant).
    """
    class NewOptimizer(BaseOptimizer):
        """Optimizer with lazy updates: selected weights (typically
        embeddings) are only updated where their gradient is non-zero.
        """
        @insert_arguments(include_in_lazy_optimization=[])
        def __init__(self, *args, **kwargs):
            super(NewOptimizer, self).__init__(*args, **kwargs)
        def _resource_apply(self, grad, var, indices=None):
            # Temporarily monkey-patch K.update so the base class's write to
            # `var` is masked to rows with non-zero gradient (dense case) or
            # routed through a sparse scatter (indexed case).
            old_update = K.update
            def new_update(x, new_x):
                if x is var and self._do_lazy_optimization(x):
                    if indices is None:
                        r = K.any(
                            K.not_equal(grad, 0.0), axis=-1, keepdims=True
                        )
                        new_x = x + (new_x - x) * K.cast(r, K.floatx())
                        return old_update(x, new_x)
                    else:
                        # Sparse gradient: only touch the gathered rows.
                        return self._resource_scatter_add(
                            x, indices, K.gather(new_x - x, indices)
                        )
                return old_update(x, new_x)
            K.update = new_update
            op = super(NewOptimizer, self)._resource_apply(grad, var, indices)
            K.update = old_update
            return op
        def _do_lazy_optimization(self, w):
            # A weight is lazily optimized iff its name matches one of the
            # configured name patterns.
            return string_matching(w.name, self.include_in_lazy_optimization)
        def get_config(self):
            config = {
                'include_in_lazy_optimization':
                    self.include_in_lazy_optimization,
            }
            base_config = super(NewOptimizer, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))
    return NewOptimizer
@export_to_custom_objects
def extend_with_exponential_moving_average(BaseOptimizer):
    """Return a new optimizer class with EMA (weight moving average) added.
    """
    class NewOptimizer(BaseOptimizer):
        """Optimizer that maintains an exponential moving average of the
        model weights alongside the normal updates.
        """
        @insert_arguments(ema_momentum=0.999)
        def __init__(self, *args, **kwargs):
            super(NewOptimizer, self).__init__(*args, **kwargs)
        def get_updates(self, loss, params):
            updates = super(NewOptimizer, self).get_updates(loss, params)
            self.model_weights = params
            # Shadow variables, eagerly initialized to the current weights.
            self.ema_weights = [K.zeros(K.shape(w)) for w in params]
            self.old_weights = K.batch_get_value(params)
            K.batch_set_value(zip(self.ema_weights, self.old_weights))
            ema_updates, ema_momentum = [], self.ema_momentum
            # The EMA step runs only after the base updates have applied.
            with tf.control_dependencies(updates):
                for w1, w2 in zip(self.ema_weights, params):
                    new_w = ema_momentum * w1 + (1 - ema_momentum) * w2
                    ema_updates.append(K.update(w1, new_w))
            return ema_updates
        def get_config(self):
            config = {
                'ema_momentum': self.ema_momentum,
            }
            base_config = super(NewOptimizer, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))
        def apply_ema_weights(self):
            """Back up the current model weights, then load the averaged
            weights into the model.
            """
            self.old_weights = K.batch_get_value(self.model_weights)
            ema_weights = K.batch_get_value(self.ema_weights)
            K.batch_set_value(zip(self.model_weights, ema_weights))
        def reset_old_weights(self):
            """Restore the model to the backed-up (non-EMA) weights.
            """
            K.batch_set_value(zip(self.model_weights, self.old_weights))
    return NewOptimizer
@export_to_custom_objects
def extend_with_gradient_centralization(BaseOptimizer):
    """Return a new optimizer class with gradient centralization added.
    """
    class NewOptimizer(BaseOptimizer):
        """Optimizer that zero-centers each multi-dimensional gradient
        before it is applied.
        """
        def get_gradients(self, loss, params):
            grads = []
            for g in super(NewOptimizer, self).get_gradients(loss, params):
                if isinstance(g, tf.IndexedSlices):
                    g = tf.convert_to_tensor(g)
                if K.ndim(g) > 1:
                    # Subtract the mean over every axis except axis 0.
                    # NOTE(review): `axis` is a Python range object here —
                    # confirm the backend accepts it (a list may be required).
                    g = g - K.mean(g, axis=range(1, K.ndim(g)), keepdims=True)
                grads.append(g)
            return grads
    return NewOptimizer
# Select the implementations matching the active Keras backend: under
# tf.keras use the *_v2 (resource-variable) variants.
if is_tf_keras:
    extend_with_weight_decay = extend_with_weight_decay_v2
    extend_with_layer_adaptation = extend_with_layer_adaptation_v2
    extend_with_piecewise_linear_lr = extend_with_piecewise_linear_lr_v2
    extend_with_gradient_accumulation = extend_with_gradient_accumulation_v2
    extend_with_lookahead = extend_with_lookahead_v2
    extend_with_lazy_optimization = extend_with_lazy_optimization_v2
    AdaFactor = AdaFactorV2
else:
    Adam = keras.optimizers.Adam
    AdaFactor = AdaFactorV1
# NOTE(review): the is_tf_keras branch does not reassign `Adam` — presumably
# it is defined earlier in this module; confirm, since custom_objects below
# references it.
custom_objects = {
    'Adam': Adam,
    'AdaFactor': AdaFactor,
}
# Register so models using these classes can be deserialized by name.
keras.utils.get_custom_objects().update(custom_objects)
| 34,935 | 34.360324 | 83 | py |
BiRTE | BiRTE-main/bert4keras/tokenizers.py | #! -*- coding: utf-8 -*-
# 工具函数
import unicodedata, re
from bert4keras.snippets import is_string, is_py2
from bert4keras.snippets import open
def load_vocab(dict_path, encoding='utf-8', simplified=False, startswith=None):
    """Read a token->id vocabulary from a BERT-style dict file.

    Each line holds one token; ids are assigned in line order.  With
    ``simplified=True``, redundant multi-character tokens that contain CJK
    characters or punctuation are dropped, and ``(new_dict, keep_tokens)``
    is returned, where ``keep_tokens`` maps new ids to original ids;
    otherwise the plain token->id dict is returned.  ``startswith`` lists
    tokens to pin at the front of the simplified vocabulary.
    """
    token_dict = {}
    with open(dict_path, encoding=encoding) as reader:
        for line in reader:
            token_dict[line.strip()] = len(token_dict)
    if not simplified:
        return token_dict
    # Filter redundant tokens while preserving the original id order.
    new_token_dict, keep_tokens = {}, []
    for t in (startswith or []):
        new_token_dict[t] = len(new_token_dict)
        keep_tokens.append(token_dict[t])
    for t, _ in sorted(token_dict.items(), key=lambda kv: kv[1]):
        if t in new_token_dict:
            continue
        keep = True
        if len(t) > 1:
            # Drop multi-char tokens whose stem contains CJK/punctuation.
            keep = not any(
                Tokenizer._is_cjk_character(c) or Tokenizer._is_punctuation(c)
                for c in Tokenizer.stem(t)
            )
        if keep:
            new_token_dict[t] = len(new_token_dict)
            keep_tokens.append(token_dict[t])
    return new_token_dict, keep_tokens
class BasicTokenizer(object):
    """Base class for tokenizers.
    """
    def __init__(self, token_start='[CLS]', token_end='[SEP]'):
        """Initialize the special tokens.
        """
        self._token_pad = '[PAD]'
        self._token_unk = '[UNK]'
        self._token_mask = '[MASK]'
        self._token_start = token_start
        self._token_end = token_end
    def tokenize(self, text, max_length=None,mode="BERT"):
        """Tokenize `text`, adding start/end tokens and truncating to
        `max_length` if given.
        """
        tokens = self._tokenize(text,mode)
        if self._token_start is not None:
            tokens.insert(0, self._token_start)
        if self._token_end is not None:
            tokens.append(self._token_end)
        if max_length is not None:
            # Pop from just before the end token (if one was appended).
            index = int(self._token_end is not None) + 1
            self.truncate_sequence(max_length, tokens, None, -index)
        return tokens
    def token_to_id(self, token):
        """Convert a token to its id; implemented by subclasses.
        """
        raise NotImplementedError
    def tokens_to_ids(self, tokens):
        """Convert a token sequence to the corresponding id sequence.
        """
        return [self.token_to_id(token) for token in tokens]
    def truncate_sequence(
        self, max_length, first_sequence, second_sequence=None, pop_index=-1
    ):
        """Truncate (in place) until the combined length fits max_length,
        always popping from the currently longer sequence.
        """
        if second_sequence is None:
            second_sequence = []
        while True:
            total_length = len(first_sequence) + len(second_sequence)
            if total_length <= max_length:
                break
            elif len(first_sequence) > len(second_sequence):
                first_sequence.pop(pop_index)
            else:
                second_sequence.pop(pop_index)
    def encode(
        self,
        first_text,
        second_text=None,
        max_length=None,
        first_length=None,
        second_length=None,
        mode="BERT"
    ):
        """Return (token_ids, segment_ids, mask_ids) for the text (pair).
        If first_length is given, the first sentence is force-padded to that
        length; likewise second_length for the second sentence.
        """
        if is_string(first_text):
            first_tokens = self.tokenize(first_text,mode=mode)
        else:
            first_tokens = first_text
        if second_text is None:
            second_tokens = None
        elif is_string(second_text):
            # Drop the start token of the second sentence (if any).
            idx = int(bool(self._token_start))
            second_tokens = self.tokenize(second_text,mode=mode)[idx:]
        else:
            second_tokens = second_text
        if max_length is not None:
            self.truncate_sequence(max_length, first_tokens, second_tokens, -2)
        first_token_ids = self.tokens_to_ids(first_tokens)
        if first_length is not None:
            first_token_ids = first_token_ids[:first_length]
            first_token_ids.extend([self._token_pad_id] *
                                   (first_length - len(first_token_ids)))
        first_segment_ids = [0] * len(first_token_ids)
        if second_text is not None:
            second_token_ids = self.tokens_to_ids(second_tokens)
            if second_length is not None:
                second_token_ids = second_token_ids[:second_length]
                second_token_ids.extend([self._token_pad_id] *
                                        (second_length - len(second_token_ids)))
            second_segment_ids = [1] * len(second_token_ids)
            first_token_ids.extend(second_token_ids)
            first_segment_ids.extend(second_segment_ids)
        first_mask_ids=[1]*len(first_segment_ids)
        return first_token_ids, first_segment_ids, first_mask_ids
    def id_to_token(self, i):
        """Convert an id back to its token; implemented by subclasses.
        """
        raise NotImplementedError
    def ids_to_tokens(self, ids):
        """Convert an id sequence to the corresponding token sequence.
        """
        return [self.id_to_token(i) for i in ids]
    def decode(self, ids):
        """Convert ids back to readable text; implemented by subclasses.
        """
        raise NotImplementedError
    def _tokenize(self, text):
        """Basic tokenization; implemented by subclasses.
        """
        raise NotImplementedError
class Tokenizer(BasicTokenizer):
    """Native BERT tokenizer.
    Pure-Python implementation, adapted from keras_bert's tokenizer.
    """
    def __init__(self, token_dict, do_lower_case=False, *args, **kwargs):
        """Initialize from a token->id dict or a vocab file path.
        """
        super(Tokenizer, self).__init__(*args, **kwargs)
        if is_string(token_dict):
            token_dict = load_vocab(token_dict)
        self._do_lower_case = do_lower_case
        self._token_dict = token_dict
        self._token_dict_inv = {v: k for k, v in token_dict.items()}
        self._vocab_size = len(token_dict)
        for token in ['pad', 'unk', 'mask', 'start', 'end']:
            try:
                _token_id = token_dict[getattr(self, '_token_%s' % token)]
                setattr(self, '_token_%s_id' % token, _token_id)
            except:
                # Special tokens absent from the vocab simply get no *_id.
                pass
    def token_to_id(self, token):
        """Convert a token to its id (unknown tokens map to the UNK id).
        """
        return self._token_dict.get(token, self._token_unk_id)
    def id_to_token(self, i):
        """Convert an id back to its token.
        """
        return self._token_dict_inv[i]
    def decode(self, ids, tokens=None):
        """Convert ids (or pre-fetched tokens) back to readable text.
        """
        tokens = tokens or self.ids_to_tokens(ids)
        tokens = [token for token in tokens if not self._is_special(token)]
        text, flag = '', False
        for i, token in enumerate(tokens):
            if token[:2] == '##':
                # Subword continuation: glue onto the previous piece.
                text += token[2:]
            elif len(token) == 1 and self._is_cjk_character(token):
                text += token
            elif len(token) == 1 and self._is_punctuation(token):
                text += token
                text += ' '
            elif i > 0 and self._is_cjk_character(text[-1]):
                text += token
            else:
                text += ' '
                text += token
        # Collapse runs of spaces and repair spacing around contractions,
        # punctuation and decimal numbers.
        text = re.sub(' +', ' ', text)
        text = re.sub('\' (re|m|s|t|ve|d|ll) ', '\'\\1 ', text)
        punctuation = self._cjk_punctuation() + '+-/={(<['
        punctuation_regex = '|'.join([re.escape(p) for p in punctuation])
        punctuation_regex = '(%s) ' % punctuation_regex
        text = re.sub(punctuation_regex, '\\1', text)
        text = re.sub('(\d\.) (\d)', '\\1\\2', text)
        return text.strip()
    def _tokenize(self, text,mode="BERT"):
        """Basic tokenization: normalize, split around punctuation/CJK
        characters, then apply WordPiece.  In "TOKEN" mode every remaining
        character is split out individually instead of kept as a word.
        """
        mode=mode.upper()
        assert mode in ['BERT','TOKEN']
        if self._do_lower_case:
            if is_py2:
                text = unicode(text)
            #if mode=="BERT":
            # Strip combining marks (accents) after NFD normalization.
            text = unicodedata.normalize('NFD', text)
            text = ''.join([
                ch for ch in text if unicodedata.category(ch) != 'Mn'
            ])
            text = text.lower()
        spaced = ''
        for ch in text:
            if self._is_punctuation(ch) or self._is_cjk_character(ch):
                spaced += ' ' + ch + ' '
            elif self._is_space(ch):
                spaced += ' '
            elif ord(ch) == 0 or ord(ch) == 0xfffd or self._is_control(ch):
                # Drop NUL, replacement char, and control characters.
                continue
            elif mode=="BERT":
                spaced += ch
            else:
                spaced += ' ' + ch + ' '
        tokens = []
        for word in spaced.strip().split():
            tokens.extend(self._word_piece_tokenize(word))
        return tokens
    def _word_piece_tokenize(self, word):
        """Split a word into subwords via greedy longest-match WordPiece.
        """
        if word in self._token_dict:
            return [word]
        tokens = []
        start, stop = 0, 0
        while start < len(word):
            stop = len(word)
            while stop > start:
                sub = word[start:stop]
                if start > 0:
                    sub = '##' + sub
                if sub in self._token_dict:
                    break
                stop -= 1
            if start == stop:
                # No match even for a single char: emit it anyway.
                stop += 1
            tokens.append(sub)
            start = stop
        return tokens
    @staticmethod
    def stem(token):
        """Return the token's "stem" (strip a leading ## if present).
        """
        if token[:2] == '##':
            return token[2:]
        else:
            return token
    @staticmethod
    def _is_space(ch):
        """Whitespace character test.
        """
        return ch == ' ' or ch == '\n' or ch == '\r' or ch == '\t' or \
            unicodedata.category(ch) == 'Zs'
    @staticmethod
    def _is_punctuation(ch):
        """Punctuation character test (covers full- and half-width forms).
        Note: unicodedata.category may differ between py2 and py3; e.g.
        the character u'\xa7' is 'So' under py2 but 'Po' under py3.
        """
        code = ord(ch)
        return 33 <= code <= 47 or \
            58 <= code <= 64 or \
            91 <= code <= 96 or \
            123 <= code <= 126 or \
            unicodedata.category(ch).startswith('P')
    @staticmethod
    def _cjk_punctuation():
        # Full-width / CJK punctuation characters used by decode().
        return u'\uff02\uff03\uff04\uff05\uff06\uff07\uff08\uff09\uff0a\uff0b\uff0c\uff0d\uff0f\uff1a\uff1b\uff1c\uff1d\uff1e\uff20\uff3b\uff3c\uff3d\uff3e\uff3f\uff40\uff5b\uff5c\uff5d\uff5e\uff5f\uff60\uff62\uff63\uff64\u3000\u3001\u3003\u3008\u3009\u300a\u300b\u300c\u300d\u300e\u300f\u3010\u3011\u3014\u3015\u3016\u3017\u3018\u3019\u301a\u301b\u301c\u301d\u301e\u301f\u3030\u303e\u303f\u2013\u2014\u2018\u2019\u201b\u201c\u201d\u201e\u201f\u2026\u2027\ufe4f\ufe51\ufe54\xb7\uff01\uff1f\uff61\u3002'
    @staticmethod
    def _is_cjk_character(ch):
        """CJK character test (Chinese characters included).
        Reference: https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        """
        code = ord(ch)
        return 0x4E00 <= code <= 0x9FFF or \
            0x3400 <= code <= 0x4DBF or \
            0x20000 <= code <= 0x2A6DF or \
            0x2A700 <= code <= 0x2B73F or \
            0x2B740 <= code <= 0x2B81F or \
            0x2B820 <= code <= 0x2CEAF or \
            0xF900 <= code <= 0xFAFF or \
            0x2F800 <= code <= 0x2FA1F
    @staticmethod
    def _is_control(ch):
        """Control character test.
        """
        return unicodedata.category(ch) in ('Cc', 'Cf')
    @staticmethod
    def _is_special(ch):
        """Test whether a token is a special symbol like [CLS]/[SEP].
        """
        return bool(ch) and (ch[0] == '[') and (ch[-1] == ']')
    def rematch(self, text, tokens):
        """Map each token back to its character span in the original text.
        Returns one list of original character indices per token (empty for
        special tokens).
        """
        if is_py2:
            text = unicode(text)
        if self._do_lower_case:
            text = text.lower()
        normalized_text, char_mapping = '', []
        for i, ch in enumerate(text):
            if self._do_lower_case:
                # Mirror the normalization done in _tokenize.
                ch = unicodedata.normalize('NFD', ch)
                ch = ''.join([c for c in ch if unicodedata.category(c) != 'Mn'])
            ch = ''.join([
                c for c in ch
                if not (ord(c) == 0 or ord(c) == 0xfffd or self._is_control(c))
            ])
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        for token in tokens:
            if self._is_special(token):
                token_mapping.append([])
            else:
                token = self.stem(token)
                start = text[offset:].index(token) + offset
                end = start + len(token)
                token_mapping.append(char_mapping[start:end])
                offset = end
        return token_mapping
    def subTokens2Token(self,tokens):
        '''
        Given a subword tokenization, return for each token the list of
        token indices that make up the whole word it belongs to.
        :param tokens: list of (possibly ##-prefixed) subword tokens
        :return: list (same length as tokens) of index lists
        '''
        mapping = []
        last_word = []
        for i, token in enumerate(tokens):
            if token.startswith("##"):
                assert last_word
                mapping.append(last_word)
            else:
                last_word = []
                if i != len(tokens) - 1 and tokens[i + 1].startswith("##"): # refresh last_word
                    last_word.append(i)
                    j = i
                    while j != len(tokens) - 1 and tokens[j + 1].startswith("##"):
                        last_word.append(j + 1)
                        j += 1
                    if last_word:
                        mapping.append(last_word)
                else:
                    mapping.append([i])
        assert len(mapping) == len(tokens)
        return mapping
class SpTokenizer(BasicTokenizer):
    """Wrapper around a SentencePiece model; usage mirrors Tokenizer.
    """
    def __init__(self, sp_model_path, *args, **kwargs):
        super(SpTokenizer, self).__init__(*args, **kwargs)
        import sentencepiece as spm
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(sp_model_path)
        self._token_pad = self.sp_model.id_to_piece(self.sp_model.pad_id())
        self._token_unk = self.sp_model.id_to_piece(self.sp_model.unk_id())
        self._vocab_size = self.sp_model.get_piece_size()
        for token in ['pad', 'unk', 'mask', 'start', 'end']:
            try:
                _token = getattr(self, '_token_%s' % token)
                _token_id = self.sp_model.piece_to_id(_token)
                setattr(self, '_token_%s_id' % token, _token_id)
            except:
                # Special tokens absent from the model simply get no *_id.
                pass
    def token_to_id(self, token):
        """Convert a token to its id.
        """
        return self.sp_model.piece_to_id(token)
    def id_to_token(self, i):
        """Convert an id back to its token ('' for out-of-range ids).
        """
        if i < self._vocab_size:
            return self.sp_model.id_to_piece(i)
        else:
            return ''
    def decode(self, ids):
        """Convert ids back to readable text.
        """
        ids = [i for i in ids if self._is_decodable(i)]
        text = self.sp_model.decode_ids(ids)
        return text.decode('utf-8') if is_py2 else text
    def _tokenize(self, text):
        """Basic tokenization via SentencePiece.
        """
        tokens = self.sp_model.encode_as_pieces(text)
        return tokens
    def _is_special(self, i):
        """Test whether the id is a special (control/unknown/unused) symbol.
        """
        return self.sp_model.is_control(i) or \
            self.sp_model.is_unknown(i) or \
            self.sp_model.is_unused(i)
    def _is_decodable(self, i):
        """Test whether the id should be emitted when decoding.
        """
        return (i < self._vocab_size) and not self._is_special(i)
| 15,186 | 31.450855 | 502 | py |
BiRTE | BiRTE-main/bert4keras/layers.py | #! -*- coding: utf-8 -*-
# 自定义层
import numpy as np
import tensorflow as tf
from bert4keras.backend import keras, K
from bert4keras.backend import search_layer
from bert4keras.backend import sequence_masking
from bert4keras.backend import pool1d
from bert4keras.backend import divisible_temporal_padding
from bert4keras.snippets import is_string
from keras import initializers, activations
from keras.layers import *
def integerize_shape(func):
    """Decorator ensuring every entry of ``input_shape`` is an int or None.

    TF1-style shapes may contain ``Dimension`` objects; these are unwrapped
    via their ``.value`` attribute, recursing through nested iterables.
    """
    def _to_int(item):
        if hasattr(item, '__iter__'):
            return [_to_int(sub) for sub in item]
        if hasattr(item, 'value'):
            return item.value
        return item
    def wrapper(self, input_shape):
        return func(self, _to_int(input_shape))
    return wrapper
if keras.__version__[-2:] != 'tf' and keras.__version__ < '2.3':
    class Layer(keras.layers.Layer):
        """Redefine Layer to support nested layers ("layers inside layers").
        (Only needed for keras versions below 2.3.)
        """
        def __init__(self, **kwargs):
            super(Layer, self).__init__(**kwargs)
            self.supports_masking = True  # all custom layers here support masking
        def __setattr__(self, name, value):
            # Track sub-layers assigned as attributes so their weights are
            # exposed through this layer's weight properties.
            if isinstance(value, keras.layers.Layer):
                if not hasattr(self, '_layers'):
                    self._layers = []
                if value not in self._layers:
                    self._layers.append(value)
            super(Layer, self).__setattr__(name, value)
        @property
        def trainable_weights(self):
            trainable = getattr(self, 'trainable', True)
            if trainable:
                trainable_weights = super(Layer, self).trainable_weights[:]
                for l in getattr(self, '_layers', []):
                    trainable_weights += l.trainable_weights
                return trainable_weights
            else:
                return []
        @property
        def non_trainable_weights(self):
            trainable = getattr(self, 'trainable', True)
            non_trainable_weights = super(Layer, self).non_trainable_weights[:]
            for l in getattr(self, '_layers', []):
                if trainable:
                    non_trainable_weights += l.non_trainable_weights
                else:
                    # When the layer is frozen, all sub-layer weights count
                    # as non-trainable.
                    non_trainable_weights += l.weights
            return non_trainable_weights
else:
    class Layer(keras.layers.Layer):
        def __init__(self, **kwargs):
            super(Layer, self).__init__(**kwargs)
            self.supports_masking = True  # all custom layers here support masking
class Embedding(keras.layers.Embedding):
    """Embedding with a mask tweak for T5 compatibility.
    """
    def compute_mask(self, inputs, mask=None):
        """Force the first token's mask entry to True.
        """
        mask = super(Embedding, self).compute_mask(inputs, mask)
        if mask is None:
            return None
        head = K.ones_like(mask[:, :1], dtype='bool')
        return K.concatenate([head, mask[:, 1:]], 1)
class MultiHeadAttention(Layer):
    """Multi-head attention.
    """
    def __init__(
        self,
        heads,
        head_size,
        key_size=None,
        use_bias=True,
        scaled_dot_product=True,
        kernel_initializer='glorot_uniform',
        **kwargs
    ):
        super(MultiHeadAttention, self).__init__(**kwargs)
        self.heads = heads
        self.head_size = head_size
        self.out_dim = heads * head_size
        self.key_size = key_size or head_size
        self.use_bias = use_bias
        self.scaled_dot_product = scaled_dot_product
        self.kernel_initializer = initializers.get(kernel_initializer)
    def build(self, input_shape):
        super(MultiHeadAttention, self).build(input_shape)
        self.q_dense = Dense(
            units=self.key_size * self.heads,
            use_bias=self.use_bias,
            kernel_initializer=self.kernel_initializer
        )
        self.k_dense = Dense(
            units=self.key_size * self.heads,
            use_bias=self.use_bias,
            kernel_initializer=self.kernel_initializer
        )
        self.v_dense = Dense(
            units=self.out_dim,
            use_bias=self.use_bias,
            kernel_initializer=self.kernel_initializer
        )
        self.o_dense = Dense(
            units=self.out_dim,
            use_bias=self.use_bias,
            kernel_initializer=self.kernel_initializer
        )
    def call(self, inputs, mask=None, a_mask=None, p_bias=None):
        """Compute multi-head attention.
        q_mask: mask on the query sequence;
                mainly zeroes the padding part of the output.
        v_mask: mask on the value sequence;
                mainly prevents attention from reading padding.
        a_mask: mask on the attention matrix;
                different masks implement different attention variants.
        p_bias: positional bias inside attention;
                selects the flavor of relative position encoding.
        """
        q, k, v = inputs[:3]
        q_mask, v_mask, n = None, None, 3
        if mask is not None:
            if mask[0] is not None:
                q_mask = K.cast(mask[0], K.floatx())
            if mask[2] is not None:
                v_mask = K.cast(mask[2], K.floatx())
        if a_mask:
            a_mask = inputs[n]
            n += 1
        # Linear projections
        qw = self.q_dense(q)
        kw = self.k_dense(k)
        vw = self.v_dense(v)
        # Reshape to (batch, seq, heads, size_per_head)
        qw = K.reshape(qw, (-1, K.shape(q)[1], self.heads, self.key_size))
        kw = K.reshape(kw, (-1, K.shape(k)[1], self.heads, self.key_size))
        vw = K.reshape(vw, (-1, K.shape(v)[1], self.heads, self.head_size))
        # Attention scores
        a = tf.einsum('bjhd,bkhd->bhjk', qw, kw)
        # Add the positional bias, if any
        if p_bias == 'typical_relative':
            pos_embeddings = inputs[n]
            a = a + tf.einsum('bjhd,jkd->bhjk', qw, pos_embeddings)
        elif p_bias == 't5_relative':
            pos_embeddings = K.permute_dimensions(inputs[n], (2, 0, 1))
            a = a + K.expand_dims(pos_embeddings, 0)
        # Attention (continued): scale, mask, softmax
        if self.scaled_dot_product:
            a = a / self.key_size**0.5
        a = sequence_masking(a, v_mask, 1, -1)
        if a_mask is not None:
            a = a - (1 - a_mask) * 1e12
        a = K.softmax(a)
        # Weighted sum of values
        o = tf.einsum('bhjk,bkhd->bjhd', a, vw)
        if p_bias == 'typical_relative':
            o = o + tf.einsum('bhjk,jkd->bjhd', a, pos_embeddings)
        o = K.reshape(o, (-1, K.shape(o)[1], self.out_dim))
        o = self.o_dense(o)
        # Zero out padded query positions and return
        o = sequence_masking(o, q_mask, 0)
        return o
    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], input_shape[0][1], self.out_dim)
    def compute_mask(self, inputs, mask):
        # The output mask is the query mask.
        return mask[0]
    def get_config(self):
        config = {
            'heads': self.heads,
            'head_size': self.head_size,
            'key_size': self.key_size,
            'use_bias': self.use_bias,
            'scaled_dot_product': self.scaled_dot_product,
            'kernel_initializer':
                initializers.serialize(self.kernel_initializer),
        }
        base_config = super(MultiHeadAttention, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class LayerNormalization(Layer):
    """(Conditional) Layer Normalization.
    The hidden_* parameters are only used with conditional input
    (conditional=True).
    """
    def __init__(
        self,
        center=True,
        scale=True,
        epsilon=None,
        conditional=False,
        hidden_units=None,
        hidden_activation='linear',
        hidden_initializer='glorot_uniform',
        **kwargs
    ):
        super(LayerNormalization, self).__init__(**kwargs)
        self.center = center
        self.scale = scale
        self.conditional = conditional
        self.hidden_units = hidden_units
        self.hidden_activation = activations.get(hidden_activation)
        self.hidden_initializer = initializers.get(hidden_initializer)
        self.epsilon = epsilon or 1e-12
    def build(self, input_shape):
        super(LayerNormalization, self).build(input_shape)
        if self.conditional:
            # Conditional mode receives [inputs, condition].
            shape = (input_shape[0][-1],)
        else:
            shape = (input_shape[-1],)
        if self.center:
            self.beta = self.add_weight(
                shape=shape, initializer='zeros', name='beta'
            )
        if self.scale:
            self.gamma = self.add_weight(
                shape=shape, initializer='ones', name='gamma'
            )
        if self.conditional:
            if self.hidden_units is not None:
                self.hidden_dense = Dense(
                    units=self.hidden_units,
                    activation=self.hidden_activation,
                    use_bias=False,
                    kernel_initializer=self.hidden_initializer
                )
            # Zero-initialized so the condition initially has no effect.
            if self.center:
                self.beta_dense = Dense(
                    units=shape[0], use_bias=False, kernel_initializer='zeros'
                )
            if self.scale:
                self.gamma_dense = Dense(
                    units=shape[0], use_bias=False, kernel_initializer='zeros'
                )
    def call(self, inputs):
        """In conditional mode the input is a list whose second element is
        the condition.
        """
        if self.conditional:
            inputs, cond = inputs
            if self.hidden_units is not None:
                cond = self.hidden_dense(cond)
            # Broadcast the condition up to the rank of the inputs.
            for _ in range(K.ndim(inputs) - K.ndim(cond)):
                cond = K.expand_dims(cond, 1)
            if self.center:
                beta = self.beta_dense(cond) + self.beta
            if self.scale:
                gamma = self.gamma_dense(cond) + self.gamma
        else:
            if self.center:
                beta = self.beta
            if self.scale:
                gamma = self.gamma
        outputs = inputs
        if self.center:
            mean = K.mean(outputs, axis=-1, keepdims=True)
            outputs = outputs - mean
        if self.scale:
            variance = K.mean(K.square(outputs), axis=-1, keepdims=True)
            std = K.sqrt(variance + self.epsilon)
            outputs = outputs / std
            outputs = outputs * gamma
        if self.center:
            outputs = outputs + beta
        return outputs
    def compute_output_shape(self, input_shape):
        if self.conditional:
            return input_shape[0]
        else:
            return input_shape
    def get_config(self):
        config = {
            'center': self.center,
            'scale': self.scale,
            'epsilon': self.epsilon,
            'conditional': self.conditional,
            'hidden_units': self.hidden_units,
            'hidden_activation': activations.serialize(self.hidden_activation),
            'hidden_initializer':
                initializers.serialize(self.hidden_initializer),
        }
        base_config = super(LayerNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class PositionEmbedding(Layer):
    """Trainable position embedding.
    """
    def __init__(
        self,
        input_dim,
        output_dim,
        merge_mode='add',
        embeddings_initializer='zeros',
        custom_position_ids=False,
        **kwargs
    ):
        super(PositionEmbedding, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.merge_mode = merge_mode
        self.embeddings_initializer = initializers.get(embeddings_initializer)
        self.custom_position_ids = custom_position_ids
    def build(self, input_shape):
        super(PositionEmbedding, self).build(input_shape)
        self.embeddings = self.add_weight(
            name='embeddings',
            shape=(self.input_dim, self.output_dim),
            initializer=self.embeddings_initializer
        )
    def call(self, inputs):
        """With custom_position_ids, the second input holds the custom
        position ids.
        """
        if self.custom_position_ids:
            inputs, position_ids = inputs
            if K.dtype(position_ids) != 'int32':
                position_ids = K.cast(position_ids, 'int32')
            pos_embeddings = K.gather(self.embeddings, position_ids)
        else:
            input_shape = K.shape(inputs)
            batch_size, seq_len = input_shape[0], input_shape[1]
            pos_embeddings = self.embeddings[:seq_len]
            pos_embeddings = K.expand_dims(pos_embeddings, 0)
            if self.merge_mode != 'add':
                # Concatenation needs an explicit batch dimension.
                pos_embeddings = K.tile(pos_embeddings, [batch_size, 1, 1])
        if self.merge_mode == 'add':
            return inputs + pos_embeddings
        else:
            return K.concatenate([inputs, pos_embeddings])
    def compute_output_shape(self, input_shape):
        if self.custom_position_ids:
            input_shape = input_shape[0]
        if self.merge_mode == 'add':
            return input_shape
        else:
            return input_shape[:2] + (input_shape[2] + self.output_dim,)
    def get_config(self):
        config = {
            'input_dim': self.input_dim,
            'output_dim': self.output_dim,
            'merge_mode': self.merge_mode,
            'embeddings_initializer':
                initializers.serialize(self.embeddings_initializer),
            'custom_position_ids': self.custom_position_ids,
        }
        base_config = super(PositionEmbedding, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class RelativePositionEmbedding(Layer):
    """Relative position encoding.
    From the paper: https://arxiv.org/abs/1803.02155
    """
    def __init__(
        self, input_dim, output_dim, embeddings_initializer='zeros', **kwargs
    ):
        super(RelativePositionEmbedding, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.embeddings_initializer = initializers.get(embeddings_initializer)
    def build(self, input_shape):
        super(RelativePositionEmbedding, self).build(input_shape)
        self.embeddings = self.add_weight(
            name='embeddings',
            shape=(self.input_dim, self.output_dim),
            initializer=self.embeddings_initializer,
        )
    def call(self, inputs):
        pos_ids = self.compute_position_ids(inputs)
        return K.gather(self.embeddings, pos_ids)
    def compute_position_ids(self, inputs):
        q, v = inputs
        # Pairwise position differences between query and value positions
        q_idxs = K.arange(0, K.shape(q)[1], dtype='int32')
        q_idxs = K.expand_dims(q_idxs, 1)
        v_idxs = K.arange(0, K.shape(v)[1], dtype='int32')
        v_idxs = K.expand_dims(v_idxs, 0)
        pos_ids = v_idxs - q_idxs
        # Clip to the supported window and shift into [0, input_dim)
        max_position = (self.input_dim - 1) // 2
        pos_ids = K.clip(pos_ids, -max_position, max_position)
        pos_ids = pos_ids + max_position
        return pos_ids
    def compute_output_shape(self, input_shape):
        return (None, None, self.output_dim)
    def compute_mask(self, inputs, mask):
        return mask[0]
    def get_config(self):
        config = {
            'input_dim': self.input_dim,
            'output_dim': self.output_dim,
            'embeddings_initializer':
                initializers.serialize(self.embeddings_initializer),
        }
        base_config = super(RelativePositionEmbedding, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class RelativePositionEmbeddingT5(RelativePositionEmbedding):
    """Google T5's relative position encoding.
    From the paper: https://arxiv.org/abs/1910.10683
    """
    def __init__(
        self,
        input_dim,
        output_dim,
        max_distance=128,
        bidirectional=True,
        embeddings_initializer='zeros',
        **kwargs
    ):
        super(RelativePositionEmbeddingT5,
              self).__init__(input_dim, output_dim, **kwargs)
        self.max_distance = max_distance
        self.bidirectional = bidirectional
    def compute_position_ids(self, inputs):
        """T5's relative-position bucketing (direct port of the official
        T5 source code).
        """
        q, v = inputs
        # Pairwise position differences between query and value positions
        q_idxs = K.arange(0, K.shape(q)[1], dtype='int32')
        q_idxs = K.expand_dims(q_idxs, 1)
        v_idxs = K.arange(0, K.shape(v)[1], dtype='int32')
        v_idxs = K.expand_dims(v_idxs, 0)
        pos_ids = v_idxs - q_idxs
        # Map differences into log-spaced buckets
        num_buckets, max_distance = self.input_dim, self.max_distance
        ret = 0
        n = -pos_ids
        if self.bidirectional:
            # Half the buckets encode the sign of the offset.
            num_buckets //= 2
            ret += K.cast(K.less(n, 0), 'int32') * num_buckets
            n = K.abs(n)
        else:
            n = K.maximum(n, 0)
        # now n is in the range [0, inf)
        max_exact = num_buckets // 2
        is_small = K.less(n, max_exact)
        # Small offsets get exact buckets; larger ones share log-spaced
        # buckets up to max_distance.
        val_if_large = max_exact + K.cast(
            K.log(K.cast(n, K.floatx()) / max_exact) /
            np.log(max_distance / max_exact) * (num_buckets - max_exact),
            'int32',
        )
        val_if_large = K.minimum(val_if_large, num_buckets - 1)
        ret += K.switch(is_small, n, val_if_large)
        return ret
    def get_config(self):
        config = {
            'max_distance': self.max_distance,
            'bidirectional': self.bidirectional,
        }
        base_config = super(RelativePositionEmbeddingT5, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class FeedForward(Layer):
    """Position-wise feed-forward block: two stacked Dense layers.

    The first Dense projects to ``units`` with ``activation``; the second
    projects back to the input's last dimension with no activation.
    """
    def __init__(
        self,
        units,
        activation='relu',
        use_bias=True,
        kernel_initializer='glorot_uniform',
        **kwargs
    ):
        super(FeedForward, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
    @integerize_shape
    def build(self, input_shape):
        super(FeedForward, self).build(input_shape)
        last_dim = input_shape[-1]
        self.dense_1 = Dense(
            units=self.units,
            activation=self.activation,
            use_bias=self.use_bias,
            kernel_initializer=self.kernel_initializer
        )
        # Projects back to the input dimension; no activation here.
        self.dense_2 = Dense(
            units=last_dim,
            use_bias=self.use_bias,
            kernel_initializer=self.kernel_initializer
        )
    def call(self, inputs):
        return self.dense_2(self.dense_1(inputs))
    def get_config(self):
        config = {
            'units': self.units,
            'activation': activations.serialize(self.activation),
            'use_bias': self.use_bias,
            'kernel_initializer':
                initializers.serialize(self.kernel_initializer),
        }
        base_config = super(FeedForward, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class EmbeddingDense(Layer):
    """Operates like Dense, but uses the embeddings matrix of an Embedding
    layer as its kernel (tied weights).  The Embedding layer is located by
    name in the computation graph.
    """
    def __init__(
        self, embedding_name, activation='softmax', use_bias=True, **kwargs
    ):
        super(EmbeddingDense, self).__init__(**kwargs)
        self.embedding_name = embedding_name
        self.activation = activations.get(activation)
        self.use_bias = use_bias
    def call(self, inputs):
        # Lazily bind the kernel on first call: search the graph feeding
        # `inputs` for the named Embedding layer and transpose its matrix.
        if not hasattr(self, 'kernel'):
            embedding_layer = search_layer(inputs, self.embedding_name)
            if embedding_layer is None:
                raise Exception('Embedding layer not found')
            self.kernel = K.transpose(embedding_layer.embeddings)
            self.units = K.int_shape(self.kernel)[1]
            if self.use_bias:
                self.bias = self.add_weight(
                    name='bias', shape=(self.units,), initializer='zeros'
                )
        outputs = K.dot(inputs, self.kernel)
        if self.use_bias:
            outputs = K.bias_add(outputs, self.bias)
        outputs = self.activation(outputs)
        return outputs
    def compute_output_shape(self, input_shape):
        return input_shape[:-1] + (self.units,)
    def get_config(self):
        config = {
            'embedding_name': self.embedding_name,
            'activation': activations.serialize(self.activation),
            'use_bias': self.use_bias,
        }
        base_config = super(EmbeddingDense, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class ConditionalRandomField(Layer):
    """Pure-Keras CRF layer.

    A CRF layer is essentially a loss-computing layer with a trainable
    label-transition matrix; the loss methods below are meant to be
    passed to ``model.compile``.
    """
    def __init__(self, lr_multiplier=1, **kwargs):
        super(ConditionalRandomField, self).__init__(**kwargs)
        self.lr_multiplier = lr_multiplier  # learning-rate multiplier for this layer
    @integerize_shape
    def build(self, input_shape):
        super(ConditionalRandomField, self).build(input_shape)
        output_dim = input_shape[-1]
        self.trans = self.add_weight(
            name='trans',
            shape=(output_dim, output_dim),
            initializer='glorot_uniform',
            trainable=True
        )
        if self.lr_multiplier != 1:
            # Scale the stored value down and multiply it back symbolically;
            # this effectively multiplies this weight's learning rate.
            K.set_value(self.trans, K.eval(self.trans) / self.lr_multiplier)
            self.trans = self.lr_multiplier * self.trans
    def compute_mask(self, inputs, mask=None):
        # Swallow the mask: downstream loss derives it from the logits.
        return None
    def call(self, inputs, mask=None):
        # Push padded positions to a large negative value (mode=1) so the
        # loss functions can recover the mask via a threshold.
        if mask is not None:
            mask = K.cast(mask, K.floatx())
        return sequence_masking(inputs, mask, 1, 1)
    def target_score(self, y_true, y_pred):
        """Unnormalized log-score of the target path:
        per-label emission scores plus transition scores.
        """
        point_score = tf.einsum('bni,bni->b', y_true, y_pred)  # emission score
        trans_score = tf.einsum(
            'bni,ij,bnj->b', y_true[:, :-1], self.trans, y_true[:, 1:]
        )  # transition score
        return point_score + trans_score
    def log_norm_step(self, inputs, states):
        """One recursion step of the normalizer log Z.

        Key points: 1. computed recursively over time steps;
        2. logsumexp avoids numeric overflow.
        """
        inputs, mask = inputs[:, :-1], inputs[:, -1:]
        states = K.expand_dims(states[0], 2)  # (batch_size, output_dim, 1)
        trans = K.expand_dims(self.trans, 0)  # (1, output_dim, output_dim)
        outputs = tf.reduce_logsumexp(
            states + trans, 1
        )  # (batch_size, output_dim)
        outputs = outputs + inputs
        # At padded positions keep the previous state unchanged.
        outputs = mask * outputs + (1 - mask) * states[:, :, 0]
        return outputs, [outputs]
    def dense_loss(self, y_true, y_pred):
        """CRF loss; ``y_true`` must be one-hot.
        """
        # Recover the mask: padded positions carry very negative logits
        # (set in call above).
        mask = K.all(K.greater(y_pred, -1e6), axis=2, keepdims=True)
        mask = K.cast(mask, K.floatx())
        # Score of the target path
        y_true, y_pred = y_true * mask, y_pred * mask
        target_score = self.target_score(y_true, y_pred)
        # Recursively compute log Z (mask is appended as an extra channel
        # so log_norm_step can see it).
        init_states = [y_pred[:, 0]]
        y_pred = K.concatenate([y_pred, mask], axis=2)
        input_length = K.int_shape(y_pred[:, 1:])[1]
        log_norm, _, _ = K.rnn(
            self.log_norm_step,
            y_pred[:, 1:],
            init_states,
            input_length=input_length
        )  # log Z vector at the last step
        log_norm = tf.reduce_logsumexp(log_norm, 1)  # reduce to scalar per sample
        # loss = -log p = log Z - target score
        return log_norm - target_score
    def sparse_loss(self, y_true, y_pred):
        """CRF loss; ``y_true`` must be integer labels (not one-hot).
        """
        # Make y_true's shape and dtype explicit
        y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
        y_true = K.cast(y_true, 'int32')
        # Convert to one-hot and delegate
        y_true = K.one_hot(y_true, K.shape(self.trans)[0])
        return self.dense_loss(y_true, y_pred)
    def dense_accuracy(self, y_true, y_pred):
        """Mask-aware per-frame training accuracy;
        ``y_true`` must be one-hot.
        """
        y_true = K.argmax(y_true, 2)
        return self.sparse_accuracy(y_true, y_pred)
    def sparse_accuracy(self, y_true, y_pred):
        """Mask-aware per-frame training accuracy;
        ``y_true`` must be integer labels (not one-hot).
        """
        # Recover the mask from the logits
        mask = K.all(K.greater(y_pred, -1e6), axis=2)
        mask = K.cast(mask, K.floatx())
        # Make y_true's shape and dtype explicit
        y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
        y_true = K.cast(y_true, 'int32')
        # Greedy per-label argmax as a rough training-time metric
        # (no Viterbi decoding here).
        y_pred = K.cast(K.argmax(y_pred, 2), 'int32')
        isequal = K.cast(K.equal(y_true, y_pred), K.floatx())
        return K.sum(isequal * mask) / K.sum(mask)
    def get_config(self):
        config = {
            'lr_multiplier': self.lr_multiplier,
        }
        base_config = super(ConditionalRandomField, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class MaximumEntropyMarkovModel(Layer):
    """(Bidirectional) maximum-entropy Markov model.

    Used like a CRF, but simpler and faster than a CRF: transitions are
    folded into a per-frame cross-entropy instead of a global partition
    function.
    """
    def __init__(self, lr_multiplier=1, hidden_dim=None, **kwargs):
        super(MaximumEntropyMarkovModel, self).__init__(**kwargs)
        self.lr_multiplier = lr_multiplier  # learning-rate multiplier for this layer
        self.hidden_dim = hidden_dim  # if not None, low-rank factorize the transition matrix
    @integerize_shape
    def build(self, input_shape):
        super(MaximumEntropyMarkovModel, self).build(input_shape)
        output_dim = input_shape[-1]
        if self.hidden_dim is None:
            # Full transition matrix
            self.trans = self.add_weight(
                name='trans',
                shape=(output_dim, output_dim),
                initializer='glorot_uniform',
                trainable=True
            )
            if self.lr_multiplier != 1:
                # Scale the stored value down and multiply back symbolically
                # to emulate a per-layer learning-rate multiplier.
                K.set_value(self.trans, K.eval(self.trans) / self.lr_multiplier)
                self.trans = self.lr_multiplier * self.trans
        else:
            # Low-rank factorization: trans ≈ l_trans · r_trans^T
            self.l_trans = self.add_weight(
                name='l_trans',
                shape=(output_dim, self.hidden_dim),
                initializer='glorot_uniform',
                trainable=True
            )
            self.r_trans = self.add_weight(
                name='r_trans',
                shape=(output_dim, self.hidden_dim),
                initializer='glorot_uniform',
                trainable=True
            )
            if self.lr_multiplier != 1:
                K.set_value(
                    self.l_trans,
                    K.eval(self.l_trans) / self.lr_multiplier
                )
                self.l_trans = self.lr_multiplier * self.l_trans
                K.set_value(
                    self.r_trans,
                    K.eval(self.r_trans) / self.lr_multiplier
                )
                self.r_trans = self.lr_multiplier * self.r_trans
    def compute_mask(self, inputs, mask=None):
        # Swallow the mask: the loss recovers it from the logits.
        return None
    def call(self, inputs, mask=None):
        # Push padded positions to a large negative value (mode=1).
        if mask is not None:
            mask = K.cast(mask, K.floatx())
        return sequence_masking(inputs, mask, 1, 1)
    def reverse_sequence(self, inputs, mask=None):
        # Reverse each tensor along the time axis; with a mask, only the
        # valid prefix of each sample is reversed.
        if mask is None:
            return [x[:, ::-1] for x in inputs]
        else:
            length = K.cast(K.sum(mask, 1), 'int32')
            return [tf.reverse_sequence(x, length, seq_axis=1) for x in inputs]
    def basic_loss(self, y_true, y_pred, go_backwards=False):
        """Single-direction MEMM loss; ``y_true`` must be integer labels
        (not one-hot).
        """
        # Recover the mask from the logits
        mask = K.all(K.greater(y_pred, -1e6), axis=2)
        mask = K.cast(mask, K.floatx())
        # Make y_true's shape and dtype explicit
        y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
        y_true = K.cast(y_true, 'int32')
        # Direction handling (note: 'histoty' is a historical typo for
        # 'history' — kept to leave the code byte-identical)
        if self.hidden_dim is None:
            if go_backwards:  # reverse the sequences?
                y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)
                trans = K.transpose(self.trans)
            else:
                trans = self.trans
            histoty = K.gather(trans, y_true)
        else:
            if go_backwards:  # reverse the sequences?
                y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)
                r_trans, l_trans = self.l_trans, self.r_trans
            else:
                l_trans, r_trans = self.l_trans, self.r_trans
            histoty = K.gather(l_trans, y_true)
            histoty = tf.einsum('bnd,kd->bnk', histoty, r_trans)
        # Loss: average the emission logits with the transition logits
        # shifted one step right (first frame uses its own logits).
        histoty = K.concatenate([y_pred[:, :1], histoty[:, :-1]], 1)
        y_pred = (y_pred + histoty) / 2
        loss = K.sparse_categorical_crossentropy(
            y_true, y_pred, from_logits=True
        )
        return K.sum(loss * mask) / K.sum(mask)
    def sparse_loss(self, y_true, y_pred):
        """Bidirectional loss; ``y_true`` must be integer labels
        (not one-hot).
        """
        loss = self.basic_loss(y_true, y_pred, False)
        loss = loss + self.basic_loss(y_true, y_pred, True)
        return loss / 2
    def dense_loss(self, y_true, y_pred):
        """Bidirectional loss; ``y_true`` must be one-hot.
        """
        y_true = K.argmax(y_true, 2)
        return self.sparse_loss(y_true, y_pred)
    def basic_accuracy(self, y_true, y_pred, go_backwards=False):
        """Mask-aware per-frame accuracy in one direction;
        ``y_true`` must be integer labels (not one-hot).
        """
        # Recover the mask from the logits
        mask = K.all(K.greater(y_pred, -1e6), axis=2)
        mask = K.cast(mask, K.floatx())
        # Make y_true's shape and dtype explicit
        y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
        y_true = K.cast(y_true, 'int32')
        # Direction handling (mirrors basic_loss)
        if self.hidden_dim is None:
            if go_backwards:  # reverse the sequences?
                y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)
                trans = K.transpose(self.trans)
            else:
                trans = self.trans
            histoty = K.gather(trans, y_true)
        else:
            if go_backwards:  # reverse the sequences?
                y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)
                r_trans, l_trans = self.l_trans, self.r_trans
            else:
                l_trans, r_trans = self.l_trans, self.r_trans
            histoty = K.gather(l_trans, y_true)
            histoty = tf.einsum('bnd,kd->bnk', histoty, r_trans)
        # Per-label argmax accuracy over the combined logits
        histoty = K.concatenate([y_pred[:, :1], histoty[:, :-1]], 1)
        y_pred = (y_pred + histoty) / 2
        y_pred = K.cast(K.argmax(y_pred, 2), 'int32')
        isequal = K.cast(K.equal(y_true, y_pred), K.floatx())
        return K.sum(isequal * mask) / K.sum(mask)
    def sparse_accuracy(self, y_true, y_pred):
        """Mask-aware per-frame accuracy (both directions averaged);
        ``y_true`` must be integer labels (not one-hot).
        """
        accuracy = self.basic_accuracy(y_true, y_pred, False)
        accuracy = accuracy + self.basic_accuracy(y_true, y_pred, True)
        return accuracy / 2
    def dense_accuracy(self, y_true, y_pred):
        """Mask-aware per-frame accuracy;
        ``y_true`` must be one-hot.
        """
        y_true = K.argmax(y_true, 2)
        return self.sparse_accuracy(y_true, y_pred)
    def get_config(self):
        config = {
            'lr_multiplier': self.lr_multiplier,
            'hidden_dim': self.hidden_dim,
        }
        base_config = super(MaximumEntropyMarkovModel, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
# Register all custom layers globally so keras.models.load_model can
# deserialize models built with them without passing custom_objects.
custom_objects = {
    'Embedding': Embedding,
    'MultiHeadAttention': MultiHeadAttention,
    'LayerNormalization': LayerNormalization,
    'PositionEmbedding': PositionEmbedding,
    'RelativePositionEmbedding': RelativePositionEmbedding,
    'RelativePositionEmbeddingT5': RelativePositionEmbeddingT5,
    'FeedForward': FeedForward,
    'EmbeddingDense': EmbeddingDense,
    'ConditionalRandomField': ConditionalRandomField,
    'MaximumEntropyMarkovModel': MaximumEntropyMarkovModel,
}
keras.utils.get_custom_objects().update(custom_objects)
| 31,362 | 33.464835 | 80 | py |
BiRTE | BiRTE-main/bert4keras/snippets.py | #! -*- coding: utf-8 -*-
# 代码合集
import six
import logging
import numpy as np
import re
import sys
_open_ = open
is_py2 = six.PY2
if not is_py2:
basestring = str
def is_string(s):
    """Return True if ``s`` is a (unicode or byte) string."""
    string_types = (basestring,)
    return isinstance(s, string_types)
def strQ2B(ustring):
    """Convert full-width characters in ``ustring`` to their half-width
    equivalents.

    The full-width space (U+3000) maps to an ASCII space; the full-width
    block U+FF01..U+FF5E maps to ASCII 0x21..0x7E; everything else is
    passed through unchanged.

    Bug fixes vs. the original: the result string was built but never
    returned, and ``unichr`` raised NameError on Python 3 (``chr`` is
    the correct builtin there; on Python 2 it handled only ASCII after
    the shift, which is exactly the converted range).
    """
    rstring = ''
    for uchar in ustring:
        inside_code = ord(uchar)
        if inside_code == 12288:  # full-width space -> ASCII space
            inside_code = 32
        elif 65281 <= inside_code <= 65374:  # other full-width chars
            inside_code -= 65248
        rstring += chr(inside_code)
    return rstring
def string_matching(s, keywords):
    """Return True if ``s`` matches at least one regex pattern
    in ``keywords``.
    """
    return any(re.search(pattern, s) for pattern in keywords)
def convert_to_unicode(text, encoding='utf-8'):
    """Decode ``text`` to the unicode type if it is a byte string
    (input assumed utf-8 by default); unicode input passes through.
    """
    byte_type = str if is_py2 else bytes
    if isinstance(text, byte_type):
        text = text.decode(encoding, 'ignore')
    return text
def convert_to_str(text, encoding='utf-8'):
    """Convert ``text`` to the native ``str`` type (encode on Python 2,
    decode bytes on Python 3); input assumed utf-8 by default.
    """
    if is_py2:
        if isinstance(text, unicode):
            text = text.encode(encoding, 'ignore')
    elif isinstance(text, bytes):
        text = text.decode(encoding, 'ignore')
    return text
class open:
    """Mimic the builtin ``open`` so that the same call works on both
    Python 2 and Python 3 (py2's open has no ``encoding`` argument, so
    encoding/decoding is done manually via the convert helpers).
    """
    def __init__(self, name, mode='r', encoding=None):
        if is_py2:
            self.file = _open_(name, mode)
        else:
            self.file = _open_(name, mode, encoding=encoding)
        self.encoding = encoding
    def __iter__(self):
        # Yield lines, decoded to unicode when an encoding was given.
        for l in self.file:
            if self.encoding:
                l = convert_to_unicode(l, self.encoding)
            yield l
    def read(self):
        text = self.file.read()
        if self.encoding:
            text = convert_to_unicode(text, self.encoding)
        return text
    def write(self, text):
        # Encode to the native str type on py2 before writing.
        if self.encoding:
            text = convert_to_str(text, self.encoding)
        self.file.write(text)
    def flush(self):
        self.file.flush()
    def close(self):
        self.file.close()
    def __enter__(self):
        return self
    def __exit__(self, type, value, tb):
        self.close()
class Progress:
    """Lightweight progress logger — a simpler, more controllable tqdm.

    iterable: the iterable being consumed;
    period: log every ``period`` items;
    steps: total number of items (defaults to len(iterable) when known);
    desc: optional description prepended to each log line.
    """
    def __init__(self, iterable, period=1, steps=None, desc=None):
        self.iterable = iterable
        self.period = period
        if hasattr(iterable, '__len__'):
            self.steps = len(iterable)
        else:
            self.steps = steps
        self.desc = desc
        if self.steps:
            template = u'%s/%s passed' % ('%s', self.steps)
        else:
            template = u'%s passed'
        if self.desc:
            template = self.desc + ' - ' + template
        self._format_ = template
        self.logger = logging.getLogger()
    def __iter__(self):
        count = 0
        for item in self.iterable:
            count += 1
            if count % self.period == 0:
                self.logger.info(self._format_ % count)
            yield item
def parallel_apply(
    func, iterable, workers, max_queue_size, callback=None, dummy=False
):
    """Apply ``func`` to every element of ``iterable`` using multiple
    processes or threads.

    Note: the application is asynchronous and unordered — feeding
    a, b, c may come back as func(c), func(a), func(b).

    Args:
        dummy: False for multiprocessing, True for multithreading;
        callback: optional handler for each single result; when given,
            results are not accumulated and the function returns None.
    """
    if dummy:
        from multiprocessing.dummy import Pool, Queue
    else:
        from multiprocessing import Pool, Queue
    in_queue, out_queue = Queue(max_queue_size), Queue()
    def worker_step(in_queue, out_queue):
        # Wrap the single-step function into an endless worker loop.
        while True:
            d = in_queue.get()
            r = func(d)
            out_queue.put(r)
    # Start the worker processes/threads (worker_step is the initializer,
    # so each worker immediately enters its consume loop).
    pool = Pool(workers, worker_step, (in_queue, out_queue))
    if callback is None:
        results = []
    # Drain whatever results are currently available.
    def process_out_queue():
        out_count = 0
        for _ in range(out_queue.qsize()):
            d = out_queue.get()
            out_count += 1
            if callback is None:
                results.append(d)
            else:
                callback(d)
        return out_count
    # Feed inputs while periodically collecting outputs so neither
    # queue can grow without bound.
    in_count, out_count = 0, 0
    for d in iterable:
        in_count += 1
        while True:
            try:
                in_queue.put(d, block=False)
                break
            except six.moves.queue.Full:
                out_count += process_out_queue()
        if in_count % max_queue_size == 0:
            out_count += process_out_queue()
    while out_count != in_count:
        out_count += process_out_queue()
    pool.terminate()
    if callback is None:
        return results
def sequence_padding(inputs, length=None, padding=0):
    """Numpy helper: truncate/pad sequences to a common length along
    the first axis and stack them into one array.
    """
    if length is None:
        length = max(len(seq) for seq in inputs)
    pad_width = [(0, 0) for _ in np.shape(inputs[0])]
    padded = []
    for seq in inputs:
        seq = seq[:length]
        pad_width[0] = (0, length - len(seq))
        padded.append(
            np.pad(seq, pad_width, 'constant', constant_values=padding)
        )
    return np.array(padded)
def is_one_of(x, ys):
    """Identity-based membership test.

    Equivalent to ``x in ys`` but compares with ``is``, which also works
    for objects whose ``==`` raises or returns non-booleans.
    """
    return any(x is y for y in ys)
class DataGenerator(object):
    """Template for data generators: subclasses implement ``__iter__``
    to yield batches; ``forfit`` wraps it into an endless generator for
    ``fit_generator``.
    """
    def __init__(self, data, batch_size=32, buffer_size=None):
        self.data = data
        self.batch_size = batch_size
        if hasattr(self.data, '__len__'):
            # Number of batches, rounding the last partial batch up.
            self.steps = len(self.data) // self.batch_size
            if len(self.data) % self.batch_size != 0:
                self.steps += 1
        else:
            self.steps = None
        self.buffer_size = buffer_size or batch_size * 1000
    def __len__(self):
        return self.steps
    def sample(self, random=False):
        """Yield samples one at a time, each paired with an ``is_end``
        flag that is True only for the final sample.
        """
        if random:
            if self.steps is None:
                # Unknown length: shuffle through a bounded buffer
                # (approximate shuffling for streaming data).
                def generator():
                    caches, isfull = [], False
                    for d in self.data:
                        caches.append(d)
                        if isfull:
                            i = np.random.randint(len(caches))
                            yield caches.pop(i)
                        elif len(caches) == self.buffer_size:
                            isfull = True
                    while caches:
                        i = np.random.randint(len(caches))
                        yield caches.pop(i)
            else:
                # Known length: shuffle indices up front.
                def generator():
                    indices = list(range(len(self.data)))
                    np.random.shuffle(indices)
                    for i in indices:
                        yield self.data[i]
            data = generator()
        else:
            data = iter(self.data)
        # Look-ahead by one element so we can flag the last sample.
        d_current = next(data)
        for d_next in data:
            yield False, d_current
            d_current = d_next
        yield True, d_current
    def __iter__(self, random=False):
        raise NotImplementedError
    def forfit(self):
        # Endless generator for keras fit_generator (shuffled each epoch).
        while True:
            for d in self.__iter__(True):
                yield d
def softmax(x, axis=-1):
    """Numpy softmax along ``axis``, max-shifted for numerical
    stability.
    """
    shifted = x - x.max(axis=axis, keepdims=True)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=axis, keepdims=True)
class AutoRegressiveDecoder(object):
    """Base class for autoregressive sequence decoding,
    providing beam search and random sampling strategies.
    """
    def __init__(self, start_id, end_id, maxlen, minlen=None):
        self.start_id = start_id
        self.end_id = end_id
        self.maxlen = maxlen
        self.minlen = minlen or 1
        if start_id is None:
            # No BOS token: start from an empty output sequence.
            self.first_output_ids = np.empty((1, 0), dtype=int)
        else:
            self.first_output_ids = np.array([[self.start_id]])
    @staticmethod
    def set_rtype(default='probas'):
        """Decorator that adds an ``rtype`` argument to ``predict`` and
        converts between probabilities and logits accordingly.
        """
        def actual_decorator(predict):
            def new_predict(self, inputs, output_ids, step, rtype=default):
                assert rtype in ['probas', 'logits']
                result = predict(self, inputs, output_ids, step)
                if default == 'probas':
                    if rtype == 'probas':
                        return result
                    else:
                        # log of probabilities ~ logits (epsilon avoids log 0)
                        return np.log(result + 1e-12)
                else:
                    if rtype == 'probas':
                        return softmax(result, -1)
                    else:
                        return result
            return new_predict
        return actual_decorator
    def predict(self, inputs, output_ids, step, rtype='logits'):
        """User-defined single-step prediction function.

        ``rtype`` is 'probas' or 'logits'; implementations should return
        normalized probabilities for 'probas' and pre-softmax scores (or
        log-probabilities) for 'logits'.
        """
        raise NotImplementedError
    def beam_search(self, inputs, topk):
        """Beam-search decoding; ``topk`` is the beam size.
        Returns the best decoded sequence.
        """
        inputs = [np.array([i]) for i in inputs]
        output_ids, output_scores = self.first_output_ids, np.zeros(1)
        for step in range(self.maxlen):
            scores = self.predict(inputs, output_ids, step, 'logits')  # current step scores
            if step == 0:  # after the first step, replicate inputs topk times
                inputs = [np.repeat(i, topk, axis=0) for i in inputs]
            scores = output_scores.reshape((-1, 1)) + scores  # accumulated scores
            indices = scores.argpartition(-topk, axis=None)[-topk:]  # keep only topk
            indices_1 = indices // scores.shape[1]  # row (beam) index
            indices_2 = (indices % scores.shape[1]).reshape((-1, 1))  # column (token) index
            output_ids = np.concatenate([output_ids[indices_1], indices_2],
                                        1)  # updated output sequences
            output_scores = np.take_along_axis(
                scores, indices, axis=None
            )  # updated scores
            if output_ids.shape[1] >= self.minlen:  # minimum-length check
                best_one = output_scores.argmax()  # the best-scoring beam
                if indices_2[best_one, 0] == self.end_id:  # already terminated
                    return output_ids[best_one]  # emit it directly
                else:  # otherwise keep only the unfinished beams
                    flag = (indices_2[:, 0] != self.end_id)  # mark unfinished sequences
                    if not flag.all():  # some beams have finished
                        inputs = [i[flag] for i in inputs]  # drop finished sequences
                        output_ids = output_ids[flag]  # drop finished sequences
                        output_scores = output_scores[flag]  # drop finished sequences
                        topk = flag.sum()  # shrink topk accordingly
        # Max length reached: output the best beam so far
        return output_ids[output_scores.argmax()]
    def random_sample(self, inputs, n, topk=None, topp=None):
        """Randomly sample ``n`` decoded sequences.

        A non-None ``topk`` restricts each step to the topk most likely
        tokens; a non-None ``topp`` restricts each step to the smallest
        set of tokens whose probabilities sum to at least topp (nucleus
        sampling). Returns a list of n decoded sequences.
        """
        inputs = [np.array([i]) for i in inputs]
        output_ids = self.first_output_ids
        results = []
        for step in range(self.maxlen):
            probas = self.predict(inputs, output_ids, step, 'probas')  # current step probabilities
            probas /= probas.sum(axis=1, keepdims=True)  # ensure normalization
            if step == 0:  # after the first step, replicate everything n times
                probas = np.repeat(probas, n, axis=0)
                inputs = [np.repeat(i, n, axis=0) for i in inputs]
                output_ids = np.repeat(output_ids, n, axis=0)
            if topk is not None:
                k_indices = probas.argpartition(-topk,
                                                axis=1)[:, -topk:]  # keep only topk
                probas = np.take_along_axis(probas, k_indices, axis=1)  # topk probabilities
                probas /= probas.sum(axis=1, keepdims=True)  # renormalize
            if topp is not None:
                p_indices = probas.argsort(axis=1)[:, ::-1]  # sort descending
                probas = np.take_along_axis(probas, p_indices, axis=1)  # sorted probabilities
                cumsum_probas = np.cumsum(probas, axis=1)  # cumulative probabilities
                flag = np.roll(cumsum_probas >= topp, 1, axis=1)  # mark the tail beyond topp
                flag[:, 0] = False  # with np.roll above, shifts the mark by one position
                probas[flag] = 0  # zero out the tail
                probas /= probas.sum(axis=1, keepdims=True)  # renormalize
            sample_func = lambda p: np.random.choice(len(p), p=p)  # sample by probability
            sample_ids = np.apply_along_axis(sample_func, 1, probas)  # perform sampling
            sample_ids = sample_ids.reshape((-1, 1))  # align shapes
            if topp is not None:
                sample_ids = np.take_along_axis(
                    p_indices, sample_ids, axis=1
                )  # map back to original token ids
            if topk is not None:
                sample_ids = np.take_along_axis(
                    k_indices, sample_ids, axis=1
                )  # map back to original token ids
            output_ids = np.concatenate([output_ids, sample_ids], 1)  # append sampled tokens
            if output_ids.shape[1] >= self.minlen:  # minimum-length check
                flag = (sample_ids[:, 0] == self.end_id)  # mark finished sequences
                if flag.any():  # some have finished
                    for ids in output_ids[flag]:  # store finished sequences
                        results.append(ids)
                    flag = (flag == False)  # mark unfinished sequences
                    inputs = [i[flag] for i in inputs]  # keep only unfinished inputs
                    output_ids = output_ids[flag]  # keep only unfinished candidates
                    if len(output_ids) == 0:
                        break
        # Any sequences still unfinished go straight into the results
        for ids in output_ids:
            results.append(ids)
        # Return the collected sequences
        return results
def insert_arguments(**arguments):
    """Decorator that injects default attributes onto ``self``
    (mainly for a class's ``__init__``).

    Each keyword given here becomes an attribute on ``self``; a caller
    may override the default by passing a keyword argument of the same
    name, which is consumed before the wrapped function runs.
    """
    def actual_decorator(func):
        def new_func(self, *args, **kwargs):
            for name, default in arguments.items():
                value = kwargs.pop(name, default)
                setattr(self, name, value)
            return func(self, *args, **kwargs)
        return new_func
    return actual_decorator
def delete_arguments(*arguments):
    """Decorator that forbids the listed keyword arguments
    (mainly for a class's ``__init__``): passing any of them raises
    TypeError, mimicking an unknown-keyword error.
    """
    def actual_decorator(func):
        def new_func(self, *args, **kwargs):
            forbidden = [k for k in arguments if k in kwargs]
            if forbidden:
                raise TypeError(
                    '%s got an unexpected keyword argument \'%s\'' %
                    (self.__class__.__name__, forbidden[0])
                )
            return func(self, *args, **kwargs)
        return new_func
    return actual_decorator
def groupby(iterable, key=None):
    """Group consecutive elements, like ``itertools.groupby`` but with
    ``key`` supplied as an iterable of key values rather than a function.

    Returns a list of ``(key, [values...])`` pairs, one per run of equal
    consecutive keys. When ``key`` is None, each element is its own key.

    Bug fix: when ``key`` was None the original zipped a single-pass
    iterable with itself, which silently pairs *alternating* elements;
    the input is now materialized once so generators group correctly.
    """
    if key is None:
        iterable = list(iterable)  # make it safe to traverse twice
        key = iterable
    result = []
    for i, (k, v) in enumerate(zip(key, iterable)):
        if i == 0:
            result.append((k, [v]))
            last_k = k
        else:
            if k == last_k:
                result[-1][1].append(v)
            else:
                result.append((k, [v]))
                last_k = k
    return result
class Hook:
    """Module wrapper that makes the ``uniout`` import lazy — it is only
    triggered when the attribute is actually imported/accessed.
    """
    def __init__(self, module):
        self.module = module
    def __getattr__(self, attr):
        """Makes ``from bert4keras.backend import uniout`` equivalent
        to ``import uniout`` (Python 2 only; under Python 3 this is a
        no-op). Every other attribute is forwarded to the wrapped module.
        """
        if attr == 'uniout':
            if is_py2:
                import uniout
        else:
            return getattr(self.module, attr)
# Replace this module object in sys.modules with the lazy wrapper,
# preserving the module's __name__.
Hook.__name__ = __name__
sys.modules[__name__] = Hook(sys.modules[__name__])
del Hook
| 15,499 | 28.807692 | 80 | py |
BiRTE | BiRTE-main/bert4keras/backend.py | # -*- coding: utf-8 -*-
# 分离后端函数,主要是为了同时兼容原生keras和tf.keras
# 通过设置环境变量TF_KERAS=1来切换tf.keras
import os, sys
from distutils.util import strtobool
import numpy as np
import tensorflow as tf
# 判断是tf.keras还是纯keras的标记
is_tf_keras = strtobool(os.environ.get('TF_KERAS', '0'))
if is_tf_keras:
import tensorflow.keras as keras
import tensorflow.keras.backend as K
sys.modules['keras'] = keras
else:
import keras
import keras.backend as K
def gelu_erf(x):
    """Exact GELU computed directly via the error function."""
    cdf = 0.5 * (1.0 + tf.math.erf(x / np.sqrt(2.0)))
    return x * cdf
def gelu_tanh(x):
    """GELU approximated with a tanh, as in the original BERT code."""
    inner = np.sqrt(2 / np.pi) * (x + 0.044715 * K.pow(x, 3))
    cdf = 0.5 * (1.0 + K.tanh(inner))
    return x * cdf
def set_gelu(version):
    """Select the global 'gelu' implementation: 'erf' or 'tanh'."""
    version = version.lower()
    assert version in ['erf', 'tanh'], 'gelu version must be erf or tanh'
    impl = gelu_erf if version == 'erf' else gelu_tanh
    keras.utils.get_custom_objects()['gelu'] = impl
def piecewise_linear(t, schedule):
    """Piecewise-linear schedule as a symbolic function of step ``t``.

    ``schedule`` is a dict like {1000: 1, 2000: 0.1}: for t in [0, 1000]
    the output ramps linearly from 0 to 1; for t in [1000, 2000] it
    ramps from 1 down to 0.1; for t > 2000 it stays at 0.1.
    """
    schedule = sorted(schedule.items())
    if schedule[0][0] != 0:
        # Implicit starting point (0, 0.0) unless one was given.
        schedule = [(0, 0.0)] + schedule
    x = K.constant(schedule[0][1], dtype=K.floatx())
    t = K.cast(t, K.floatx())
    for i in range(len(schedule)):
        t_begin = schedule[i][0]
        x_begin = x
        if i != len(schedule) - 1:
            # Linear ramp from this knot to the next.
            dx = schedule[i + 1][1] - schedule[i][1]
            dt = schedule[i + 1][0] - schedule[i][0]
            slope = 1.0 * dx / dt
            x = schedule[i][1] + slope * (t - t_begin)
        else:
            # After the last knot the value is held constant.
            x = K.constant(schedule[i][1], dtype=K.floatx())
        # Select this segment only once t has passed its start.
        x = K.switch(t >= t_begin, x, x_begin)
    return x
def search_layer(inputs, name, exclude_from=None):
    """Locate a layer by name starting from a layer or a layer output.

    Recursively walks the inbound graph upwards from ``inputs`` until a
    layer called ``name`` is found; returns None if there is none.
    ``exclude_from`` tracks already-visited layers to avoid revisiting.
    """
    if exclude_from is None:
        exclude_from = set()
    if isinstance(inputs, keras.layers.Layer):
        layer = inputs
    else:
        # A tensor: recover the layer that produced it.
        layer = inputs._keras_history[0]
    if layer.name == name:
        return layer
    elif layer in exclude_from:
        return None
    else:
        exclude_from.add(layer)
        if isinstance(layer, keras.models.Model):
            # Nested model: scan its layers directly.
            model = layer
            for layer in model.layers:
                if layer.name == name:
                    return layer
        # Recurse into the layers feeding this one.
        inbound_layers = layer._inbound_nodes[0].inbound_layers
        if not isinstance(inbound_layers, list):
            inbound_layers = [inbound_layers]
        if len(inbound_layers) > 0:
            for layer in inbound_layers:
                layer = search_layer(layer, name, exclude_from)
                if layer is not None:
                    return layer
def sequence_masking(x, mask, mode=0, axis=None):
    """Mask a sequence tensor along the sequence axis.

    Args:
        mask: (batch_size, seq_len) 0/1 matrix; None disables masking.
        mode: 0 multiplies by the mask; 1 subtracts a large constant at
            padded positions (suitable for pre-softmax logits).
        axis: axis of the sequence dimension; defaults to 1.

    Fix vs. original: the assertion message typo 'muse' -> 'must'.
    """
    if mask is None or mode not in [0, 1]:
        return x
    else:
        if axis is None:
            axis = 1
        if axis == -1:
            axis = K.ndim(x) - 1
        assert axis > 0, 'axis must be greater than 0'
        # Align the mask's rank with x: insert dims before and after
        # the sequence axis so broadcasting lines up.
        for _ in range(axis - 1):
            mask = K.expand_dims(mask, 1)
        for _ in range(K.ndim(x) - K.ndim(mask) - axis + 1):
            mask = K.expand_dims(mask, K.ndim(mask))
        if mode == 0:
            return x * mask
        else:
            return x - (1 - mask) * 1e12
def batch_gather(params, indices):
    """Same as the old-TF ``tf.batch_gather``: gather along the batch
    dimensions, trying the modern ``batch_dims`` API first and falling
    back to the legacy function.

    Bug fix: the original formatted the error with ``e.message``, an
    attribute removed from exceptions in Python 3; ``str(e)`` is the
    portable spelling.
    """
    try:
        return tf.gather(params, indices, batch_dims=-1)
    except Exception as e1:
        try:
            return tf.batch_gather(params, indices)
        except Exception as e2:
            raise ValueError('%s\n%s\n' % (str(e1), str(e2)))
def pool1d(
    x,
    pool_size,
    strides=1,
    padding='valid',
    data_format=None,
    pool_mode='max'
):
    """Pool a sequence of vectors by lifting it to a height-1 image and
    applying 2D pooling.
    """
    expanded = K.expand_dims(x, 1)
    pooled = K.pool2d(
        expanded,
        pool_size=(1, pool_size),
        strides=(1, strides),
        padding=padding,
        data_format=data_format,
        pool_mode=pool_mode
    )
    return pooled[:, 0]
def divisible_temporal_padding(x, n):
    """Right-pad a 1D vector sequence so its length is divisible by
    ``n`` (no-op when it already is).
    """
    remainder = K.shape(x)[1] % n
    pad_len = K.switch(remainder > 0, n - remainder, 0)
    return K.temporal_padding(x, (0, pad_len))
def swish(x):
    """Swish activation (wrapped so the function has a ``__name__``
    attribute, which Keras serialization needs).
    """
    return tf.nn.swish(x)
def leaky_relu(x, alpha=0.2):
    """Leaky ReLU activation (wrapped so the function has a ``__name__``
    attribute, which Keras serialization needs).
    """
    return tf.nn.leaky_relu(x, alpha=alpha)
def symbolic(f):
    """Identity decorator (compatibility shim for older Keras versions
    that lack ``K.symbolic``).
    """
    return f
# Add a symbolic method (decorator) to older Keras versions,
# for compatibility with the code in optimizers.py.
K.symbolic = getattr(K, 'symbolic', None) or symbolic
# Register the custom activations globally so saved models can
# deserialize them by name.
custom_objects = {
    'gelu_erf': gelu_erf,
    'gelu_tanh': gelu_tanh,
    'gelu': gelu_erf,
    'swish': swish,
    'leaky_relu': leaky_relu,
}
keras.utils.get_custom_objects().update(custom_objects)
| 5,182 | 23.799043 | 73 | py |
BiRTE | BiRTE-main/bert4keras/models.py | #! -*- coding: utf-8 -*-
# 主要模型
import numpy as np
from bert4keras.layers import *
from bert4keras.snippets import delete_arguments
from keras.models import Model
import json
class Transformer(object):
    """Base class for transformer models: owns the hyperparameters,
    the layer cache, and the build/weight-loading machinery; subclasses
    implement the input/embedding/main/final stages.
    """
    def __init__(
        self,
        vocab_size,  # vocabulary size
        hidden_size,  # hidden (encoding) dimension
        num_hidden_layers,  # total number of transformer layers
        num_attention_heads,  # number of attention heads
        intermediate_size,  # hidden dimension of the FeedForward block
        hidden_act,  # activation of the FeedForward hidden layer
        dropout_rate=None,  # dropout rate
        embedding_size=None,  # optional explicit embedding size
        attention_key_size=None,  # head_size of Q and K in attention
        sequence_length=None,  # optional fixed sequence length
        keep_tokens=None,  # token ids to keep (vocabulary pruning)
        layers=None,  # externally supplied Keras layers (for sharing)
        name=None,  # model name
        **kwargs
    ):
        if keep_tokens is None:
            self.vocab_size = vocab_size
        else:
            # Pruned vocabulary: effective size is the kept subset.
            self.vocab_size = len(keep_tokens)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.attention_head_size = hidden_size // num_attention_heads
        self.attention_key_size = attention_key_size or self.attention_head_size
        self.intermediate_size = intermediate_size
        self.dropout_rate = dropout_rate or 0
        self.hidden_act = hidden_act
        self.embedding_size = embedding_size or hidden_size
        self.sequence_length = sequence_length
        self.keep_tokens = keep_tokens
        self.attention_mask = None
        self.position_bias = None
        # Name -> layer cache so apply() can reuse layers by name.
        self.layers = {} if layers is None else layers
        self.name = name
        self.built = False
    def build(
        self,
        layer_norm_cond=None,
        layer_norm_cond_hidden_size=None,
        layer_norm_cond_hidden_act=None,
        additional_input_layers=None,
        **kwargs
    ):
        """Model construction entry point.

        The layer_norm_* parameters implement Conditional Layer
        Normalization, i.e. a BERT conditioned on a fixed-length vector.
        """
        if self.built:
            return None
        # Input
        inputs = self.get_inputs()
        self.set_inputs(inputs, additional_input_layers)
        # Other
        self.layer_norm_conds = [
            layer_norm_cond,
            layer_norm_cond_hidden_size,
            layer_norm_cond_hidden_act or 'linear',
        ]
        # Call
        outputs = self.call(inputs)
        self.set_outputs(outputs)
        # Model
        self.model = Model(self.inputs, self.outputs, name=self.name)
        self.built = True
    def call(self, inputs):
        """Define the model's forward pipeline:
        embeddings -> N main layers -> final layers.
        """
        # Embedding
        outputs = self.apply_embeddings(inputs)
        # Main
        for i in range(self.num_hidden_layers):
            outputs = self.apply_main_layers(outputs, i)
        # Final
        outputs = self.apply_final_layers(outputs)
        return outputs
    def apply(self, inputs, layer=None, arguments=None, **kwargs):
        """Call a layer through the cache so same-named layers are
        automatically reused (weight sharing).

        inputs: output of the previous layer;
        layer: layer class to call;
        arguments: parameters passed to layer.call;
        kwargs: parameters passed to the layer constructor.
        """
        if layer is Dropout and self.dropout_rate == 0:
            # Skip no-op dropout entirely.
            return inputs
        arguments = arguments or {}
        name = kwargs.get('name')
        if name not in self.layers:
            layer = layer(**kwargs)
            name = layer.name
            self.layers[name] = layer
        return self.layers[name](inputs, **arguments)
    def get_inputs(self):
        raise NotImplementedError
    def apply_embeddings(self, inputs):
        raise NotImplementedError
    def apply_main_layers(self, inputs, index):
        raise NotImplementedError
    def apply_final_layers(self, inputs):
        raise NotImplementedError
    def compute_attention_mask(self, inputs=None):
        """Attention mask applied at each layer (None = no mask).
        """
        return self.attention_mask
    def compute_position_bias(self, inputs=None):
        """Per-layer position bias (generally used for relative
        position encodings).
        """
        return self.position_bias
    def set_inputs(self, inputs, additional_input_layers=None):
        """Set the ``input`` and ``inputs`` attributes.
        """
        if inputs is None:
            inputs = []
        elif not isinstance(inputs, list):
            inputs = [inputs]
        inputs = inputs[:]
        if additional_input_layers is not None:
            if not isinstance(additional_input_layers, list):
                additional_input_layers = [additional_input_layers]
            inputs.extend(additional_input_layers)
        self.inputs = inputs
        if len(inputs) > 1:
            self.input = inputs
        else:
            self.input = inputs[0]
    def set_outputs(self, outputs):
        """Set the ``output`` and ``outputs`` attributes.
        """
        if not isinstance(outputs, list):
            outputs = [outputs]
        outputs = outputs[:]
        self.outputs = outputs
        if len(outputs) > 1:
            self.output = outputs
        else:
            self.output = outputs[0]
    @property
    def initializer(self):
        """Default weight initializer: truncated normal.
        """
        return keras.initializers.TruncatedNormal(stddev=0.02)
    def simplify(self, inputs):
        """Filter None entries out of a list (and unwrap singletons).
        """
        inputs = [i for i in inputs if i is not None]
        if len(inputs) == 1:
            inputs = inputs[0]
        return inputs
    def load_variable(self, checkpoint, name):
        """Load a single variable from a checkpoint.
        """
        return tf.train.load_variable(checkpoint, name)
    def create_variable(self, name, value):
        """Create a TensorFlow variable (used when saving checkpoints).
        """
        return tf.Variable(value, name=name)
    def variable_mapping(self):
        """Mapping from Keras layer names to checkpoint variable names.
        """
        return {}
    def load_weights_from_checkpoint(self, checkpoint, mapping=None):
        """Load weights from a checkpoint according to ``mapping``.
        """
        mapping = mapping or self.variable_mapping()
        mapping = {k: v for k, v in mapping.items() if k in self.layers}
        weight_value_pairs = []
        for layer, variables in mapping.items():
            layer = self.layers[layer]
            weights = layer.trainable_weights
            values = [self.load_variable(checkpoint, v) for v in variables]
            if isinstance(layer, MultiHeadAttention):
                """If key_size differs from head_size, project the
                corresponding weights to the right shape with a random
                orthogonal matrix.
                """
                count = 2
                if layer.use_bias:
                    count += 2
                heads = self.num_attention_heads
                head_size = self.attention_head_size
                key_size = self.attention_key_size
                W = np.linalg.qr(np.random.randn(key_size, head_size))[0].T
                if layer.scaled_dot_product:
                    # Compensate the attention scaling for the new key size.
                    W = W * key_size**0.25 / head_size**0.25
                for i in range(count):
                    w, v = weights[i], values[i]
                    w_shape, v_shape = K.int_shape(w), v.shape
                    if w_shape[-1] != v_shape[-1]:
                        pre_shape = w_shape[:-1]
                        v = v.reshape(pre_shape + (heads, head_size))
                        v = np.dot(v, W)
                        v = v.reshape(pre_shape + (heads * key_size,))
                        values[i] = v
            weight_value_pairs.extend(zip(weights, values))
        K.batch_set_value(weight_value_pairs)
    def save_weights_as_checkpoint(self, filename, mapping=None):
        """Save the model's weights in checkpoint format according to
        ``mapping``.
        """
        mapping = mapping or self.variable_mapping()
        mapping = {k: v for k, v in mapping.items() if k in self.layers}
        with tf.Graph().as_default():
            for layer, variables in mapping.items():
                layer = self.layers[layer]
                values = K.batch_get_value(layer.trainable_weights)
                for name, value in zip(variables, values):
                    self.create_variable(name, value)
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                saver = tf.train.Saver()
                saver.save(sess, filename, write_meta_graph=False)
class BERT(Transformer):
"""构建BERT模型
"""
def __init__(
self,
max_position, # 序列最大长度
with_pool=False, # 是否包含Pool部分
with_nsp=False, # 是否包含NSP部分
with_mlm=False, # 是否包含MLM部分
custom_position_ids=False, # 是否自行传入位置id
**kwargs # 其余参数
):
super(BERT, self).__init__(**kwargs)
self.max_position = max_position
self.with_pool = with_pool
self.with_nsp = with_nsp
self.with_mlm = with_mlm
self.custom_position_ids = custom_position_ids
def get_inputs(self):
"""BERT的输入是token_ids和segment_ids
(但允许自行传入位置id,以实现一些特殊需求)
"""
x_in = Input(shape=(self.sequence_length,), name='Input-Token')
s_in = Input(shape=(self.sequence_length,), name='Input-Segment')
if self.custom_position_ids:
p_in = Input(shape=(self.sequence_length,), name='Input-Position')
return [x_in, s_in, p_in]
else:
return [x_in, s_in]
def apply_embeddings(self, inputs):
"""BERT的embedding是token、position、segment三者embedding之和
"""
x, s = inputs[:2]
z = self.layer_norm_conds[0]
if self.custom_position_ids:
p = inputs[2]
else:
p = None
x = self.apply(
inputs=x,
layer=Embedding,
input_dim=self.vocab_size,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
mask_zero=True,
name='Embedding-Token'
)
s = self.apply(
inputs=s,
layer=Embedding,
input_dim=2,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
name='Embedding-Segment'
)
x = self.apply(inputs=[x, s], layer=Add, name='Embedding-Token-Segment')
x = self.apply(
inputs=self.simplify([x, p]),
layer=PositionEmbedding,
input_dim=self.max_position,
output_dim=self.embedding_size,
merge_mode='add',
embeddings_initializer=self.initializer,
custom_position_ids=self.custom_position_ids,
name='Embedding-Position'
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='Embedding-Norm'
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='Embedding-Dropout'
)
if self.embedding_size != self.hidden_size:
x = self.apply(
inputs=x,
layer=Dense,
units=self.hidden_size,
kernel_initializer=self.initializer,
name='Embedding-Mapping'
)
return x
    def apply_main_layers(self, inputs, index):
        """One BERT transformer block (the ``index``-th), based on self-attention.

        Order: Att --> Add --> LN --> FFN --> Add --> LN
        """
        x = inputs
        z = self.layer_norm_conds[0]  # condition for conditional LayerNorm
        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_mask()
        # Self Attention
        xi, x, arguments = x, [x, x, x], {'a_mask': None}
        if attention_mask is not None:
            arguments['a_mask'] = True
            x.append(attention_mask)  # inputs become [q, k, v, a_mask]
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        # Residual connection around the attention sub-layer.
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        # Residual connection around the feed-forward sub-layer.
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def apply_final_layers(self, inputs):
        """Decide the model outputs from the remaining flags.

        Always keeps the raw sequence output; optionally builds the pooled
        CLS output (``with_pool``), the NSP probabilities (``with_nsp``)
        and/or the MLM probabilities (``with_mlm``), then collapses the
        resulting list (see the tail of this method).
        """
        x = inputs
        z = self.layer_norm_conds[0]  # condition for conditional LayerNorm
        outputs = [x]
        if self.with_pool or self.with_nsp:
            # Pooler part (extract the CLS vector)
            x = outputs[0]
            x = self.apply(
                inputs=x,
                layer=Lambda,
                function=lambda x: x[:, 0],
                name='Pooler'
            )
            # with_pool may be True (meaning tanh) or an activation name.
            pool_activation = 'tanh' if self.with_pool is True else self.with_pool
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                activation=pool_activation,
                kernel_initializer=self.initializer,
                name='Pooler-Dense'
            )
            if self.with_nsp:
                # Next Sentence Prediction part
                x = self.apply(
                    inputs=x,
                    layer=Dense,
                    units=2,
                    activation='softmax',
                    kernel_initializer=self.initializer,
                    name='NSP-Proba'
                )
            outputs.append(x)
        if self.with_mlm:
            # Masked Language Model part
            x = outputs[0]
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.embedding_size,
                activation=self.hidden_act,
                kernel_initializer=self.initializer,
                name='MLM-Dense'
            )
            x = self.apply(
                inputs=self.simplify([x, z]),
                layer=LayerNormalization,
                conditional=(z is not None),
                hidden_units=self.layer_norm_conds[1],
                hidden_activation=self.layer_norm_conds[2],
                hidden_initializer=self.initializer,
                name='MLM-Norm'
            )
            # with_mlm may be True (meaning softmax) or an activation name.
            mlm_activation = 'softmax' if self.with_mlm is True else self.with_mlm
            x = self.apply(
                inputs=x,
                layer=EmbeddingDense,  # projection tied to 'Embedding-Token'
                embedding_name='Embedding-Token',
                activation=mlm_activation,
                name='MLM-Proba'
            )
            outputs.append(x)
        # Collapse: no heads -> the bare sequence tensor; one head -> that
        # head alone; several heads -> the list of heads (sequence dropped).
        if len(outputs) == 1:
            outputs = outputs[0]
        elif len(outputs) == 2:
            outputs = outputs[1]
        else:
            outputs = outputs[1:]
        return outputs
def load_variable(self, checkpoint, name):
"""加载单个变量的函数
"""
variable = super(BERT, self).load_variable(checkpoint, name)
if name in [
'bert/embeddings/word_embeddings',
'cls/predictions/output_bias',
]:
if self.keep_tokens is None:
return variable
else:
return variable[self.keep_tokens]
elif name == 'cls/seq_relationship/output_weights':
return variable.T
else:
return variable
def create_variable(self, name, value):
"""在tensorflow中创建一个变量
"""
if name == 'cls/seq_relationship/output_weights':
value = value.T
return super(BERT, self).create_variable(name, value)
def variable_mapping(self):
"""映射到官方BERT权重格式
"""
mapping = {
'Embedding-Token': ['bert/embeddings/word_embeddings'],
'Embedding-Segment': ['bert/embeddings/token_type_embeddings'],
'Embedding-Position': ['bert/embeddings/position_embeddings'],
'Embedding-Norm': [
'bert/embeddings/LayerNorm/beta',
'bert/embeddings/LayerNorm/gamma',
],
'Embedding-Mapping': [
'bert/encoder/embedding_hidden_mapping_in/kernel',
'bert/encoder/embedding_hidden_mapping_in/bias',
],
'Pooler-Dense': [
'bert/pooler/dense/kernel',
'bert/pooler/dense/bias',
],
'NSP-Proba': [
'cls/seq_relationship/output_weights',
'cls/seq_relationship/output_bias',
],
'MLM-Dense': [
'cls/predictions/transform/dense/kernel',
'cls/predictions/transform/dense/bias',
],
'MLM-Norm': [
'cls/predictions/transform/LayerNorm/beta',
'cls/predictions/transform/LayerNorm/gamma',
],
'MLM-Proba': ['cls/predictions/output_bias'],
}
for i in range(self.num_hidden_layers):
prefix = 'bert/encoder/layer_%d/' % i
mapping.update({
'Transformer-%d-MultiHeadSelfAttention' % i: [
prefix + 'attention/self/query/kernel',
prefix + 'attention/self/query/bias',
prefix + 'attention/self/key/kernel',
prefix + 'attention/self/key/bias',
prefix + 'attention/self/value/kernel',
prefix + 'attention/self/value/bias',
prefix + 'attention/output/dense/kernel',
prefix + 'attention/output/dense/bias',
],
'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
prefix + 'attention/output/LayerNorm/beta',
prefix + 'attention/output/LayerNorm/gamma',
],
'Transformer-%d-FeedForward' % i: [
prefix + 'intermediate/dense/kernel',
prefix + 'intermediate/dense/bias',
prefix + 'output/dense/kernel',
prefix + 'output/dense/bias',
],
'Transformer-%d-FeedForward-Norm' % i: [
prefix + 'output/LayerNorm/beta',
prefix + 'output/LayerNorm/gamma',
],
})
return mapping
class ALBERT(BERT):
    """Build the ALBERT model.

    Same block structure as BERT, but the transformer layer names carry no
    depth index, so every depth resolves to the same layer names —
    presumably shared via name-based layer caching in self.apply; verify
    there.
    """
    def apply_main_layers(self, inputs, index):
        """The ALBERT body: a self-attention based block, shared across depths.
        Order: Att --> Add --> LN --> FFN --> Add --> LN
        """
        x = inputs
        z = self.layer_norm_conds[0]  # condition for conditional LayerNorm
        # No '%d' index in the names: all depths use the same names.
        attention_name = 'Transformer-MultiHeadSelfAttention'
        feed_forward_name = 'Transformer-FeedForward'
        attention_mask = self.compute_attention_mask(0)
        # Self Attention
        xi, x, arguments = x, [x, x, x], {'a_mask': None}
        if attention_mask is not None:
            arguments['a_mask'] = True
            x.append(attention_mask)  # inputs become [q, k, v, a_mask]
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        # Residual connection around the attention sub-layer.
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        # Residual connection around the feed-forward sub-layer.
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def variable_mapping(self):
        """Map layer names to the official ALBERT weight format
        (a single shared inner group of transformer weights).
        """
        mapping = super(ALBERT, self).variable_mapping()
        prefix = 'bert/encoder/transformer/group_0/inner_group_0/'
        mapping.update({
            'Transformer-MultiHeadSelfAttention': [
                prefix + 'attention_1/self/query/kernel',
                prefix + 'attention_1/self/query/bias',
                prefix + 'attention_1/self/key/kernel',
                prefix + 'attention_1/self/key/bias',
                prefix + 'attention_1/self/value/kernel',
                prefix + 'attention_1/self/value/bias',
                prefix + 'attention_1/output/dense/kernel',
                prefix + 'attention_1/output/dense/bias',
            ],
            'Transformer-MultiHeadSelfAttention-Norm': [
                prefix + 'LayerNorm/beta',
                prefix + 'LayerNorm/gamma',
            ],
            'Transformer-FeedForward': [
                prefix + 'ffn_1/intermediate/dense/kernel',
                prefix + 'ffn_1/intermediate/dense/bias',
                prefix + 'ffn_1/intermediate/output/dense/kernel',
                prefix + 'ffn_1/intermediate/output/dense/bias',
            ],
            'Transformer-FeedForward-Norm': [
                prefix + 'LayerNorm_1/beta',
                prefix + 'LayerNorm_1/gamma',
            ],
        })
        return mapping
class ALBERT_Unshared(BERT):
    """ALBERT with the weight-sharing constraint undone, used like BERT."""
    def variable_mapping(self):
        """Map layer names to the official ALBERT weight format: every
        depth-indexed layer points at the single shared inner group.
        """
        mapping = super(ALBERT_Unshared, self).variable_mapping()
        prefix = 'bert/encoder/transformer/group_0/inner_group_0/'
        attention = [
            prefix + 'attention_1/self/query/kernel',
            prefix + 'attention_1/self/query/bias',
            prefix + 'attention_1/self/key/kernel',
            prefix + 'attention_1/self/key/bias',
            prefix + 'attention_1/self/value/kernel',
            prefix + 'attention_1/self/value/bias',
            prefix + 'attention_1/output/dense/kernel',
            prefix + 'attention_1/output/dense/bias',
        ]
        attention_norm = [
            prefix + 'LayerNorm/beta',
            prefix + 'LayerNorm/gamma',
        ]
        feed_forward = [
            prefix + 'ffn_1/intermediate/dense/kernel',
            prefix + 'ffn_1/intermediate/dense/bias',
            prefix + 'ffn_1/intermediate/output/dense/kernel',
            prefix + 'ffn_1/intermediate/output/dense/bias',
        ]
        feed_forward_norm = [
            prefix + 'LayerNorm_1/beta',
            prefix + 'LayerNorm_1/gamma',
        ]
        for i in range(self.num_hidden_layers):
            mapping['Transformer-%d-MultiHeadSelfAttention' % i] = attention[:]
            mapping['Transformer-%d-MultiHeadSelfAttention-Norm' % i] = attention_norm[:]
            mapping['Transformer-%d-FeedForward' % i] = feed_forward[:]
            mapping['Transformer-%d-FeedForward-Norm' % i] = feed_forward_norm[:]
        return mapping
class NEZHA(BERT):
    """The NEZHA model proposed by Huawei.
    Link: https://arxiv.org/abs/1909.00204
    """
    def apply_embeddings(self, inputs):
        """NEZHA embeddings: the sum of token and segment embeddings only
        (positions are handled by relative position bias inside attention).
        """
        x, s = inputs
        z = self.layer_norm_conds[0]  # condition for conditional LayerNorm
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,  # id 0 is treated as padding; propagates the mask
            name='Embedding-Token'
        )
        s = self.apply(
            inputs=s,
            layer=Embedding,
            input_dim=2,  # two segment ids, as in BERT
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            name='Embedding-Segment'
        )
        x = self.apply(inputs=[x, s], layer=Add, name='Embedding-Token-Segment')
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Embedding-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            # Project factorized embeddings up to hidden_size.
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """One NEZHA transformer block: self-attention with typical relative
        position bias.
        Order: Att --> Add --> LN --> FFN --> Add --> LN
        """
        x = inputs
        z = self.layer_norm_conds[0]  # condition for conditional LayerNorm
        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_mask()
        position_bias = self.compute_position_bias(x)
        # Self Attention
        xi, x = x, [x, x, x, position_bias]
        arguments = {'a_mask': None, 'p_bias': 'typical_relative'}
        if attention_mask is not None:
            arguments['a_mask'] = True
            # Inserted before position_bias: inputs become
            # [q, k, v, a_mask, p_bias].
            x.insert(3, attention_mask)
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        # Residual connection around the attention sub-layer.
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        # Residual connection around the feed-forward sub-layer.
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def compute_position_bias(self, inputs=None):
        """Classical relative position encoding (built once, then cached)."""
        if self.position_bias is None:
            def sinusoidal(shape, dtype=None):
                """NEZHA uses fixed Sin-Cos position vectors."""
                vocab_size, depth = shape
                embeddings = np.zeros(shape)
                for pos in range(vocab_size):
                    for i in range(depth // 2):
                        theta = pos / np.power(10000, 2. * i / depth)
                        embeddings[pos, 2 * i] = np.sin(theta)
                        embeddings[pos, 2 * i + 1] = np.cos(theta)
                return embeddings
            x = inputs
            self.position_bias = self.apply(
                inputs=[x, x],
                layer=RelativePositionEmbedding,
                # 2*64+1 entries — presumably relative distances clipped to
                # [-64, 64]; confirm against RelativePositionEmbedding.
                input_dim=2 * 64 + 1,
                output_dim=self.attention_head_size,
                embeddings_initializer=sinusoidal,
                name='Embedding-Relative-Position',
                trainable=False  # fixed sinusoidal table, never trained
            )
        return self.position_bias
class ELECTRA(BERT):
    """Google's ELECTRA model.
    Link: https://arxiv.org/abs/2003.10555
    """
    @delete_arguments('with_pool', 'with_mlm')
    def __init__(
        self,
        max_position,  # maximum sequence length
        **kwargs  # remaining arguments
    ):
        """ELECTRA reuses the BERT constructor, but the pool/mlm heads are
        removed (via the decorator) and 'keep_tokens' is dropped as well —
        presumably because the checkpoint has no MLM output bias to subset;
        verify against the ELECTRA checkpoint layout.
        """
        if 'keep_tokens' in kwargs:
            del kwargs['keep_tokens']
        super(ELECTRA, self).__init__(max_position, **kwargs)
    def apply_final_layers(self, inputs):
        """ELECTRA exposes the raw encoder sequence output: no extra heads,
        so the input passes straight through.
        """
        # Fixed: removed the unused locals (x, z) left over from the BERT
        # template; behavior is unchanged.
        return inputs
    def variable_mapping(self):
        """Map layer names to the official ELECTRA weight format."""
        mapping = super(ELECTRA, self).variable_mapping()
        mapping['Embedding-Mapping'] = [
            'electra/embeddings_project/kernel',
            'electra/embeddings_project/bias',
        ]
        # Official ELECTRA checkpoints use the 'electra/' scope, not 'bert/'.
        mapping = {
            k: [i.replace('bert/', 'electra/') for i in v]
            for k, v in mapping.items()
        }
        return mapping
class GPT2_ML(Transformer):
    """Build the GPT2_ML model.
    Link: https://github.com/imcaspar/gpt2-ml
    """
    def __init__(
        self,
        max_position,  # maximum sequence length
        final_activation='softmax',  # activation of the output distribution
        **kwargs  # remaining arguments
    ):
        super(GPT2_ML, self).__init__(**kwargs)
        self.max_position = max_position
        self.final_activation = final_activation
    def get_inputs(self):
        """GPT2_ML's only input is token_ids (no segment ids).
        Note: returns a single tensor, not a list.
        """
        x_in = Input(shape=(self.sequence_length,), name='Input-Token')
        return x_in
    def apply_embeddings(self, inputs):
        """GPT2_ML embeddings: the sum of token and position embeddings."""
        x = inputs
        z = self.layer_norm_conds[0]  # condition for conditional LayerNorm
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,  # id 0 is treated as padding; propagates the mask
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            embeddings_initializer=self.initializer,
            name='Embedding-Position'
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,  # note the explicit, non-default epsilon
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Embedding-Norm'
        )
        if self.embedding_size != self.hidden_size:
            # Project factorized embeddings up to hidden_size.
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """One GPT2_ML transformer block, based on (causal) self-attention.
        Order: Att --> Add --> LN --> FFN --> Add --> LN
        """
        x = inputs
        z = self.layer_norm_conds[0]  # condition for conditional LayerNorm
        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_mask()
        # Self Attention — the causal mask is always applied.
        xi, x, arguments = x, [x, x, x, attention_mask], {'a_mask': True}
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        # Residual connection around the attention sub-layer (no norm here;
        # the first norm of this block is named after the feed-forward part).
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm-0' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        # Residual connection around the feed-forward sub-layer.
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm-1' % feed_forward_name
        )
        return x
    def apply_final_layers(self, inputs):
        """The remaining part: the language-model output head."""
        x = inputs
        z = self.layer_norm_conds[0]  # NOTE(review): unused in this method
        # Language Model part
        x = self.apply(
            inputs=x,
            layer=EmbeddingDense,  # projection tied to 'Embedding-Token'
            embedding_name='Embedding-Token',
            activation=self.final_activation,
            name='LM-Proba'
        )
        return x
    def load_variable(self, checkpoint, name):
        """Load a single variable from the checkpoint."""
        variable = super(GPT2_ML, self).load_variable(checkpoint, name)
        if name == 'newslm/embeddings/word_embed':
            # Optionally restrict the token embedding to a subset of tokens.
            if self.keep_tokens is None:
                return variable
            else:
                return variable[self.keep_tokens]
        else:
            return variable
    def compute_attention_mask(self, inputs=None):
        """Build (once) a lower-triangular attention mask for causal LM."""
        if self.attention_mask is None:
            def lm_mask(s):
                import tensorflow as tf
                seq_len = K.shape(s)[1]
                with K.name_scope('attention_mask'):
                    # K.ones may misbehave here; see
                    # https://github.com/tensorflow/tensorflow/issues/24938
                    ones = tf.ones((1, 1, seq_len, seq_len))
                    a_mask = tf.linalg.band_part(ones, -1, 0)  # lower triangle
                    return a_mask
            self.attention_mask = self.apply(
                inputs=self.inputs[0],
                layer=Lambda,
                function=lm_mask,
                name='Attention-LM-Mask'
            )
        return self.attention_mask
    def variable_mapping(self):
        """Map layer names to the official GPT2_ML weight format."""
        mapping = {
            'Embedding-Token': ['newslm/embeddings/word_embed'],
            'Embedding-Position': ['newslm/embeddings/pos_embed'],
            'Embedding-Norm': [
                'newslm/embeddings/LayerNorm_embed_norm/beta',
                'newslm/embeddings/LayerNorm_embed_norm/gamma',
            ],
        }
        for i in range(self.num_hidden_layers):
            prefix = 'newslm/layer%02d/' % i
            mapping.update({
                'Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'query_layer/kernel',
                    prefix + 'query_layer/bias',
                    prefix + 'key_layer/kernel',
                    prefix + 'key_layer/bias',
                    prefix + 'value_layer/kernel',
                    prefix + 'value_layer/bias',
                    prefix + 'context_projection_layer/kernel',
                    prefix + 'context_projection_layer/bias',
                ],
                'Transformer-%d-FeedForward-Norm-0' % i: [
                    prefix + 'LayerNorm_mlp_ln0/beta',
                    prefix + 'LayerNorm_mlp_ln0/gamma',
                ],
                'Transformer-%d-FeedForward' % i: [
                    prefix + 'intermediate/kernel',
                    prefix + 'intermediate/bias',
                    prefix + 'output/kernel',
                    prefix + 'output/bias',
                ],
                'Transformer-%d-FeedForward-Norm-1' % i: [
                    prefix + 'LayerNorm_mlp_ln1/beta',
                    prefix + 'LayerNorm_mlp_ln1/gamma',
                ],
            })
        return mapping
class T5_Base(Transformer):
    """Google's T5 model (base class).

    Holds the checkpoint load/create helpers and the weight-name mapping
    shared by the encoder and the decoder.
    """
    def load_variable(self, checkpoint, name):
        """Load a single variable from the checkpoint."""
        variable = super(T5_Base, self).load_variable(checkpoint, name)
        if name == 'shared/embedding':
            # Optionally restrict the shared embedding to a token subset.
            if self.keep_tokens is None:
                return variable
            else:
                return variable[self.keep_tokens]
        elif 'relative_attention_bias' in name:
            # Stored transposed relative to what our layer expects.
            return variable.T
        else:
            return variable
    def create_variable(self, name, value):
        """Create a tensorflow variable (the inverse of load_variable)."""
        if 'relative_attention_bias' in name:
            value = value.T
        return super(T5_Base, self).create_variable(name, value)
    def variable_mapping(self):
        """Map layer names to the official T5 weight format.

        Note: T5's layer norms have a 'scale' only (no bias), and the
        relative position bias lives in block_000 of each stack.
        """
        mapping = {
            'Embedding-Token': ['shared/embedding'],
            'Encoder-Embedding-Relative-Position': [
                'encoder/block_000/layer_000/SelfAttention/relative_attention_bias'
            ],
            'Encoder-Output-Norm': ['encoder/final_layer_norm/scale'],
            'Decoder-Embedding-Relative-Position': [
                'decoder/block_000/layer_000/SelfAttention/relative_attention_bias',
            ],
            'Decoder-Output-Norm': ['decoder/final_layer_norm/scale'],
        }
        for i in range(self.num_hidden_layers):
            # Encoder body
            prefix = 'encoder/block_%03d/' % i
            mapping.update({
                'Encoder-Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'layer_000/SelfAttention/q',
                    prefix + 'layer_000/SelfAttention/k',
                    prefix + 'layer_000/SelfAttention/v',
                    prefix + 'layer_000/SelfAttention/o',
                ],
                'Encoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'layer_000/layer_norm/scale',
                ],
                'Encoder-Transformer-%d-FeedForward' % i: [
                    prefix + 'layer_001/DenseReluDense/wi/kernel',
                    prefix + 'layer_001/DenseReluDense/wo/kernel',
                ],
                'Encoder-Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'layer_001/layer_norm/scale',
                ],
            })
            # Decoder body
            prefix = 'decoder/block_%03d/' % i
            mapping.update({
                'Decoder-Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'layer_000/SelfAttention/q',
                    prefix + 'layer_000/SelfAttention/k',
                    prefix + 'layer_000/SelfAttention/v',
                    prefix + 'layer_000/SelfAttention/o',
                ],
                'Decoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'layer_000/layer_norm/scale',
                ],
                'Decoder-Transformer-%d-MultiHeadCrossAttention' % i: [
                    prefix + 'layer_001/EncDecAttention/q',
                    prefix + 'layer_001/EncDecAttention/k',
                    prefix + 'layer_001/EncDecAttention/v',
                    prefix + 'layer_001/EncDecAttention/o',
                ],
                'Decoder-Transformer-%d-MultiHeadCrossAttention-Norm' % i: [
                    prefix + 'layer_001/layer_norm/scale',
                ],
                'Decoder-Transformer-%d-FeedForward' % i: [
                    prefix + 'layer_002/DenseReluDense/wi/kernel',
                    prefix + 'layer_002/DenseReluDense/wo/kernel',
                ],
                'Decoder-Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'layer_002/layer_norm/scale',
                ],
            })
        return mapping
class T5_Encoder(T5_Base):
    """Google's T5 model (Encoder)."""
    def get_inputs(self):
        """The T5 Encoder's only input is token_ids."""
        x_in = Input(shape=(self.sequence_length,), name='Encoder-Input-Token')
        return x_in
    def apply_embeddings(self, inputs):
        """T5 has only a token embedding; the relative position embedding is
        prepared separately (see compute_position_bias) for attention to use.
        """
        x = inputs
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,  # id 0 is treated as padding; propagates the mask
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Encoder-Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            # Project factorized embeddings up to hidden_size.
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Encoder-Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """One T5 Encoder block, based on self-attention (pre-norm).
        Order: LN --> Att --> Add --> LN --> FFN --> Add
        """
        x = inputs
        z = self.layer_norm_conds[0]  # condition for conditional LayerNorm
        attention_name = 'Encoder-Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Encoder-Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_mask()
        position_bias = self.compute_position_bias(x)
        # Self Attention
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,  # T5-style norm: scale only, no bias
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        x = self.apply(
            inputs=[x, x, x, position_bias],
            layer=MultiHeadAttention,
            arguments={'p_bias': 't5_relative'},
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            key_size=self.attention_key_size,
            use_bias=False,  # T5 attention projections carry no bias
            scaled_dot_product=False,  # T5 does not scale QK^T
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        # Residual connection around the attention sub-layer.
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            use_bias=False,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        # Residual connection around the feed-forward sub-layer.
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        return x
    def apply_final_layers(self, inputs):
        """The remaining part: a final (pre-norm style) norm plus dropout."""
        x = inputs
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Encoder-Output-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Encoder-Output-Dropout'
        )
        return x
    def compute_position_bias(self, inputs=None):
        """T5 relative position encoding (built once, then cached)."""
        if self.position_bias is None:
            x = inputs
            p = self.apply(
                inputs=[x, x],
                layer=RelativePositionEmbeddingT5,
                # 32 entries — presumably the bucketed relative distances;
                # confirm against RelativePositionEmbeddingT5.
                input_dim=32,
                output_dim=self.num_attention_heads,
                bidirectional=True,  # encoder attends both directions
                embeddings_initializer=self.initializer,
                name='Encoder-Embedding-Relative-Position'
            )
            self.position_bias = p
        return self.position_bias
class T5_Decoder(Transformer):
    """Google's T5 model (Decoder)."""
    def __init__(self, with_lm=True, **kwargs):
        # with_lm may be True (meaning a softmax LM head), False (no head
        # activation) or an activation name passed straight through.
        super(T5_Decoder, self).__init__(**kwargs)
        if with_lm is True:
            self.with_lm = 'softmax'
        else:
            self.with_lm = with_lm
    def get_inputs(self):
        """The T5 Decoder takes the encoder context sequence and token_ids."""
        c_in = Input(
            shape=(self.sequence_length, self.hidden_size),
            name='Input-Context'
        )
        x_in = Input(shape=(self.sequence_length,), name='Decoder-Input-Token')
        return [c_in, x_in]
    def apply_embeddings(self, inputs):
        """T5 has only a token embedding; the relative position embedding is
        prepared separately (see compute_position_bias) for attention to use.
        """
        c, x = inputs
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,  # id 0 is treated as padding; propagates the mask
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Decoder-Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            # Project factorized embeddings up to hidden_size.
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Decoder-Embedding-Mapping'
            )
        return [c, x]
    def apply_main_layers(self, inputs, index):
        """One T5 Decoder block: self-attention + cross-attention (pre-norm).
        Order: LN --> Att1 --> Add --> LN --> Att2 --> Add --> LN --> FFN --> Add
        """
        c, x = inputs
        z = self.layer_norm_conds[0]  # condition for conditional LayerNorm
        self_attention_name = 'Decoder-Transformer-%d-MultiHeadSelfAttention' % index
        cross_attention_name = 'Decoder-Transformer-%d-MultiHeadCrossAttention' % index
        feed_forward_name = 'Decoder-Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_mask()
        position_bias = self.compute_position_bias([x, c])
        # Self Attention (causal, with relative position bias)
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,  # T5-style norm: scale only, no bias
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % self_attention_name
        )
        x = self.apply(
            inputs=[x, x, x, attention_mask, position_bias[0]],
            layer=MultiHeadAttention,
            arguments={
                'a_mask': True,
                'p_bias': 't5_relative'
            },
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            key_size=self.attention_key_size,
            use_bias=False,  # T5 attention projections carry no bias
            scaled_dot_product=False,  # T5 does not scale QK^T
            kernel_initializer=self.initializer,
            name=self_attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % self_attention_name
        )
        # Residual connection around the self-attention sub-layer.
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % self_attention_name
        )
        # Cross Attention (queries from the decoder, keys/values from context)
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % cross_attention_name
        )
        x = self.apply(
            inputs=[x, c, c, position_bias[1]],
            layer=MultiHeadAttention,
            arguments={
                'a_mask': None,
                'p_bias': 't5_relative'
            },
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            key_size=self.attention_key_size,
            use_bias=False,
            scaled_dot_product=False,
            kernel_initializer=self.initializer,
            name=cross_attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % cross_attention_name
        )
        # Residual connection around the cross-attention sub-layer.
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % cross_attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            use_bias=False,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        # Residual connection around the feed-forward sub-layer.
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        return [c, x]
    def apply_final_layers(self, inputs):
        """The remaining part: final norm, dropout, output scaling and the
        optional tied-embedding language-model head.
        """
        c, x = inputs
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Decoder-Output-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Decoder-Output-Dropout'
        )
        # Scale the output down by sqrt(hidden_size) before the LM head.
        x = self.apply(
            inputs=x,
            layer=Lambda,
            function=lambda x: x / np.sqrt(self.hidden_size),
            name='Decoder-Output-Scale'
        )
        if self.with_lm:
            # Token-probability prediction part
            if self.embedding_size != self.hidden_size:
                x = self.apply(
                    inputs=x,
                    layer=Dense,
                    units=self.embedding_size,
                    kernel_initializer=self.initializer,
                    name='Decoder-Output-Mapping'
                )
            x = self.apply(
                inputs=x,
                layer=EmbeddingDense,  # projection tied to 'Embedding-Token'
                embedding_name='Embedding-Token',
                activation=self.with_lm,
                use_bias=False,
                # NOTE(review): 'Dencoder' looks like a typo for 'Decoder',
                # but the layer name is load-bearing, so it is left as is.
                name='Dencoder-Output-LM-Proba'
            )
        return x
    def compute_attention_mask(self, inputs=None):
        """Build (once) a lower-triangular attention mask for causal decoding."""
        if self.attention_mask is None:
            def lm_mask(s):
                import tensorflow as tf
                seq_len = K.shape(s)[1]
                with K.name_scope('attention_mask'):
                    # K.ones may misbehave here; see
                    # https://github.com/tensorflow/tensorflow/issues/24938
                    ones = tf.ones((1, 1, seq_len, seq_len))
                    a_mask = tf.linalg.band_part(ones, -1, 0)  # lower triangle
                    return a_mask
            self.attention_mask = self.apply(
                inputs=self.inputs[1],  # the decoder token input, not the context
                layer=Lambda,
                function=lm_mask,
                name='Attention-LM-Mask'
            )
        return self.attention_mask
    def compute_position_bias(self, inputs=None):
        """T5 relative position encoding: (self-attention bias, cross bias)."""
        if self.position_bias is None:
            x, c = inputs
            p1 = self.apply(
                inputs=[x, x],
                layer=RelativePositionEmbeddingT5,
                input_dim=32,
                output_dim=self.num_attention_heads,
                bidirectional=False,  # causal decoder: one direction only
                embeddings_initializer=self.initializer,
                name='Decoder-Embedding-Relative-Position'
            )
            p2 = self.apply(
                inputs=[x, c],
                layer=RelativePositionEmbeddingT5,
                input_dim=32,
                output_dim=self.num_attention_heads,
                bidirectional=False,
                embeddings_initializer=self.initializer,
                # Same name as p1 — presumably shares that layer via
                # name-based caching in self.apply; verify there.
                name='Decoder-Embedding-Relative-Position'
            )
            self.position_bias = (p1, p2)
        return self.position_bias
class T5(T5_Base):
    """Google's T5 model (Encoder-Decoder)."""
    def __init__(self, **kwargs):
        super(T5, self).__init__(**kwargs)
        # After super() has consumed kwargs, hand this wrapper's layer cache
        # to both halves so identically-named layers (e.g. 'Embedding-Token')
        # are shared between encoder and decoder — presumably via name-based
        # layer reuse in Transformer.apply; verify there.
        kwargs['layers'] = self.layers
        e_name, d_name = 'Encoder', 'Decoder'
        if 'name' in kwargs:
            e_name = '%s_%s' % (kwargs['name'], e_name)
            d_name = '%s_%s' % (kwargs['name'], d_name)
            del kwargs['name']  # avoid passing 'name' twice below
        self._encoder = T5_Encoder(name=e_name, **kwargs)
        self._decoder = T5_Decoder(name=d_name, **kwargs)
    def build(self, **kwargs):
        """Build Encoder and Decoder at the same time, then wire them into a
        single end-to-end Model.
        """
        self._encoder.build(**kwargs)
        self._decoder.build(**kwargs)
        self.encoder = self._encoder.model
        self.decoder = self._decoder.model
        # Drop the decoder's context input (inputs[0]); it is fed from the
        # encoder's output instead.
        self.inputs = self.encoder.inputs + self.decoder.inputs[1:]
        self.outputs = self.decoder(
            self.encoder.outputs + self.decoder.inputs[1:]
        )
        self.model = Model(self.inputs, self.outputs)
def extend_with_language_model(BaseModel):
    """Derive a causal-LM variant of ``BaseModel``.

    The returned subclass forces ``with_mlm`` on and overrides
    ``compute_attention_mask`` with a lower-triangular (causal) mask so
    each position only attends to itself and earlier positions.
    """
    class LanguageModel(BaseModel):
        """``BaseModel`` equipped with a lower-triangular attention mask."""
        def __init__(self, *args, **kwargs):
            super(LanguageModel, self).__init__(*args, **kwargs)
            # Keep a truthy user-supplied value (e.g. 'linear'); otherwise True.
            self.with_mlm = self.with_mlm or True
        def compute_attention_mask(self, inputs=None):
            """Build (and cache) the causal mask lazily on first use."""
            if self.attention_mask is not None:
                return self.attention_mask
            def lm_mask(s):
                import tensorflow as tf
                seq_len = K.shape(s)[1]
                with K.name_scope('attention_mask'):
                    # K.ones can misbehave here; see
                    # https://github.com/tensorflow/tensorflow/issues/24938
                    ones = tf.ones((1, 1, seq_len, seq_len))
                    return tf.linalg.band_part(ones, -1, 0)
            self.attention_mask = self.apply(
                inputs=self.inputs[1],
                layer=Lambda,
                function=lm_mask,
                name='Attention-LM-Mask'
            )
            return self.attention_mask
    return LanguageModel
def extend_with_unified_language_model(BaseModel):
    """Derive a UniLM-style seq2seq variant of ``BaseModel``.

    UniLM (https://arxiv.org/abs/1905.03197) builds its mask from the
    token-type ids (``self.inputs[1]``): segment-0 pairs attend
    bidirectionally while segment-1 rows are restricted to a causal
    (lower-triangular) pattern.
    """
    class UnifiedLanguageModel(BaseModel):
        """``BaseModel`` equipped with the UniLM attention mask."""
        def __init__(self, *args, **kwargs):
            super(UnifiedLanguageModel, self).__init__(*args, **kwargs)
            # Keep a truthy user-supplied value; otherwise force True.
            self.with_mlm = self.with_mlm or True
        def compute_attention_mask(self, inputs=None):
            """Build (and cache) the UniLM mask lazily on first use."""
            if self.attention_mask is not None:
                return self.attention_mask
            def unilm_mask(s):
                import tensorflow as tf
                s = K.cast(s, K.floatx())
                seq_len = K.shape(s)[1]
                with K.name_scope('attention_mask'):
                    # K.ones can misbehave here; see
                    # https://github.com/tensorflow/tensorflow/issues/24938
                    ones = tf.ones((1, 1, seq_len, seq_len))
                    causal = tf.linalg.band_part(ones, -1, 0)
                    s_ex12 = K.expand_dims(K.expand_dims(s, 1), 2)
                    s_ex13 = K.expand_dims(K.expand_dims(s, 1), 3)
                    # Segment-0 x segment-0 pairs attend freely; rows in
                    # segment 1 fall back to the causal pattern.
                    return (1 - s_ex13) * (1 - s_ex12) + s_ex13 * causal
            self.attention_mask = self.apply(
                inputs=self.inputs[1],
                layer=Lambda,
                function=unilm_mask,
                name='Attention-UniLM-Mask'
            )
            return self.attention_mask
    return UnifiedLanguageModel
def build_transformer_model(
    config_path=None,
    checkpoint_path=None,
    model='bert',
    application='encoder',
    return_keras_model=True,
    **kwargs
):
    """Build a transformer model from a config file, optionally loading
    checkpoint weights.

    Args:
        config_path: path to a JSON config file; its entries are merged
            into the config and overridden by ``**kwargs``.
        checkpoint_path: optional checkpoint to load weights from.
        model: one of 'bert', 'albert', 'albert_unshared', 'nezha',
            'electra', 'gpt2_ml', 't5' (case-insensitive).
        application: 'encoder', 'lm' or 'unilm' (ignored for 't5').
        return_keras_model: if True, return the underlying Keras model;
            otherwise return the wrapper transformer object.
    """
    configs = {}
    if config_path is not None:
        # Use a context manager so the config file handle is closed
        # deterministically instead of being leaked until GC.
        with open(config_path) as f:
            configs.update(json.load(f))
    configs.update(kwargs)
    # Map Google/HuggingFace-style config names onto the names used here.
    if 'max_position' not in configs:
        configs['max_position'] = configs.get('max_position_embeddings')
    if 'dropout_rate' not in configs:
        configs['dropout_rate'] = configs.get('hidden_dropout_prob')
    model, application = model.lower(), application.lower()
    models = {
        'bert': BERT,
        'albert': ALBERT,
        'albert_unshared': ALBERT_Unshared,
        'nezha': NEZHA,
        'electra': ELECTRA,
        'gpt2_ml': GPT2_ML,
        't5': T5,
    }
    MODEL = models[model]
    # T5 is already an encoder-decoder; lm/unilm wrapping only applies
    # to the encoder-style models.
    if model != 't5':
        if application == 'lm':
            MODEL = extend_with_language_model(MODEL)
        elif application == 'unilm':
            MODEL = extend_with_unified_language_model(MODEL)
    transformer = MODEL(**configs)
    transformer.build(**configs)
    if checkpoint_path is not None:
        transformer.load_weights_from_checkpoint(checkpoint_path)
    if return_keras_model:
        return transformer.model
    else:
        return transformer
| 62,335 | 32.157447 | 87 | py |
rnn-seq2seq-learning | rnn-seq2seq-learning-main/scripts/dataloader.py | '''
Author: Zhengxiang (Jack) Wang
GitHub: https://github.com/jaaack-wang
Website: https://jaaack-wang.eu.org
About: Code for creating dataloader in PyTorch
'''
import torch
from functools import partial
from torch.utils.data import Dataset, DataLoader
import sys
import pathlib
# import from local script
sys.path.insert(0, str(pathlib.Path(__file__).parent))
from utils import read_data
class Transform(Dataset):
    """Dataset wrapper that encodes (input, output) pairs into integer
    id sequences on the fly.

    Args:
        data (list): list of [input, output] pairs.
        in_seq_encoder (callable): encodes an input sequence.
        out_seq_encoder (callable): encodes an output sequence.
    """

    def __init__(self, data, in_seq_encoder, out_seq_encoder):
        self.data = data
        self.in_seq_encoder = in_seq_encoder
        self.out_seq_encoder = out_seq_encoder

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        source, target = self.data[index]
        return self.in_seq_encoder(source), self.out_seq_encoder(target)
def make_map(vocab):
    """Build token->index and index->token maps.

    Indices 0 and 1 are reserved for the start ("<s>") and end ("</s>")
    markers; remaining tokens are numbered in iteration order.

    Args:
        vocab (iterable): vocabulary tokens.
    """
    v2idx = {"<s>": 0, "</s>": 1}
    for token in vocab:
        v2idx[token] = len(v2idx)
    idx2v = {index: token for token, index in v2idx.items()}
    return v2idx, idx2v
def get_text_encoder_decoder(vocab, tokenizer=None):
    """Create an encoder (text -> index list) and a decoder
    (index list -> token list) for the given vocabulary.

    Indices 0/1 mark sequence start/end ("<s>"/"</s>"). Unknown tokens
    map to the vocabulary size (one past the last valid index) and
    unknown indices decode to "<unk>". If a dedicated pad token is
    needed, include it in ``vocab`` and pass its index when calling
    ``create_dataloader``; otherwise padding also defaults to the
    vocab size.

    Args:
        vocab (iterable): vocabulary tokens.
        tokenizer (callable/None): splits a text into tokens; when not
            given, the text is iterated directly (character level).
    """
    # Build the two maps inline: 0/1 reserved, then tokens in order.
    v2idx = {"<s>": 0, "</s>": 1}
    for token in vocab:
        v2idx[token] = len(v2idx)
    idx2v = {index: token for token, index in v2idx.items()}

    if not tokenizer:
        tokenizer = lambda text: text

    def encoder(text):
        unk = len(v2idx)
        return [0] + [v2idx.get(tok, unk) for tok in tokenizer(text)] + [1]

    def decoder(indices):
        return [idx2v.get(i, "<unk>") for i in indices]

    return encoder, decoder
def make_tensors(size, fill_idx):
    """Return an int64 tensor of shape ``size`` filled with ``fill_idx``.

    Args:
        size (tuple): desired tensor shape.
        fill_idx (int): fill value (typically the padding index).
    """
    # torch.full allocates and fills in one step — idiomatic replacement
    # for the torch.empty() + in-place fill_() + .long() round trip.
    return torch.full(size, fill_idx, dtype=torch.long)
def collate_fn(batch, padding_idx,
               in_max_seq_len=None,
               out_max_seq_len=None):
    """Collate a batch of encoded (input, output) pairs into padded
    int64 tensors of shape (seq_len, batch_size).

    Args:
        batch: sequence of (input_ids, output_ids) pairs.
        padding_idx (int): index used to pad shorter sequences.
        in_max_seq_len / out_max_seq_len (int/None): minimum padded
            lengths for inputs/outputs; the batch maximum is used when
            it is longer (or when None is given).

    Returns:
        (inputs, outputs): LongTensors of shape (in_len, N) and
        (out_len, N), sequence-major.
    """
    N = len(batch)
    X, Y = zip(*batch)
    in_max_len = max(len(x) for x in X)
    out_max_len = max(len(y) for y in Y)
    if in_max_seq_len and in_max_seq_len > in_max_len:
        in_max_len = in_max_seq_len
    if out_max_seq_len and out_max_seq_len > out_max_len:
        out_max_len = out_max_seq_len
    # torch.full creates pre-filled int64 padding buffers directly.
    inputs = torch.full((in_max_len, N), padding_idx, dtype=torch.long)
    outputs = torch.full((out_max_len, N), padding_idx, dtype=torch.long)
    for idx, (x, y) in enumerate(batch):
        # torch.tensor(..., dtype=torch.long) is the idiomatic
        # constructor; torch.Tensor(x).long() round-trips via float32.
        inputs[:len(x), idx] = torch.tensor(x, dtype=torch.long)
        outputs[:len(y), idx] = torch.tensor(y, dtype=torch.long)
    return inputs, outputs
def create_dataloader(data,
                      in_seq_encoder,
                      out_seq_encoder,
                      padding_idx,
                      shuffle=False,
                      batch_size=256,
                      in_max_seq_len=None,
                      out_max_seq_len=None):
    """Build a DataLoader over (input, output) pairs.

    Args:
        data (list/str): list of [input, output] pairs, or a filepath
            that ``read_data`` can load.
        in_seq_encoder / out_seq_encoder (callable): encode input/output
            sequences into integer ids.
        padding_idx (int): index used to pad batches.
        shuffle (bool): shuffle the data each epoch. Defaults to False.
        batch_size (int): batch size. Defaults to 256.
        in_max_seq_len / out_max_seq_len (int/None): minimum padded
            lengths; the per-batch maximum wins when it is longer.
    """
    if isinstance(data, str):
        data = read_data(data)
    dataset = Transform(data, in_seq_encoder, out_seq_encoder)
    # Bind the padding settings; DataLoader calls collate(batch).
    collate = partial(collate_fn,
                      padding_idx=padding_idx,
                      in_max_seq_len=in_max_seq_len,
                      out_max_seq_len=out_max_seq_len)
    return DataLoader(dataset, batch_size, shuffle, collate_fn=collate)
def customize_dataloader_func(in_seq_encoder,
                              out_seq_encoder,
                              padding_idx,
                              shuffle=False,
                              batch_size=256,
                              in_max_seq_len=None,
                              out_max_seq_len=None):
    """Pre-bind every argument of ``create_dataloader`` except ``data``.

    Returns a callable ``f(data)`` that produces a DataLoader using the
    settings fixed here.
    """
    bound = dict(in_seq_encoder=in_seq_encoder,
                 out_seq_encoder=out_seq_encoder,
                 padding_idx=padding_idx,
                 shuffle=shuffle,
                 batch_size=batch_size,
                 in_max_seq_len=in_max_seq_len,
                 out_max_seq_len=out_max_seq_len)
    return partial(create_dataloader, **bound)
| 6,202 | 32.711957 | 75 | py |
rnn-seq2seq-learning | rnn-seq2seq-learning-main/scripts/model.py | '''
Author: Zhengxiang (Jack) Wang
GitHub: https://github.com/jaaack-wang
Website: https://jaaack-wang.eu.org
About: RNN Seq2Seq models (Simple RNN, GRU, LSTM)
in PyTorch. Allows: attention, bidirectional RNN,
as well as multilayered RNN etc.
'''
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
    """RNN encoder (SRNN/GRU/LSTM), optionally bidirectional.

    Args:
        in_vocab_size (int): input vocabulary size.
        hidden_size (int): RNN hidden size.
        embd_dim (int): embedding dimension.
        num_layers (int): number of stacked RNN layers. Defaults to 1.
        rnn_type (str): "SRNN", "GRU" or "LSTM" (case-insensitive).
        dropout_rate (float): dropout applied to the embeddings.
        bidirectional (bool): run the RNN in both directions; the two
            directions are merged with ``reduction_method``.
        reduction_method (callable): how the two directions are merged,
            e.g. torch.sum or torch.mean.
    """
    def __init__(self,
                 in_vocab_size, hidden_size,
                 embd_dim, num_layers=1, rnn_type="SRNN",
                 dropout_rate=0.0, bidirectional=False,
                 reduction_method=torch.sum):
        super(Encoder, self).__init__()
        self.embedding = nn.Embedding(in_vocab_size, embd_dim)
        self.num_layers = num_layers
        self.rnn_type = rnn_type.upper()
        self.bidirectional = bidirectional
        if self.rnn_type == "GRU": rnn_ = nn.GRU
        elif self.rnn_type == "LSTM": rnn_ = nn.LSTM
        elif self.rnn_type == "SRNN": rnn_ = nn.RNN
        # BUGFIX: the message was a plain string, so {self.rnn_type} was
        # never interpolated; it needs the f prefix.
        else: raise ValueError("Only supports SRNN, GRU, LSTM," \
                               f" but {self.rnn_type} was given.")
        self.rnn = rnn_(embd_dim,
                        hidden_size,
                        num_layers,
                        bidirectional=bidirectional)
        self.reduce = reduction_method
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, X):
        """Encode X of shape (max input seq len, batch size).

        Returns:
            outputs: (max input seq len, batch size, hidden size)
            hidden: (num layers, batch size, hidden size)
            cell: same shape as hidden for LSTM, else None.
        """
        # embd: (max input seq len, batch size, embd dim)
        embd = self.dropout(self.embedding(X))
        if self.rnn_type == "LSTM":
            outputs, (hidden, cell) = self.rnn(embd)
        else:
            outputs, hidden = self.rnn(embd)
            cell = None  # placeholder so callers get a uniform 3-tuple
        if self.bidirectional:
            # Merge the two directions with self.reduce so downstream
            # shapes match the unidirectional case.
            seq_len, batch_size = X.shape
            hidden = hidden.view(2, self.num_layers, batch_size, -1)
            hidden = self.reduce(hidden, dim=0)
            if self.rnn_type == "LSTM":
                cell = cell.view(2, self.num_layers, batch_size, -1)
                cell = self.reduce(cell, dim=0)
            outputs = outputs.view(seq_len, batch_size, 2, -1)
            outputs = self.reduce(outputs, dim=2)
        return outputs, hidden, cell
class Attention(nn.Module):
    """Additive (Bahdanau-style) attention over encoder outputs."""

    def __init__(self, hidden_size):
        super(Attention, self).__init__()
        self.attn = nn.Linear(2 * hidden_size, hidden_size)
        self.v = nn.Linear(hidden_size, 1, bias=False)

    def forward(self, hidden, encoder_outputs):
        """Score every source position against the decoder state.

        Args:
            hidden: (batch size, hidden size) decoder state.
            encoder_outputs: (src len, batch size, hidden size).

        Returns:
            (batch size, src len) attention weights, softmaxed over
            the source dimension.
        """
        src_len = encoder_outputs.shape[0]
        # Broadcast the decoder state across source positions so both
        # operands are (batch, src len, hidden).
        query = hidden.unsqueeze(1).repeat(1, src_len, 1)
        keys = encoder_outputs.permute(1, 0, 2)
        # energy: (batch, src len, hidden); scores: (batch, src len)
        energy = torch.tanh(self.attn(torch.cat((query, keys), dim=2)))
        scores = self.v(energy).squeeze(2)
        return F.softmax(scores, dim=1)
class Decoder(nn.Module):
    """Single-step RNN decoder (SRNN/GRU/LSTM) with optional attention.

    Args:
        out_vocab_size (int): output vocabulary size.
        hidden_size (int): RNN hidden size.
        embd_dim (int): embedding dimension.
        num_layers (int): number of stacked RNN layers. Defaults to 1.
        rnn_type (str): "SRNN", "GRU" or "LSTM" (case-insensitive).
            BUGFIX: the old default "RNN" was rejected by this very
            validation; "SRNN" matches the Encoder's default.
        attention (nn.Module/None): attention module (e.g. Attention).
        use_attention (bool): feed an attention-weighted context vector
            into the RNN alongside the embedding.
        dropout_rate (float): dropout applied to the embeddings.
        reduction_method (callable): reduces the layered hidden state to
            a single query vector for attention.
    """
    def __init__(self,
                 out_vocab_size, hidden_size,
                 embd_dim, num_layers=1, rnn_type="SRNN",
                 attention=None, use_attention=True,
                 dropout_rate=0.0, reduction_method=torch.sum):
        super(Decoder, self).__init__()
        self.embedding = nn.Embedding(out_vocab_size, embd_dim)
        self.rnn_type = rnn_type.upper()
        self.use_attention = use_attention
        if self.rnn_type == "GRU": rnn_ = nn.GRU
        elif self.rnn_type == "LSTM": rnn_ = nn.LSTM
        elif self.rnn_type == "SRNN": rnn_ = nn.RNN
        # BUGFIX: message needs the f prefix so the offending value is
        # actually interpolated.
        else: raise ValueError("Only supports SRNN, GRU, LSTM," \
                               f" but {self.rnn_type} was given.")
        if use_attention:
            # Attention context is concatenated onto the embedding.
            self.rnn = rnn_(embd_dim + hidden_size,
                            hidden_size, num_layers)
        else:
            self.rnn = rnn_(embd_dim, hidden_size, num_layers)
        self.attention = attention
        self.reduce = reduction_method
        self.dropout = nn.Dropout(dropout_rate)
        self.fc_out = nn.Linear(hidden_size, out_vocab_size)
    def forward(self, y, hidden, cell, encoder_outputs):
        """Decode one step.

        Args:
            y: (1, batch size) previous output token ids.
            hidden: (num layers, batch size, hidden size).
            cell: same shape for LSTM; ignored (placeholder) otherwise.
            encoder_outputs: (max input seq len, batch size, hidden size).

        Returns:
            output: (batch size, out vocab size) logits.
            hidden, cell: updated states.
            attn_weights: (batch, 1, src len) or None without attention.
        """
        # embd: (1, batch size, embd dim)
        embd = self.dropout(self.embedding(y))
        if self.use_attention and self.attention:
            # Collapse layered hidden state into one query vector.
            reduced_hidden = self.reduce(hidden, dim=0)
            attn_weights = self.attention(reduced_hidden,
                                          encoder_outputs).unsqueeze(1)
            # (batch, src len, hidden) for bmm with the weights.
            encoder_outputs = encoder_outputs.permute(1, 0, 2)
            # weighted context: (1, batch size, hidden size)
            weighted = torch.bmm(attn_weights, encoder_outputs).permute(1, 0, 2)
            rnn_input = torch.cat((embd, weighted), dim=2)
        else:
            rnn_input = embd
            attn_weights = None  # placeholder
        if self.rnn_type == "LSTM":
            output, (hidden, cell) = self.rnn(rnn_input, (hidden, cell))
        else:
            output, hidden = self.rnn(rnn_input, hidden)
        # output: (batch size, out vocab size)
        output = self.fc_out(output.squeeze(0))
        return output, hidden, cell, attn_weights
class Seq2Seq(nn.Module):
    """Encoder-decoder wrapper with optional teacher forcing."""

    def __init__(self, encoder, decoder, device):
        super(Seq2Seq, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.device = device

    def forward(self, X, Y, teacher_forcing_ratio=0.0):
        """Run the full encode-decode loop.

        Args:
            X: (max input seq len, batch size) source ids.
            Y: (max output seq len, batch size) targets; row 0 is <s>.
            teacher_forcing_ratio (float): per-step probability of
                feeding the gold token instead of the model's argmax.

        Returns:
            outputs: ((max output seq len - 1) * batch, out vocab) logits.
            attn_weights: list of per-step attention weights (or Nones).
        """
        encoder_outputs, hidden, cell = self.encoder(X)
        y = Y[0:1]  # start from the <s> row: (1, batch size)
        step_logits, attn_weights = [], []
        for step in range(1, Y.shape[0]):
            logits, hidden, cell, step_attn = self.decoder(
                y, hidden, cell, encoder_outputs)
            step_logits.append(logits)
            attn_weights.append(step_attn)
            # Draw per step, so the forcing decision varies within a sequence.
            if random.random() < teacher_forcing_ratio:
                y = Y[step:step + 1]
            else:
                y = logits.argmax(1).unsqueeze(0)
        outputs = torch.cat(step_logits).to(self.device)
        return outputs, attn_weights
| 7,864 | 38.522613 | 80 | py |
rnn-seq2seq-learning | rnn-seq2seq-learning-main/scripts/pytorch_utils.py | '''
Author: Zhengxiang (Jack) Wang
GitHub: https://github.com/jaaack-wang
Website: https://jaaack-wang.eu.org
About: Utility functions for training, evaluation,
and deployment (i.e., prediction).
'''
import torch
import torch.nn as nn
import torch.nn.init as init
from functools import partial
import matplotlib.pyplot as plt
import sys
import pathlib
# import from local script
sys.path.insert(0, str(pathlib.Path(__file__).parent))
from model import *
def init_weights(model, init_method=init.xavier_uniform_):
    """Initialize all parameters of ``model``: weight tensors with
    ``init_method`` (Xavier uniform by default), everything else
    (biases etc.) with zeros."""
    for name, param in model.named_parameters():
        if 'weight' not in name:
            init.constant_(param.data, 0)
        else:
            init_method(param.data)
def count_parameters(model):
    """Return the total number of trainable parameters in ``model``."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def get_model(ModelConfig, init_method=init.xavier_uniform_):
    '''Build a Seq2Seq model from a ModelConfig dict.

    Args:
        - ModelConfig (dict): hyper-parameters; reads rnn_type,
          hidden_size, embd_dim, num_layers, dropout_rate,
          use_attention, bidirectional, in/out_vocab_size, device and
          reduction_method ("sum" or "mean").
        - init_method (callable/None): weight initializer applied to all
          weight tensors; None skips initialization. Defaults to
          Xavier uniform.

    Returns:
        Seq2Seq model moved to ModelConfig["device"].
    '''
    rnn_type = ModelConfig['rnn_type']
    hidden_size = ModelConfig['hidden_size']
    embd_dim = ModelConfig['embd_dim']
    num_layers = ModelConfig['num_layers']
    dropout_rate = ModelConfig['dropout_rate']
    use_attention = ModelConfig['use_attention']
    bidirectional = ModelConfig['bidirectional']
    in_vocab_size = ModelConfig['in_vocab_size']
    out_vocab_size = ModelConfig['out_vocab_size']
    device = torch.device(ModelConfig["device"])
    reduction_method = ModelConfig['reduction_method']
    if reduction_method == "sum":
        reduction_method = torch.sum
    elif reduction_method == "mean":
        reduction_method = torch.mean
    else:
        raise TypeError(f"unknown reduction method: {reduction_method}")
    encoder = Encoder(in_vocab_size, hidden_size,
                      embd_dim, num_layers, rnn_type,
                      dropout_rate, bidirectional,
                      reduction_method)
    attention = Attention(hidden_size)
    decoder = Decoder(out_vocab_size, hidden_size,
                      embd_dim, num_layers, rnn_type,
                      attention, use_attention,
                      dropout_rate)
    model = Seq2Seq(encoder, decoder, device).to(device)
    # `is not None` is the correct identity check; `!= None` relies on
    # equality semantics of whatever object is passed.
    if init_method is not None:
        init_weights(model, init_method)
    n = count_parameters(model)
    print(f'The model has {n:,} trainable parameters')
    return model
def metrics(Y, Ypred):
    '''Compute three sequence-generation metrics:
    - full sequence accuracy: % of sequences correctly generated from end to end
    - first n-symbol accuracy: % of symbols generated correctly before
      the first mistake in each sequence
    - overlap rate: % of pairwise overlapping symbols
    '''
    match = (Y == Ypred).to(torch.float64)
    # per-sequence overlap (mean over the time dimension)
    per_seq = match.mean(dim=0)
    overlap_rate = per_seq.mean().item()
    # a sequence is fully correct iff its mean overlap is (close to) 1
    one = torch.tensor(1.0, dtype=torch.float64)
    full_seq_accu = per_seq.isclose(one).to(torch.float64).mean().item()
    # zero everything from the first mismatch onwards so the mean below
    # only credits the correct prefix of each sequence
    if match.dim() <= 1:
        first_bad = match.argmin(0)
        if match[first_bad] == 0:
            match[first_bad:] = 0
    else:
        for col, first_bad in enumerate(match.argmin(0)):
            if match[first_bad, col] == 0:
                match[first_bad:, col] = 0
    first_n_accu = match.mean().item()
    return full_seq_accu, first_n_accu, overlap_rate
def _get_results(dic):
    """Unpack a metric dict into [loss, full-seq acc, first-n acc, overlap]."""
    keys = ("loss", "full sequence accuracy",
            "first n-symbol accuracy", "overlap rate")
    return [dic[k] for k in keys]
def get_results(log, train_log=True):
    '''Extract [loss, full sequence accuracy, first n-symbol accuracy,
    overlap rate] from a result dictionary.

    With ``train_log=True`` (a full training log), returns
    (train_results, dev_results) for the best-eval epoch; otherwise
    ``log`` is a single metric dict and one list is returned.
    '''
    def unpack(d):
        return [d["loss"], d["full sequence accuracy"],
                d["first n-symbol accuracy"], d["overlap rate"]]
    if not train_log:
        return unpack(log)
    best = log["Best eval accu"]
    return unpack(best["Train"]), unpack(best["Eval"])
def evaluate(model, dataloader, criterion,
             per_seq_len_performance=False):
    '''Evaluate model performance on a given dataloader.

    Args:
        - model (Seq2Seq): model exposing ``.device``; decoding runs with
          teacher_forcing_ratio=0.0, i.e. fully model-driven.
        - dataloader: yields (X, Y) batches of shape (seq_len, batch).
        - criterion: loss over (logits, flattened targets without <s>).
        - per_seq_len_performance (bool): also return a per-length log;
          only valid when every batch holds sequences of one distinct
          input length.

    Returns:
        aggr_perf dict (metrics averaged over batches), or
        (aggr_perf, perf_log) when per_seq_len_performance is True.
    '''
    model.eval()
    if per_seq_len_performance:
        # every batch must contain a single, distinct input length for
        # the per-length breakdown to be meaningful
        seq_len = set(X.shape[0] for X, _ in dataloader)
        assert len(seq_len) == len(dataloader), "Each batch" \
        " must contain sequences of a specific length. "
        perf_log = dict()
    # aggregate performance, accumulated over batches then averaged
    aggr_perf = {"loss": 0.0,
                 "full sequence accuracy": 0.0,
                 "first n-symbol accuracy": 0.0,
                 "overlap rate": 0.0}
    with torch.no_grad():
        for X, Y in dataloader:
            x_seq_len = X.shape[0] - 2 # not counting <s> and </s>
            seq_len, batch_size = Y.shape
            seq_len -= 1 # logits does not have <s>
            X = X.to(model.device)
            Y = Y.to(model.device)
            logits, _ = model(X, Y, teacher_forcing_ratio=0.0)
            # reshape flat logits back to (seq, batch, vocab) for argmax
            Ypred = logits.view(seq_len, batch_size, -1).argmax(2)
            full_seq_accu, first_n_accu, overlap_rate = metrics(Y[1:], Ypred)
            loss = criterion(logits, Y[1:].view(-1))
            aggr_perf["loss"] += loss.item()
            aggr_perf["full sequence accuracy"] += full_seq_accu
            aggr_perf["first n-symbol accuracy"] += first_n_accu
            aggr_perf["overlap rate"] += overlap_rate
            if per_seq_len_performance:
                perf_log[f"Len-{x_seq_len}"] = {"loss": loss.item(),
                                                "full sequence accuracy": full_seq_accu,
                                                "first n-symbol accuracy": first_n_accu,
                                                "overlap rate": overlap_rate}
    # average the accumulated sums over the number of batches
    aggr_perf = {k:v/len(dataloader) for k,v in aggr_perf.items()}
    if per_seq_len_performance:
        perf_log[f"Aggregated"] = aggr_perf
        return aggr_perf, perf_log
    return aggr_perf
def train_loop(model, dataloader, optimizer, criterion, teacher_forcing_ratio):
    '''Run a single training epoch over ``dataloader``.

    Args:
        - model (Seq2Seq): model exposing ``.device``.
        - dataloader: yields (X, Y) batches of shape (seq_len, batch).
        - optimizer: optimizer stepping the model's parameters.
        - criterion: loss over (logits, flattened targets without <s>).
        - teacher_forcing_ratio (float): probability of feeding gold
          tokens to the decoder at each step.
    '''
    model.train()
    for X, Y in dataloader:
        X = X.to(model.device)
        Y = Y.to(model.device)
        optimizer.zero_grad()
        logits, _ = model(X, Y, teacher_forcing_ratio)
        # Targets drop the leading <s> row to align with the logits.
        # (The old per-batch Ypred/argmax was dead work and is removed.)
        loss = criterion(logits, Y[1:].view(-1))
        loss.backward()
        # clip gradients to stabilize RNN training
        nn.utils.clip_grad_norm_(model.parameters(), 1)
        optimizer.step()
def train_and_evaluate(model, train_dl, eval_dl,
                       criterion, optimizer,
                       saved_model_fp="model.pt",
                       acc_threshold=0.0,
                       print_eval_freq=5,
                       max_epoch_num=10,
                       train_exit_acc=1.0,
                       eval_exit_acc=1.0,
                       teacher_forcing_ratio=1.0):
    '''Trains and evaluates model while training and returns
    the training log. The best model with highest full sequence
    accuracy is saved and returned.
    Args:
        - model (nn.Module): a neural network model in PyTorch.
        - train_dl (Dataset): train set dataloader.
        - eval_dl (Dataset): dataloader for evaluation.
        - criterion (method): loss function for computing loss.
        - optimizer (method): Optimization method.
        - saved_model_fp (str): filepath for the saved model (.pt).
        - acc_threshold (float): the min accuracy to save model.
          Defaults to 0.0. If set greater than 1, no model will be saved.
        - print_eval_freq (int): print and evaluation frequency.
        - max_epoch_num (int): max epoch number. Defaults to 10. Training
          is stopped if the max epoch number is run out.
        - train_exit_acc (float): the min train accuracy to exit training.
          Defaults to 1.0. Only takes effect if eval_exit_acc is also met.
        - eval_exit_acc (float): the min eval accu to exit training. Defaults
          to 1.0. Training is stopped if the eval accuracy is 1.0 or both
          train_exit_acc and eval_exit_acc are met.
        - teacher_forcing_ratio (float): the probability of using the real next
          symbols from the output sequences at decoding time during training.
    '''
    log = dict()
    best_acc, best_epoch = acc_threshold, 0
    epoch, train_acc, eval_acc = 0, 0, 0
    while (epoch < max_epoch_num) and (eval_acc != 1.0) and (
        train_acc < train_exit_acc or eval_acc < eval_exit_acc):
        epoch += 1
        train_loop(model, train_dl, optimizer, criterion,
                   teacher_forcing_ratio)
        if epoch % print_eval_freq == 0:
            train_perf = evaluate(model, train_dl, criterion)
            train_acc = train_perf['full sequence accuracy']
            eval_perf = evaluate(model, eval_dl, criterion)
            eval_acc = eval_perf['full sequence accuracy']
            print(f"Current epoch: {epoch}, \ntraining performance: " \
                  f"{train_perf}\nevaluation performance: {eval_perf}\n")
            log[f"Epoch#{epoch}"] = {"Train": train_perf, "Eval": eval_perf}
            if eval_acc > best_acc:
                best_acc = eval_acc
                best_epoch = epoch
                torch.save(model.state_dict(), saved_model_fp)
    if best_acc > acc_threshold:
        # BUGFIX: record the epoch the best model came from, not the
        # final epoch the loop happened to stop at.
        log["Best eval accu"] = {"Epoch Number": best_epoch}
        log["Best eval accu"].update(log[f"Epoch#{best_epoch}"])
        print(saved_model_fp + " saved!\n")
        # restore the best checkpoint before returning
        model.load_state_dict(torch.load(saved_model_fp))
    return log
def predict(text, model, in_seq_encoder, out_seq_decoder,
            in_seq_decoder=None, max_output_len=None,
            visualize=True, show_plot=True, saved_plot_fp=None):
    '''Greedy-decode the model's output for ``text`` (a str, or a list
    of strs handled recursively without plots).

    Args:
        - text (str/list): input text(s).
        - model (Seq2Seq): trained model with ``.encoder``/``.decoder``.
        - in_seq_encoder (method): encodes the input text into indices.
        - out_seq_decoder (method): decodes output indices into tokens.
        - in_seq_decoder (method/None): decodes input indices for the
          attention plot's x axis; defaults to out_seq_decoder.
        - max_output_len (int/None): decoding length cap; defaults to
          input length + 3.
        - visualize/show_plot/saved_plot_fp: attention-heatmap options
          (only used when the decoder returns attention weights).

    Returns:
        (output tokens, attention weights); lists of these for list input.
    '''
    if isinstance(text, str):
        pass
    elif isinstance(text, (list, tuple,)):
        assert all(isinstance(t, str)
                   for t in text), "must be a list of strs"
        output_seqs, attn_weights = [], []
        for t in text:
            o, w = predict(t, model, in_seq_encoder,
                           out_seq_decoder, visualize=False,
                           max_output_len=max_output_len)
            output_seqs.append(o)
            attn_weights.append(w)
        return output_seqs, attn_weights
    else:
        raise TypeError("texts must be a str or a list of strs," \
                        f" {type(text)} was given.")
    device = model.device
    in_seq = in_seq_encoder(text)
    # torch.tensor(..., dtype=torch.long) is the idiomatic constructor;
    # torch.Tensor(...).long() round-trips via float32.
    in_seq_tensor = torch.tensor(in_seq, dtype=torch.long).unsqueeze(1).to(device)
    model.eval()
    y = torch.tensor([[0]], dtype=torch.long).to(device)  # start with <s>
    outputs, attn_ws = [], []
    encoder_outputs, hidden, cell = model.encoder(in_seq_tensor)
    # `is None` (identity), not `== None` (equality), per PEP 8.
    if max_output_len is None:
        max_output_len = len(in_seq) + 3
    # greedy decode until </s> (index 1) or the length cap
    while y.item() != 1 and len(outputs) < max_output_len:
        output, hidden, cell, attn_w = \
        model.decoder(y, hidden, cell, encoder_outputs)
        y = output.argmax(1).unsqueeze(0)
        outputs.append(y.item())
        attn_ws.append(attn_w)
    # `is not None`: entries are tensors, so `!=` would delegate to
    # tensor comparison semantics instead of an identity check. The
    # leading `attn_ws and` also guards against max_output_len <= 0.
    if attn_ws and attn_ws[0] is not None:
        attn_ws = torch.cat(attn_ws).squeeze(1)
        if device.type != "cpu":
            attn_ws = attn_ws.cpu().detach().numpy()
        else:
            attn_ws = attn_ws.detach().numpy()
    else:
        visualize = False
    output_seq = out_seq_decoder(outputs)
    if visualize:
        if in_seq_decoder is None:
            in_seq_decoder = out_seq_decoder
        in_seq_len, out_seq_len = len(in_seq)-1, len(outputs)
        # scale the figure roughly with the sequence lengths
        width = max(int(in_seq_len * 0.3), 1)
        height = max(int(out_seq_len * 0.3), 1)
        plt.figure(figsize=(width, height))
        plt.imshow(attn_ws[:, 1:], cmap='BuGn')
        plt.xticks(range(in_seq_len),
                   in_seq_decoder(in_seq)[1:], rotation=45)
        plt.yticks(range(out_seq_len), output_seq)
        plt.xlabel("Input")
        plt.ylabel("Output")
        plt.grid(True, alpha=0.05)
        if saved_plot_fp is not None:
            plt.savefig(saved_plot_fp, dpi=600, bbox_inches='tight')
        if show_plot:
            plt.show()
        else:
            plt.close()
    return output_seq, attn_ws
def customize_predictor(model, in_seq_encoder, out_seq_decoder,
                        in_seq_decoder=None, max_output_len=None,
                        visualize=True, show_plot=True, saved_plot_fp=None):
    '''Bind every argument of ``predict`` except the text, returning a
    one-argument predictor ``f(text)`` for easier reuse.'''
    bound = dict(model=model,
                 in_seq_encoder=in_seq_encoder,
                 out_seq_decoder=out_seq_decoder,
                 in_seq_decoder=in_seq_decoder,
                 max_output_len=max_output_len,
                 visualize=visualize,
                 show_plot=show_plot,
                 saved_plot_fp=saved_plot_fp)
    return partial(predict, **bound)
| 13,983 | 35.511749 | 89 | py |
libai | libai-main/libai/models/utils/model_loader/base_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import copy
import logging
import os
import omegaconf
import oneflow as flow
from termcolor import colored
import libai.utils.distributed as dist
from libai.config import LazyCall
from libai.models.build import build_model
logger = logging.getLogger(__name__)
WEIGHTS_NAME_PT = "pytorch_model.bin"
CONFIG_NAME = "config.json"
def _load_state_dict_into_model(model_to_load, state_dict, start_prefix):
    """Recursively copy ``state_dict`` into ``model_to_load``.

    Args:
        model_to_load (nn.Module): destination module.
        state_dict (OrderedDict): pretrained weights.
        start_prefix (str): key prefix of the root module.

    Returns:
        list: error messages produced while loading.
    """
    metadata = getattr(state_dict, "_metadata", None)
    # copy() drops the _metadata attribute, so re-attach it.
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    error_msgs = []

    def visit(module, prefix=""):
        if metadata is None:
            local_metadata = {}
        else:
            local_metadata = metadata.get(prefix[:-1], {})
        module._load_from_state_dict(
            state_dict, prefix, local_metadata, True, [], [], error_msgs
        )
        for child_name, child in module._modules.items():
            if child is not None:
                visit(child, prefix + child_name + ".")

    visit(model_to_load, prefix=start_prefix)
    return error_msgs
class ModelLoader(object):
def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
"""Class used to load the [`transformers`](https://huggingface.co/models) pretrained model
or `OneFlow` pretrained model.
Args:
model (libai.models): Model to be loaded in Libai.
libai_cfg (dict): The config of model in LiBai, you can import it from
`libai.config.configs.common.models`.
pretrained_model_path (str): The directory path of pretrained model,
which contains model weights file and config file.
output_loading_info (`bool`, *optional*, defaults to `False`):
Whether to return a dictionary containing missing keys, unexpected keys
and error messages.
"""
self.model = model
self.libai_cfg = libai_cfg
self.pretrained_model_path = pretrained_model_path
self.kwargs = kwargs
self.output_loading_info = kwargs.pop("output_loading_info", False)
    def _state_dict_to_global(self, flow_state_dict=None, mode="libai"):
        """Convert every matching tensor in a OneFlow state dict to a
        global tensor with the model's SBP signature and placement.

        Args:
            flow_state_dict (OrderedDict): state dict of the pretrained
                model. In "pytorch" mode it is only meaningful on the
                main process; other ranks receive an empty dict that is
                filled via broadcast/to_global below.
            mode (str): "libai" or "pytorch".

        Returns:
            OrderedDict: state dict whose tensors are global.
        """
        assert mode in ["libai", "pytorch"], f"not support for mode {mode}"
        if mode == "libai" or dist.is_main_process():
            prefix = self.base_model_prefix_2
            # Does the checkpoint carry the base-model prefix?
            has_prefix_module = any(
                s.startswith(self.base_model_prefix_2) for s in flow_state_dict.keys()
            )
            # Does the module itself expect that prefix?
            expects_prefix_module = any(
                s.startswith(prefix) for s in self.model.state_dict().keys()
            )
            start_prefix = "" if has_prefix_module else prefix + "."
            loaded_keys = [start_prefix + key for key in flow_state_dict.keys()]
        else:
            # Non-main ranks start with placeholders; real values arrive
            # via the broadcasts below.
            prefix, has_prefix_module, expects_prefix_module, loaded_keys = [None] * 4
            flow_state_dict = collections.OrderedDict()
        prefix = dist.broadcast_py_object(prefix, src=0)
        has_prefix_module = dist.broadcast_py_object(has_prefix_module, src=0)
        expects_prefix_module = dist.broadcast_py_object(expects_prefix_module, src=0)
        loaded_keys = dist.broadcast_py_object(loaded_keys, src=0)
        # Convert each checkpoint tensor that matches a model parameter.
        for key, value in self.model.state_dict().items():
            if not expects_prefix_module:
                key = prefix + "." + key
            if key in loaded_keys:
                if not has_prefix_module:
                    key = ".".join(key.split(".")[1:])
                if mode == "pytorch":
                    # First stage: make the rank-0 tensor global with
                    # broadcast sbp on rank 0 only (other ranks supply a
                    # placeholder), so it can be re-distributed below.
                    flow_state_dict[key] = flow.to_global(
                        flow_state_dict[key] if dist.is_main_process() else flow.Tensor(None),
                        sbp=flow.sbp.broadcast,
                        placement=flow.placement("cpu", ranks=[0]),
                    )
                # Second stage: adopt the target parameter's sbp/placement.
                flow_state_dict[key] = flow.to_global(
                    flow_state_dict[key],
                    sbp=value.sbp,
                    placement=flow.placement("cpu", ranks=list(value.placement.ranks)),
                )
        return flow_state_dict
def _load_pretrained_model(
self,
model,
state_dict,
pretrained_model_path,
ignore_mismatched_sizes=False,
):
"""Load pretrained model.
Args:
model (libai.models): The model to be loaded.
state_dict (OrderedDict): state dict.
loaded_keys (list): keys of state dict.
pretrained_model_path (str): pretrained modelE path.
ignore_mismatched_sizes (bool):
Whether or not to raise an error if some of the weights
from the checkpoint do not have the same size as the
weights of the model, defaults to `False`.
"""
model_state_dict = model.state_dict()
expected_keys = list(model_state_dict.keys())
prefix = self.base_model_prefix_2
loaded_keys = state_dict.keys()
if len(prefix) > 0:
has_prefix_module = any(s.startswith(prefix) for s in loaded_keys)
expects_prefix_module = any(s.startswith(prefix) for s in expected_keys)
else:
has_prefix_module = False
expects_prefix_module = False
remove_prefix_from_model = not has_prefix_module and expects_prefix_module
add_prefix_to_model = has_prefix_module and not expects_prefix_module
if remove_prefix_from_model:
expected_keys_not_prefixed = [s for s in expected_keys if not s.startswith(prefix)]
expected_keys = [
".".join(s.split(".")[1:]) if s.startswith(prefix) else s for s in expected_keys
]
elif add_prefix_to_model:
expected_keys = [".".join([prefix, s]) for s in expected_keys]
missing_keys = list(set(expected_keys) - set(loaded_keys))
unexpected_keys = list(set(loaded_keys) - set(expected_keys))
start_prefix = ""
model_to_load = model
if (
len(self.base_model_prefix_2) > 0
and not hasattr(model, self.base_model_prefix_2)
and has_prefix_module
):
start_prefix = self.base_model_prefix_2 + "."
if (
len(self.base_model_prefix_2) > 0
and hasattr(model, self.base_model_prefix_2)
and not has_prefix_module
):
model_to_load = getattr(model, self.base_model_prefix_2)
if any(key in expected_keys_not_prefixed for key in loaded_keys):
raise ValueError("The state dict of the model you are loading is corrupted.")
def _find_mismatched_keys(
state_dict,
model_state_dict,
loaded_keys,
add_prefix_to_model,
remove_prefix_from_model,
ignore_mismatched_sizes,
):
mismatched_keys = []
if ignore_mismatched_sizes:
for checkpoint_key in loaded_keys:
model_key = checkpoint_key
if remove_prefix_from_model:
model_key = f"{prefix}.{checkpoint_key}"
elif add_prefix_to_model:
model_key = ".".join(checkpoint_key.split(".")[1:])
if (
model_key in model_state_dict
and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape
):
mismatched_keys.append(
(
checkpoint_key,
state_dict[checkpoint_key].shape,
model_state_dict[model_key].shape,
)
)
del state_dict[checkpoint_key]
return mismatched_keys
if state_dict is not None:
mismatched_keys = _find_mismatched_keys(
state_dict,
model_state_dict,
loaded_keys,
add_prefix_to_model,
remove_prefix_from_model,
ignore_mismatched_sizes,
)
error_msgs = _load_state_dict_into_model(model_to_load, state_dict, start_prefix)
if dist.get_local_rank() == 0:
if len(error_msgs) > 0:
error_msg = "\n\t".join(error_msgs)
raise RuntimeError(
f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}"
)
if len(unexpected_keys) > 0:
logger.warning(
f"Some weights of the model checkpoint at {pretrained_model_path} "
"were not used when "
f"initializing {model.__class__.__name__}:\n {unexpected_keys}\n"
)
else:
logger.info(
f"All model checkpoint weights were used when initializing "
f"{model.__class__.__name__}.\n"
)
if len(missing_keys) > 0:
logger.warning(
f"Some weights of {model.__class__.__name__} were not initialized "
f"from the model checkpoint at {pretrained_model_path}:\n "
f"{missing_keys} \n"
)
elif len(mismatched_keys) == 0:
logger.info(
f"All the weights of {model.__class__.__name__} were initialized "
f"from the model checkpoint at {pretrained_model_path}.\n"
)
if len(mismatched_keys) > 0:
mismatched_warning = "\n".join(
[
f"- {key}: found shape {shape1} in the checkpoint and {shape2}"
"in the model instantiated"
for key, shape1, shape2 in mismatched_keys
]
)
logger.warning(
f"Some weights of {model.__class__.__name__} were not initialized"
f"from the model checkpoint at {pretrained_model_path} "
f"and are newly initialized because the shapes did not"
f"match:\n{mismatched_warning}\n"
)
return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs
class ModelLoaderLiBai(ModelLoader):
    """Class used to load `OneFlow` pretrained model.

    Args:
        model (libai.models): Model to be loaded in Libai.
        libai_cfg (dict): The config of model in LiBai, you can import it from
            `libai.config.configs.common.models`.
        pretrained_model_path (str): The file path of pretrained model.
        output_loading_info (`bool`, *optional*, defaults to `False`):
            Whether to return a dictionary containing missing keys, unexpected keys
            and error messages.
    """

    def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
        super().__init__(model, libai_cfg, pretrained_model_path, **kwargs)
        self.base_model_prefix_2 = None  # prefix in LiBai

    def _load_flow_state_dict(self, state_dict_file):
        # Rank 0 reads the OneFlow checkpoint; oneflow broadcasts it globally.
        return flow.load(state_dict_file, global_src_rank=0)

    def load(self):
        """Load model.

        # For example:

        # .. code-block:: python

        >>> import libai
        >>> from libai.config.configs.common.models.bert import cfg
        >>> from model_loader import BertLoaderLiBai

        >>> loder = BertLoaderLiBai(
                libai.models.BertModel,
                cfg,
                'path/bert-base-chinese'
            )
        >>> bert = loder.load()

        """
        checkpoint = self._load_flow_state_dict(self.pretrained_model_path)

        # Instantiate the model, either from a LazyConfig node or a class.
        if isinstance(self.model, omegaconf.dictconfig.DictConfig):
            self.model.cfg = self.libai_cfg
            self.model = build_model(self.model)
        else:
            self.model = build_model(LazyCall(self.model)(cfg=self.libai_cfg))

        # Make every tensor in the checkpoint global before loading.
        self._state_dict_to_global(checkpoint, mode="libai")

        # Load the weights and collect the bookkeeping lists.
        (
            model,
            missing_keys,
            unexpected_keys,
            mismatched_keys,
            error_msgs,
        ) = self._load_pretrained_model(self.model, checkpoint, self.pretrained_model_path)

        if not self.output_loading_info:
            return model
        loading_info = {
            "missing_keys": missing_keys,
            "unexpected_keys": unexpected_keys,
            "mismatched_keys": mismatched_keys,
            "error_msgs": error_msgs,
        }
        return model, loading_info
class ModelLoaderHuggerFace(ModelLoader):
    """Class used to load the [`transformers`](https://huggingface.co/models)
    pretrained model.
    """

    def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
        super().__init__(model, libai_cfg, pretrained_model_path, **kwargs)
        self.base_model_prefix_1 = None  # prefix in Transformers
        self.base_model_prefix_2 = None  # prefix in LiBai
        # Pristine copy of the config so _update_cfg_log can diff against it.
        self.origin_libai_cfg = copy.deepcopy(self.libai_cfg)
        self.changed_keys = set()  # Store the changed configuration

    def _convert_tensor(self, tensor):
        """Convert PyTorch tensor to OneFlow tensor.

        Args:
            tensor (torch.Tensor): The source tensor.

        Returns:
            flow.Tensor: The target tensor.
        """
        # NOTE: .float() casts to float32, so half-precision checkpoints are
        # up-cast during conversion.
        tensor = tensor.float()
        return flow.Tensor(tensor.detach().cpu().numpy())

    def _convert_tensors(self, torch_state_dict):
        """Convert every tensor in ``torch_state_dict`` to OneFlow, in place,
        and return the same dict."""
        for k, v in torch_state_dict.items():
            torch_state_dict[k] = self._convert_tensor(v)
        return torch_state_dict

    def _fix_key(self, state_dict):
        """Fix the key in state dict: Convert "gamma" to "weight" and "beta" to "bias".

        Args:
            state_dict (OrderedDict): state dict of pretrained model.

        Returns:
            OrderedDict: State dict after fix key.
        """
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if "gamma" in key:
                new_key = key.replace("gamma", "weight")
            if "beta" in key:
                new_key = key.replace("beta", "bias")
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        return state_dict

    def _fix_qkv_ordering(
        self, qkv, head_size, num_heads, hidden_size=None, checkpoint_version=0.0
    ):
        """Reorder a fused query/key/value parameter from the checkpoint's
        [q, k, v]-major layout to the head-major layout.

        Whether ``qkv`` is a weight or a bias is inferred from its rank.
        """
        # TODO(xzp): Different versions checkpoint
        hidden_size = (head_size * num_heads) if hidden_size is None else hidden_size
        num_of_qkv = qkv.shape[0] // (head_size * num_heads)
        mode = "weight" if qkv.ndim > 1 else "bias"
        if mode == "weight":
            qkv = qkv.view([num_of_qkv, num_heads, head_size, hidden_size])
            qkv = (
                qkv.permute(1, 0, 2, 3)
                .contiguous()
                .view(num_of_qkv * head_size * num_heads, hidden_size)
            )
        elif mode == "bias":
            qkv = qkv.view(num_of_qkv, num_heads, head_size)
            qkv = qkv.permute(1, 0, 2).contiguous().view(-1)
        return qkv

    def _convert_state_dict(self, flow_state_dict, cfg):
        """A function used to convert the checkpoint file of Huggingface to LiBai.

        Args:
            flow_state_dict (OrderedDict): state dict whose tensors were
                already converted to OneFlow.
            cfg (dict): model's default config dict in LiBai.

        Returns:
            OrderedDict: flow state dict.
        """
        raise NotImplementedError("_convert_state_dict not implemented")

    def _load_config_from_json(self, config_file):
        """load config from `config.json`, and update default config.

        Args:
            config_file (str): Path of config file.
        """
        raise NotImplementedError("_load_config_from_json not implemented")

    def _load_torch_state_dict(self, state_dict_file):
        """Load a ``pytorch_model.bin`` checkpoint onto CPU."""
        try:
            import torch
        except ImportError:
            raise ImportError("Load torch state dict need torch.")

        # load pytorch_model.bin
        state_dict = torch.load(state_dict_file, map_location="cpu")
        return state_dict

    def _update_cfg(self, keys_libai, value_target):
        """Update the libai_cfg according to target_cfg.

        Args:
            keys_libai (str): The key of libai_cfg.
            value_target (int | float): The value of target_cfg.
        """
        if keys_libai not in self.libai_cfg.keys():
            return
        if self.libai_cfg[keys_libai] != value_target:
            self.libai_cfg[keys_libai] = value_target

    def _update_cfg_log(self):
        """Log every config key whose value differs from the original config.

        Only the local rank-0 process logs, to avoid duplicated output.
        """
        if dist.get_local_rank() == 0:
            for key in sorted(self.libai_cfg):
                if self.origin_libai_cfg[key] == self.libai_cfg[key]:
                    continue
                self.changed_keys.add(key)
                temp_key = colored(key, "yellow")
                logger.info(
                    f"changed libai model cfg {temp_key} : "
                    f"{self.origin_libai_cfg[key]} -> {self.libai_cfg[key]} "
                )
            logger.warning(
                "The following model configurations has been modified according "
                "to `config.json` or kwargs: \n"
                f"{self.changed_keys} \n"
            )
            if dist.get_pipeline_parallel_size() > 1:
                logger.warning(
                    colored(
                        "If you use pipeline parallel, please "
                        "confirm the setting of `train.dist.pipeline_num_layers` \n",
                        "red",
                    )
                )

    def load(self):
        """Load model.

        # For example:

        # .. code-block:: python

        >>> import libai
        >>> from configs.common.models.bert import cfg
        >>> from libai.models.utils import BertLoaderHuggerFace

        >>> loader = BertLoaderHuggerFace(
                libai.models.BertModel,
                cfg,
                'path/bert-base-chinese'
            )
        >>> bert = loader.load()

        """
        if dist.is_main_process():
            if os.path.isdir(self.pretrained_model_path):
                # state_dict file pytorch
                if os.path.isfile(os.path.join(self.pretrained_model_path, WEIGHTS_NAME_PT)):
                    model_file = os.path.join(self.pretrained_model_path, WEIGHTS_NAME_PT)
                else:
                    raise EnvironmentError(
                        # Trailing space fixed: message used to render
                        # "foundin directory".
                        f"Error no file named {WEIGHTS_NAME_PT} found "
                        f"in directory {self.pretrained_model_path}."
                    )

                # config file
                if os.path.isfile(os.path.join(self.pretrained_model_path, CONFIG_NAME)):
                    config_file = os.path.join(self.pretrained_model_path, CONFIG_NAME)
                    # Load config and update config.
                    self._load_config_from_json(config_file)
                else:
                    import warnings

                    warnings.warn(
                        # Trailing space fixed: message used to render
                        # "directory<path>".
                        f"Error no file named {CONFIG_NAME} found in directory "
                        f"{self.pretrained_model_path}",
                        RuntimeWarning,
                    )
            else:
                raise EnvironmentError(f"{self.pretrained_model_path} is not a directory.")
            logger.info("loading torch model...")
            torch_state_dict = self._load_torch_state_dict(model_file)
            torch_state_dict = self._fix_key(torch_state_dict)
            logger.info("transfering torch model into oneflow model...")
            flow_state_dict = self._convert_tensors(torch_state_dict)
            # Pass the converted dict (not the torch one) for clarity; the two
            # names refer to the same object because _convert_tensors converts
            # in place.
            flow_state_dict = self._convert_state_dict(flow_state_dict, self.libai_cfg)
        else:
            flow_state_dict = None

        # Every rank needs the (possibly json-updated) config to build the model.
        self.libai_cfg = dist.broadcast_py_object(self.libai_cfg, src=0)

        # Instance model
        logger.info("building LiBai model...")
        if isinstance(self.model, omegaconf.dictconfig.DictConfig):
            self.model.cfg = self.libai_cfg
            self.model = build_model(self.model)
        else:
            self.model = build_model(LazyCall(self.model)(cfg=self.libai_cfg))

        # State_dict to global
        logger.info("transfering state_dict local to global...")
        flow_state_dict = self._state_dict_to_global(flow_state_dict, mode="pytorch")

        logger.info("loading model weights into LiBai...")
        # Load
        (
            model,
            missing_keys,
            unexpected_keys,
            mismatched_keys,
            error_msgs,
        ) = self._load_pretrained_model(self.model, flow_state_dict, self.pretrained_model_path)

        if self.output_loading_info:
            loading_info = {
                "missing_keys": missing_keys,
                "unexpected_keys": unexpected_keys,
                "mismatched_keys": mismatched_keys,
                "error_msgs": error_msgs,
            }
            return model, loading_info
        return model
| 22,702 | 36.964883 | 100 | py |
libai | libai-main/libai/tokenizer/tokenization_bert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for bert (wordpieces)."""
import collections
import logging
import os
import re
import unicodedata
from io import open
from typing import List, Optional
from .tokenization_base import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
logger = logging.getLogger(__name__)
# Canonical file name a BERT vocabulary is stored under.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
# Download URLs for the vocabularies of the stock BERT checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
        "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
        "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
        "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
        "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
    }
}
# Maximum input length (size of the positional-embedding table) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-chinese": 512,
}
# Default tokenizer options per checkpoint ("uncased" models lower-case input).
PRETRAINED_INIT_CONFIGURATION = {
    "bert-base-uncased": {"do_lower_case": True},
    "bert-large-uncased": {"do_lower_case": True},
    "bert-base-cased": {"do_lower_case": False},
    "bert-large-cased": {"do_lower_case": False},
    "bert-base-chinese": {"do_lower_case": False},
}
def load_vocab(vocab_file):
    """Read a one-token-per-line vocabulary file into an ordered
    token -> index mapping (line number is the index)."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        lines = reader.readlines()
    for idx, line in enumerate(lines):
        vocab[line.rstrip("\n")] = idx
    return vocab
def whitespace_tokenize(text):
    """Strip surrounding whitespace and split *text* on runs of whitespace."""
    stripped = text.strip()
    # An all-whitespace (or empty) input yields no tokens.
    return stripped.split() if stripped else []
def _is_chinese_substr(char):
return re.findall("##[\u4E00-\u9FA5]", char)
class BertTokenizer(PreTrainedTokenizer):
    """
    Construct a BERT tokenizer. Based on WordPiece.

    Args:
        vocab_file (:obj:`str`):
            Path to a one-wordpiece-per-line vocabulary file.
        do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to lower case the input.
            Only has an effect when do_basic_tokenize=True.
        do_basic_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to do basic tokenization before wordpiece.
        never_split (:obj:`Iterable`, `optional`):
            List of tokens which will never be split during tokenization.
            Only has an effect when do_basic_tokenize=True.
        tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to tokenize Chinese characters.
            This should likely be deactivated for Japanese,
            see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328.
        do_chinese_wwm (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to do whole word masking for Chinese.
            Chinese sentence will be segmented by a third-party tool first.
            Each substr will be added '##' prefix and its index will be calculated by
            id(##A) = id(A) + vocab_size.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        do_chinese_wwm=False,
        add_bos_token=False,
        **kwargs,
    ):
        super(BertTokenizer, self).__init__(
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the "
                "vocabulary from a Google pretrained model use "
                "`tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    vocab_file
                )
            )
        self.vocab = load_vocab(vocab_file)
        # Inverse mapping (id -> token) used when decoding.
        self.ids_to_tokens = collections.OrderedDict(
            [(ids, tok) for tok, ids in self.vocab.items()]
        )
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            # Chinese whole-word-mask mode uses the jieba-backed tokenizer.
            if do_chinese_wwm:
                self.basic_tokenizer = BasicTokenizerWithChineseWWM(
                    do_lower_case=do_lower_case,
                    never_split=never_split,
                    tokenize_chinese_chars=tokenize_chinese_chars,
                )
            else:
                self.basic_tokenizer = BasicTokenizer(
                    do_lower_case=do_lower_case,
                    never_split=never_split,
                    tokenize_chinese_chars=tokenize_chinese_chars,
                )
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
        self.add_bos_token = add_bos_token

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        # Merge in tokens added after loading the vocabulary file.
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split *text* into wordpieces (basic tokenization first, if enabled)."""
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                for sub_token in self.wordpiece_tokenizer.tokenize(token):
                    split_tokens.append(sub_token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab.

        NOTE(review): the class docstring describes an offset scheme for
        Chinese substrs (id = vocab_size + id(substr without '##')), but this
        implementation is a plain vocab lookup — confirm which is intended.
        Unknown tokens map to the id of ``unk_token``.
        """
        index = self.vocab.get(token, self.vocab.get(self.unk_token))
        return index

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab.

        Unknown indices map to ``unk_token`` (plain reverse lookup; no
        Chinese-substr offset handling here either).
        """
        token = self.ids_to_tokens.get(index, self.unk_token)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        # Dropping " ##" re-joins wordpieces to their preceding token.
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add special tokens to a sequence or a pair of sequence.

        BERT format sentence input:

        - single sequence: [CLS] tokens_a [SEP]
        - pair of sequences: [CLS] tokens_a [SEP] tokens_b [SEP]

        Special tokens are only inserted when ``add_bos_token`` is True.

        Args:
            token_ids_0 (List[int]): The token ids of sentence 0.
            token_ids_1 (List[int], optional): The token ids of sentence 1. Defaults to None.

        Returns:
            :obj:`List[str]`: The sequence after adding special toekens.
        """
        if self.add_bos_token:
            cls = [self.cls_token_id]
            sep = [self.sep_token_id]
        else:
            cls = []
            sep = []
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the tokenizer vocabulary to a directory or file."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory,
                (filename_prefix + "-" if filename_prefix else "")
                + VOCAB_FILES_NAMES["vocab_file"],
            )
        else:
            # Treat save_directory as a full file path.
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            # Write tokens ordered by id; warn if ids have gaps, since line
            # number is what load_vocab will use as the id.
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        "Saving vocabulary to {}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!".format(vocab_file)
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
class BasicTokenizer(object):
    """
    Constructs a BasicTokenizer that will run basic
    tokenization (punctuation splitting, lower casing, etc.).
    """

    def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True):
        """Constructs a BasicTokenizer.

        Args:
            **do_lower_case**: Whether to lower case the input.
            **never_split**: (`optional`) list of str
                Kept for backward compatibility purposes.
                Now implemented directly at the base class level
                (see :func:`PreTrainedTokenizer.tokenize`)
                List of token not to split.
            **tokenize_chinese_chars**: (`optional`) boolean (default True)
                Whether to tokenize Chinese characters.
                This should likely be deactivated for Japanese:
                see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
        """
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars

    def tokenize(self, text, never_split=None):
        """
        Basic Tokenization of a piece of text.

        Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer.

        Args:
            **never_split**: (`optional`) list of str
                Kept for backward compatibility purposes.
                Now implemented directly at the base class level
                (see :func:`PreTrainedTokenizer.tokenize`)
                List of token not to split.
        """
        # union() returns a new set by concatenating the two sets.
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            # Tokens in never_split skip lower-casing and accent stripping.
            if self.do_lower_case and token not in never_split:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD decomposition separates base characters from combining marks
        # (category "Mn"), which are then dropped.
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        if never_split is not None and text in never_split:
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                # Each punctuation char becomes its own token.
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the replacement character, and control characters;
            # normalize any whitespace to a single space.
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class BasicTokenizerWithChineseWWM(BasicTokenizer):
    """Pre-segmentation for Chinese sentences, which will be used in whole word mask.

    Overrides ``_tokenize_chinese_chars`` so that runs of CJK characters are
    first segmented into words by jieba instead of being split per character.
    """

    def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True):
        super(BasicTokenizerWithChineseWWM, self).__init__(
            do_lower_case=do_lower_case,
            never_split=never_split,
            tokenize_chinese_chars=tokenize_chinese_chars,
        )
        try:
            import jieba

            # HMM=False keeps the segmentation deterministic.
            self.pre_tokenizer = lambda x: jieba.lcut(x, HMM=False)
        except ImportError:
            raise ImportError("Chinese whole word mask need jieba")

    def _tokenize_chinese_chars(self, text):
        """For Chinese pieces, uses jieba to segment the words and
        adds whitespace around CJK character."""
        output = []
        piece = ""
        for char in text:
            if self._is_chinese_char(ord(char)):
                # Accumulate consecutive CJK chars into one piece for jieba.
                piece += char
            else:
                self._flush_piece(piece, output)
                piece = ""
                output.append(char)
        # Flush a trailing CJK run at the end of the text.
        self._flush_piece(piece, output)
        return "".join(output)

    def _flush_piece(self, piece, output):
        """Segment a run of CJK characters with jieba and append each word,
        surrounded by spaces, to *output*.

        Empty pieces are skipped, which avoids the original behavior of
        calling jieba once per non-CJK character.
        """
        if not piece:
            return
        for word in self.pre_tokenizer(piece):
            output.append(" ")
            output.append(word)
            output.append(" ")
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        # vocab: token -> id mapping; only membership is used here.
        self.vocab = vocab
        self.unk_token = unk_token
        # Words longer than this are mapped to unk_token wholesale.
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.

        For example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]
            input = "有没有"
            output = ["有", "##没", "##有"]

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through `BasicTokenizer`.

        Returns:
            A list of wordpiece tokens.
        """
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                # Greedily try the longest substring first, shrinking `end`
                # until a vocabulary match is found.
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    # Non-initial pieces carry the "##" continuation prefix.
                    if start > 0:
                        substr = "##" + substr
                    if substr.startswith("##"):
                        if _is_chinese_substr(substr):
                            # Chinese continuation pieces are looked up WITHOUT
                            # the "##" prefix but emitted WITH it.
                            if substr[2:] in self.vocab:  # for Chinese substr
                                cur_substr = substr
                                break
                        else:
                            if substr in self.vocab:  # for English substr
                                cur_substr = substr
                                break
                    else:
                        if (
                            substr in self.vocab
                        ):  # non-substr, maybe character or whole Chinese word
                            cur_substr = substr
                            break
                    end -= 1
                if cur_substr is None:
                    # No prefix of the remaining chars is in the vocab:
                    # the whole word becomes unk_token.
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
| 19,425 | 36.720388 | 99 | py |
libai | libai-main/libai/utils/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import fnmatch
import hashlib
import json
import logging
import os
import shutil
import sys
import tempfile
from functools import wraps
from io import open
from pathlib import Path
import boto3
import requests
import wget
from botocore.config import Config
from botocore.exceptions import ClientError
from tqdm import tqdm
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
cache_home = Path(os.getenv("OF_CACHE_ROOT", Path.home() / ".of_cache"))
default_cache_path = str(cache_home / "libai")
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    If the url ends with .h5 (Keras HDF5 weights) ands '.h5' to the name
    so that TF 2.0 can identify it as a HDF5 file
    (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3
    /tensorflow/python/keras/engine/network.py#L1380)
    """
    filename = hashlib.sha256(url.encode("utf-8")).hexdigest()
    if etag:
        filename = filename + "." + hashlib.sha256(etag.encode("utf-8")).hexdigest()
    if url.endswith(".h5"):
        filename = filename + ".h5"
    return filename
def filename_to_url(filename, cache_dir=None):
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = default_cache_path
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        raise EnvironmentError("file {} not found".format(cache_path))

    # Metadata lives next to the cached file, with a ".json" suffix.
    meta_path = cache_path + ".json"
    if not os.path.exists(meta_path):
        raise EnvironmentError("file {} not found".format(meta_path))

    with open(meta_path, encoding="utf-8") as meta_file:
        metadata = json.load(meta_file)
    return metadata["url"], metadata["etag"]
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None):
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.

    Args:
        cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).
        force_download: if True, re-dowload the file even if it's already cached in the cache dir.
    """
    if cache_dir is None:
        cache_dir = default_cache_path
    if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    scheme = urlparse(url_or_filename).scheme
    if scheme in ("http", "https", "s3"):
        # Remote resource: fetch through the cache (downloading if necessary).
        return get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
        )
    if os.path.exists(url_or_filename):
        # Local file that exists: return it as-is.
        return url_or_filename
    if scheme == "":
        # Looks like a local path but does not exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    # Neither a recognized URL scheme nor a local path.
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
    """Split a full s3 path into the bucket name and path."""
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    key = parsed.path
    # urlparse keeps a leading '/' on the key; drop exactly one.
    if key.startswith("/"):
        key = key[1:]
    return parsed.netloc, key
def s3_request(func):
    """
    Wrapper function for s3 requests in order to create more helpful error
    messages.
    """

    @wraps(func)
    def wrapper(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            # A 404 means the object/bucket does not exist; surface it as a
            # plain file-not-found so callers need not know about botocore.
            if int(exc.response["Error"]["Code"]) == 404:
                raise EnvironmentError("file {} not found".format(url))
            raise

    return wrapper
@s3_request
def s3_etag(url, proxies=None):
    """Check ETag on S3 object."""
    # Route any proxy settings into the underlying botocore session.
    s3_resource = boto3.resource("s3", config=Config(proxies=proxies))
    bucket_name, s3_path = split_s3_path(url)
    s3_object = s3_resource.Object(bucket_name, s3_path)
    # 404s are translated to EnvironmentError by the s3_request decorator.
    return s3_object.e_tag
@s3_request
def s3_get(url, temp_file, proxies=None):
    """Pull a file directly from S3."""
    s3_resource = boto3.resource("s3", config=Config(proxies=proxies))
    bucket_name, s3_path = split_s3_path(url)
    # Stream the object straight into the provided (open, binary) file object.
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file, proxies=None):
    """Stream `url` over HTTP(S) into `temp_file`, showing a progress bar.

    Args:
        url (str): Resource to download.
        temp_file (file-like): Open binary file object the payload is written to.
        proxies (dict, optional): Proxy mapping forwarded to ``requests``.
    """
    # Use the response as a context manager so the connection is released
    # even if iteration fails part-way (the original never closed it).
    with requests.get(url, stream=True, proxies=proxies) as req:
        content_length = req.headers.get("Content-Length")
        total = int(content_length) if content_length is not None else None
        progress = tqdm(unit="B", total=total)
        for chunk in req.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                progress.update(len(chunk))
                temp_file.write(chunk)
        progress.close()
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    Args:
        url: http(s) or ``s3://`` URL of the file to fetch.
        cache_dir: directory holding cached files; defaults to ``default_cache_path``.
        force_download: when True, re-download even if a cached copy exists.
        proxies: optional proxy mapping forwarded to requests/boto3.
        etag_timeout: timeout in seconds for the HEAD request that fetches the ETag.

    Returns:
        Path of the cached file inside ``cache_dir``.
    """
    if cache_dir is None:
        cache_dir = default_cache_path
    # Path objects only exist on Python 3; downstream os.path calls want str.
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url, proxies=proxies)
    else:
        try:
            response = requests.head(
                url, allow_redirects=True, proxies=proxies, timeout=etag_timeout
            )
            if response.status_code != 200:
                etag = None
            else:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # Offline / unreachable host: fall back to an etag-less cache name.
            etag = None
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # If we don't have a connection (etag is None) and can't identify the file
    # try to get the last downloaded one
    if not os.path.exists(cache_path) and etag is None:
        matching_files = fnmatch.filter(os.listdir(cache_dir), filename + ".*")
        # .json files are the metadata sidecars written below, not cached payloads.
        matching_files = list(filter(lambda s: not s.endswith(".json"), matching_files))
        if matching_files:
            # NOTE(review): picks the last entry in directory-listing order,
            # which is not guaranteed to be the newest download — confirm.
            cache_path = os.path.join(cache_dir, matching_files[-1])
    if not os.path.exists(cache_path) or force_download:
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )
            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file, proxies=proxies)
            else:
                http_get(url, temp_file, proxies=proxies)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, "wb") as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info("creating metadata file for %s", cache_path)
            # Sidecar JSON records where the cached blob came from.
            meta = {"url": url, "etag": etag}
            meta_path = cache_path + ".json"
            with open(meta_path, "w") as meta_file:
                output_string = json.dumps(meta)
                meta_file.write(output_string)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path
def get_md5(fname):
    """Return the hex MD5 digest of the file at ``fname``, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(fname, "rb") as handle:
        while True:
            block = handle.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
def download_file(out_path: str, url: str):
    """Fetch ``url`` with wget and store the result at ``out_path``."""
    logger.info(f"downloading from {url} to {out_path}")
    wget.download(url, out=out_path)
def get_data_from_cache(url, cache_dir=None, force_download=False, md5=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    Args:
        url: direct download URL; the basename of its path is the cache key.
        cache_dir: directory holding cached files; defaults to ``default_cache_path``.
        force_download: when True, re-download even if a cached copy exists.
        md5: optional expected MD5 hex digest; a cached file that does not
            match is deleted and fetched again.

    Returns:
        Path of the cached file inside ``cache_dir``.
    """
    if cache_dir is None:
        cache_dir = default_cache_path
    # Path objects only exist on Python 3; downstream os.path calls want str.
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    filename = url.split("/")[-1]
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # If we already have the file, verify the md5 (when provided) and
    # replace a corrupted copy.
    if os.path.exists(cache_path) and md5 is not None:
        local_file_md5 = get_md5(cache_path)
        if local_file_md5 != md5:
            os.unlink(cache_path)
            download_file(cache_path, url)
    # Fetch a missing file with wget — but only when force_download is not
    # set: the force_download branch below re-downloads via http_get anyway,
    # and the original code downloaded the file twice in that case.
    if not os.path.exists(cache_path) and not force_download:
        download_file(cache_path, url)
    if not os.path.exists(cache_path) or force_download:
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )
            # GET file object
            http_get(url, temp_file)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, "wb") as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info("creating metadata file for %s", cache_path)
            # Sidecar JSON records where the cached blob came from.
            meta = {"url": url}
            meta_path = cache_path + ".json"
            with open(meta_path, "w") as meta_file:
                output_string = json.dumps(meta)
                meta_file.write(output_string)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path
| 11,913 | 33.734694 | 99 | py |
libai | libai-main/libai/inference/utils/imagenet_class.py | IMAGENET_LABELS = [
"tench, Tinca tinca",
"goldfish, Carassius auratus",
"great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias", # noqa: E501
"tiger shark, Galeocerdo cuvieri",
"hammerhead, hammerhead shark",
"electric ray, crampfish, numbfish, torpedo",
"stingray",
"cock",
"hen",
"ostrich, Struthio camelus",
"brambling, Fringilla montifringilla",
"goldfinch, Carduelis carduelis",
"house finch, linnet, Carpodacus mexicanus",
"junco, snowbird",
"indigo bunting, indigo finch, indigo bird, Passerina cyanea",
"robin, American robin, Turdus migratorius",
"bulbul",
"jay",
"magpie",
"chickadee",
"water ouzel, dipper",
"kite",
"bald eagle, American eagle, Haliaeetus leucocephalus",
"vulture",
"great grey owl, great gray owl, Strix nebulosa",
"European fire salamander, Salamandra salamandra",
"common newt, Triturus vulgaris",
"eft",
"spotted salamander, Ambystoma maculatum",
"axolotl, mud puppy, Ambystoma mexicanum",
"bullfrog, Rana catesbeiana",
"tree frog, tree-frog",
"tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui",
"loggerhead, loggerhead turtle, Caretta caretta",
"leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea", # noqa: E501
"mud turtle",
"terrapin",
"box turtle, box tortoise",
"banded gecko",
"common iguana, iguana, Iguana iguana",
"American chameleon, anole, Anolis carolinensis",
"whiptail, whiptail lizard",
"agama",
"frilled lizard, Chlamydosaurus kingi",
"alligator lizard",
"Gila monster, Heloderma suspectum",
"green lizard, Lacerta viridis",
"African chameleon, Chamaeleo chamaeleon",
"Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis", # noqa: E501
"African crocodile, Nile crocodile, Crocodylus niloticus",
"American alligator, Alligator mississipiensis",
"triceratops",
"thunder snake, worm snake, Carphophis amoenus",
"ringneck snake, ring-necked snake, ring snake",
"hognose snake, puff adder, sand viper",
"green snake, grass snake",
"king snake, kingsnake",
"garter snake, grass snake",
"water snake",
"vine snake",
"night snake, Hypsiglena torquata",
"boa constrictor, Constrictor constrictor",
"rock python, rock snake, Python sebae",
"Indian cobra, Naja naja",
"green mamba",
"sea snake",
"horned viper, cerastes, sand viper, horned asp, Cerastes cornutus",
"diamondback, diamondback rattlesnake, Crotalus adamanteus",
"sidewinder, horned rattlesnake, Crotalus cerastes",
"trilobite",
"harvestman, daddy longlegs, Phalangium opilio",
"scorpion",
"black and gold garden spider, Argiope aurantia",
"barn spider, Araneus cavaticus",
"garden spider, Aranea diademata",
"black widow, Latrodectus mactans",
"tarantula",
"wolf spider, hunting spider",
"tick",
"centipede",
"black grouse",
"ptarmigan",
"ruffed grouse, partridge, Bonasa umbellus",
"prairie chicken, prairie grouse, prairie fowl",
"peacock",
"quail",
"partridge",
"African grey, African gray, Psittacus erithacus",
"macaw",
"sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita",
"lorikeet",
"coucal",
"bee eater",
"hornbill",
"hummingbird",
"jacamar",
"toucan",
"drake",
"red-breasted merganser, Mergus serrator",
"goose",
"black swan, Cygnus atratus",
"tusker",
"echidna, spiny anteater, anteater",
"platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus", # noqa: E501
"wallaby, brush kangaroo",
"koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus", # noqa: E501
"wombat",
"jellyfish",
"sea anemone, anemone",
"brain coral",
"flatworm, platyhelminth",
"nematode, nematode worm, roundworm",
"conch",
"snail",
"slug",
"sea slug, nudibranch",
"chiton, coat-of-mail shell, sea cradle, polyplacophore",
"chambered nautilus, pearly nautilus, nautilus",
"Dungeness crab, Cancer magister",
"rock crab, Cancer irroratus",
"fiddler crab",
"king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica", # noqa: E501
"American lobster, Northern lobster, Maine lobster, Homarus americanus", # noqa: E501
"spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish", # noqa: E501
"crayfish, crawfish, crawdad, crawdaddy",
"hermit crab",
"isopod",
"white stork, Ciconia ciconia",
"black stork, Ciconia nigra",
"spoonbill",
"flamingo",
"little blue heron, Egretta caerulea",
"American egret, great white heron, Egretta albus",
"bittern",
"crane",
"limpkin, Aramus pictus",
"European gallinule, Porphyrio porphyrio",
"American coot, marsh hen, mud hen, water hen, Fulica americana",
"bustard",
"ruddy turnstone, Arenaria interpres",
"red-backed sandpiper, dunlin, Erolia alpina",
"redshank, Tringa totanus",
"dowitcher",
"oystercatcher, oyster catcher",
"pelican",
"king penguin, Aptenodytes patagonica",
"albatross, mollymawk",
"grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus", # noqa: E501
"killer whale, killer, orca, grampus, sea wolf, Orcinus orca",
"dugong, Dugong dugon",
"sea lion",
"Chihuahua",
"Japanese spaniel",
"Maltese dog, Maltese terrier, Maltese",
"Pekinese, Pekingese, Peke",
"Shih-Tzu",
"Blenheim spaniel",
"papillon",
"toy terrier",
"Rhodesian ridgeback",
"Afghan hound, Afghan",
"basset, basset hound",
"beagle",
"bloodhound, sleuthhound",
"bluetick",
"black-and-tan coonhound",
"Walker hound, Walker foxhound",
"English foxhound",
"redbone",
"borzoi, Russian wolfhound",
"Irish wolfhound",
"Italian greyhound",
"whippet",
"Ibizan hound, Ibizan Podenco",
"Norwegian elkhound, elkhound",
"otterhound, otter hound",
"Saluki, gazelle hound",
"Scottish deerhound, deerhound",
"Weimaraner",
"Staffordshire bullterrier, Staffordshire bull terrier",
"American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier", # noqa: E501
"Bedlington terrier",
"Border terrier",
"Kerry blue terrier",
"Irish terrier",
"Norfolk terrier",
"Norwich terrier",
"Yorkshire terrier",
"wire-haired fox terrier",
"Lakeland terrier",
"Sealyham terrier, Sealyham",
"Airedale, Airedale terrier",
"cairn, cairn terrier",
"Australian terrier",
"Dandie Dinmont, Dandie Dinmont terrier",
"Boston bull, Boston terrier",
"miniature schnauzer",
"giant schnauzer",
"standard schnauzer",
"Scotch terrier, Scottish terrier, Scottie",
"Tibetan terrier, chrysanthemum dog",
"silky terrier, Sydney silky",
"soft-coated wheaten terrier",
"West Highland white terrier",
"Lhasa, Lhasa apso",
"flat-coated retriever",
"curly-coated retriever",
"golden retriever",
"Labrador retriever",
"Chesapeake Bay retriever",
"German short-haired pointer",
"vizsla, Hungarian pointer",
"English setter",
"Irish setter, red setter",
"Gordon setter",
"Brittany spaniel",
"clumber, clumber spaniel",
"English springer, English springer spaniel",
"Welsh springer spaniel",
"cocker spaniel, English cocker spaniel, cocker",
"Sussex spaniel",
"Irish water spaniel",
"kuvasz",
"schipperke",
"groenendael",
"malinois",
"briard",
"kelpie",
"komondor",
"Old English sheepdog, bobtail",
"Shetland sheepdog, Shetland sheep dog, Shetland",
"collie",
"Border collie",
"Bouvier des Flandres, Bouviers des Flandres",
"Rottweiler",
"German shepherd, German shepherd dog, German police dog, alsatian",
"Doberman, Doberman pinscher",
"miniature pinscher",
"Greater Swiss Mountain dog",
"Bernese mountain dog",
"Appenzeller",
"EntleBucher",
"boxer",
"bull mastiff",
"Tibetan mastiff",
"French bulldog",
"Great Dane",
"Saint Bernard, St Bernard",
"Eskimo dog, husky",
"malamute, malemute, Alaskan malamute",
"Siberian husky",
"dalmatian, coach dog, carriage dog",
"affenpinscher, monkey pinscher, monkey dog",
"basenji",
"pug, pug-dog",
"Leonberg",
"Newfoundland, Newfoundland dog",
"Great Pyrenees",
"Samoyed, Samoyede",
"Pomeranian",
"chow, chow chow",
"keeshond",
"Brabancon griffon",
"Pembroke, Pembroke Welsh corgi",
"Cardigan, Cardigan Welsh corgi",
"toy poodle",
"miniature poodle",
"standard poodle",
"Mexican hairless",
"timber wolf, grey wolf, gray wolf, Canis lupus",
"white wolf, Arctic wolf, Canis lupus tundrarum",
"red wolf, maned wolf, Canis rufus, Canis niger",
"coyote, prairie wolf, brush wolf, Canis latrans",
"dingo, warrigal, warragal, Canis dingo",
"dhole, Cuon alpinus",
"African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus",
"hyena, hyaena",
"red fox, Vulpes vulpes",
"kit fox, Vulpes macrotis",
"Arctic fox, white fox, Alopex lagopus",
"grey fox, gray fox, Urocyon cinereoargenteus",
"tabby, tabby cat",
"tiger cat",
"Persian cat",
"Siamese cat, Siamese",
"Egyptian cat",
"cougar, puma, catamount, mountain lion, painter, panther, Felis concolor", # noqa: E501
"lynx, catamount",
"leopard, Panthera pardus",
"snow leopard, ounce, Panthera uncia",
"jaguar, panther, Panthera onca, Felis onca",
"lion, king of beasts, Panthera leo",
"tiger, Panthera tigris",
"cheetah, chetah, Acinonyx jubatus",
"brown bear, bruin, Ursus arctos",
"American black bear, black bear, Ursus americanus, Euarctos americanus", # noqa: E501
"ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus",
"sloth bear, Melursus ursinus, Ursus ursinus",
"mongoose",
"meerkat, mierkat",
"tiger beetle",
"ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle",
"ground beetle, carabid beetle",
"long-horned beetle, longicorn, longicorn beetle",
"leaf beetle, chrysomelid",
"dung beetle",
"rhinoceros beetle",
"weevil",
"fly",
"bee",
"ant, emmet, pismire",
"grasshopper, hopper",
"cricket",
"walking stick, walkingstick, stick insect",
"cockroach, roach",
"mantis, mantid",
"cicada, cicala",
"leafhopper",
"lacewing, lacewing fly",
"dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", # noqa: E501
"damselfly",
"admiral",
"ringlet, ringlet butterfly",
"monarch, monarch butterfly, milkweed butterfly, Danaus plexippus",
"cabbage butterfly",
"sulphur butterfly, sulfur butterfly",
"lycaenid, lycaenid butterfly",
"starfish, sea star",
"sea urchin",
"sea cucumber, holothurian",
"wood rabbit, cottontail, cottontail rabbit",
"hare",
"Angora, Angora rabbit",
"hamster",
"porcupine, hedgehog",
"fox squirrel, eastern fox squirrel, Sciurus niger",
"marmot",
"beaver",
"guinea pig, Cavia cobaya",
"sorrel",
"zebra",
"hog, pig, grunter, squealer, Sus scrofa",
"wild boar, boar, Sus scrofa",
"warthog",
"hippopotamus, hippo, river horse, Hippopotamus amphibius",
"ox",
"water buffalo, water ox, Asiatic buffalo, Bubalus bubalis",
"bison",
"ram, tup",
"bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis", # noqa: E501
"ibex, Capra ibex",
"hartebeest",
"impala, Aepyceros melampus",
"gazelle",
"Arabian camel, dromedary, Camelus dromedarius",
"llama",
"weasel",
"mink",
"polecat, fitch, foulmart, foumart, Mustela putorius",
"black-footed ferret, ferret, Mustela nigripes",
"otter",
"skunk, polecat, wood pussy",
"badger",
"armadillo",
"three-toed sloth, ai, Bradypus tridactylus",
"orangutan, orang, orangutang, Pongo pygmaeus",
"gorilla, Gorilla gorilla",
"chimpanzee, chimp, Pan troglodytes",
"gibbon, Hylobates lar",
"siamang, Hylobates syndactylus, Symphalangus syndactylus",
"guenon, guenon monkey",
"patas, hussar monkey, Erythrocebus patas",
"baboon",
"macaque",
"langur",
"colobus, colobus monkey",
"proboscis monkey, Nasalis larvatus",
"marmoset",
"capuchin, ringtail, Cebus capucinus",
"howler monkey, howler",
"titi, titi monkey",
"spider monkey, Ateles geoffroyi",
"squirrel monkey, Saimiri sciureus",
"Madagascar cat, ring-tailed lemur, Lemur catta",
"indri, indris, Indri indri, Indri brevicaudatus",
"Indian elephant, Elephas maximus",
"African elephant, Loxodonta africana",
"lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens",
"giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca",
"barracouta, snoek",
"eel",
"coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch", # noqa: E501
"rock beauty, Holocanthus tricolor",
"anemone fish",
"sturgeon",
"gar, garfish, garpike, billfish, Lepisosteus osseus",
"lionfish",
"puffer, pufferfish, blowfish, globefish",
"abacus",
"abaya",
"academic gown, academic robe, judge's robe",
"accordion, piano accordion, squeeze box",
"acoustic guitar",
"aircraft carrier, carrier, flattop, attack aircraft carrier",
"airliner",
"airship, dirigible",
"altar",
"ambulance",
"amphibian, amphibious vehicle",
"analog clock",
"apiary, bee house",
"apron",
"ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin", # noqa: E501
"assault rifle, assault gun",
"backpack, back pack, knapsack, packsack, rucksack, haversack",
"bakery, bakeshop, bakehouse",
"balance beam, beam",
"balloon",
"ballpoint, ballpoint pen, ballpen, Biro",
"Band Aid",
"banjo",
"bannister, banister, balustrade, balusters, handrail",
"barbell",
"barber chair",
"barbershop",
"barn",
"barometer",
"barrel, cask",
"barrow, garden cart, lawn cart, wheelbarrow",
"baseball",
"basketball",
"bassinet",
"bassoon",
"bathing cap, swimming cap",
"bath towel",
"bathtub, bathing tub, bath, tub",
"beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon", # noqa: E501
"beacon, lighthouse, beacon light, pharos",
"beaker",
"bearskin, busby, shako",
"beer bottle",
"beer glass",
"bell cote, bell cot",
"bib",
"bicycle-built-for-two, tandem bicycle, tandem",
"bikini, two-piece",
"binder, ring-binder",
"binoculars, field glasses, opera glasses",
"birdhouse",
"boathouse",
"bobsled, bobsleigh, bob",
"bolo tie, bolo, bola tie, bola",
"bonnet, poke bonnet",
"bookcase",
"bookshop, bookstore, bookstall",
"bottlecap",
"bow",
"bow tie, bow-tie, bowtie",
"brass, memorial tablet, plaque",
"brassiere, bra, bandeau",
"breakwater, groin, groyne, mole, bulwark, seawall, jetty",
"breastplate, aegis, egis",
"broom",
"bucket, pail",
"buckle",
"bulletproof vest",
"bullet train, bullet",
"butcher shop, meat market",
"cab, hack, taxi, taxicab",
"caldron, cauldron",
"candle, taper, wax light",
"cannon",
"canoe",
"can opener, tin opener",
"cardigan",
"car mirror",
"carousel, carrousel, merry-go-round, roundabout, whirligig",
"carpenter's kit, tool kit",
"carton",
"car wheel",
"cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM", # noqa: E501
"cassette",
"cassette player",
"castle",
"catamaran",
"CD player",
"cello, violoncello",
"cellular telephone, cellular phone, cellphone, cell, mobile phone",
"chain",
"chainlink fence",
"chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour", # noqa: E501
"chain saw, chainsaw",
"chest",
"chiffonier, commode",
"chime, bell, gong",
"china cabinet, china closet",
"Christmas stocking",
"church, church building",
"cinema, movie theater, movie theatre, movie house, picture palace",
"cleaver, meat cleaver, chopper",
"cliff dwelling",
"cloak",
"clog, geta, patten, sabot",
"cocktail shaker",
"coffee mug",
"coffeepot",
"coil, spiral, volute, whorl, helix",
"combination lock",
"computer keyboard, keypad",
"confectionery, confectionary, candy store",
"container ship, containership, container vessel",
"convertible",
"corkscrew, bottle screw",
"cornet, horn, trumpet, trump",
"cowboy boot",
"cowboy hat, ten-gallon hat",
"cradle",
"crane",
"crash helmet",
"crate",
"crib, cot",
"Crock Pot",
"croquet ball",
"crutch",
"cuirass",
"dam, dike, dyke",
"desk",
"desktop computer",
"dial telephone, dial phone",
"diaper, nappy, napkin",
"digital clock",
"digital watch",
"dining table, board",
"dishrag, dishcloth",
"dishwasher, dish washer, dishwashing machine",
"disk brake, disc brake",
"dock, dockage, docking facility",
"dogsled, dog sled, dog sleigh",
"dome",
"doormat, welcome mat",
"drilling platform, offshore rig",
"drum, membranophone, tympan",
"drumstick",
"dumbbell",
"Dutch oven",
"electric fan, blower",
"electric guitar",
"electric locomotive",
"entertainment center",
"envelope",
"espresso maker",
"face powder",
"feather boa, boa",
"file, file cabinet, filing cabinet",
"fireboat",
"fire engine, fire truck",
"fire screen, fireguard",
"flagpole, flagstaff",
"flute, transverse flute",
"folding chair",
"football helmet",
"forklift",
"fountain",
"fountain pen",
"four-poster",
"freight car",
"French horn, horn",
"frying pan, frypan, skillet",
"fur coat",
"garbage truck, dustcart",
"gasmask, respirator, gas helmet",
"gas pump, gasoline pump, petrol pump, island dispenser",
"goblet",
"go-kart",
"golf ball",
"golfcart, golf cart",
"gondola",
"gong, tam-tam",
"gown",
"grand piano, grand",
"greenhouse, nursery, glasshouse",
"grille, radiator grille",
"grocery store, grocery, food market, market",
"guillotine",
"hair slide",
"hair spray",
"half track",
"hammer",
"hamper",
"hand blower, blow dryer, blow drier, hair dryer, hair drier",
"hand-held computer, hand-held microcomputer",
"handkerchief, hankie, hanky, hankey",
"hard disc, hard disk, fixed disk",
"harmonica, mouth organ, harp, mouth harp",
"harp",
"harvester, reaper",
"hatchet",
"holster",
"home theater, home theatre",
"honeycomb",
"hook, claw",
"hoopskirt, crinoline",
"horizontal bar, high bar",
"horse cart, horse-cart",
"hourglass",
"iPod",
"iron, smoothing iron",
"jack-o'-lantern",
"jean, blue jean, denim",
"jeep, landrover",
"jersey, T-shirt, tee shirt",
"jigsaw puzzle",
"jinrikisha, ricksha, rickshaw",
"joystick",
"kimono",
"knee pad",
"knot",
"lab coat, laboratory coat",
"ladle",
"lampshade, lamp shade",
"laptop, laptop computer",
"lawn mower, mower",
"lens cap, lens cover",
"letter opener, paper knife, paperknife",
"library",
"lifeboat",
"lighter, light, igniter, ignitor",
"limousine, limo",
"liner, ocean liner",
"lipstick, lip rouge",
"Loafer",
"lotion",
"loudspeaker, speaker, speaker unit, loudspeaker system, speaker system", # noqa: E501
"loupe, jeweler's loupe",
"lumbermill, sawmill",
"magnetic compass",
"mailbag, postbag",
"mailbox, letter box",
"maillot",
"maillot, tank suit",
"manhole cover",
"maraca",
"marimba, xylophone",
"mask",
"matchstick",
"maypole",
"maze, labyrinth",
"measuring cup",
"medicine chest, medicine cabinet",
"megalith, megalithic structure",
"microphone, mike",
"microwave, microwave oven",
"military uniform",
"milk can",
"minibus",
"miniskirt, mini",
"minivan",
"missile",
"mitten",
"mixing bowl",
"mobile home, manufactured home",
"Model T",
"modem",
"monastery",
"monitor",
"moped",
"mortar",
"mortarboard",
"mosque",
"mosquito net",
"motor scooter, scooter",
"mountain bike, all-terrain bike, off-roader",
"mountain tent",
"mouse, computer mouse",
"mousetrap",
"moving van",
"muzzle",
"nail",
"neck brace",
"necklace",
"nipple",
"notebook, notebook computer",
"obelisk",
"oboe, hautboy, hautbois",
"ocarina, sweet potato",
"odometer, hodometer, mileometer, milometer",
"oil filter",
"organ, pipe organ",
"oscilloscope, scope, cathode-ray oscilloscope, CRO",
"overskirt",
"oxcart",
"oxygen mask",
"packet",
"paddle, boat paddle",
"paddlewheel, paddle wheel",
"padlock",
"paintbrush",
"pajama, pyjama, pj's, jammies",
"palace",
"panpipe, pandean pipe, syrinx",
"paper towel",
"parachute, chute",
"parallel bars, bars",
"park bench",
"parking meter",
"passenger car, coach, carriage",
"patio, terrace",
"pay-phone, pay-station",
"pedestal, plinth, footstall",
"pencil box, pencil case",
"pencil sharpener",
"perfume, essence",
"Petri dish",
"photocopier",
"pick, plectrum, plectron",
"pickelhaube",
"picket fence, paling",
"pickup, pickup truck",
"pier",
"piggy bank, penny bank",
"pill bottle",
"pillow",
"ping-pong ball",
"pinwheel",
"pirate, pirate ship",
"pitcher, ewer",
"plane, carpenter's plane, woodworking plane",
"planetarium",
"plastic bag",
"plate rack",
"plow, plough",
"plunger, plumber's helper",
"Polaroid camera, Polaroid Land camera",
"pole",
"police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria", # noqa: E501
"poncho",
"pool table, billiard table, snooker table",
"pop bottle, soda bottle",
"pot, flowerpot",
"potter's wheel",
"power drill",
"prayer rug, prayer mat",
"printer",
"prison, prison house",
"projectile, missile",
"projector",
"puck, hockey puck",
"punching bag, punch bag, punching ball, punchball",
"purse",
"quill, quill pen",
"quilt, comforter, comfort, puff",
"racer, race car, racing car",
"racket, racquet",
"radiator",
"radio, wireless",
"radio telescope, radio reflector",
"rain barrel",
"recreational vehicle, RV, R.V.",
"reel",
"reflex camera",
"refrigerator, icebox",
"remote control, remote",
"restaurant, eating house, eating place, eatery",
"revolver, six-gun, six-shooter",
"rifle",
"rocking chair, rocker",
"rotisserie",
"rubber eraser, rubber, pencil eraser",
"rugby ball",
"rule, ruler",
"running shoe",
"safe",
"safety pin",
"saltshaker, salt shaker",
"sandal",
"sarong",
"sax, saxophone",
"scabbard",
"scale, weighing machine",
"school bus",
"schooner",
"scoreboard",
"screen, CRT screen",
"screw",
"screwdriver",
"seat belt, seatbelt",
"sewing machine",
"shield, buckler",
"shoe shop, shoe-shop, shoe store",
"shoji",
"shopping basket",
"shopping cart",
"shovel",
"shower cap",
"shower curtain",
"ski",
"ski mask",
"sleeping bag",
"slide rule, slipstick",
"sliding door",
"slot, one-armed bandit",
"snorkel",
"snowmobile",
"snowplow, snowplough",
"soap dispenser",
"soccer ball",
"sock",
"solar dish, solar collector, solar furnace",
"sombrero",
"soup bowl",
"space bar",
"space heater",
"space shuttle",
"spatula",
"speedboat",
"spider web, spider's web",
"spindle",
"sports car, sport car",
"spotlight, spot",
"stage",
"steam locomotive",
"steel arch bridge",
"steel drum",
"stethoscope",
"stole",
"stone wall",
"stopwatch, stop watch",
"stove",
"strainer",
"streetcar, tram, tramcar, trolley, trolley car",
"stretcher",
"studio couch, day bed",
"stupa, tope",
"submarine, pigboat, sub, U-boat",
"suit, suit of clothes",
"sundial",
"sunglass",
"sunglasses, dark glasses, shades",
"sunscreen, sunblock, sun blocker",
"suspension bridge",
"swab, swob, mop",
"sweatshirt",
"swimming trunks, bathing trunks",
"swing",
"switch, electric switch, electrical switch",
"syringe",
"table lamp",
"tank, army tank, armored combat vehicle, armoured combat vehicle",
"tape player",
"teapot",
"teddy, teddy bear",
"television, television system",
"tennis ball",
"thatch, thatched roof",
"theater curtain, theatre curtain",
"thimble",
"thresher, thrasher, threshing machine",
"throne",
"tile roof",
"toaster",
"tobacco shop, tobacconist shop, tobacconist",
"toilet seat",
"torch",
"totem pole",
"tow truck, tow car, wrecker",
"toyshop",
"tractor",
"trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi", # noqa: E501
"tray",
"trench coat",
"tricycle, trike, velocipede",
"trimaran",
"tripod",
"triumphal arch",
"trolleybus, trolley coach, trackless trolley",
"trombone",
"tub, vat",
"turnstile",
"typewriter keyboard",
"umbrella",
"unicycle, monocycle",
"upright, upright piano",
"vacuum, vacuum cleaner",
"vase",
"vault",
"velvet",
"vending machine",
"vestment",
"viaduct",
"violin, fiddle",
"volleyball",
"waffle iron",
"wall clock",
"wallet, billfold, notecase, pocketbook",
"wardrobe, closet, press",
"warplane, military plane",
"washbasin, handbasin, washbowl, lavabo, wash-hand basin",
"washer, automatic washer, washing machine",
"water bottle",
"water jug",
"water tower",
"whiskey jug",
"whistle",
"wig",
"window screen",
"window shade",
"Windsor tie",
"wine bottle",
"wing",
"wok",
"wooden spoon",
"wool, woolen, woollen",
"worm fence, snake fence, snake-rail fence, Virginia fence",
"wreck",
"yawl",
"yurt",
"web site, website, internet site, site",
"comic book",
"crossword puzzle, crossword",
"street sign",
"traffic light, traffic signal, stoplight",
"book jacket, dust cover, dust jacket, dust wrapper",
"menu",
"plate",
"guacamole",
"consomme",
"hot pot, hotpot",
"trifle",
"ice cream, icecream",
"ice lolly, lolly, lollipop, popsicle",
"French loaf",
"bagel, beigel",
"pretzel",
"cheeseburger",
"hotdog, hot dog, red hot",
"mashed potato",
"head cabbage",
"broccoli",
"cauliflower",
"zucchini, courgette",
"spaghetti squash",
"acorn squash",
"butternut squash",
"cucumber, cuke",
"artichoke, globe artichoke",
"bell pepper",
"cardoon",
"mushroom",
"Granny Smith",
"strawberry",
"orange",
"lemon",
"fig",
"pineapple, ananas",
"banana",
"jackfruit, jak, jack",
"custard apple",
"pomegranate",
"hay",
"carbonara",
"chocolate sauce, chocolate syrup",
"dough",
"meat loaf, meatloaf",
"pizza, pizza pie",
"potpie",
"burrito",
"red wine",
"espresso",
"cup",
"eggnog",
"alp",
"bubble",
"cliff, drop, drop-off",
"coral reef",
"geyser",
"lakeside, lakeshore",
"promontory, headland, head, foreland",
"sandbar, sand bar",
"seashore, coast, seacoast, sea-coast",
"valley, vale",
"volcano",
"ballplayer, baseball player",
"groom, bridegroom",
"scuba diver",
"rapeseed",
"daisy",
"yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", # noqa: E501
"corn",
"acorn",
"hip, rose hip, rosehip",
"buckeye, horse chestnut, conker",
"coral fungus",
"agaric",
"gyromitra",
"stinkhorn, carrion fungus",
"earthstar",
"hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa", # noqa: E501
"bolete",
"ear, spike, capitulum",
"toilet tissue, toilet paper, bathroom tissue",
]
| 29,033 | 27.947159 | 142 | py |
libai | libai-main/projects/mock_transformers/dist_infer_opt.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import init_env # noqa
import oneflow as flow
from omegaconf import DictConfig
from oneflow.utils.global_view import global_mode
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.models.opt import modeling_opt
from libai.layers import Linear
from libai.utils import distributed as dist
# ------replace attention to libai------
temp_class = modeling_opt.OPTAttention


class LiBaiOPTAttention(temp_class):
    """OPTAttention with its four projections swapped for tensor-parallel libai Linears.

    q/k/v projections use column parallelism and the output projection uses row
    parallelism, the standard Megatron-style split for multi-head attention.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Prefer explicit keyword arguments, but fall back to the attributes the
        # parent __init__ just set: the original indexed kwargs["embed_dim"] and
        # kwargs["bias"] unconditionally, which raises KeyError when the parent
        # is constructed positionally or ``bias`` is left at its default.
        embed_dim = kwargs.get("embed_dim", self.embed_dim)
        bias = kwargs.get("bias", self.out_proj.bias is not None)
        self.k_proj = Linear(embed_dim, embed_dim, bias=bias, parallel="col", dtype=flow.float16)
        self.v_proj = Linear(embed_dim, embed_dim, bias=bias, parallel="col", dtype=flow.float16)
        self.q_proj = Linear(embed_dim, embed_dim, bias=bias, parallel="col", dtype=flow.float16)
        self.out_proj = Linear(embed_dim, embed_dim, bias=bias, parallel="row", dtype=flow.float16)


modeling_opt.OPTAttention = LiBaiOPTAttention
# ----------replace Decoder to libai -----
temp_class = modeling_opt.OPTDecoderLayer


class LiBaiOPTDecoderLayer(temp_class):
    """OPTDecoderLayer whose feed-forward Linears are replaced by tensor-parallel libai layers."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Accept the config positionally (as transformers passes it) or as a
        # keyword: the original indexed args[0] unconditionally and broke on
        # keyword-style construction.
        config = args[0] if args else kwargs["config"]
        # fc1 expands the hidden size (column-parallel); fc2 projects it back
        # (row-parallel), so only one all-reduce is needed per MLP.
        self.fc1 = Linear(
            self.embed_dim,
            config.ffn_dim,
            bias=config.enable_bias,
            parallel="col",
            dtype=flow.float16,
        )
        self.fc2 = Linear(
            config.ffn_dim,
            self.embed_dim,
            bias=config.enable_bias,
            parallel="row",
            dtype=flow.float16,
        )


modeling_opt.OPTDecoderLayer = LiBaiOPTDecoderLayer
if __name__ == "__main__":
    # Distributed setup: pure tensor parallelism across 2 ranks.
    parallel_config = DictConfig(
        dict(
            data_parallel_size=1,
            tensor_parallel_size=2,
            pipeline_parallel_size=1,  # must stay 1; pipeline parallelism is not supported here
            pipeline_num_layers=None,
            device_type="cpu",  # weights are loaded on CPU first, moved to GPU below
        )
    )
    dist.setup_dist_util(parallel_config)
    # Build the (monkey-patched) OPT model and load pretrained fp16 weights.
    model = AutoModelForCausalLM.from_pretrained("facebook/opt-2.7b", torch_dtype=flow.float16)
    # Move the model to CUDA and shard parameters per the parallel config above.
    dist.set_device_type("cuda")
    model._apply(dist.convert_to_distributed_default_setting)
    # Slow tokenizer avoids the Rust fast-tokenizer dependency.
    tokenizer = AutoTokenizer.from_pretrained("facebook/opt-2.7b", use_fast=False)
    # Tokenize as numpy, then convert to a broadcast global tensor on the first stage.
    prompt = "Hello, I'm am conscious and"
    input_ids = tokenizer(prompt, return_tensors="np").input_ids
    input_ids = flow.from_numpy(input_ids)
    input_ids = input_ids.to_global(
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=dist.get_layer_placement(0),
    )
    # Generate inside global mode so tensors created by `generate` default to
    # this placement/sbp.
    placement_sbp_dict = dict(
        placement=flow.env.all_device_placement("cuda"),
        sbp=flow.sbp.broadcast,
    )
    with global_mode(True, **placement_sbp_dict):
        generated_ids = model.generate(input_ids, max_length=30)
    out_put_ids = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    # Only rank 0 prints, to avoid duplicated output across ranks.
    if dist.is_main_process():
        print(out_put_ids)
| 3,789 | 32.839286 | 99 | py |
libai | libai-main/projects/mock_transformers/dist_infer_llama.py | # coding=utf-8
# Copyright 2021 The Sugon Authors. All rights reserved.
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import init_env # noqa
import oneflow as flow
from omegaconf import DictConfig
from oneflow.utils.global_view import global_mode
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.models.llama import modeling_llama
from libai.layers import Linear
from libai.utils import distributed as dist
# ------replace attention to libai------
temp_class = modeling_llama.LlamaAttention


class LiBaiLlamaAttention(temp_class):
    """LLaMA attention rebuilt with LiBai tensor-parallel projections.

    q/k/v projections are column-parallel, the output projection is
    row-parallel; all are bias-free fp16 layers, matching upstream LLaMA.
    """

    def __init__(self, config):
        super().__init__(config)
        inner_dim = self.num_heads * self.head_dim
        projection_layout = (
            ("q_proj", self.hidden_size, inner_dim, "col"),
            ("k_proj", self.hidden_size, inner_dim, "col"),
            ("v_proj", self.hidden_size, inner_dim, "col"),
            ("o_proj", inner_dim, self.hidden_size, "row"),
        )
        for attr_name, in_dim, out_dim, parallel_mode in projection_layout:
            setattr(
                self,
                attr_name,
                Linear(in_dim, out_dim, bias=False, parallel=parallel_mode, dtype=flow.float16),
            )


modeling_llama.LlamaAttention = LiBaiLlamaAttention
# ----------replace mlp to libai -----
temp_class = modeling_llama.LlamaMLP


class LiBaiLlamaMLP(temp_class):
    """LLaMA MLP rebuilt with LiBai tensor-parallel Linear layers.

    ``gate_proj``/``up_proj`` expand hidden_size -> intermediate_size and must
    both be column-parallel: their outputs are multiplied elementwise, so they
    have to shard the intermediate dimension identically.  ``down_proj``
    contracts back to hidden_size and is row-parallel so its output is reduced
    across tensor-parallel ranks.  This matches the layout used by the sibling
    patches (OPT fc1/fc2, Bloom dense_h_to_4h/dense_4h_to_h).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): assumes callers pass these as keyword arguments.
        hidden_size = kwargs["hidden_size"]
        intermediate_size = kwargs["intermediate_size"]
        self.gate_proj = Linear(
            hidden_size, intermediate_size, bias=False, parallel="col", dtype=flow.float16
        )
        # Fixed: down_proj is the contracting projection -> row-parallel
        # (was "col", which breaks the tensor-parallel sharding layout).
        self.down_proj = Linear(
            intermediate_size, hidden_size, bias=False, parallel="row", dtype=flow.float16
        )
        # Fixed: up_proj expands like gate_proj -> column-parallel (was "row").
        self.up_proj = Linear(
            hidden_size, intermediate_size, bias=False, parallel="col", dtype=flow.float16
        )


modeling_llama.LlamaMLP = LiBaiLlamaMLP
if __name__ == "__main__":
    # Distributed setup: pure tensor parallelism across 4 ranks.
    parallel_config = DictConfig(
        dict(
            data_parallel_size=1,
            tensor_parallel_size=4,
            pipeline_parallel_size=1,  # must stay 1; pipeline parallelism is not supported here
            pipeline_num_layers=None,
            device_type="cpu",  # weights are loaded on CPU first, moved to GPU below
        )
    )
    dist.setup_dist_util(parallel_config)
    # Build the (monkey-patched) LLaMA model and load pretrained fp16 weights.
    model = AutoModelForCausalLM.from_pretrained(
        "decapoda-research/llama-13b-hf", torch_dtype=flow.float16
    )
    # Move the model to CUDA and shard parameters per the parallel config above.
    dist.set_device_type("cuda")
    model._apply(dist.convert_to_distributed_default_setting)
    # Slow tokenizer avoids the Rust fast-tokenizer dependency.
    tokenizer = AutoTokenizer.from_pretrained("decapoda-research/llama-13b-hf", use_fast=False)
    # Tokenize as numpy, then convert to a broadcast global tensor on the first stage.
    prompt = "Hello, I'm am conscious and"
    input_ids = tokenizer(prompt, return_tensors="np").input_ids
    input_ids = flow.from_numpy(input_ids)
    input_ids = input_ids.to_global(
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=dist.get_layer_placement(0),
    )
    # Generate inside global mode so tensors created by `generate` default to
    # this placement/sbp.
    placement_sbp_dict = dict(
        placement=flow.env.all_device_placement("cuda"),
        sbp=flow.sbp.broadcast,
    )
    with global_mode(True, **placement_sbp_dict):
        generated_ids = model.generate(input_ids, max_length=30)
    out_put_ids = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    # Only rank 0 prints, to avoid duplicated output across ranks.
    if dist.is_main_process():
        print(out_put_ids)
| 4,277 | 31.656489 | 95 | py |
libai | libai-main/projects/mock_transformers/init_env.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------mock torch, put it in the first line-----------
import oneflow as flow
flow.mock_torch.enable(lazy=True)
from oneflow import Tensor, nn # noqa
from transformers import modeling_utils # noqa
from transformers.modeling_utils import _load_state_dict_into_model # noqa
# ---------------- mock _load_state_dict_into_model ------------------
def new_load(model_to_load, state_dict, start_prefix):
    """OneFlow-aware replacement for transformers' ``_load_state_dict_into_model``.

    Besides the upstream behavior (renaming legacy ``gamma``/``beta`` keys and
    recursively calling ``_load_from_state_dict``), this version re-lays-out
    every checkpoint tensor whose model counterpart is a global (distributed)
    tensor so its sbp/placement matches the model parameter before loading.

    Args:
        model_to_load: the (possibly globally-placed) model to fill.
        state_dict: checkpoint state dict; a shallow copy is mutated here.
        start_prefix: key prefix used by upstream transformers when checkpoint
            keys are nested under the base-model attribute name.

    Returns:
        list[str]: error messages accumulated by ``_load_from_state_dict``.
    """
    # Convert old format to new format if needed from a PyTorch state_dict
    old_keys = []
    new_keys = []
    for key in state_dict.keys():
        new_key = None
        if "gamma" in key:
            new_key = key.replace("gamma", "weight")
        if "beta" in key:
            new_key = key.replace("beta", "bias")
        if new_key:
            old_keys.append(key)
            new_keys.append(new_key)
    for old_key, new_key in zip(old_keys, new_keys):
        state_dict[new_key] = state_dict.pop(old_key)
    # copy state_dict so _load_from_state_dict can modify it
    metadata = getattr(state_dict, "_metadata", None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata
    error_msgs = []
    # TODO: add start_prefix judgement
    for k, v in model_to_load.state_dict().items():
        if k in state_dict and v.is_global:
            # First gather the checkpoint tensor to a broadcast CPU tensor, then
            # re-split it to the exact sbp/ranks of the parameter it will fill.
            state_dict[k] = state_dict[k].to_global(
                sbp=flow.sbp.broadcast, placement=flow.env.all_device_placement("cpu")
            )
            state_dict[k] = state_dict[k].to_global(
                sbp=v.sbp,
                placement=flow.placement("cpu", ranks=list(v.placement.ranks)),
            )

    # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
    # so we need to apply the function recursively.
    def load(module: nn.Module, state_dict, prefix=""):
        local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
        args = (state_dict, prefix, local_metadata, True, [], [], error_msgs)
        # Parameters of module and children will start with prefix.
        # We can exit early if there are none in this state_dict
        if len([key for key in state_dict if key.startswith(prefix)]) > 0:
            module._load_from_state_dict(*args)
        for name, child in module._modules.items():
            if child is not None:
                load(child, state_dict, prefix + name + ".")

    load(model_to_load, state_dict, prefix=start_prefix)
    # Delete `state_dict` so it could be collected by GC earlier.
    # Note that `state_dict` is a copy of the argument, so it's safe to delete it.
    del state_dict
    return error_msgs


modeling_utils._load_state_dict_into_model = new_load
# -----------------mock tensor.new_ones() -------------
def flow_ones(self, *args, **kwargs):
    # Replacement for ``Tensor.new_ones``: a ones tensor inheriting this
    # tensor's device and dtype (callers must not pass device/dtype themselves,
    # or the duplicated keyword raises TypeError).
    return flow.ones(*args, **kwargs, device=self.device, dtype=self.dtype)


Tensor.new_ones = flow_ones
# -----------------mock tensor.new() ------------------
def flow_zeros(self, *args, **kwargs):
    # Replacement for ``Tensor.new``: a zero-filled tensor on this tensor's
    # device/dtype.
    # NOTE(review): torch's ``Tensor.new(*sizes)`` returns *uninitialized*
    # memory; zeros is a safe stand-in but not bit-identical — confirm callers
    # always overwrite the contents before relying on them.
    return flow.zeros(*args, **kwargs, device=self.device, dtype=self.dtype)


Tensor.new = flow_zeros
# ------------------mock nn.functional.softmax---------
temp_func = nn.functional.softmax


def flow_softmax(*args, **kwargs):
    """softmax wrapper emulating torch's ``dtype=`` argument by casting the input first."""
    if "dtype" not in kwargs:
        return temp_func(*args, **kwargs)
    target_dtype = kwargs.pop("dtype")
    cast_input = args[0].to(dtype=target_dtype)
    return temp_func(cast_input, *args[1:], **kwargs)


nn.functional.softmax = flow_softmax
# -----------------mock flow.tensor---------------
temp_tensor_func = flow.tensor
def flow_tensor(input_x, **kwargs):
    """flow.tensor wrapper: Python scalars pass through untouched, everything else
    is forwarded to the original ``flow.tensor``."""
    # NOTE(review): bool is a subclass of int, so True/False are also returned as-is.
    if not isinstance(input_x, (int, float)):
        return temp_tensor_func(input_x, **kwargs)
    return input_x
flow.tensor = flow_tensor
| 4,360 | 33.338583 | 90 | py |
libai | libai-main/projects/mock_transformers/dist_infer_gpt.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import init_env # noqa
import oneflow as flow
from omegaconf import DictConfig
from oneflow.utils.global_view import global_mode
from transformers import AutoModelForCausalLM, AutoTokenizer, pytorch_utils
from transformers.models.gpt2 import modeling_gpt2
from libai.layers import Conv1D
from libai.utils import distributed as dist
# ------replace Conv1D to libai------
class LiBaiConv1d(Conv1D):
    """Adapter that lets LiBai's ``Conv1D`` be constructed with HF's signature.

    transformers' ``pytorch_utils.Conv1D(nf, nx)`` takes (out_features,
    in_features) in that order, while LiBai's ``Conv1D`` uses keyword
    ``in_features``/``out_features``.  This shim maps one onto the other and is
    installed as the global replacement below.
    """

    def __init__(
        self,
        nf,
        nx,
        bias=True,
        parallel="data",
        init_method=flow.nn.init.xavier_normal_,
        skip_bias_add=False,
        dtype=flow.float32,
        layer_idx=0,
    ):
        # nf = output features, nx = input features (HF argument order),
        # forwarded to LiBai by keyword to avoid any ordering mistake.
        super().__init__(
            in_features=nx,
            out_features=nf,
            bias=bias,
            parallel=parallel,
            init_method=init_method,
            skip_bias_add=skip_bias_add,
            dtype=dtype,
            layer_idx=layer_idx,
        )


pytorch_utils.Conv1D = LiBaiConv1d
# ------replace attention to libai------
temp_class = modeling_gpt2.GPT2Attention


class LiBaiGPT2Attention(temp_class):
    """GPT-2 attention with LiBai tensor-parallel Conv1D projections.

    Fused input projections are column-parallel; the output projection is
    row-parallel.  Cross-attention splits q from the fused k/v projection,
    mirroring upstream GPT-2.
    """

    def __init__(self, config, is_cross_attention=False, layer_idx=None):
        super().__init__(config, is_cross_attention=is_cross_attention, layer_idx=layer_idx)
        dim = self.embed_dim
        if is_cross_attention:
            # k/v come from the encoder states (2x fused); q from the decoder.
            self.c_attn = Conv1D(
                in_features=dim,
                out_features=2 * dim,
                parallel="col",
                dtype=flow.float16,
            )
            self.q_attn = Conv1D(
                in_features=dim,
                out_features=dim,
                parallel="col",
                dtype=flow.float16,
            )
        else:
            # Self-attention: single fused q/k/v projection (3x).
            self.c_attn = Conv1D(
                in_features=dim,
                out_features=3 * dim,
                parallel="col",
                dtype=flow.float16,
            )
        self.c_proj = Conv1D(
            in_features=dim,
            out_features=dim,
            parallel="row",
            dtype=flow.float16,
        )


modeling_gpt2.GPT2Attention = LiBaiGPT2Attention
# ------replace mlp to libai------
temp_class = modeling_gpt2.GPT2MLP


class LiBaiGPT2MLP(temp_class):
    """GPT-2 MLP with LiBai tensor-parallel Conv1D layers.

    The expanding projection (c_fc) is column-parallel and the contracting one
    (c_proj) is row-parallel, matching the OPT/Bloom patches in this project.
    """

    def __init__(self, intermediate_size, config):
        super().__init__(intermediate_size, config)
        embed_dim = config.hidden_size
        self.c_fc = Conv1D(
            in_features=embed_dim,
            out_features=intermediate_size,
            parallel="col",
            dtype=flow.float16,
        )
        self.c_proj = Conv1D(
            in_features=intermediate_size,
            out_features=embed_dim,
            parallel="row",
            dtype=flow.float16,
        )


# Fixed: the subclass was defined but never installed, so transformers kept
# using the stock GPT2MLP and the MLP was never parallelized.  Every sibling
# patch unit (here and in the OPT/LLaMA/Bloom scripts) performs this assignment.
modeling_gpt2.GPT2MLP = LiBaiGPT2MLP
if __name__ == "__main__":
    # Distributed setup: pure tensor parallelism across 2 ranks.
    parallel_config = DictConfig(
        dict(
            data_parallel_size=1,
            tensor_parallel_size=2,
            pipeline_parallel_size=1,  # must stay 1; pipeline parallelism is not supported here
            pipeline_num_layers=None,
            device_type="cpu",  # weights are loaded on CPU first, moved to GPU below
        )
    )
    dist.setup_dist_util(parallel_config)
    # Build the (monkey-patched) GPT-2 model and load pretrained fp16 weights.
    model = AutoModelForCausalLM.from_pretrained("gpt2", torch_dtype=flow.float16)
    # Move the model to CUDA and shard parameters per the parallel config above.
    dist.set_device_type("cuda")
    model._apply(dist.convert_to_distributed_default_setting)
    # Slow tokenizer avoids the Rust fast-tokenizer dependency.
    tokenizer = AutoTokenizer.from_pretrained("gpt2", use_fast=False)
    # Tokenize as numpy, then convert to a broadcast global tensor on the first stage.
    prompt = "Hello, I'm a language model,"
    input_ids = tokenizer(prompt, return_tensors="np").input_ids
    input_ids = flow.from_numpy(input_ids)
    input_ids = input_ids.to_global(
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=dist.get_layer_placement(0),
    )
    # Generate inside global mode so tensors created by `generate` default to
    # this placement/sbp.
    placement_sbp_dict = dict(
        placement=flow.env.all_device_placement("cuda"),
        sbp=flow.sbp.broadcast,
    )
    with global_mode(True, **placement_sbp_dict):
        generated_ids = model.generate(input_ids, max_length=30)
    out_put_ids = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    # Only rank 0 prints, to avoid duplicated output across ranks.
    if dist.is_main_process():
        print(out_put_ids)
| 4,812 | 29.656051 | 92 | py |
libai | libai-main/projects/mock_transformers/dist_infer_bloom.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import init_env # noqa
import oneflow as flow
from omegaconf import DictConfig
from oneflow.utils.global_view import global_mode
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.models.bloom import modeling_bloom
from libai.layers import Embedding, Linear
from libai.utils import distributed as dist
# ------replace attention to libai------
temp_class = modeling_bloom.BloomAttention


class LiBaiBloomAttention(temp_class):
    """Bloom attention with LiBai tensor-parallel layers.

    The fused q/k/v projection is column-parallel; the output projection is
    row-parallel.  Both keep bias and run in fp16, as upstream Bloom does.
    """

    def __init__(self, config):
        super().__init__(config)
        dim = config.hidden_size
        self.query_key_value = Linear(
            dim, 3 * dim, bias=True, parallel="col", dtype=flow.float16
        )
        self.dense = Linear(dim, dim, bias=True, parallel="row", dtype=flow.float16)


modeling_bloom.BloomAttention = LiBaiBloomAttention
# ----------replace MLP to libai -----
temp_class = modeling_bloom.BloomMLP


class LiBaiBloomMLP(temp_class):
    """Bloom MLP with LiBai tensor-parallel layers (expand=col, contract=row)."""

    def __init__(self, config):
        super().__init__(config)
        dim = config.hidden_size
        self.dense_h_to_4h = Linear(
            dim, 4 * dim, bias=True, parallel="col", dtype=flow.float16
        )
        self.dense_4h_to_h = Linear(
            4 * dim, dim, bias=True, parallel="row", dtype=flow.float16
        )


modeling_bloom.BloomMLP = LiBaiBloomMLP
# ----------replace Embedding to libai -----
temp_class = modeling_bloom.BloomModel


class LiBaiBloomModel(temp_class):
    # Replaces only the word-embedding table with LiBai's Embedding in fp16;
    # everything else is inherited from the stock BloomModel.
    def __init__(self, config):
        super().__init__(config)
        self.word_embeddings = Embedding(config.vocab_size, self.embed_dim, dtype=flow.float16)


modeling_bloom.BloomModel = LiBaiBloomModel
if __name__ == "__main__":
    # Distributed setup: pure tensor parallelism across 2 ranks.
    parallel_config = DictConfig(
        dict(
            data_parallel_size=1,
            tensor_parallel_size=2,
            pipeline_parallel_size=1,  # must stay 1; pipeline parallelism is not supported here
            pipeline_num_layers=None,
            device_type="cpu",  # weights are loaded on CPU first, moved to GPU below
        )
    )
    dist.setup_dist_util(parallel_config)
    # Build the (monkey-patched) Bloom model and load pretrained fp16 weights.
    model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m", torch_dtype=flow.float16)
    # Move the model to CUDA and shard parameters per the parallel config above.
    dist.set_device_type("cuda")
    model._apply(dist.convert_to_distributed_default_setting)
    # Slow tokenizer avoids the Rust fast-tokenizer dependency.
    tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m", use_fast=False)
    # Tokenize as numpy, then convert to a broadcast global tensor on the first stage.
    prompt = "Hello, I'm am conscious and"
    input_ids = tokenizer(prompt, return_tensors="np").input_ids
    input_ids = flow.from_numpy(input_ids)
    input_ids = input_ids.to_global(
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=dist.get_layer_placement(0),
    )
    # Generate inside global mode so tensors created by `generate` default to
    # this placement/sbp.
    placement_sbp_dict = dict(
        placement=flow.env.all_device_placement("cuda"),
        sbp=flow.sbp.broadcast,
    )
    with global_mode(True, **placement_sbp_dict):
        generated_ids = model.generate(input_ids, max_length=30)
    out_put_ids = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    # Only rank 0 prints, to avoid duplicated output across ranks.
    if dist.is_main_process():
        print(out_put_ids)
| 3,820 | 30.578512 | 100 | py |
libai | libai-main/projects/mock_transformers/mock_tokenization.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import oneflow as flow
from libai.utils import distributed as dist
flow.mock_torch.enable()
from transformers import BertTokenizer, GPT2Tokenizer, MT5Tokenizer, T5Tokenizer # noqa
from transformers.tokenization_utils_base import * # noqa
from transformers.utils import generic # noqa
from transformers.utils.generic import TensorType # noqa
# ---------------- mock TensorType ------------------
class TensorType(ExplicitEnum):  # noqa
    """Re-declaration of transformers' ``TensorType`` with an extra ONEFLOW ("of") member.

    Installed into ``transformers.utils.generic`` below so that
    ``return_tensors="of"`` becomes a valid tokenizer option.
    """

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    ONEFLOW = "of"
    NUMPY = "np"
    JAX = "jax"


# Install the patched enum so the rest of transformers sees the new member.
generic.TensorType = TensorType
# ---------------- mock convert_to_tensors ------------------
def flow_convert_to_tensors(self, tensor_type=None, prepend_batch_axis=False):
    """OneFlow-aware replacement for ``BatchEncoding.convert_to_tensors``.

    Mirrors the upstream transformers implementation (framework dispatch plus
    per-key conversion) and adds a ONEFLOW branch; afterwards, unless disabled
    via the ``IS_GLOBAL`` environment variable, every tensor in the encoding is
    converted to a broadcast global tensor on the first pipeline stage.
    """
    if tensor_type is None:
        return self
    # Convert to TensorType
    if not isinstance(tensor_type, TensorType):
        tensor_type = TensorType(tensor_type)
    as_tensor = None
    is_tensor = None
    # Get a function reference for the correct framework
    if tensor_type == TensorType.TENSORFLOW:
        if not is_tf_available():  # noqa
            raise ImportError(
                "Unable to convert output to TensorFlow tensors format, TensorFlow is not "
                "installed."
            )
        import tensorflow as tf

        as_tensor = tf.constant
        is_tensor = tf.is_tensor
    elif tensor_type == TensorType.PYTORCH:
        if not is_torch_available():  # noqa
            raise ImportError(
                "Unable to convert output to PyTorch tensors format, PyTorch is not installed."
            )
        import torch

        as_tensor = torch.tensor
        is_tensor = torch.is_tensor
    elif tensor_type == TensorType.ONEFLOW:
        try:
            import oneflow  # noqa
        except ImportError as e:
            msg = "Unable to convert output to OneFlow tensors format, OneFlow is not installed."
            raise ImportError(msg) from e
        as_tensor = flow.tensor
        is_tensor = flow.is_tensor
    elif tensor_type == TensorType.JAX:
        if not is_flax_available():  # noqa
            raise ImportError(
                "Unable to convert output to JAX tensors format, JAX is not installed."
            )
        import jax.numpy as jnp  # noqa: F811

        as_tensor = jnp.array
        is_tensor = is_jax_tensor  # noqa
    else:
        as_tensor = np.asarray  # noqa
        is_tensor = is_numpy_array  # noqa
    # Do the tensor conversion in batch
    for key, value in self.items():
        try:
            if prepend_batch_axis:
                value = [value]
            if not is_tensor(value):
                tensor = as_tensor(value)
                # Removing this for now in favor of controlling the shape with `prepend_batch_axis`
                # # at-least2d
                # if tensor.ndim > 2:
                #     tensor = tensor.squeeze(0)
                # elif tensor.ndim < 2:
                #     tensor = tensor[None, :]
                self[key] = tensor
        except Exception as e:
            if key == "overflowing_tokens":
                raise ValueError(
                    "Unable to create tensor returning overflowing tokens of different lengths. "
                    "Please see if a fast version of this tokenizer is available to have this "
                    "feature available."
                ) from e
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or "
                "padding with 'padding=True' 'truncation=True' to have batched tensors with "
                f"the same length. Perhaps your features (`{key}` in this case) have "
                "excessive nesting (inputs type `list` where type `int` is expected)."
            ) from e
    # NOTE(review): fragile check — os.getenv returns a *string* whenever the
    # variable is set, so this branch runs only when IS_GLOBAL is unset (the
    # default True is returned by identity).  Setting IS_GLOBAL to any value,
    # including "True", disables global conversion — confirm this is intended.
    if os.getenv("IS_GLOBAL", True) is True:
        size = self["input_ids"].size()
        sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])
        for k, v in self.items():
            # Global mode requires OneFlow tensors of uniform shape.
            if is_tensor != flow.is_tensor:
                raise ValueError(
                    "Unable to create tensor, you should probably set `return_tensors='of'` "
                )
            if v.size() != size:
                raise ValueError(
                    "Unable to create tensor, you should probably padding with `padding=True` "
                )
            self[k] = v.to_global(sbp=sbp, placement=dist.get_layer_placement(0))
    return self


BatchEncoding.convert_to_tensors = flow_convert_to_tensors  # noqa
| 5,136 | 35.432624 | 99 | py |
libai | libai-main/projects/MOCOV3/utils/load_checkpoint.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from utils.weight_convert import load_torch_checkpoint_linear_prob
from libai.utils.checkpoint import (
Checkpointer,
get_missing_parameters_message,
get_unexpected_parameters_message,
)
logger = logging.getLogger("libai." + __name__)
def load_checkpoint(model, path, weight_style, num_heads, embed_dim):
    """Load pretrained MoCo-v3 weights for linear probing and freeze the backbone.

    Every parameter except the final ``head`` Linear is frozen, then the
    checkpoint (either a converted PyTorch file or a LiBai/OneFlow one) is
    loaded non-strictly, logging shape mismatches, missing and unexpected keys.

    Args:
        model: the LiBai ViT model to load into (mutated in place).
        path: checkpoint file path.
        weight_style: "pytorch" or "oneflow", selecting the loader.
        num_heads: attention head count, needed to re-layout fused qkv weights.
        embed_dim: hidden size, needed to re-layout fused qkv weights.
    """
    linear_keyword = "head"
    # Linear probing: only the classification head stays trainable.
    for name, param in model.named_parameters():
        if name not in ["%s.weight" % linear_keyword, "%s.bias" % linear_keyword]:
            param.requires_grad = False
    assert weight_style in ["pytorch", "oneflow"]
    if weight_style == "pytorch":
        params = load_torch_checkpoint_linear_prob(num_heads, embed_dim, path=path)
    else:
        # NOTE(review): assumes Checkpointer.load returns a flat state dict —
        # confirm against libai.utils.checkpoint.
        params = Checkpointer(model).load(path)
    model_state_dict = model.state_dict()
    # check the incorrect shape and unexpected keys
    incorrect_shapes = []
    unexpected_keys = []
    for k in list(params.keys()):
        if k in model_state_dict:
            shape_model = tuple(model_state_dict[k].shape)
            shape_ckp = tuple(params[k].shape)
            if shape_model != shape_ckp:
                incorrect_shapes.append((k, shape_ckp, shape_model))
                params.pop(k)
            # Fixed: pop every matched key (not only shape mismatches), so the
            # leftover below really is the set of *missing* parameters.
            # Previously matched keys stayed in model_state_dict and the whole
            # model was logged as missing.
            model_state_dict.pop(k)
        else:
            unexpected_keys.append(k)
    missing_keys = list(model_state_dict.keys())
    for k, shape_checkpoint, shape_model in incorrect_shapes:
        logger.warning(
            "Skip loading parameter '{}' to the model due to incompatible "
            "shapes: {} in the checkpoint but {} in the "
            "model! You might want to double check if this is expected.".format(
                k, shape_checkpoint, shape_model
            )
        )
    if missing_keys:
        logger.info(get_missing_parameters_message(missing_keys))
    if unexpected_keys:
        logger.info(get_unexpected_parameters_message(unexpected_keys))
    model.load_state_dict(params, strict=False)
| 2,591 | 34.506849 | 83 | py |
libai | libai-main/projects/MOCOV3/utils/weight_convert.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import oneflow as flow
import torch
logger = logging.getLogger(__name__)
def convert_qkv_weight(value, num_heads, hidden_size):
    """Reorder a fused qkv.weight into LiBai's head-major layout.

    The source layout groups rows as (q|k|v) x heads; LiBai's transformer layer
    expects heads x (q|k|v).

    Args:
        value: qkv.weight tensor of shape (3 * hidden_size, hidden_size).
        num_heads: number of attention heads.
        hidden_size: hidden dimension (must be divisible by num_heads).

    Returns:
        Tensor of shape (3 * hidden_size, hidden_size) in head-major order.
    """
    head_size = int(hidden_size / num_heads)
    split_view = value.view(3, num_heads, head_size, hidden_size)
    head_major = split_view.permute(1, 0, 2, 3).contiguous()
    return head_major.view(3 * hidden_size, hidden_size)
def convert_qkv_bias(value, num_heads, hidden_size):
    """Reorder a fused qkv.bias into LiBai's head-major layout.

    Args:
        value: qkv.bias tensor of shape (3 * hidden_size,).
        num_heads: number of attention heads.
        hidden_size: hidden dimension (must be divisible by num_heads).

    Returns:
        Tensor of shape (3 * hidden_size,) in head-major order.
    """
    head_size = int(hidden_size / num_heads)
    split_view = value.view(3, num_heads, head_size)
    head_major = split_view.permute(1, 0, 2).contiguous()
    return head_major.view(3 * hidden_size)
def filter_keys(key, value, num_heads, hidden_size):
    """Map a MoCo-v3 ViT state_dict entry onto LiBai's naming scheme.

    The first matching substring (checked in order, mirroring the original
    elif chain) is rewritten; fused qkv entries additionally get their tensor
    layout converted to LiBai's head-major order.
    """
    renames = (
        ("norm1", "input_layernorm"),
        ("attn.qkv", "self_attention.query_key_value"),
        ("attn.proj", "self_attention.dense"),
        ("norm2", "post_attention_layernorm"),
        ("mlp.fc1", "mlp.dense_h_to_4h"),
        ("mlp.fc2", "mlp.dense_4h_to_h"),
        ("fc_norm", "norm"),
    )
    for old, new in renames:
        if old in key:
            key = key.replace(old, new)
            if old == "attn.qkv":
                if "weight" in key:
                    value = convert_qkv_weight(value, num_heads, hidden_size)
                if "bias" in key:
                    value = convert_qkv_bias(value, num_heads, hidden_size)
            break
    return key, value
def load_torch_checkpoint_linear_prob(
    num_heads, hidden_size, path="projects/MOCOV3/output/vit-b-300ep.pth.tar", linear_keyword="head"
):
    """Convert a MoCo-v3 PyTorch checkpoint into OneFlow global tensors for linear probing.

    Keeps only ``module.base_encoder.*`` entries (skipping the classification
    head named by ``linear_keyword`` and BatchNorm ``num_batches_tracked``
    counters), renames/relays them via ``filter_keys``, and materializes each
    value as a broadcast global tensor on CUDA rank 0.

    Args:
        num_heads: attention head count, for fused qkv layout conversion.
        hidden_size: hidden dimension, for fused qkv layout conversion.
        path: checkpoint file saved by the MoCo-v3 reference training script.
        linear_keyword: attribute name of the classification head to skip.

    Returns:
        dict mapping LiBai parameter names (prefix stripped) to global tensors.
    """
    torch_dict = torch.load(path, map_location="cpu")["state_dict"]
    parameters = torch_dict
    new_parameters = dict()
    for key, value in parameters.items():
        if "num_batches_tracked" not in key:
            if key.startswith("module.base_encoder") and not key.startswith(
                "module.base_encoder.%s" % linear_keyword
            ):
                # to global tensor
                key, val = filter_keys(key, value, num_heads, hidden_size)
                val = val.detach().cpu().numpy()
                val = flow.tensor(val).to_global(
                    sbp=flow.sbp.broadcast, placement=flow.placement("cuda", {0: range(1)})
                )
                new_parameters[key[len("module.base_encoder.") :]] = val
    return new_parameters
| 3,558 | 31.953704 | 100 | py |
libai | libai-main/projects/MOCOV3/modeling/vit.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------
# ViT Model
# References:
# moco-v3: https://github.com/facebookresearch/moco-v3/blob/main/vits.py
# --------------------------------------------------------
import math
from functools import reduce
from operator import mul
import oneflow as flow
import oneflow.nn as nn
from flowvision.layers.weight_init import trunc_normal_
from utils.load_checkpoint import load_checkpoint
from libai.layers import Linear, PatchEmbedding
from libai.models import vision_transformer
class VisionTransformer(vision_transformer.VisionTransformer):
"""Vision Transformer for MOCO
LiBai impl of: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
- https://arxiv.org/abs/2010.11929
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.0,
global_pool=False,
num_classes=1000,
loss_func=None,
linear_prob=None,
weight_style="pytorch",
stop_grad_conv1=False,
):
super(VisionTransformer, self).__init__(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
depth=depth,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
drop_rate=drop_rate,
attn_drop_rate=attn_drop_rate,
drop_path_rate=drop_path_rate,
num_classes=num_classes,
loss_func=loss_func,
)
self.global_pool = global_pool
# weight init
if linear_prob:
load_checkpoint(self, linear_prob, weight_style, num_heads, embed_dim)
self.head.weight.data.normal_(mean=0.0, std=0.01)
self.head.bias.data.zeros_()
else:
trunc_normal_(self.pos_embed, std=0.02)
trunc_normal_(self.cls_token, std=0.02)
self.apply(self._init_weights)
self.stop_grad_conv1 = stop_grad_conv1
self.embed_dim = embed_dim
self.initialization()
def initialization(self):
# Use fixed 2D sin-cos position embedding
self.build_2d_sincos_position_embedding()
# weight initialization
for name, m in self.named_modules():
if isinstance(m, Linear):
if "query_key_value" in name:
val = math.sqrt(6.0 / float(m.weight.shape[0] // 3 + m.weight.shape[1]))
nn.init.uniform_(m.weight, -val, val)
else:
nn.init.xavier_uniform_(m.weight)
nn.init.zeros_(m.bias)
nn.init.normal_(self.cls_token, std=1e-6)
if isinstance(self.patch_embed, PatchEmbedding):
# xavier_uniform initialization
val = math.sqrt(
6.0 / float(3 * reduce(mul, self.patch_embed.patch_size, 1) + self.embed_dim)
)
nn.init.uniform_(self.patch_embed.proj.weight, -val, val)
nn.init.zeros_(self.patch_embed.proj.bias)
if self.stop_grad_conv1:
self.patch_embed.proj.weight.requires_grad = False
self.patch_embed.proj.bias.requires_grad = False
def build_2d_sincos_position_embedding(self, temperature=10000.0):
sbp = self.pos_embed.sbp
placement = self.pos_embed.placement
h, w = self.patch_embed.grid_size
grid_w = flow.arange(w, dtype=flow.float32).to_global(sbp=sbp, placement=placement)
grid_h = flow.arange(h, dtype=flow.float32).to_global(sbp=sbp, placement=placement)
grid_w, grid_h = flow.meshgrid(grid_w, grid_h)
assert (
self.embed_dim % 4 == 0
), "Embed dimension must be divisible by 4 for 2D sin-cos position embedding"
pos_dim = self.embed_dim // 4
omega = (flow.arange(pos_dim, dtype=flow.float32) / pos_dim).to_global(
sbp=sbp, placement=placement
)
omega = 1.0 / flow.tensor(temperature).to_global(sbp=sbp, placement=placement) ** omega
out_w = flow.einsum("m,d->md", grid_w.flatten(), omega)
out_h = flow.einsum("m,d->md", grid_h.flatten(), omega)
pos_emb = flow.cat(
[flow.sin(out_w), flow.cos(out_w), flow.sin(out_h), flow.cos(out_h)], dim=1
)[None, :, :]
pe_token = flow.zeros([1, 1, self.embed_dim], dtype=flow.float32).to_global(
sbp=sbp, placement=placement
)
self.pos_embed = nn.Parameter(flow.cat([pe_token, pos_emb], dim=1))
self.pos_embed.requires_grad = False
def forward_head(self, x):
if self.global_pool:
x = x[:, 1:, :].mean(dim=1) # global pool without cls token
outcome = self.norm(x)
outcome = self.head(outcome)
else:
x = self.norm(x)
outcome = x[:, 0]
outcome = self.head(outcome)
return outcome
| 5,715 | 35.177215 | 95 | py |
libai | libai-main/projects/text_classification/modeling/load_megatron_weight.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import oneflow as flow
import torch
from libai.utils.checkpoint import get_missing_parameters_message, get_unexpected_parameters_message
logger = logging.getLogger("libai." + __name__)
def convert_tensor(tensor: torch.Tensor):
    """Convert a torch tensor into a float32 OneFlow tensor via numpy."""
    as_numpy = tensor.float().cpu().numpy()
    return flow.Tensor(as_numpy)
def change_megatron_key(state_dict):
    """Rename Megatron-LM BERT checkpoint entries to LiBai's naming scheme.

    Walks the nested Megatron checkpoint dict (embedding / encoder / pooler /
    lm_head / binary_head) and returns a flat ``{libai_name: flow.Tensor}``
    mapping; every tensor goes through ``convert_tensor``.
    """
    of_state_dict = {}
    # Language model.
    language_model = state_dict["language_model"]
    # Embedding.
    embedding = language_model["embedding"]
    of_state_dict["embeddings.vocab_embeddings.weight"] = convert_tensor(
        embedding["word_embeddings"]["weight"]
    )
    of_state_dict["embeddings.position_embeddings.weight"] = convert_tensor(
        embedding["position_embeddings"]["weight"]
    )
    of_state_dict["embeddings.tokentype_embeddings.weight"] = convert_tensor(
        embedding["tokentype_embeddings"]["weight"]
    )
    # Encoder: flatten layer names and hoist the final layernorm out of the
    # "encoders." prefix (LiBai keeps it at the top level).
    encoder = language_model["encoder"]
    for key, value in encoder.items():
        # Change layers.0.input_layernorm.weight -> encoders.0.input_layernorm.weight
        key = "encoders." + key.replace("layers.", "")
        if key.startswith("encoders.final_layernorm"):
            key = key.replace("encoders.", "")
        of_state_dict[key] = convert_tensor(value)
    # Pooler.
    pooler = language_model["pooler"]
    of_state_dict["pooler.dense.weight"] = convert_tensor(pooler["dense.weight"])
    of_state_dict["pooler.dense.bias"] = convert_tensor(pooler["dense.bias"])
    # LM head (masked-LM prediction head).
    lm_head = state_dict["lm_head"]
    of_state_dict["cls.predictions.dense.weight"] = convert_tensor(lm_head["dense.weight"])
    of_state_dict["cls.predictions.dense.bias"] = convert_tensor(lm_head["dense.bias"])
    of_state_dict["cls.predictions.layernorm.weight"] = convert_tensor(lm_head["layernorm.weight"])
    of_state_dict["cls.predictions.layernorm.bias"] = convert_tensor(lm_head["layernorm.bias"])
    of_state_dict["lm_logits.bias"] = convert_tensor(lm_head["bias"])
    # Binary head (next-sentence-prediction classifier).
    binary_head = state_dict["binary_head"]
    of_state_dict["cls.seq_relationship.weight"] = convert_tensor(binary_head["weight"])
    of_state_dict["cls.seq_relationship.bias"] = convert_tensor((binary_head["bias"]))
    return of_state_dict
def load_tensor(tensor_lhs, tensor_rhs):
    """Copy ``tensor_rhs`` into ``tensor_lhs`` in place, matching its global layout."""
    aligned = flow.to_global(
        tensor_rhs, placement=tensor_lhs.placement, sbp=tensor_lhs.sbp
    )
    tensor_lhs.copy_(aligned)
def load_model(model: flow.nn.Module, state_dict):
    """Load ``state_dict`` into ``model``, logging incompatible, missing and
    unexpected keys.

    Non-embedding, non-layernorm weight matrices are compared and loaded
    transposed, because Megatron stores them in the layout opposite to the
    OneFlow model. Note: entries with incompatible shapes are popped from the
    caller's ``state_dict``.
    """
    model_state_dict = model.state_dict()
    # Decide shape: drop checkpoint entries whose shapes cannot match.
    incorrect_shapes = []
    for k in list(state_dict.keys()):
        if k in model_state_dict:
            if (
                (k.find("weight") != -1)
                and (k.find("embeddings") == -1)
                and (k.find("layernorm") == -1)
            ):
                # Transpose from (M, N) -> (N, M), because the weight
                # shape in megatron and oneflow missing one transpose.
                shape_model = tuple(model_state_dict[k].shape[::-1])
            else:
                shape_model = tuple(model_state_dict[k].shape)
            shape_ckpt = tuple(state_dict[k].shape)
            if shape_model != shape_ckpt:
                incorrect_shapes.append((k, shape_ckpt, shape_model))
                state_dict.pop(k)
    # Fetch the target parameters once. The previous version called
    # model.state_dict() inside the copy loop, rebuilding the whole dict
    # for every single parameter. (model_state_dict cannot be reused here
    # because keys are popped from it below to track missing parameters.)
    target_state_dict = model.state_dict()
    unexpected_keys = []
    for key, value in state_dict.items():
        if key not in model_state_dict:
            unexpected_keys.append(key)
            continue
        # Whatever is left in model_state_dict afterwards was never loaded.
        model_state_dict.pop(key)
        if (
            (key.find("weight") != -1)
            and (key.find("embeddings") == -1)
            and (key.find("layernorm") == -1)
        ):
            value = flow.transpose(value, 0, 1)
        load_tensor(target_state_dict[key], value)
    missing_keys = list(model_state_dict.keys())
    for k, shape_checkpoint, shape_model in incorrect_shapes:
        logger.warning(
            "Skip loading parameter '{}' to the model due to incompatible "
            "shapes: {} in the checkpoint but {} in the "
            "model! You might want to double check if this is expected.".format(
                k, shape_checkpoint, shape_model
            )
        )
    if missing_keys:
        logger.info(get_missing_parameters_message(missing_keys))
    if unexpected_keys:
        logger.info(get_unexpected_parameters_message(unexpected_keys))
def load_megatron_bert(model: flow.nn.Module, model_weight_path: str):
    """Load a Megatron-LM BERT checkpoint file into a LiBai OneFlow model.

    Args:
        model: target OneFlow model whose parameters are overwritten in place.
        model_weight_path: path to the Megatron ``.pt`` checkpoint; the file
            is expected to hold its weights under the "model" key.
    """
    # NOTE: the redundant function-local ``import torch`` was removed;
    # torch is already imported at module level.
    megatron_state_dict = torch.load(model_weight_path, map_location="cpu")["model"]
    of_state_dict = change_megatron_key(megatron_state_dict)
    load_model(model, of_state_dict)
| 5,294 | 35.770833 | 100 | py |
libai | libai-main/projects/MAE/train_net.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import sys
import numpy as np
import oneflow as flow
from utils.weight_convert import load_torch_checkpoint
from libai.config import LazyConfig, default_argument_parser, try_get_key
from libai.engine import DefaultTrainer, default_setup
from libai.utils.checkpoint import Checkpointer
sys.path.append(".")
logger = logging.getLogger("libai.mae." + __name__)
class Trainer(DefaultTrainer):
    """DefaultTrainer variant that can warm-start the model from a
    pretrained checkpoint (OneFlow- or PyTorch-format) for fine-tuning."""

    @classmethod
    def build_model(cls, cfg):
        model = super().build_model(cfg)
        finetune_cfg = try_get_key(cfg, "finetune")
        if finetune_cfg is not None and cfg.finetune.enable is True:
            logger.info("Loading pretrained weight for finetuning")
            weight_style = cfg.finetune.weight_style
            assert weight_style in ["oneflow", "pytorch"]
            if weight_style == "oneflow":
                # Native checkpoint: let the Checkpointer handle it.
                Checkpointer(model).load(cfg.finetune.path)
            elif weight_style == "pytorch":
                # Torch checkpoint: convert weights on the fly.
                model = load_torch_checkpoint(model, cfg, path=cfg.finetune.path, strict=False)
            else:
                raise NotImplementedError(
                    "Only support loading oneflow & pytorch pretrained weight now."
                )
        return model
def main(args):
    """Entry point: load/override the lazy config, then either run a one-off
    evaluation (``--eval-only``) or launch training."""
    cfg = LazyConfig.load(args.config_file)
    cfg = LazyConfig.apply_overrides(cfg, args.opts)
    default_setup(cfg, args)
    # Shrink the schedule to a 10-iteration smoke test.
    if args.fast_dev_run:
        cfg.train.train_epoch = 0
        cfg.train.checkpointer.period = 5
        cfg.train.train_iter = 10
        cfg.train.evaluation.eval_period = 10
        cfg.train.log_period = 1
    if args.eval_only:
        cfg.eval_only = True
        tokenizer = None
        if try_get_key(cfg, "tokenization.setup", default=False):
            tokenizer = Trainer.build_tokenizer(cfg)
        model = Trainer.build_model(cfg)
        # Restore weights (or resume from the last checkpoint) before testing.
        Checkpointer(model, save_dir=cfg.train.output_dir).resume_or_load(
            cfg.train.load_weight, resume=args.resume
        )
        if try_get_key(cfg, "train.graph.enabled", default=False):
            model = Trainer.build_graph(cfg, model, is_train=False)
        test_loader = Trainer.build_test_loader(cfg, tokenizer)
        if len(test_loader) == 0:
            logger.info("No dataset in dataloader.test, please set dataset for dataloader.test")
        _ = Trainer.test(cfg, test_loader, model)
        return
    # manual different seed for each rank
    seed_for_rank = cfg.train.seed + flow.env.get_rank()
    flow.manual_seed(seed_for_rank)
    flow.cuda.manual_seed(seed_for_rank)
    np.random.seed(seed_for_rank)
    random.seed(seed_for_rank)
    trainer = Trainer(cfg)
    return trainer.train()
if __name__ == "__main__":
    # Parse CLI arguments and hand off to the entry point.
    main(default_argument_parser().parse_args())
| 3,384 | 35.010638 | 99 | py |
libai | libai-main/projects/MAE/configs/mae_finetune.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf import OmegaConf
from flowvision.data import Mixup
# from flowvision.loss.cross_entropy import SoftTargetCrossEntropy
from libai.config import LazyCall, get_config
from modeling.cross_entropy import SoftTargetCrossEntropy
from configs.models.vit_base_patch16 import model
from utils.scheduler import (
warmup_layerscale_cosine_lr_scheduler,
warmup_cosine_lr_scheduler,
)
from utils.lr_decay import param_groups_lrd
# Get train, optim and graph configs
train = get_config("common/train.py").train
optim = get_config("common/optim.py").optim
graph = get_config("common/models/graph.py").graph
dataloader = get_config("common/data/imagenet.py").dataloader
# number devices
n_gpus = 8
# Graph training
graph.enabled = True
# Refine model cfg for vit training on imagenet
model.num_classes = 1000
model.loss_func = LazyCall(SoftTargetCrossEntropy)()
# Path to the weight for fine-tune
finetune = OmegaConf.create()
finetune.enable = True  # only load weight if enable is True
finetune.weight_style = (
    "oneflow"  # Set "oneflow" for loading oneflow weights, set "pytorch" for loading torch weights
)
finetune.path = "/path/to/pretrained_mae_weight"
# Refine data path to imagenet
dataloader.train.dataset[0].root = "/path/to/imagenet"
dataloader.test[0].dataset.root = "/path/to/imagenet"
# Add Mixup Func
dataloader.train.mixup_func = LazyCall(Mixup)(
    mixup_alpha=0.8,
    cutmix_alpha=1.0,
    prob=1.0,
    switch_prob=0.5,
    mode="batch",
    label_smoothing=0.1,
    num_classes=model.num_classes,
)
# Refine training settings for MAE finetune
train.train_micro_batch_size = 32
train.num_accumulation_steps = 4
train.test_micro_batch_size = 32
effective_batch_size = train.train_micro_batch_size * train.num_accumulation_steps * n_gpus
train.train_epoch = 100
train.warmup_ratio = 5 / 100
train.log_period = 20
train.evaluation.eval_after_n_epoch = 1
train.checkpointer.save_model_after_n_epoch = 1
# Set layer decay for MAE fine-tune
train.layer_decay = 0.65
# AMP
train.amp.enabled = True
# Base learning rate for MAE fine-tuning is 5e-4 (1.5e-4 is the pretraining value).
# The actual learning rate is computed by the linear scaling rule:
#     lr = base_lr * effective_batch_size / 256
# Here we use 8 GPUs with micro batch 32 and 4 accumulation steps,
# so effective_batch_size = 32 * 4 * 8 = 1024.
base_lr = 5e-4
actual_lr = base_lr * effective_batch_size / 256
# Refine optim settings
optim.params._target_ = param_groups_lrd
optim.params.weight_decay = 0.05
optim.params.layer_decay = 0.65
optim.lr = actual_lr
del optim.params.clip_grad_max_norm
del optim.params.clip_grad_norm_type
del optim.params.weight_decay_norm
del optim.params.weight_decay_bias
del optim.weight_decay
# Refine scheduler: graph mode uses the plain cosine schedule; eager mode
# uses the layer-scale-aware variant.
if graph.enabled:
    train.scheduler = LazyCall(warmup_cosine_lr_scheduler)(
        warmup_factor=0.0,
        min_lr=1e-6,
    )
else:
    train.scheduler = LazyCall(warmup_layerscale_cosine_lr_scheduler)(
        warmup_factor=0.0,
        min_lr=1e-6,
    )
# Distributed Settings
train.dist.pipeline_num_layers = model.depth
train.dist.data_parallel_size = n_gpus
train.dist.tensor_parallel_size = 1
train.dist.pipeline_parallel_size = 1
eval_only = False
| 3,855 | 28.212121 | 99 | py |
libai | libai-main/projects/MAE/utils/lr_decay.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------
# References:
# mae: https://github.com/facebookresearch/mae/blob/main/util/lr_decay.py
# --------------------------------------------------------
import logging
logger = logging.getLogger("libai.mae." + __name__)
def param_groups_lrd(model, weight_decay=0.05, layer_decay=0.75):
    """
    Parameter groups for layer-wise lr decay
    Modified from BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58
    """
    skip_decay = model.no_weight_decay()
    num_layers = len(model.blocks) + 1
    # Scale for layer i is layer_decay ** (num_layers - i), i in [0, num_layers].
    layer_scales = [layer_decay ** (num_layers - i) for i in range(num_layers + 1)]

    group_names = {}  # group key -> metadata with parameter *names* (debugging)
    groups = {}  # group key -> metadata with the parameters themselves

    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue

        # 1-D tensors (biases, norms) and explicitly listed tensors skip decay.
        if param.ndim == 1 or name in skip_decay:
            decay_tag, decay_value = "no_decay", 0.0
        else:
            decay_tag, decay_value = "decay", weight_decay

        layer_idx = get_layer_idx_for_vit(name, num_layers)
        group_key = "layer_%d_%s" % (layer_idx, decay_tag)

        if group_key not in group_names:
            scale = layer_scales[layer_idx]
            group_names[group_key] = {
                "lr_scale": scale,
                "weight_decay": decay_value,
                "params": [],
            }
            groups[group_key] = {
                "lr_scale": scale,
                "weight_decay": decay_value,
                "params": [],
            }

        group_names[group_key]["params"].append(name)
        groups[group_key]["params"].append(param)

    return list(groups.values())
def get_layer_idx_for_vit(name, num_layers):
    """
    Assign a parameter with its layer id
    Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33
    """
    # Embedding-level parameters belong to layer 0.
    if name in ("cls_token", "pos_embed") or name.startswith("patch_embed"):
        return 0
    # Transformer blocks are shifted by one: blocks.K.* -> layer K + 1.
    if name.startswith("blocks"):
        return int(name.split(".")[1]) + 1
    # Everything else (norm, head, ...) sits at the top layer.
    return num_layers
# Refer to: add_weight_decay in
# https://github.com/rwightman/pytorch-image-models/blob/v0.3.3/timm/optim/optim_factory.py
def param_groups_weight_decay(model, weight_decay=1e-5, skip_list=()):
    """Split trainable parameters into a no-decay group and a decay group.

    1-D parameters, ``.bias`` parameters and names in ``skip_list`` are
    excluded from weight decay; frozen parameters are skipped entirely.
    """
    with_decay = []
    without_decay = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights
        skip = len(param.shape) == 1 or name.endswith(".bias") or name in skip_list
        (without_decay if skip else with_decay).append(param)
    return [
        {"params": without_decay, "weight_decay": 0.0},
        {"params": with_decay, "weight_decay": weight_decay},
    ]
| 3,656 | 32.550459 | 96 | py |
libai | libai-main/projects/MAE/utils/weight_convert.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import oneflow as flow
import torch
from flowvision.layers.weight_init import trunc_normal_
logger = logging.getLogger("libai.mae." + __name__)
def convert_qkv_weight(cfg, value):
    """
    Convert qkv.weight to be compatible with LiBai transformer layer
    Args:
        cfg: config file
        value: qkv.weight in the loaded checkpoint
    """
    heads = cfg.model.num_heads
    hidden = cfg.model.embed_dim
    per_head = hidden // heads
    # (3*hidden, hidden) -> (3, heads, per_head, hidden), bring the head axis
    # to the front, then flatten back to (3*hidden, hidden).
    reshaped = value.view([3, heads, per_head, hidden])
    interleaved = reshaped.permute(1, 0, 2, 3).contiguous()
    return interleaved.view(hidden * 3, hidden)
def convert_qkv_bias(cfg, value):
    """
    Convert qkv.bias to be compatible with LiBai transformer layer
    Args:
        cfg: config file
        value: qkv.bias in the loaded checkpoint
    """
    heads = cfg.model.num_heads
    hidden = cfg.model.embed_dim
    per_head = hidden // heads
    # (3*hidden,) -> (3, heads, per_head), head axis first, then flatten.
    reshaped = value.view(3, heads, per_head)
    return reshaped.permute(1, 0, 2).contiguous().view(hidden * 3)
def filter_keys(key, value, cfg):
    """
    Filtering the state_dict keys and values to match LiBai's MAE model
    """
    # Decoder weights are discarded (value None means "skip this entry").
    if key.startswith("decoder_"):
        return key, None
    if "norm1" in key:
        return key.replace("norm1", "input_layernorm"), value
    if "attn.qkv" in key:
        key = key.replace("attn.qkv", "self_attention.query_key_value")
        if "weight" in key:
            value = convert_qkv_weight(cfg, value)
        if "bias" in key:
            value = convert_qkv_bias(cfg, value)
        return key, value
    if "attn.proj" in key:
        return key.replace("attn.proj", "self_attention.dense"), value
    if "norm2" in key:
        return key.replace("norm2", "post_attention_layernorm"), value
    if "mlp.fc1" in key:
        return key.replace("mlp.fc1", "mlp.dense_h_to_4h"), value
    if "mlp.fc2" in key:
        return key.replace("mlp.fc2", "mlp.dense_4h_to_h"), value
    if "fc_norm" in key:
        return key.replace("fc_norm", "norm"), value
    # The pre-pool norm of the torch model has no LiBai counterpart.
    if key in ("norm.weight", "norm.bias"):
        return key, None
    return key, value
def log_param(key, value):
    # Debug helper: log one checkpoint entry's name and tensor shape.
    logger.info(f"{key}, shape={value.shape}")
def load_torch_checkpoint(model, cfg, path="./mae_finetuned_vit_base.pth", strict=False):
    """
    Load checkpoint from the given torch weights.
    Torch weight can be downloaded from the original repo:
    https://github.com/facebookresearch/mae
    Keys are remapped via ``filter_keys``; entries it maps to None (decoder
    weights, the pre-pool norm) and BN bookkeeping tensors are dropped.
    """
    torch_dict = torch.load(path, map_location="cpu")["model"]
    parameters = torch_dict
    new_parameters = dict()
    for key, value in parameters.items():
        # log_param(key, value)
        if "num_batches_tracked" not in key:
            # to global tensor
            key, val = filter_keys(key, value, cfg)
            if val is None:
                continue
            val = val.detach().cpu().numpy()
            # Broadcast onto rank 0; load_state_dict redistributes as needed.
            val = flow.tensor(val).to_global(
                sbp=flow.sbp.broadcast, placement=flow.placement("cuda", ranks=[0])
            )
            new_parameters[key] = val
    msg = model.load_state_dict(new_parameters, strict=strict)
    logger.info(msg)
    # For training runs, re-initialize the classification head (per MAE recipe).
    if not cfg.eval_only:
        trunc_normal_(model.head.weight, std=2e-5)
    logger.info("Successfully load torch mae checkpoint.")
    return model
| 3,986 | 31.153226 | 95 | py |
libai | libai-main/projects/SimCSE/config/config_simcse_sup.py | from omegaconf import OmegaConf
from configs.common.data.bert_dataset import tokenization
from configs.common.models.bert import cfg as simcse_cfg
from configs.common.models.graph import graph
from configs.common.optim import optim
from configs.common.train import train
from libai.config import LazyCall
from libai.data.build import build_nlp_test_loader, build_nlp_train_loader
from libai.scheduler import WarmupExponentialLR
from libai.tokenizer import BertTokenizer
from projects.SimCSE.dataset.dataset import TestDataset_sup, TrainDataset_sup
from projects.SimCSE.evaluator import SimcseEvaluator
from projects.SimCSE.modeling.simcse_sup import Simcse_sup
# Optimizer / execution mode.
optim["lr"] = 1e-5
graph["enabled"] = True
# Tokenizer setup.
tokenization.tokenizer = LazyCall(BertTokenizer)(
    vocab_file="./data/vocab.txt",
)
tokenization.make_vocab_size_divisible_by = 1
# Dataloaders: supervised SNLI for training, CNSD-STS test/dev for evaluation.
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_nlp_train_loader)(
    dataset=[
        LazyCall(TrainDataset_sup)(
            name="snli-sup",
            path="./data/SNLI/train.txt",
            tokenizer=LazyCall(BertTokenizer)(vocab_file="./data/vocab.txt"),
            max_len=64,
        )
    ],
)
dataloader.test = [
    LazyCall(build_nlp_test_loader)(
        dataset=LazyCall(TestDataset_sup)(
            name="cnsd_sts",
            path="./data/STS/cnsd-sts-test.txt",
            tokenizer=LazyCall(BertTokenizer)(vocab_file="./data/vocab.txt"),
        ),
    ),
    LazyCall(build_nlp_test_loader)(
        dataset=LazyCall(TestDataset_sup)(
            name="cnsd_sts",
            path="./data/STS/cnsd-sts-dev.txt",
            tokenizer=LazyCall(BertTokenizer)(vocab_file="./data/vocab.txt"),
        )
    ),
]
# Model: BERT-base-sized SimCSE with cls pooling and temperature 0.05.
simcse_cfg.update(
    dict(
        vocab_size=21128,
        hidden_size=768,
        hidden_layers=12,
        layernorm_eps=1e-12,
        intermediate_size=3072,
        pretrained_model_weight="./data/pytorch_model.bin",
        temp=0.05,
        pooler_type="cls",
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        apply_query_key_layer_scaling=False,
        apply_residual_post_layernorm=True,
    )
)
model = LazyCall(Simcse_sup)(cfg=simcse_cfg)
# Training schedule, 8-way data parallel, Spearman-based model selection.
train.update(
    dict(
        output_dir="./result",
        train_micro_batch_size=8,
        test_micro_batch_size=8,
        train_epoch=1,
        train_iter=1000,
        log_period=10,
        dist=dict(
            data_parallel_size=8,
            tensor_parallel_size=1,
            pipeline_parallel_size=1,
        ),
        evaluation=dict(
            enabled=True,
            evaluator=LazyCall(SimcseEvaluator)(),
            eval_period=10,
            eval_metric="Spearman",
            eval_mode="max",
            eval_iter=100,
        ),
        scheduler=LazyCall(WarmupExponentialLR)(
            warmup_factor=0.0,
            gamma=1.0,
            warmup_method="linear",
            warmup_iter=0.0,
        ),
    )
)
| 2,934 | 27.77451 | 77 | py |
libai | libai-main/projects/SimCSE/config/config_simcse_unsup.py | from omegaconf import OmegaConf
from configs.common.data.bert_dataset import tokenization
from configs.common.models.bert import cfg as simcse_cfg
from configs.common.models.graph import graph
from configs.common.optim import optim
from configs.common.train import train
from libai.config import LazyCall
from libai.data.build import build_nlp_test_loader, build_nlp_train_loader
from libai.scheduler import WarmupExponentialLR
from libai.tokenizer import BertTokenizer
from projects.SimCSE.dataset.dataset import TestDataset_unsup, TrainDataset_unsup
from projects.SimCSE.evaluator import SimcseEvaluator
from projects.SimCSE.modeling.simcse_unsup import Simcse_unsup
# Optimizer / execution mode.
optim["lr"] = 3e-5
graph["enabled"] = True
# Tokenizer setup.
tokenization.tokenizer = LazyCall(BertTokenizer)(
    vocab_file="./data/vocab.txt",
)
tokenization.make_vocab_size_divisible_by = 1
# Dataloaders: unsupervised training over SNLI + CNSD-STS train splits,
# CNSD-STS test/dev for evaluation.
dataloader = OmegaConf.create()
dataloader.train = LazyCall(build_nlp_train_loader)(
    dataset=[
        LazyCall(TrainDataset_unsup)(
            name="snli-unsup",
            path="./data/SNLI/train.txt",
            tokenizer=LazyCall(BertTokenizer)(vocab_file="./data/vocab.txt"),
            max_len=64,
            path2="./data/STS/cnsd-sts-train.txt",
        )
    ],
)
dataloader.test = [
    LazyCall(build_nlp_test_loader)(
        dataset=LazyCall(TestDataset_unsup)(
            name="cnsd_sts",
            path="./data/STS/cnsd-sts-test.txt",
            tokenizer=LazyCall(BertTokenizer)(vocab_file="./data/vocab.txt"),
        ),
    ),
    LazyCall(build_nlp_test_loader)(
        dataset=LazyCall(TestDataset_unsup)(
            name="cnsd_sts",
            path="./data/STS/cnsd-sts-dev.txt",
            tokenizer=LazyCall(BertTokenizer)(vocab_file="./data/vocab.txt"),
        )
    ),
]
# Model: BERT-base-sized SimCSE with cls pooling and temperature 0.05.
simcse_cfg.update(
    dict(
        vocab_size=21128,
        hidden_size=768,
        hidden_layers=12,
        layernorm_eps=1e-12,
        intermediate_size=3072,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        apply_query_key_layer_scaling=False,
        apply_residual_post_layernorm=True,
        pretrained_model_weight="./data/pytorch_model.bin",
        pooler_type="cls",
        temp=0.05,
    )
)
model = LazyCall(Simcse_unsup)(cfg=simcse_cfg)
# Training schedule, 8-way data parallel, Spearman-based model selection.
train.update(
    dict(
        output_dir="./result",
        train_micro_batch_size=8,
        test_micro_batch_size=8,
        train_epoch=1,
        train_iter=2500,
        log_period=10,
        dist=dict(
            data_parallel_size=8,
            tensor_parallel_size=1,
            pipeline_parallel_size=1,
        ),
        evaluation=dict(
            enabled=True,
            evaluator=LazyCall(SimcseEvaluator)(),
            eval_period=10,
            eval_iter=1e5,
            eval_metric="Spearman",
            eval_mode="max",
        ),
        scheduler=LazyCall(WarmupExponentialLR)(
            warmup_factor=0.000, gamma=1.0, warmup_method="linear", warmup_iter=0
        ),
    )
)
| 2,966 | 28.67 | 81 | py |
libai | libai-main/projects/SimCSE/utils/load_huggingface_weight.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import oneflow as flow
import torch
import libai.utils.distributed as dist
def convert_tensor(tensor):
    """Convert a (possibly half-precision) torch tensor to a float32 flow.Tensor."""
    return flow.Tensor(tensor.float().cpu().numpy())
def conver_state(state, layers, hidden_size, num_heads, head_size):
    """Translate a HuggingFace BERT state dict into LiBai naming/layout.

    Args:
        state: HuggingFace checkpoint dict ("bert.encoder.layer.N..." keys).
        layers: number of transformer layers in the checkpoint.
        hidden_size: model hidden size.
        num_heads: attention head count.
        head_size: per-head size (hidden_size // num_heads).

    Returns:
        (save, not_saved): the converted OrderedDict and the list of source
        keys that had no mapping.
    """
    save = OrderedDict()
    not_saved = []
    Layers = layers
    for name, tensor in state.items():
        if "embeddings" in name:
            # The embedding LayerNorm maps to layer 0's input layernorm.
            if "word_embeddings" in name:
                save["embeddings.vocab_embeddings.weight"] = convert_tensor(tensor)
            elif "position_embeddings" in name:
                save["embeddings.position_embeddings.weight"] = convert_tensor(tensor)
            elif "token_type_embeddings" in name:
                save["embeddings.tokentype_embeddings.weight"] = convert_tensor(tensor)
            elif "LayerNorm.gamma" in name:
                save["encoders.0.input_layernorm.weight"] = convert_tensor(tensor)
            elif "LayerNorm.beta" in name:
                save["encoders.0.input_layernorm.bias"] = convert_tensor(tensor)
        elif "attention" in name:
            if "self" in name:
                # Fuse separate q/k/v projections into one qkv tensor.
                # name parts: bert.encoder.layer.<idx>.attention.self.<q|k|v>.<weight|bias>
                index = name.split(".")[3]
                # Already fused when the first of the q/k/v keys was visited.
                if "encoders." + index + ".self_attention.query_key_value.weight" in save.keys():
                    continue
                q_w = name.replace(name.split(".")[6], "query").replace(
                    name.split(".")[7], "weight"
                )
                k_w = name.replace(name.split(".")[6], "key").replace(name.split(".")[7], "weight")
                v_w = name.replace(name.split(".")[6], "value").replace(
                    name.split(".")[7], "weight"
                )
                q_b = name.replace(name.split(".")[6], "query").replace(name.split(".")[7], "bias")
                k_b = name.replace(name.split(".")[6], "key").replace(name.split(".")[7], "bias")
                v_b = name.replace(name.split(".")[6], "value").replace(name.split(".")[7], "bias")
                qkv_w = torch.cat((state[q_w], state[k_w], state[v_w]), dim=0)  # (768*3, 768)
                # Interleave heads: (3*h, h) -> (3, heads, head_size, h) -> head-major.
                qkv_w = qkv_w.view([3, num_heads, head_size, hidden_size])
                qkv_w = qkv_w.permute(1, 0, 2, 3).contiguous().view(3 * hidden_size, hidden_size)
                # ---------------------------------------------------------
                qkv_b = torch.cat((state[q_b], state[k_b], state[v_b]), dim=-1)
                # Same head interleaving for the fused bias.
                qkv_b = qkv_b.view(3, num_heads, head_size)
                qkv_b = qkv_b.permute(1, 0, 2).contiguous().view(-1)
                # ---------------------------------------------------------
                target_w = "encoders." + index + ".self_attention.query_key_value.weight"
                save[target_w] = convert_tensor(qkv_w)
                target_b = "encoders." + index + ".self_attention.query_key_value.bias"
                save[target_b] = convert_tensor(qkv_b)
            elif "output" in name:
                # Attention output projection and its LayerNorm.
                index = name.split(".")[3]
                if "dense" in name:
                    if "weight" in name:
                        target = "encoders." + index + ".self_attention.dense.weight"
                        save[target] = convert_tensor(tensor)
                    elif "bias" in name:
                        target = "encoders." + index + ".self_attention.dense.bias"
                        save[target] = convert_tensor(tensor)
                elif "LayerNorm" in name:
                    if "gamma" in name:
                        target = "encoders." + index + ".post_attention_layernorm.weight"
                        save[target] = convert_tensor(tensor)
                    elif "beta" in name:
                        target = "encoders." + index + ".post_attention_layernorm.bias"
                        save[target] = convert_tensor(tensor)
        elif "intermediate" in name:
            # MLP up-projection; both weight and bias are written on first hit.
            index = name.split(".")[3]
            if "encoders." + index + ".mlp.dense_h_to_4h.weight" in save.keys():
                continue
            w = "bert.encoder.layer." + index + ".intermediate.dense.weight"
            b = "bert.encoder.layer." + index + ".intermediate.dense.bias"
            t_w = "encoders." + index + ".mlp.dense_h_to_4h.weight"
            t_b = "encoders." + index + ".mlp.dense_h_to_4h.bias"
            save[t_w] = convert_tensor(state[w])
            save[t_b] = convert_tensor(state[b])
        elif "output" in name:
            # Block-level output: MLP down-projection and the trailing
            # LayerNorm, which becomes the NEXT layer's input layernorm
            # (or the final layernorm for the last layer).
            index = name.split(".")[3]
            if "dense.weight" in name:
                target = "encoders." + index + ".mlp.dense_4h_to_h.weight"
                save[target] = convert_tensor(tensor)
            elif "dense.bias" in name:
                target = "encoders." + index + ".mlp.dense_4h_to_h.bias"
                save[target] = convert_tensor(tensor)
            elif "LayerNorm.gamma" in name:
                if index == str(Layers - 1):
                    target = "final_layernorm.weight"
                    save[target] = convert_tensor(tensor)
                    continue
                target = "encoders." + str(int(index) + 1) + ".input_layernorm.weight"
                save[target] = convert_tensor(tensor)
            elif "LayerNorm.beta" in name:
                if index == str(Layers - 1):
                    target = "final_layernorm.bias"
                    save[target] = convert_tensor(tensor)
                    continue
                target = "encoders." + str(int(index) + 1) + ".input_layernorm.bias"
                save[target] = convert_tensor(tensor)
        elif "pooler" in name:
            if "weight" in name:
                save["pooler.dense.weight"] = convert_tensor(tensor)
            elif "bias" in name:
                save["pooler.dense.bias"] = convert_tensor(tensor)
        else:
            not_saved.append(name)
    return save, not_saved
def load_tensor(tensor_lhs, tensor_rhs):
    """In-place copy of ``tensor_rhs`` into the global tensor ``tensor_lhs``."""
    broadcast_sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])
    # First materialize on the target placement with broadcast sbp, then
    # redistribute to the destination tensor's own sbp before copying.
    aligned = flow.to_global(
        tensor_rhs, placement=tensor_lhs.placement, sbp=broadcast_sbp
    )
    tensor_lhs.copy_(aligned.to_global(sbp=tensor_lhs.sbp))
def load_huggingface_bert(model, path, hidden_size, num_heads, layers=12):
    """Load HuggingFace BERT weights from ``path`` into a LiBai model in place."""
    huggingface_state_dict = torch.load(path)
    converted, _ = conver_state(
        huggingface_state_dict,
        layers=layers,
        hidden_size=hidden_size,
        num_heads=num_heads,
        head_size=hidden_size // num_heads,
    )
    target_state = model.state_dict()
    for name, tensor in converted.items():
        load_tensor(target_state[name], tensor)
| 7,362 | 44.732919 | 99 | py |
libai | libai-main/projects/CLIP/clip/clip.py | # --------------------------------------------------------
# Borrow code from:
# https://github.com/openai/CLIP/tree/main/clip/clip.py
# --------------------------------------------------------
import hashlib
import os
import urllib
import warnings
from typing import List, Union
import oneflow as flow
import torch
from flowvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from PIL import Image
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
try:
from flowvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
# noqa:
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", # noqa: E501
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt", # noqa: E501
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt", # noqa: E501
"RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt", # noqa: E501
"RN50x64": "https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt", # noqa: E501
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", # noqa: E501
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", # noqa: E501
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", # noqa: E501
"ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt", # noqa: E501
}
def _download(url: str, root: str):
    """Download ``url`` into ``root`` and verify its SHA256 checksum.

    The expected checksum is taken from the second-to-last path component of
    the URL (the OpenAI CLIP release layout). A previously downloaded file
    with a valid checksum is reused. Returns the local file path.

    Raises:
        RuntimeError: if the target path exists but is not a regular file,
            or if the downloaded file fails checksum verification.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
            return download_target
        else:
            warnings.warn(
                f"{download_target} exists, but the SHA256 checksum does not match; "
                "re-downloading the file"
            )
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")),
            ncols=80,
            unit="iB",
            unit_scale=True,
            unit_divisor=1024,
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    # Fixed error message: it previously read "does not not match".
    if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
        raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match")
    return download_target
def _convert_image_to_rgb(image):
    # Input may be grayscale/RGBA/palette; the model expects 3-channel RGB.
    return image.convert("RGB")
def _transform(n_px):
    """Build the CLIP image preprocessing pipeline for ``n_px``-sized crops."""
    normalize = Normalize(
        (0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)
    )
    steps = [
        Resize(n_px, interpolation=BICUBIC),
        CenterCrop(n_px),
        _convert_image_to_rgb,
        ToTensor(),
        normalize,
    ]
    return Compose(steps)
def available_models() -> List[str]:
    """Returns the names of available CLIP models"""
    return [model_name for model_name in _MODELS]
def load(
    name: str,
    device: Union[str, torch.device] = "cuda" if flow.cuda.is_available() else "cpu",
    download_root: str = None,
):
    """Load a CLIP model

    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a
        model checkpoint containing the state_dict
    device : Union[str, torch.device]
        The device to put the loaded model
    download_root: str
        path to download the model files; by default, it uses "~/.cache/clip"

    Returns
    -------
    model : flow.nn.Module
        The CLIP model
    preprocess : Callable[[PIL.Image], flow.Tensor]
        A flowvision transform that converts a PIL image into a tensor that
        the returned model can take as its input
    """
    if name in _MODELS:
        model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
    with open(model_path, "rb") as opened_file:
        try:
            # loading JIT archive
            model = torch.jit.load(opened_file, map_location="cpu").eval()
            state_dict = None
        except RuntimeError:
            # loading saved state dict
            state_dict = torch.load(opened_file, map_location="cpu")
    model = build_model(state_dict or model.state_dict()).to(device)
    if str(device) == "cpu":
        model.float()
    # Fix: the ViT visual exposes ``img_size`` while the ModifiedResNet visual
    # exposes ``input_resolution`` — reading ``img_size`` unconditionally
    # crashed for ResNet checkpoints.
    input_resolution = getattr(model.visual, "img_size", None)
    if input_resolution is None:
        input_resolution = model.visual.input_resolution
    return model, _transform(input_resolution)
def tokenize(
    texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False
) -> Union[flow.IntTensor, flow.LongTensor]:
    """Tokenize one string or a list of strings for CLIP's text encoder.

    Each text is wrapped in start/end-of-text tokens and written into a row of
    a ``[len(texts), context_length]`` integer tensor, zero-padded on the
    right. Texts longer than ``context_length`` are either truncated (keeping
    the end-of-text token last) when ``truncate`` is True, or raise a
    ``RuntimeError`` otherwise.
    """
    if isinstance(texts, str):
        texts = [texts]
    sot_token = _tokenizer.encoder["<|startoftext|>"]
    eot_token = _tokenizer.encoder["<|endoftext|>"]
    result = flow.zeros(len(texts), context_length, dtype=flow.int)
    for i, text in enumerate(texts):
        tokens = [sot_token] + _tokenizer.encode(text) + [eot_token]
        if len(tokens) > context_length:
            if not truncate:
                raise RuntimeError(
                    f"Input {texts[i]} is too long for context length {context_length}"
                )
            # Keep the sequence terminator in the last slot after truncation.
            tokens = tokens[:context_length]
            tokens[-1] = eot_token
        result[i, : len(tokens)] = flow.tensor(tokens, dtype=flow.int)
    return result
| 7,197 | 34.99 | 168 | py |
libai | libai-main/projects/CLIP/clip/model.py | # --------------------------------------------------------
# Borrow code from:
# https://github.com/openai/CLIP/tree/main/clip/model.py
# --------------------------------------------------------
from collections import OrderedDict
from typing import Dict, Tuple, Union
import numpy as np
import oneflow as flow
import torch
from oneflow import nn
from libai.layers import MLP, Embedding, LayerNorm, Linear, MultiheadAttention, TransformerLayer
from libai.layers.activation import build_activation
from libai.layers.attention import AttnMaskType
from libai.models import VisionTransformer as ViT
from libai.utils import distributed as dist
from libai.utils.checkpoint import get_missing_parameters_message, get_unexpected_parameters_message
from .ops import multi_head_attention_forward
class Bottleneck(nn.Module):
    """ResNet bottleneck block with CLIP's anti-aliasing modifications.

    All convolutions run at stride 1; spatial downsampling (when
    ``stride > 1``) is performed by average pooling before the final 1x1
    convolution and inside the shortcut branch.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()

        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu1 = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu2 = nn.ReLU(inplace=True)

        # Anti-aliasing: pool instead of striding the convolution.
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()

        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu3 = nn.ReLU(inplace=True)

        self.downsample = None
        self.stride = stride

        needs_projection = stride > 1 or inplanes != planes * Bottleneck.expansion
        if needs_projection:
            # Shortcut: avgpool first, then a stride-1 projection conv + BN.
            self.downsample = nn.Sequential(
                OrderedDict(
                    [
                        ("-1", nn.AvgPool2d(stride)),
                        (
                            "0",
                            nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False),
                        ),
                        ("1", nn.BatchNorm2d(planes * self.expansion)),
                    ]
                )
            )

    def forward(self, x: flow.Tensor):
        shortcut = x if self.downsample is None else self.downsample(x)

        h = self.relu1(self.bn1(self.conv1(x)))
        h = self.relu2(self.bn2(self.conv2(h)))
        h = self.avgpool(h)
        h = self.bn3(self.conv3(h))

        return self.relu3(h + shortcut)
class AttentionPool2d(nn.Module):
    """2D attention pooling head used by :class:`ModifiedResNet`.

    Flattens an (N, C, H, W) feature map into HW tokens, prepends the mean
    token, adds a learned positional embedding, and runs one multi-head
    attention pass; the output at the mean-token position is returned.
    """

    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        # One positional vector per spatial location plus one for the mean token.
        self.positional_embedding = nn.Parameter(
            flow.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5
        )
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        # Output projection; defaults to embed_dim when output_dim is None.
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
            2, 0, 1
        )  # NCHW -> (HW)NC
        x = flow.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        # Self-attention with separate q/k/v projection weights; dropout off.
        x, _ = multi_head_attention_forward(
            query=x,
            key=x,
            value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=flow.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False,
        )
        # Only the first (mean-pooled) token is used as the pooled output.
        return x[0]
class ModifiedResNet(nn.Module):
    """CLIP's modified ResNet visual encoder.

    Differences from a standard torchvision/flowvision ResNet:
    - a 3-convolution stem (with average pooling) replaces the single
      7x7 conv + max pool;
    - strided convolutions are anti-aliased: an avgpool is prepended to
      every convolution with stride > 1 (see :class:`Bottleneck`);
    - the final pooling layer is QKV attention (:class:`AttentionPool2d`)
      instead of global average pooling.
    """

    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution

        # --- 3-layer stem ---
        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.relu3 = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(2)

        # --- residual stages ---
        self._inplanes = width  # mutable bookkeeping used while building stages
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)

        embed_dim = width * 32  # final ResNet feature dimension
        self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)

    def _make_layer(self, planes, blocks, stride=1):
        # Only the first block of a stage may downsample.
        stage = [Bottleneck(self._inplanes, planes, stride)]
        self._inplanes = planes * Bottleneck.expansion
        stage.extend(Bottleneck(self._inplanes, planes) for _ in range(blocks - 1))
        return nn.Sequential(*stage)

    def forward(self, x):
        x = x.to(dtype=self.conv1.weight.dtype)
        # 3-conv stem followed by average pooling.
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        x = self.avgpool(x)
        # Four residual stages, then attention pooling.
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        return self.attnpool(x)
class MLPClip(MLP):
    """libai :class:`MLP` whose activation is CLIP's ``quick_gelu``."""

    def __init__(
        self,
        hidden_size,
        ffn_hidden_size,
        output_dropout_prob=0,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        *,
        layer_idx=0,
    ):
        super().__init__(
            hidden_size,
            ffn_hidden_size,
            output_dropout_prob,
            init_method,
            output_layer_init_method,
            bias_gelu_fusion,
            bias_dropout_fusion,
            layer_idx=layer_idx,
        )
        if bias_gelu_fusion:
            # The fused bias+GELU kernel hard-codes its activation, so only
            # the unfused path can be swapped to quick_gelu.
            return
        self.activation_func = build_activation("quick_gelu")
class TransformerLayerClip(TransformerLayer):
    """libai :class:`TransformerLayer` whose feed-forward block is
    :class:`MLPClip` (i.e. the activation is CLIP's ``quick_gelu``).

    All constructor arguments are forwarded unchanged to the parent class;
    only ``self.mlp`` is replaced after parent construction.
    """

    def __init__(
        self,
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        is_decoder=False,
        attention_dropout_prob=0,
        output_dropout_prob=0,
        drop_path_prob=0,
        layernorm_epsilon=0.00001,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        bias_gelu_fusion=False,
        bias_dropout_fusion=False,
        scale_mask_softmax_fusion=False,
        apply_query_key_layer_scaling=False,
        apply_residual_post_layernorm=False,
        attn_mask_type=AttnMaskType.padding,
        *,
        layer_idx=0,
    ):
        super().__init__(
            hidden_size,
            ffn_hidden_size,
            num_attention_heads,
            is_decoder,
            attention_dropout_prob,
            output_dropout_prob,
            drop_path_prob,
            layernorm_epsilon,
            init_method,
            output_layer_init_method,
            bias_gelu_fusion,
            bias_dropout_fusion,
            scale_mask_softmax_fusion,
            apply_query_key_layer_scaling,
            apply_residual_post_layernorm,
            attn_mask_type,
            layer_idx=layer_idx,
        )
        # Replace the parent-built MLP with the quick_gelu variant; the
        # attributes read here were stored by the parent constructor.
        self.mlp = MLPClip(
            self.hidden_size,
            self.ffn_hidden_size,
            self.output_dropout_prob,
            self.init_method,
            output_layer_init_method=self.output_layer_init_method,
            bias_gelu_fusion=self.bias_gelu_fusion,
            bias_dropout_fusion=self.bias_dropout_fusion,
            layer_idx=self.layer_idx,
        )
class Transformer(nn.Module):
    """A stack of :class:`TransformerLayerClip` blocks sharing one attention mask."""

    def __init__(self, width: int, layers: int, heads: int, attn_mask: flow.Tensor = None):
        super().__init__()
        self.width = width
        self.layers = layers
        self.attn_mask = attn_mask
        blocks = [
            TransformerLayerClip(width, 4 * width, heads, layer_idx=idx) for idx in range(layers)
        ]
        self.resblocks = nn.ModuleList(blocks)

    def forward(self, x: flow.Tensor):
        hidden = x
        # Every block receives the same (optional) attention mask.
        for block in self.resblocks:
            hidden = block(hidden, self.attn_mask)
        return hidden
class VisionTransformer(ViT):
    """libai ViT adapted for CLIP.

    Adds a pre-transformer LayerNorm (``ln_pre``) and replaces the
    classification head with a bias-free linear projection, matching the
    CLIP ViT visual encoder layout.
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        embed_dim=192,
        depth=12,
        num_heads=3,
        mlp_ratio=4,
        drop_rate=0,
        attn_drop_rate=0,
        drop_path_rate=0,
        num_classes=1000,
        loss_func=None,
    ):
        super().__init__(
            img_size,
            patch_size,
            in_chans,
            embed_dim,
            depth,
            num_heads,
            mlp_ratio,
            drop_rate,
            attn_drop_rate,
            drop_path_rate,
            num_classes,
            loss_func,
        )
        # CLIP applies LayerNorm to the embeddings before the transformer.
        self.ln_pre = LayerNorm(embed_dim, layer_idx=0)
        # Bias-free projection head (CLIP's ``visual.proj``).
        self.head = Linear(embed_dim, num_classes, bias=False, layer_idx=-1)

    def forward_features(self, x):
        # patch embedding
        x = self.patch_embed(x)
        cls_token = self.cls_token.expand(
            x.shape[0], -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        # Align the cls token's distribution annotations with the input batch.
        cls_token = cls_token.to_global(sbp=x.sbp, placement=cls_token.placement)
        x = flow.cat((cls_token, x), dim=1)
        # position embedding
        pos_embed = self.pos_embed.expand(x.shape[0], -1, -1)
        pos_embed = pos_embed.to_global(sbp=x.sbp, placement=pos_embed.placement)
        x = self.pos_drop(x + pos_embed)
        # layernorm_pre
        x = self.ln_pre(x)
        # transformer block
        x = self.blocks(x)
        return x
class CLIP(nn.Module):
    """Contrastive Language-Image Pre-training model (oneflow/libai port).

    Pairs a visual encoder (ModifiedResNet when ``vision_layers`` is a
    tuple/list, otherwise a ViT) with a causal text transformer, and projects
    both into a shared ``embed_dim``-dimensional space for contrastive
    similarity scoring.
    """

    def __init__(
        self,
        embed_dim: int,
        # vision
        image_resolution: int,
        vision_layers: Union[Tuple[int, int, int, int], int],
        vision_width: int,
        vision_patch_size: int,
        # text
        context_length: int,
        vocab_size: int,
        transformer_width: int,
        transformer_heads: int,
        transformer_layers: int,
    ):
        super().__init__()
        self.context_length = context_length
        # A tuple/list of layer counts selects the ResNet visual encoder;
        # a single int selects the ViT encoder with that depth.
        if isinstance(vision_layers, (tuple, list)):
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width,
            ).to_global(sbp=flow.sbp.broadcast, placement=dist.get_layer_placement(0))
        else:
            vision_heads = vision_width // 64
            self.visual = VisionTransformer(
                img_size=image_resolution,
                patch_size=vision_patch_size,
                embed_dim=vision_width,
                depth=vision_layers,
                num_heads=vision_heads,
                num_classes=embed_dim,
            )
        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask(),
        )
        self.vocab_size = vocab_size
        self.token_embedding = Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(
            flow.empty(
                self.context_length,
                transformer_width,
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                placement=dist.get_layer_placement(0),
            )
        )
        self.ln_final = LayerNorm((transformer_width,), layer_idx=-1)
        # Projects final text features into the shared embedding space.
        self.text_projection = nn.Parameter(
            flow.empty(
                transformer_width,
                embed_dim,
                sbp=flow.sbp.broadcast,
                placement=dist.get_layer_placement(0),
            )
        )
        # Learnable inverse temperature, initialized to ln(1/0.07).
        self.logit_scale = nn.Parameter(
            flow.ones([], sbp=flow.sbp.broadcast, placement=dist.get_layer_placement(0))
            * np.log(1 / 0.07)
        )
        self.initialize_parameters()

    def initialize_parameters(self):
        """Initialize weights following the original CLIP initialization scheme."""
        if hasattr(self.visual, "patch_embed"):
            nn.init.zeros_(self.visual.patch_embed.proj.bias)
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)
        if isinstance(self.visual, ModifiedResNet):
            if self.visual.attnpool is not None:
                std = self.visual.attnpool.c_proj.in_features ** -0.5
                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
            # Zero the last BN gamma of each bottleneck so residual branches
            # start as identity.
            for resnet_block in [
                self.visual.layer1,
                self.visual.layer2,
                self.visual.layer3,
                self.visual.layer4,
            ]:
                for name, param in resnet_block.named_parameters():
                    if name.endswith("bn3.weight"):
                        nn.init.zeros_(param)
        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
        attn_std = self.transformer.width ** -0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.self_attention.query_key_value.weight, std=attn_std)
            nn.init.normal_(block.self_attention.dense.weight, std=proj_std)
            nn.init.normal_(block.mlp.dense_h_to_4h.weight, std=fc_std)
            nn.init.normal_(block.mlp.dense_4h_to_h.weight, std=proj_std)
        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)

    def build_attention_mask(self):
        # Causal mask over the text sequence. NOTE(review): unlike the torch
        # original (additive -inf mask), this builds a lower-triangular 0/1
        # matrix — presumably the multiplicative mask format libai's
        # attention expects; confirm against TransformerLayer's mask handling.
        mask = flow.ones(
            self.context_length,
            self.context_length,
            sbp=flow.sbp.broadcast,
            placement=dist.get_layer_placement(0),
        )
        mask = flow.tril(mask)  # zero out the lower diagonal
        return mask

    @property
    def dtype(self):
        # NOTE(review): ``conv1`` only exists on the ModifiedResNet visual;
        # the ViT visual has no such attribute — confirm callers only use
        # this property with ResNet checkpoints.
        return self.visual.conv1.weight.dtype

    def encode_image(self, image):
        # NOTE(review): indexing assumes the ViT visual, whose forward
        # returns a dict with "prediction_scores"; ModifiedResNet returns a
        # plain tensor — confirm RN checkpoints are not routed through here.
        return self.visual(image)["prediction_scores"]

    def encode_text(self, text):
        x = self.token_embedding(text)  # [batch_size, n_ctx, d_model]
        x = x + self.positional_embedding
        # x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        # x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x)
        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = (
            x[flow.arange(x.shape[0], sbp=x.sbp, placement=x.placement), text.argmax(dim=-1)]
            @ self.text_projection
        )
        return x

    def forward(self, image, text):
        image_features = self.encode_image(image)
        text_features = self.encode_text(text)
        # normalized features
        image_features = image_features / image_features.norm(dim=1, keepdim=True)
        text_features = text_features / text_features.norm(dim=1, keepdim=True)
        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logits_per_image.t()
        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16 in place."""

    def _convert_weights_to_fp16(module):
        # Plain conv/linear layers: cast weight (and bias if present).
        if isinstance(module, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            module.weight.data = module.weight.data.to(dtype=flow.float16)
            if module.bias is not None:
                module.bias.data = module.bias.data.to(dtype=flow.float16)

        # Fused attention layers: cast the qkv and output projections.
        if isinstance(module, MultiheadAttention):
            for attr_name in ("query_key_value", "dense"):
                sub_layer = getattr(module, attr_name)
                for param_name in ("weight", "bias"):
                    tensor = getattr(sub_layer, param_name)
                    if tensor is not None:
                        tensor.data = tensor.data.to(dtype=flow.float16)

        # Bare parameter projections (e.g. CLIP.text_projection).
        text_proj = getattr(module, "text_projection", None)
        if text_proj is not None:
            text_proj.data = text_proj.data.to(dtype=flow.float16)

        proj = getattr(module, "proj", None)
        if proj is not None:
            proj.weight.data = proj.weight.data.to(dtype=flow.float16)

    model.apply(_convert_weights_to_fp16)
def load_tensor(tensor_lhs: flow.Tensor, tensor_rhs: torch.Tensor):
    """Copy a torch tensor's values into a global oneflow tensor in place."""
    # Materialize the torch values as a broadcast global tensor first, then
    # redistribute to match the destination's SBP/placement before copying.
    broadcast_value = flow.Tensor(
        tensor_rhs.cpu().numpy(),
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=flow.env.all_device_placement("cuda"),
    )
    matched_value = broadcast_value.to_global(sbp=tensor_lhs.sbp, placement=tensor_lhs.placement)
    tensor_lhs.data.copy_(matched_value.data)
def load_weights(model: nn.Module, state_dict: Dict):
    """Load ``state_dict`` into ``model``, reporting problem keys.

    Checkpoint entries whose shape does not match the model are skipped (and
    popped from ``state_dict``); entries with no counterpart in the model are
    reported as unexpected; model entries never written are reported as
    missing.

    Note: ``state_dict`` is mutated (incompatible keys are removed), matching
    the original behavior.
    """
    # Fetch the model state dict once instead of once per key inside the
    # copy loop (the original called model.state_dict() for every entry).
    # The returned tensors share storage with the parameters, so copying
    # into them updates the model — the same premise the original relied on.
    model_state_dict = model.state_dict()

    # Drop checkpoint entries whose shapes do not match the model.
    incorrect_shapes = []
    for k in list(state_dict.keys()):
        if k in model_state_dict:
            shape_model = tuple(model_state_dict[k].shape)
            shape_checkpoint = tuple(state_dict[k].shape)
            if shape_model != shape_checkpoint:
                incorrect_shapes.append((k, shape_checkpoint, shape_model))
                state_dict.pop(k)

    unexpected_keys = []
    loaded_keys = set()
    for key, value in state_dict.items():
        if key not in model_state_dict:
            unexpected_keys.append(key)
            # skip this key
            continue
        loaded_keys.add(key)
        load_tensor(model_state_dict[key], value)

    # Model keys never written (includes shape-mismatched ones, as before).
    missing_keys = [k for k in model_state_dict if k not in loaded_keys]

    for k, shape_checkpoint, shape_model in incorrect_shapes:
        print(
            "Skip loading parameter '{}' to the model due to incompatible "
            "shapes: {} in the checkpoint but {} in the "
            "model! You might want to double check if this is expected.".format(
                k, shape_checkpoint, shape_model
            )
        )
    if missing_keys:
        print(get_missing_parameters_message(missing_keys))
    if unexpected_keys:
        print(get_unexpected_parameters_message(unexpected_keys))
def convert_qkv_weight(qkv_weight, num_heads):
    """Reorder a fused in-proj weight from (q|k|v)-major to head-major layout.

    Input and output shapes are both ``(3 * num_heads * 64, num_heads * 64)``;
    the per-head dimension of 64 is fixed by the CLIP checkpoints.
    """
    head_dim = 64
    hidden = num_heads * head_dim
    per_head = qkv_weight.view(3, num_heads, head_dim, hidden)
    head_major = per_head.permute(1, 0, 2, 3).contiguous()
    return head_major.view(3 * hidden, hidden)
def convert_qkv_bias(qkv_bias, num_heads):
    """Reorder a fused in-proj bias from (q|k|v)-major to head-major layout.

    Input and output are both flat vectors of length ``3 * num_heads * 64``.
    """
    head_major = qkv_bias.view(3, num_heads, 64).permute(1, 0, 2)
    return head_major.contiguous().view(-1)
def change_vit_state_dict(state_dict, visual_num_heads, text_num_heads):
    """Rename/reshape a torch CLIP ViT checkpoint into libai's naming scheme.

    Keys are rewritten (resblocks -> blocks, ln_1 -> input_layernorm, ...)
    and fused attention weights/biases are permuted into head-major layout
    via ``convert_qkv_weight`` / ``convert_qkv_bias``. Visual and text
    branches may use different head counts, hence the two arguments.
    """
    new_state_dict = {}
    for key, value in state_dict.items():
        # change prefix
        if "visual.transformer.resblocks" in key:
            key = key.replace("visual.transformer.resblocks", "visual.blocks")
        # change "ln_1" to "input_layernorm"
        if "ln_1" in key:
            key = key.replace("ln_1", "input_layernorm")
        # change "ln_2" to "post_attention_layernorm"
        if "ln_2" in key:
            key = key.replace("ln_2", "post_attention_layernorm")
        # change "attn.out_proj" to "attention.dense"
        if "attn.out_proj" in key:
            key = key.replace("attn.out_proj", "attention.dense")
        # change "attn" to "attention.query_key_value"
        if "attn.in_proj_weight" in key:
            key = key.replace("attn.in_proj_weight", "attention.query_key_value.weight")
            if "visual" not in key:
                value = convert_qkv_weight(value, text_num_heads)
            else:
                value = convert_qkv_weight(value, visual_num_heads)
        if "attn.in_proj_bias" in key:
            key = key.replace("attn.in_proj_bias", "attention.query_key_value.bias")
            if "visual" not in key:
                value = convert_qkv_bias(value, text_num_heads)
            else:
                value = convert_qkv_bias(value, visual_num_heads)
        # change "mlp.c_fc" to "mlp.dense_h_to_4h"
        if "mlp.c_fc" in key:
            key = key.replace("mlp.c_fc", "mlp.dense_h_to_4h")
        # change "mlp.c_proj" to "mlp.dense_4h_to_h"
        if "mlp.c_proj" in key:
            key = key.replace("mlp.c_proj", "mlp.dense_4h_to_h")
        # change "class_embedding" to "cls_token"
        if "class_embedding" in key:
            key = key.replace("class_embedding", "cls_token")
            # libai stores the cls token with leading (1, 1, ...) dims.
            value = value.unsqueeze(0).unsqueeze(0)
        # change "pos_embed" to "positional_embedding"
        if "visual.positional_embedding" == key:
            key = "visual.pos_embed"
            value = value.unsqueeze(0)
        # change patch_embedding
        if key == "visual.conv1.weight":
            key = "visual.patch_embed.proj.weight"
        # change "ln_post"
        if "ln_post" in key:
            key = key.replace("ln_post", "norm")
        # change "proj"
        if "visual.proj" == key:
            # CLIP stores the projection as a bare matrix; libai's head is a
            # Linear layer, so transpose into (out, in) weight layout.
            key = "visual.head.weight"
            value = value.transpose(0, 1)
        # added by huangwei
        key = key.replace("attention.query_key_value", "self_attention.query_key_value").replace(
            "attention.dense", "self_attention.dense"
        )
        new_state_dict[key] = value
    return new_state_dict
def build_model(state_dict: dict):
    """Instantiate a :class:`CLIP` model from a torch checkpoint state dict.

    All architecture hyper-parameters (widths, depths, patch size, image
    resolution, vocabulary size, ...) are inferred from tensor shapes and key
    names. ViT checkpoints are additionally renamed via
    ``change_vit_state_dict`` before their weights are loaded.
    """
    # A "visual.proj" key only exists for ViT-based visual encoders.
    vit = "visual.proj" in state_dict
    if vit:
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        # One in_proj_weight per transformer block in the visual branch.
        vision_layers = len(
            [
                k
                for k in state_dict.keys()
                if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")
            ]
        )
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        # Positional embedding has grid*grid patch tokens plus one cls token.
        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        image_resolution = vision_patch_size * grid_size
    else:
        # ResNet visual: count the blocks in each of the four stages.
        counts: list = [
            len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}")))
            for b in [1, 2, 3, 4]
        ]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round(
            (state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5
        )
        vision_patch_size = None
        assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
        # ResNet downsamples by a total factor of 32.
        image_resolution = output_width * 32
    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(
        set(k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks"))
    )
    if vit:
        state_dict = change_vit_state_dict(state_dict, vision_width // 64, transformer_heads)
    model = CLIP(
        embed_dim,
        image_resolution,
        vision_layers,
        vision_width,
        vision_patch_size,
        context_length,
        vocab_size,
        transformer_width,
        transformer_heads,
        transformer_layers,
    )
    # Metadata entries present in some checkpoints; not model weights.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        if key in state_dict:
            del state_dict[key]
    # convert_weights(model)
    load_weights(model, state_dict)
    return model.eval()
| 25,690 | 34.731572 | 100 | py |
libai | libai-main/projects/CLIP/tests/test_multi_head_attn.py | import os
import sys
import unittest
import numpy as np
import oneflow as flow
import torch
from torch.nn.functional import multi_head_attention_forward as multi_head_attention_forward_torch
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from clip.ops import multi_head_attention_forward as multi_head_attention_forward_flow # noqa: E402
class TestMultiHeadAttention(unittest.TestCase):
    """Checks the oneflow port of ``multi_head_attention_forward`` against
    torch's reference implementation."""

    def test_with_torch(self):
        # Shared random projection weights/biases; both frameworks receive
        # identical inputs, so their outputs must match element-wise.
        k_proj_weight = np.random.normal(size=(32, 32))
        k_proj_bias = np.random.normal(size=(32))
        q_proj_weight = np.random.normal(size=(32, 32))
        q_proj_bias = np.random.normal(size=(32))
        v_proj_weight = np.random.normal(size=(32, 32))
        v_proj_bias = np.random.normal(size=(32))
        c_proj_weight = np.random.normal(size=(64, 32))
        c_proj_bias = np.random.normal(size=(64))
        # Input shaped (seq_len, batch, embed_dim).
        x = np.random.normal(size=(65, 16, 32))
        x_torch = torch.from_numpy(x)
        torch_out, _ = multi_head_attention_forward_torch(
            query=x_torch,
            key=x_torch,
            value=x_torch,
            embed_dim_to_check=x_torch.shape[-1],
            num_heads=8,
            q_proj_weight=torch.from_numpy(q_proj_weight),
            k_proj_weight=torch.from_numpy(k_proj_weight),
            v_proj_weight=torch.from_numpy(v_proj_weight),
            in_proj_weight=None,
            in_proj_bias=torch.cat(
                [
                    torch.from_numpy(q_proj_bias),
                    torch.from_numpy(k_proj_bias),
                    torch.from_numpy(v_proj_bias),
                ]
            ),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=torch.from_numpy(c_proj_weight),
            out_proj_bias=torch.from_numpy(c_proj_bias),
            use_separate_proj_weight=True,
            training=True,
            need_weights=False,
        )
        # Same call through the oneflow implementation (CUDA tensors).
        x_flow = flow.from_numpy(x).cuda()
        flow_out, _ = multi_head_attention_forward_flow(
            query=x_flow,
            key=x_flow,
            value=x_flow,
            embed_dim_to_check=x_flow.shape[-1],
            num_heads=8,
            q_proj_weight=flow.from_numpy(q_proj_weight).cuda(),
            k_proj_weight=flow.from_numpy(k_proj_weight).cuda(),
            v_proj_weight=flow.from_numpy(v_proj_weight).cuda(),
            in_proj_weight=None,
            in_proj_bias=flow.cat(
                [
                    flow.from_numpy(q_proj_bias).cuda(),
                    flow.from_numpy(k_proj_bias).cuda(),
                    flow.from_numpy(v_proj_bias).cuda(),
                ]
            ),
            bias_k=None,
            bias_v=None,
            dropout_p=0,
            out_proj_weight=flow.from_numpy(c_proj_weight).cuda(),
            out_proj_bias=flow.from_numpy(c_proj_bias).cuda(),
            use_separate_proj_weight=True,
            training=True,
            need_weights=False,
        )
        # Dropout is 0 and inputs are float64, so exact agreement is expected.
        assert np.allclose(torch_out.numpy(), flow_out.numpy())
# Allow running this test module directly: ``python test_multi_head_attn.py``.
if __name__ == "__main__":
    unittest.main()
| 3,149 | 33.23913 | 100 | py |
libai | libai-main/projects/NeRF/datasets/nerf_dataset.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
import re
import sys
from collections import OrderedDict
from typing import Optional
import numpy as np
import oneflow as flow
from flowvision import transforms as T
from oneflow.utils.data import Dataset
from PIL import Image
from libai.data.structures import DistTensorData, Instance
def read_pfm(filename):
    """Read a PFM (Portable Float Map) image file.

    Parameters
    ----------
    filename : str
        Path to a ``PF`` (color) or ``Pf`` (grayscale) PFM file.

    Returns
    -------
    (numpy.ndarray, float)
        The image as (H, W, 3) or (H, W) float data in top-down row order,
        and the absolute scale factor from the header.

    Raises
    ------
    Exception
        If the header is not a valid PFM header.
    """
    # Use a context manager so the handle is closed even when a malformed
    # header raises (the original leaked the handle on every error path).
    with open(filename, "rb") as file:
        header = file.readline().decode("utf-8").rstrip()
        if header == "PF":
            color = True
        elif header == "Pf":
            color = False
        else:
            raise Exception("Not a PFM file.")
        dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("utf-8"))
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception("Malformed PFM header.")
        scale = float(file.readline().rstrip())
        if scale < 0:  # a negative scale marks little-endian pixel data
            endian = "<"
            scale = -scale
        else:
            endian = ">"  # big-endian
        data = np.fromfile(file, endian + "f")
    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    # PFM stores rows bottom-up; flip to conventional top-down order.
    data = np.flipud(data)
    return data, scale
def save_pfm(filename, image, scale=1):
    """Write a float32 image to ``filename`` in PFM format.

    Parameters
    ----------
    filename : str
        Output path.
    image : numpy.ndarray
        Float32 array of shape (H, W, 3) (color), (H, W, 1) or (H, W)
        (grayscale), in top-down row order.
    scale : float
        Scale factor written to the header; its sign is set from the host
        byte order (negative = little-endian), matching the PFM spec.

    Raises
    ------
    Exception
        If the dtype is not float32 or the shape is unsupported.
    """
    # Validate before creating the file (the original opened first, so a bad
    # input left an empty file behind and leaked the handle on raise).
    if image.dtype.name != "float32":
        raise Exception("Image dtype must be float32.")
    if len(image.shape) == 3 and image.shape[2] == 3:  # color image
        color = True
    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
        color = False
    else:
        raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
    # PFM stores rows bottom-up, so flip before writing.
    flipped = np.flipud(image)
    endian = image.dtype.byteorder
    # "and" binds tighter than "or": native ("=") byte order counts as
    # little-endian only on little-endian hosts.
    if endian == "<" or endian == "=" and sys.byteorder == "little":
        scale = -scale
    with open(filename, "wb") as file:
        file.write("PF\n".encode("utf-8") if color else "Pf\n".encode("utf-8"))
        file.write("{} {}\n".format(image.shape[1], image.shape[0]).encode("utf-8"))
        file.write(("%f\n" % scale).encode("utf-8"))
        flipped.tofile(file)
# Preparatory conversion tools for 3D rendering
def create_meshgrid(
    height: int,
    width: int,
    normalized_coordinates: bool = True,
    device: Optional[flow.device] = flow.device("cpu"),
    dtype: flow.dtype = flow.float32,
):
    """Generate a pixel coordinate grid for an image.

    Args:
        height: number of rows.
        width: number of columns.
        normalized_coordinates: when True, coordinates are rescaled to
            :math:`[-1, 1]` (grid_sample convention); otherwise they are
            integer pixel positions.
        device: device on which the grid is created.
        dtype: element type of the grid.

    Return:
        grid tensor with shape :math:`(1, H, W, 2)`, last dim = (x, y).
    """
    xs = flow.linspace(0, width - 1, width, device=device, dtype=dtype)
    ys = flow.linspace(0, height - 1, height, device=device, dtype=dtype)
    if normalized_coordinates:
        # Map [0, n-1] onto [-1, 1].
        xs = (xs / (width - 1) - 0.5) * 2
        ys = (ys / (height - 1) - 0.5) * 2
    # Stack into a (W, H, 2) grid, then reorder to (1, H, W, 2).
    grid = flow.stack(flow.meshgrid([xs, ys], indexing="ij"), dim=-1)
    return grid.permute(1, 0, 2).unsqueeze(0)
def get_rays(directions, c2w):
    """
    Get ray origins and directions in world coordinates for all pixels.

    Inputs:
        directions: (H, W, 3) precomputed ray directions in camera coordinate
        c2w: (3, 4) camera-to-world transformation matrix

    Outputs:
        rays_o: (H*W, 3), ray origins in world coordinate
        rays_d: (H*W, 3), ray directions in world coordinate
    """
    rotation = c2w[:, :3]
    camera_center = c2w[:, 3]
    # Rotate camera-frame directions into the world frame.
    world_dirs = directions @ rotation.T  # (H, W, 3)
    # Every ray originates at the camera center.
    world_origins = camera_center.expand(world_dirs.shape)  # (H, W, 3)
    return world_origins.view(-1, 3), world_dirs.view(-1, 3)
def get_ray_directions(H, W, focal):
    """
    Get ray directions for all pixels in camera coordinate.

    Inputs:
        H, W, focal: image height, width and focal length

    Outputs:
        directions: (H, W, 3) ray directions in camera coordinate
    """
    grid = create_meshgrid(H, W, normalized_coordinates=False)[0]
    i, j = grid.unbind(-1)
    i = flow.tensor(i.numpy())
    j = flow.tensor(j.numpy())
    # Pinhole model with the camera looking along -z: x right, y up.
    x = (i - W / 2) / focal
    y = -(j - H / 2) / focal
    z = -flow.ones_like(i)
    return flow.stack([x, y, z], -1)  # (H, W, 3)
def get_ndc_rays(H, W, focal, near, rays_o, rays_d):
    """
    Transform rays from world coordinates to NDC.

    NDC: space where the canvas is a cube with sides [-1, 1] on each axis.
    See https://github.com/bmild/nerf/files/4451808/ndc_derivation.pdf for
    the derivation. Use NDC only when the scene is unbounded (large depth).

    Inputs:
        H, W, focal: image height, width and focal length
        near: (N_rays) or float, depth of the near plane
        rays_o: (N_rays, 3), ray origins in world coordinates
        rays_d: (N_rays, 3), ray directions in world coordinates

    Outputs:
        rays_o: (N_rays, 3), ray origins in NDC
        rays_d: (N_rays, 3), ray directions in NDC
    """
    # Shift ray origins onto the near plane.
    t = -(near + rays_o[..., 2]) / rays_d[..., 2]
    rays_o = rays_o + t[..., None] * rays_d

    # Projection coefficients derived from the perspective matrix.
    w_factor = -1.0 / (W / (2.0 * focal))
    h_factor = -1.0 / (H / (2.0 * focal))

    # Intermediate homogeneous ratios.
    ox_oz = rays_o[..., 0] / rays_o[..., 2]
    oy_oz = rays_o[..., 1] / rays_o[..., 2]

    o0 = w_factor * ox_oz
    o1 = h_factor * oy_oz
    o2 = 1.0 + 2.0 * near / rays_o[..., 2]

    d0 = w_factor * (rays_d[..., 0] / rays_d[..., 2] - ox_oz)
    d1 = h_factor * (rays_d[..., 1] / rays_d[..., 2] - oy_oz)
    d2 = 1 - o2

    return flow.stack([o0, o1, o2], -1), flow.stack([d0, d1, d2], -1)
def normalize(v):
    """Return *v* scaled to unit Euclidean length."""
    length = np.linalg.norm(v)
    return v / length
def average_poses(poses):
    """
    Compute the average camera pose of a set of poses (used by @center_poses).

    Construction:
    1. center  = mean of all camera centers
    2. z axis  = normalized mean of the z axes
    3. y'      = mean of the y axes (only a hint; not orthogonal to z yet)
    4. x axis  = normalize(y' cross z)
    5. y axis  = z cross x (already unit length since z and x are)

    Inputs:
        poses: (N_images, 3, 4)
    Outputs:
        pose_avg: (3, 4) the average pose
    """

    def _unit(vec):
        return vec / np.linalg.norm(vec)

    center = poses[..., 3].mean(0)  # (3) mean camera center
    z_axis = _unit(poses[..., 2].mean(0))  # (3) mean viewing direction, normalized
    y_hint = poses[..., 1].mean(0)  # (3) average up hint (not yet orthogonal to z)
    x_axis = _unit(np.cross(y_hint, z_axis))  # (3)
    y_axis = np.cross(z_axis, x_axis)  # (3) unit by construction
    return np.stack([x_axis, y_axis, z_axis, center], 1)  # (3, 4)
def center_poses(poses):
    """
    Center the poses so that we can use NDC.
    See https://github.com/bmild/nerf/issues/34
    Inputs:
        poses: (N_images, 3, 4)
    Outputs:
        poses_centered: (N_images, 3, 4) the centered poses
        (4, 4): the inverse of the homogeneous average pose, i.e. the
            transform that was applied to center the poses
    """
    pose_avg = average_poses(poses)  # (3, 4)
    pose_avg_homo = np.eye(4)
    pose_avg_homo[:3] = pose_avg  # convert to homogeneous coordinate for faster computation
    # by simply adding 0, 0, 0, 1 as the last row
    last_row = np.tile(np.array([0, 0, 0, 1]), (len(poses), 1, 1))  # (N_images, 1, 4)
    poses_homo = np.concatenate([poses, last_row], 1)  # (N_images, 4, 4) homogeneous coordinate
    poses_centered = np.linalg.inv(pose_avg_homo) @ poses_homo  # (N_images, 4, 4)
    poses_centered = poses_centered[:, :3]  # (N_images, 3, 4)
    return poses_centered, np.linalg.inv(pose_avg_homo)
def create_spiral_poses(radii, focus_depth, n_poses=120):
    """
    Computes poses that follow a spiral path for rendering purpose.
    See https://github.com/Fyusion/LLFF/issues/19
    In particular, the path looks like:
    https://tinyurl.com/ybgtfns3
    Inputs:
        radii: (3) radii of the spiral for each axis
        focus_depth: float, the depth that the spiral poses look at
        n_poses: int, number of poses to create along the path
    Outputs:
        poses_spiral: (n_poses, 3, 4) the poses in the spiral path
    """

    def _unit(vec):
        return vec / np.linalg.norm(vec)

    # parameter runs over 4*pi, i.e. two full rotations
    angles = np.linspace(0, 4 * np.pi, n_poses + 1)[:-1]
    poses = []
    for t in angles:
        # parametric spiral position, scaled per-axis by `radii`
        center = np.array([np.cos(t), -np.sin(t), -np.sin(0.5 * t)]) * radii
        # viewing z axis points from the plane at depth `focus_depth` to `center`
        z = _unit(center - np.array([0, 0, -focus_depth]))
        # build the remaining axes exactly as in @average_poses
        y_hint = np.array([0, 1, 0])  # (3)
        x = _unit(np.cross(y_hint, z))  # (3)
        y = np.cross(z, x)  # (3)
        poses.append(np.stack([x, y, z, center], 1))  # (3, 4)
    return np.stack(poses, 0)  # (n_poses, 3, 4)
def spheric_pose(theta, phi, radius):
    """Return a (3, 4) camera-to-world pose on a sphere around the origin.

    The pose is assembled as rot_theta(theta) @ rot_phi(phi) @ trans(radius)
    followed by a fixed axis permutation/flip, matching the original layout.
    """
    translation = np.array(
        [
            [1, 0, 0, 0],
            [0, 1, 0, -0.9 * radius],
            [0, 0, 1, radius],
            [0, 0, 0, 1],
        ]
    )
    cp, sp = np.cos(phi), np.sin(phi)
    pitch = np.array(
        [
            [1, 0, 0, 0],
            [0, cp, -sp, 0],
            [0, sp, cp, 0],
            [0, 0, 0, 1],
        ]
    )
    ct, st = np.cos(theta), np.sin(theta)
    yaw = np.array(
        [
            [ct, 0, -st, 0],
            [0, 1, 0, 0],
            [st, 0, ct, 0],
            [0, 0, 0, 1],
        ]
    )
    # fixed change of basis (flip x, swap y/z) applied last
    flip = np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
    c2w = flip @ (yaw @ pitch @ translation)
    return c2w[:3]
def create_spheric_poses(radius, n_poses=120):
    """
    Create circular poses around z axis.
    Inputs:
        radius: the (negative) height and the radius of the circle.
        n_poses: number of evenly spaced poses along the full circle
    Outputs:
        spheric_poses: (n_poses, 3, 4) the poses in the circular path
    """
    angles = np.linspace(0, 2 * np.pi, n_poses + 1)[:-1]
    # every pose pitches down by pi/5 (36 degrees)
    return np.stack([spheric_pose(a, -np.pi / 5, radius) for a in angles], 0)
def pose_spherical(theta, phi, radius):
    """
    Build a (4, 4) camera-to-world matrix on a sphere around the origin
    (OneFlow tensor version; used for Blender render/vis poses).
    Inputs:
        theta: azimuth angle in degrees
        phi: elevation angle in degrees
        radius: distance of the camera from the origin
    Outputs:
        c2w: (4, 4) flow.Tensor camera-to-world matrix
    """
    def trans_t(t):
        # translation by t along the z axis
        return flow.Tensor(
            [
                [1, 0, 0, 0],
                [0, 1, 0, 0],
                [0, 0, 1, t],
                [0, 0, 0, 1],
            ]
        ).float()
    def rot_phi(phi):
        # rotation about the x axis (elevation), phi in radians
        return flow.Tensor(
            [
                [1, 0, 0, 0],
                [0, np.cos(phi), -np.sin(phi), 0],
                [0, np.sin(phi), np.cos(phi), 0],
                [0, 0, 0, 1],
            ]
        ).float()
    def rot_theta(th):
        # rotation about the y axis (azimuth), th in radians
        return flow.Tensor(
            [
                [np.cos(th), 0, -np.sin(th), 0],
                [0, 1, 0, 0],
                [np.sin(th), 0, np.cos(th), 0],
                [0, 0, 0, 1],
            ]
        ).float()
    c2w = trans_t(radius)
    c2w = rot_phi(phi / 180.0 * np.pi) @ c2w
    c2w = rot_theta(theta / 180.0 * np.pi) @ c2w
    # final fixed axis permutation/flip (flip x, swap y/z)
    c2w = flow.Tensor(np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])) @ c2w
    return c2w
def viewmatrix(z, up, pos):
    """Build a (3, 4) camera matrix [x | y | z | pos] looking along `z`.

    `up` is only a hint: the actual y axis is re-orthogonalized against the
    normalized forward direction.
    """

    def _unit(vec):
        return vec / np.linalg.norm(vec)

    forward = _unit(z)
    right = _unit(np.cross(up, forward))
    true_up = _unit(np.cross(forward, right))
    return np.stack([right, true_up, forward, pos], 1)
def render_path_spiral(c2w, hwf, up, rads, focal, zdelta, zrate, rots, N):
    """
    Generate N poses along an LLFF-style spiral around the average pose.
    Inputs:
        c2w: (3, 4) pose to spiral around
        hwf: (3,) image height, width and focal, appended as a 5th column
        up: (3) up vector passed to @viewmatrix
        rads: (3) per-axis spiral radii (a homogeneous 1 is appended below)
        focal: depth of the point every generated pose looks at
        zdelta: unused in this implementation (kept for LLFF API parity)
        zrate: frequency multiplier of the oscillation along z
        rots: number of full rotations
        N: number of poses to generate
    Outputs:
        list of N (3, 5) arrays, each [viewmatrix | hwf]
    """
    render_poses = []
    hwf = hwf[:, None]
    rads = np.array(list(rads) + [1.0])
    for theta in np.linspace(0.0, 2.0 * np.pi * rots, N + 1)[:-1]:
        # camera center on the spiral, expressed in the frame of c2w
        c = np.dot(
            c2w, np.array([np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.0]) * rads
        )
        # look at the point at depth `focal` in front of c2w
        z = normalize(c - np.dot(c2w, np.array([0, 0, -focal, 1.0])))
        render_poses.append(np.concatenate([viewmatrix(z, up, c), hwf], 1))
    return render_poses
# Blender and LLFF Datasets
def trun_dict_to_instance(dict):
    """Convert a plain dict of array-likes into a libai `Instance`.

    Every value is wrapped as `DistTensorData(flow.tensor(value))`.
    NOTE: the parameter name shadows the builtin `dict`.
    """
    return Instance(**{key: DistTensorData(flow.tensor(value)) for key, value in dict.items()})
class NerfBaseDataset(Dataset):
    """Common base for NeRF datasets: stores root/split/size and a ToTensor transform."""

    def __init__(self, root_dir, split, img_wh):
        """
        Args:
            root_dir: dataset root directory
            split: dataset split name (e.g. "train" / "val" / "test" / "vis")
            img_wh: (width, height) images are resized to
        """
        super(NerfBaseDataset, self).__init__()
        self.root_dir = root_dir
        self.split = split
        self.img_wh = img_wh
        self.transform = T.Compose([T.ToTensor()])
        # NOTE: process-wide side effect — disables OneFlow view ops.
        os.environ["ONEFLOW_DISABLE_VIEW"] = "true"

    def load_meta(self):
        # overridden by subclasses
        pass
class BlenderDataset(NerfBaseDataset):
    """NeRF synthetic (Blender) dataset of posed RGBA images.

    "train" precomputes all rays/RGBs into flat buffers and yields a random
    batch of rays per item; "val"/"test" yield one full image of rays per
    item; "vis" yields rays for generated spherical render poses.
    """

    def __init__(self, root_dir, split="train", img_wh=(800, 800), batchsize=1024, **kwargs):
        """
        Args:
            root_dir: str, directory containing transforms_*.json and images
            split: str, one of "train" / "val" / "test" / "vis"
            img_wh: tuple, (width, height) images are resized to
            batchsize: int, number of rays per training item
        """
        super(BlenderDataset, self).__init__(root_dir, split, img_wh)
        self.white_back = True
        self.batchsize = batchsize
        self.load_meta()
        # 40 poses on a sphere (elevation -30 deg, radius 4), used for "vis"
        self.render_poses = flow.stack(
            [pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180, 180, 40 + 1)[:-1]], 0
        )  # use for test

    def load_meta(self):
        """Parse the transforms json; for "train", precompute all rays and RGBs."""
        if self.split == "vis":
            with open(os.path.join(self.root_dir, "transforms_train.json"), "r") as f:
                self.meta = json.load(f)
        else:
            with open(os.path.join(self.root_dir, f"transforms_{self.split}.json"), "r") as f:
                self.meta = json.load(f)
        w, h = self.img_wh
        camera_angle_x = float(self.meta["camera_angle_x"])
        # focal length from the horizontal field of view
        self.focal = 0.5 * w / np.tan(0.5 * camera_angle_x)
        self.near = 2.0
        self.far = 6.0
        self.bounds = np.array([self.near, self.far])
        self.directions = get_ray_directions(h, w, self.focal)  # (h, w, 3)
        if self.split == "train":  # create buffer of all rays and rgb data
            self.image_paths = []
            self.poses = []
            self.all_rays = []
            self.all_rgbs = []
            # start offset of each image's rays inside the flat buffers
            self.indexs = [
                i * self.img_wh[0] * self.img_wh[1] for i in range(len(self.meta["frames"]))
            ]
            for frame in self.meta["frames"]:
                pose = np.array(frame["transform_matrix"])[:3, :4]
                self.poses += [pose]
                c2w = flow.Tensor(pose)
                image_path = os.path.join(self.root_dir, f"{frame['file_path']}.png")
                self.image_paths += [image_path]
                img = Image.open(image_path)
                img = img.resize(self.img_wh, Image.LANCZOS)
                img = self.transform(img)  # (4, h, w)
                img = img.view(4, -1).permute(1, 0)  # (h*w, 4) RGBA
                img = img[:, :3] * img[:, -1:] + (1 - img[:, -1:])  # blend A to RGB (white bg)
                self.all_rgbs += [img]
                rays_o, rays_d = get_rays(self.directions, c2w)  # both (h*w, 3)
                self.all_rays += [
                    flow.cat(
                        [
                            rays_o,
                            rays_d,
                            self.near * flow.ones_like(rays_o[:, :1]),
                            self.far * flow.ones_like(rays_o[:, :1]),
                        ],
                        1,
                    )
                ]  # (h*w, 8)
            self.all_rays = flow.cat(self.all_rays, 0)  # (len(self.meta['frames'])*h*w, 8)
            self.all_rgbs = flow.cat(self.all_rgbs, 0)  # (len(self.meta['frames'])*h*w, 3)
            self.num_iter = 0
            # half-size of the central crop used for precrop warm-up sampling
            self.dH = int(self.img_wh[0] // 2 * 0.5)
            self.dW = int(self.img_wh[1] // 2 * 0.5)

    def __len__(self):
        """Number of items per epoch, depending on the split."""
        if self.split == "train":
            return int(len(self.all_rays) / self.batchsize)
        elif self.split == "val":
            return 8  # only validate 8 images (to support <=8 gpus)
        elif self.split == "vis":
            return len(self.render_poses)
        elif self.split == "test":
            return len(self.meta["frames"])

    def __getitem__(self, idx):
        """Return an Instance: a random ray batch ("train") or one image's rays."""
        if self.split == "train":  # use data in the buffers
            idx = idx % len(self.indexs)
            if self.num_iter < 500:
                # precrop warm-up: for the first 500 iterations, sample only
                # from the central crop of the image (alignment with nerf_pytorch)
                coords = flow.stack(
                    flow.meshgrid(
                        flow.linspace(
                            self.img_wh[1] // 2 - self.dH,
                            self.img_wh[1] // 2 + self.dH - 1,
                            2 * self.dH,
                        ),
                        flow.linspace(
                            self.img_wh[0] // 2 - self.dW,
                            self.img_wh[0] // 2 + self.dW - 1,
                            2 * self.dW,
                        ),
                    ),
                    -1,
                )
            else:
                coords = flow.stack(
                    flow.meshgrid(
                        flow.linspace(0, self.img_wh[1] - 1, self.img_wh[1]),
                        flow.linspace(0, self.img_wh[0] - 1, self.img_wh[0]),
                    ),
                    -1,
                )  # (H, W, 2)
            coords = flow.reshape(coords, [-1, 2])  # (H * W, 2)
            select_inds = np.random.choice(
                coords.shape[0], size=[self.batchsize], replace=False
            )  # (N_rand,)
            select_coords = coords[select_inds].long()  # (N_rand, 2)
            # slice out this image's rays/rgbs from the flat buffers
            rays = self.all_rays[
                self.indexs[idx] : self.indexs[idx] + self.img_wh[0] * self.img_wh[1]
            ]
            rgbs = self.all_rgbs[
                self.indexs[idx] : self.indexs[idx] + self.img_wh[0] * self.img_wh[1]
            ]
            rays = rays.view(self.img_wh[1], self.img_wh[0], -1)
            rgbs = rgbs.view(self.img_wh[1], self.img_wh[0], -1)
            rays = rays[select_coords[:, 0], select_coords[:, 1]]  # (N_rand, 8)
            rgbs = rgbs[select_coords[:, 0], select_coords[:, 1]]  # (N_rand, 3)
            self.num_iter += 1
            sample = OrderedDict(rays=rays, rgbs=rgbs)  # an alignment point with nerf_pytorch
        elif self.split == "val" or self.split == "test":  # create data for each image separately
            frame = self.meta["frames"][idx]
            c2w = flow.Tensor(frame["transform_matrix"])[:3, :4]
            img = Image.open(os.path.join(self.root_dir, f"{frame['file_path']}.png"))
            img = img.resize(self.img_wh, Image.LANCZOS)
            img = self.transform(img)  # (4, H, W)
            valid_mask = (img[-1] > 0).flatten()  # (H*W) valid color area
            img = img.view(4, -1).permute(1, 0)  # (H*W, 4) RGBA
            img = img[:, :3] * img[:, -1:] + (1 - img[:, -1:])  # blend A to RGB
            rays_o, rays_d = get_rays(self.directions, c2w)
            rays = flow.concat(
                [
                    rays_o,
                    rays_d,
                    self.near * flow.ones_like(rays_o[:, :1]),
                    self.far * flow.ones_like(rays_o[:, :1]),
                ],
                1,
            )  # (H*W, 8)
            sample = OrderedDict(rays=rays, rgbs=img, c2w=c2w, valid_mask=valid_mask)
        else:
            # "vis": rays for a generated spherical pose, no ground-truth RGB
            c2w = self.render_poses[idx][:3, :4]
            rays_o, rays_d = get_rays(self.directions, c2w)
            rays = flow.concat(
                [
                    rays_o,
                    rays_d,
                    self.near * flow.ones_like(rays_o[:, :1]),
                    self.far * flow.ones_like(rays_o[:, :1]),
                ],
                1,
            )  # (H*W, 8)
            sample = OrderedDict(rays=rays, c2w=c2w)
        return trun_dict_to_instance(sample)
class LLFFDataset(NerfBaseDataset):
    """LLFF real-scene dataset built from COLMAP poses (poses_bounds.npy).

    Poses are recentered around their average; for forward-facing scenes the
    rays are additionally mapped to NDC. "train" yields random ray batches,
    "val"/"test" yield one pose's rays per item, "vis" follows a spiral path.
    """

    def __init__(
        self,
        root_dir,
        split="train",
        img_wh=(504, 378),
        spheric_poses=False,
        val_num=1,
        batchsize=1024,
    ):
        """
        Args:
            root_dir: str, directory containing poses_bounds.npy and images/
            split: str, one of "train" / "val" / "test" / "vis"
            img_wh: tuple, (width, height) images are resized to
            spheric_poses: bool, whether the images are taken in a spheric inward-facing manner
                           default: False (forward-facing)
            val_num: int, number of val images (used for multigpu training, validate same image
                     for all gpus)
            batchsize: int, batchsize of rays
        """
        super(LLFFDataset, self).__init__(root_dir, split, img_wh)
        self.spheric_poses = spheric_poses
        self.val_num = max(1, val_num)  # at least 1
        self.batchsize = batchsize
        self.load_meta()
        # build render_poses for inference
        up = normalize(self.poses[:, :3, 1].sum(0))
        tt = self.poses[:, :3, 3]
        rads = np.percentile(np.abs(tt), 90, 0)  # spiral radii: 90th pct of |centers|
        close_depth, inf_depth = self.bounds.min() * 0.9, self.bounds.max() * 5.0
        dt = 0.75
        # focus depth: blend of near/far reciprocal depths
        focal = 1.0 / (((1.0 - dt) / close_depth + dt / inf_depth))
        zdelta = close_depth * 0.2
        N_views = 120
        N_rots = 2
        hwf = self.hwf
        center = self.poses[:, :3, 3].mean(0)
        vec2 = normalize(self.poses[:, :3, 2].sum(0))
        up = self.poses[:, :3, 1].sum(0)  # NOTE: overwrites the normalized `up` above
        c2w = viewmatrix(vec2, up, center)
        self.render_poses = flow.Tensor(
            render_path_spiral(
                c2w, hwf, normalize(up), rads, focal, zdelta, zrate=0.5, rots=N_rots, N=N_views
            )
        )  # use for test
        self.white_back = False

    def load_meta(self):
        """Load COLMAP poses/bounds, recenter and rescale, then prepare
        per-split data (ray buffers for "train", pose paths otherwise)."""
        poses_bounds = np.load(os.path.join(self.root_dir, "poses_bounds.npy"))  # (N_images, 17)
        self.image_paths = sorted(glob.glob(os.path.join(self.root_dir, "images/*")))
        if self.split in ["train", "val"]:
            assert len(poses_bounds) == len(
                self.image_paths
            ), "Mismatch between number of images and number of poses! Please rerun COLMAP!"
        poses = poses_bounds[:, :15].reshape(-1, 3, 5)  # (N_images, 3, 5)
        self.bounds = poses_bounds[:, -2:]  # (N_images, 2)
        H, W, self.focal = poses[0, :, -1]  # original intrinsics, same for all images
        H, W, self.focal = H.item(), W.item(), self.focal.item()
        assert (
            H * self.img_wh[0] == W * self.img_wh[1]
        ), f"You must set @img_wh to have the same aspect ratio as ({W}, {H}) !"
        self.focal *= self.img_wh[0] / W  # rescale focal to the target resolution
        # swap axes: new x = old y, new y = -old x, keep z (LLFF axis convention)
        poses = np.concatenate([poses[..., 1:2], -poses[..., :1], poses[..., 2:4]], -1)
        # (N_images, 3, 4) exclude H, W, focal
        self.poses, self.pose_avg = center_poses(poses)
        distances_from_center = np.linalg.norm(self.poses[..., 3], axis=1)
        val_idx = np.argmin(distances_from_center)  # choose val image as the closest to center
        # rescale so the near bound is slightly above 1 (NDC-friendly)
        near_original = self.bounds.min()
        scale_factor = near_original * 0.75  # 0.75 is the default parameter
        self.bounds /= scale_factor
        self.poses[..., 3] /= scale_factor
        self.directions = get_ray_directions(
            self.img_wh[1], self.img_wh[0], self.focal
        )  # (H, W, 3)
        self.hwf = np.array([self.img_wh[1], self.img_wh[0], self.focal])
        if self.split == "train":  # create buffer of all rays and rgb data
            # use first N_images-1 to train, the LAST is val
            self.all_rays = []
            self.all_rgbs = []
            # start offset of each image's rays inside the flat buffers
            self.indexs = [
                i * self.img_wh[0] * self.img_wh[1] for i in range(len(self.image_paths) - 1)
            ]
            for i, image_path in enumerate(self.image_paths):
                if i == val_idx:  # exclude the val image
                    continue
                c2w = flow.Tensor(self.poses[i])
                img = Image.open(image_path).convert("RGB")
                assert (
                    img.size[1] * self.img_wh[0] == img.size[0] * self.img_wh[1]
                ), f"{image_path} has different aspect ratio than img_wh, please check your data!"
                img = img.resize(self.img_wh, Image.LANCZOS)
                img = self.transform(img)  # (3, h, w)
                img = img.view(3, -1).permute(1, 0)  # (h*w, 3) RGB
                self.all_rgbs += [img]
                rays_o, rays_d = get_rays(self.directions, c2w)  # both (h*w, 3)
                if not self.spheric_poses:
                    # forward-facing: map rays to NDC; depth range becomes [0, 1]
                    near, far = 0, 1
                    rays_o, rays_d = get_ndc_rays(
                        self.img_wh[1], self.img_wh[0], self.focal, 1.0, rays_o, rays_d
                    )
                else:
                    near = self.bounds.min()
                    far = min(8 * near, self.bounds.max())  # focus on central object only
                self.all_rays += [
                    flow.concat(
                        [
                            rays_o,
                            rays_d,
                            near * flow.ones_like(rays_o[:, :1]),
                            far * flow.ones_like(rays_o[:, :1]),
                        ],
                        1,
                    )
                ]  # (h*w, 8)
            self.all_rays = flow.cat(self.all_rays, 0)  # ((N_images-1)*h*w, 8)
            self.all_rgbs = flow.cat(self.all_rgbs, 0)  # ((N_images-1)*h*w, 3)
            self.num_iter = 0
            # half-size of the central crop used for precrop warm-up sampling
            self.dH = int(self.img_wh[0] // 2 * 0.5)
            self.dW = int(self.img_wh[1] // 2 * 0.5)
        elif self.split == "val":
            self.c2w_val = self.poses[val_idx]
            self.image_path_val = self.image_paths[val_idx]
        else:  # for testing, create a parametric rendering path
            if self.split.endswith("train"):  # test on training set
                self.poses_test = self.poses
            elif not self.spheric_poses:
                focus_depth = 3.5  # hardcoded, this is numerically close to the formula
                # given in the original repo. Mathematically if near=1
                # and far=infinity, then this number will converge to 4
                radii = np.percentile(np.abs(self.poses[..., 3]), 90, axis=0)
                self.poses_test = create_spiral_poses(radii, focus_depth)
            else:
                radius = 1.1 * self.bounds.min()
                self.poses_test = create_spheric_poses(radius)

    def __len__(self):
        """Number of items per epoch, depending on the split."""
        if self.split == "train":
            return int(len(self.all_rays) / self.batchsize)
        elif self.split == "vis":
            return len(self.render_poses)
        elif self.split == "val":
            return self.val_num
        elif self.split == "test":
            return len(self.poses_test)

    def __getitem__(self, idx):
        """Return an Instance: a random ray batch ("train") or one pose's rays."""
        if self.split == "train":  # use data in the buffers
            idx = idx % len(self.indexs)
            if self.num_iter < 500:
                # precrop warm-up: sample only from the central crop for the
                # first 500 iterations (alignment with nerf_pytorch)
                coords = flow.stack(
                    flow.meshgrid(
                        flow.linspace(
                            self.img_wh[1] // 2 - self.dH,
                            self.img_wh[1] // 2 + self.dH - 1,
                            2 * self.dH,
                        ),
                        flow.linspace(
                            self.img_wh[0] // 2 - self.dW,
                            self.img_wh[0] // 2 + self.dW - 1,
                            2 * self.dW,
                        ),
                    ),
                    -1,
                )
            else:
                coords = flow.stack(
                    flow.meshgrid(
                        flow.linspace(0, self.img_wh[1] - 1, self.img_wh[1]),
                        flow.linspace(0, self.img_wh[0] - 1, self.img_wh[0]),
                    ),
                    -1,
                )  # (H, W, 2)
            coords = flow.reshape(coords, [-1, 2])  # (H * W, 2)
            select_inds = np.random.choice(
                coords.shape[0], size=[self.batchsize], replace=False
            )  # (N_rand,)
            select_coords = coords[select_inds].long()  # (N_rand, 2)
            # slice out this image's rays/rgbs from the flat buffers
            rays = self.all_rays[
                self.indexs[idx] : self.indexs[idx] + self.img_wh[0] * self.img_wh[1]
            ]
            rgbs = self.all_rgbs[
                self.indexs[idx] : self.indexs[idx] + self.img_wh[0] * self.img_wh[1]
            ]
            rays = rays.view(self.img_wh[1], self.img_wh[0], -1)
            rgbs = rgbs.view(self.img_wh[1], self.img_wh[0], -1)
            rays = rays[select_coords[:, 0], select_coords[:, 1]]  # (N_rand, 8)
            rgbs = rgbs[select_coords[:, 0], select_coords[:, 1]]  # (N_rand, 3)
            self.num_iter += 1
            sample = OrderedDict(rays=rays, rgbs=rgbs)  # an alignment point with nerf_pytorch
        elif self.split in ["val", "test"]:
            if self.split == "val":
                c2w = flow.Tensor(self.c2w_val)
            else:
                c2w = flow.Tensor(self.poses_test[idx])
            rays_o, rays_d = get_rays(self.directions, c2w)
            if not self.spheric_poses:
                near, far = 0, 1
                rays_o, rays_d = get_ndc_rays(
                    self.img_wh[1], self.img_wh[0], self.focal, 1.0, rays_o, rays_d
                )
            else:
                near = self.bounds.min()
                far = min(8 * near, self.bounds.max())
            rays = flow.cat(
                [
                    rays_o,
                    rays_d,
                    near * flow.ones_like(rays_o[:, :1]),
                    far * flow.ones_like(rays_o[:, :1]),
                ],
                1,
            )  # (h*w, 8)
            sample = OrderedDict(rays=rays, c2w=c2w)
            if self.split == "val":
                # attach the ground-truth image for validation
                img = Image.open(self.image_path_val).convert("RGB")
                img = img.resize(self.img_wh, Image.LANCZOS)
                img = self.transform(img)  # (3, h, w)
                img = img.view(3, -1).permute(1, 0)  # (h*w, 3)
                sample["rgbs"] = img
        else:
            # "vis": rays along the precomputed spiral render path
            c2w = self.render_poses[idx][:3, :4]
            rays_o, rays_d = get_rays(self.directions, c2w)
            if not self.spheric_poses:
                near, far = 0, 1
                rays_o, rays_d = get_ndc_rays(
                    self.img_wh[1], self.img_wh[0], self.focal, 1.0, rays_o, rays_d
                )
            else:
                near = self.bounds.min()
                far = min(8 * near, self.bounds.max())
            rays = flow.concat(
                [
                    rays_o,
                    rays_d,
                    near * flow.ones_like(rays_o[:, :1]),
                    far * flow.ones_like(rays_o[:, :1]),
                ],
                1,
            )  # (H*W, 8)
            sample = OrderedDict(rays=rays, c2w=c2w)
        return trun_dict_to_instance(sample)
| 32,893 | 36.379545 | 99 | py |
libai | libai-main/projects/NeRF/modeling/NeRF.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
import oneflow.nn as nn
import oneflow.nn.functional as F
from libai.utils import distributed as dist
class Embedding(nn.Module):
    """Positional (frequency) encoding module for NeRF inputs."""

    def __init__(self, in_channels, N_freqs, logscale=True):
        """
        Defines a function that embeds x to (x, sin(2^k x), cos(2^k x), ...)
        in_channels: number of input channels (3 for both xyz and direction)
        N_freqs: number of frequency bands
        logscale: if True, frequencies are 2^0 .. 2^(N_freqs-1); otherwise
            they are linearly spaced in [1, 2^(N_freqs-1)]
        """
        super(Embedding, self).__init__()
        self.N_freqs = N_freqs
        self.in_channels = in_channels
        self.funcs = [flow.sin, flow.cos]
        # sin and cos per frequency per input channel, plus the identity term
        self.out_channels = in_channels * (len(self.funcs) * N_freqs + 1)
        if logscale:
            freq_bands = 2 ** flow.linspace(0, N_freqs - 1, N_freqs)
        else:
            # NOTE(review): only this branch moves to GPU (.cuda()) — confirm intended.
            freq_bands = flow.linspace(1, 2 ** (N_freqs - 1), N_freqs).cuda()
        # register as a non-persistent buffer, made global (broadcast SBP) so
        # it follows LiBai's distributed placement
        self.register_buffer(
            "freq_bands",
            freq_bands.to_global(
                placement=dist.get_layer_placement(0),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            ),
            persistent=False,
        )

    def forward(self, x):
        """
        Embeds x to (x, sin(2^k x), cos(2^k x), ...)
        Different from the paper, "x" is also in the output
        See https://github.com/bmild/nerf/issues/12
        Inputs:
            x (Tensor): (B, self.in_channels)
        Outputs:
            out (Tensor): (B, self.out_channels)
        """
        out = [x]
        for freq in self.freq_bands:
            for func in self.funcs:
                m = func(freq * x)
                out += [m]
        return flow.cat(out, -1)
class NeRF(nn.Module):  # an alignment point with nerf_pytorch
    def __init__(
        self, D=8, W=256, input_ch=63, input_ch_views=27, output_ch=5, skips=[4], use_viewdirs=True
    ):
        """
        D: number of layers for density (sigma) encoder
        W: number of hidden units in each layer
        input_ch: number of input channels for xyz (3+3*10*2=63 by default)
        input_ch_views: number of input channels for direction (3+3*4*2=27 by default)
        output_ch: number of output channels (only used when use_viewdirs is False)
        skips: add skip connection in the Dth layer
            (NOTE: mutable default list; it is never mutated here)
        use_viewdirs: if True, sigma depends on position only and rgb also on
            the viewing direction
        """
        super(NeRF, self).__init__()
        self.D = D
        self.W = W
        self.input_ch = input_ch
        self.input_ch_views = input_ch_views
        self.skips = skips
        self.use_viewdirs = use_viewdirs
        # xyz encoder: layers listed in `skips` re-concatenate the raw
        # position encoding, hence their wider input (W + input_ch)
        self.pts_linears = nn.ModuleList(
            [nn.Linear(input_ch, W)]
            + [
                nn.Linear(W, W) if i not in self.skips else nn.Linear(W + input_ch, W)
                for i in range(D - 1)
            ]
        )
        self.views_linears = nn.ModuleList([nn.Linear(input_ch_views + W, W // 2)])
        if use_viewdirs:
            self.feature_linear = nn.Linear(W, W)
            self.alpha_linear = nn.Linear(W, 1)
            self.rgb_linear = nn.Linear(W // 2, 3)
        else:
            self.output_linear = nn.Linear(W, output_ch)

    def forward(self, x, sigma_only=False):
        """
        Encodes input (xyz+dir) to rgb+sigma (not ready to render yet).
        For rendering this ray, please see rendering.py
        Inputs:
            x (Tensor): (B, self.in_channels_xyz+self.in_channels_dir)
                the embedded vector of position and direction
            sigma_only (bool): whether to infer sigma only. If True,
                x is of shape (B, self.in_channels_xyz)
        Outputs:
            if sigma_only:
                sigma (Tensor): (B, 1) sigma
            else:
                out (Tensor): (B, 4), rgb and sigma
        """
        if not sigma_only:
            input_pts, input_views = flow.split(x, [self.input_ch, self.input_ch_views], dim=-1)
        else:
            input_pts = x
        h = input_pts
        for i, l in enumerate(self.pts_linears):
            h = self.pts_linears[i](h)
            h = F.relu(h)
            if i in self.skips:
                # skip connection: re-concatenate the raw position encoding
                h = flow.cat([input_pts, h], -1)
        if self.use_viewdirs:
            alpha = self.alpha_linear(h)
            if sigma_only:
                return alpha
            feature = self.feature_linear(h)
            h = flow.cat([feature, input_views], -1)
            for i, l in enumerate(self.views_linears):
                h = self.views_linears[i](h)
                h = F.relu(h)
            rgb = self.rgb_linear(h).sigmoid()  # sigmoid keeps rgb in (0, 1)
            outputs = flow.cat([rgb, alpha], -1)
        else:
            outputs = self.output_linear(h)
        return outputs
| 5,175 | 34.210884 | 99 | py |
libai | libai-main/projects/QQP/modeling/load_megatron_weight.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import oneflow as flow
import torch
import libai.utils.distributed as dist
from libai.utils.checkpoint import get_missing_parameters_message, get_unexpected_parameters_message
logger = logging.getLogger("libai." + __name__)
def convert_tensor(tensor: torch.Tensor):
    """Convert a PyTorch tensor to a float32 OneFlow tensor.

    The tensor is cast to float32, moved to CPU, and rebuilt from its numpy
    buffer (no autograd history is carried over).
    """
    return flow.Tensor(tensor.float().cpu().numpy())
def change_megatron_key(state_dict):
    """Map a Megatron-LM BERT state dict onto LiBai parameter names.

    Returns a new dict of OneFlow tensors keyed by LiBai module paths,
    covering the embeddings, the transformer encoder, the pooler, the LM
    head and the binary (next-sentence) head.
    """
    of_state_dict = {}
    # Language model.
    language_model = state_dict["language_model"]
    # Embedding.
    embedding = language_model["embedding"]
    of_state_dict["embeddings.vocab_embeddings.weight"] = convert_tensor(
        embedding["word_embeddings"]["weight"]
    )
    of_state_dict["embeddings.position_embeddings.weight"] = convert_tensor(
        embedding["position_embeddings"]["weight"]
    )
    of_state_dict["embeddings.tokentype_embeddings.weight"] = convert_tensor(
        embedding["tokentype_embeddings"]["weight"]
    )
    # Encoder.
    encoder = language_model["encoder"]
    for key, value in encoder.items():
        # Change layers.0.input_layernorm.weight -> encoders.0.input_layernorm.weight
        key = "encoders." + key.replace("layers.", "")
        if key.startswith("encoders.final_layernorm"):
            # final layernorm lives at the top level, not under `encoders`
            key = key.replace("encoders.", "")
        of_state_dict[key] = convert_tensor(value)
    # Pooler.
    pooler = language_model["pooler"]
    of_state_dict["pooler.dense.weight"] = convert_tensor(pooler["dense.weight"])
    of_state_dict["pooler.dense.bias"] = convert_tensor(pooler["dense.bias"])
    # LM head.
    lm_head = state_dict["lm_head"]
    of_state_dict["cls.predictions.dense.weight"] = convert_tensor(lm_head["dense.weight"])
    of_state_dict["cls.predictions.dense.bias"] = convert_tensor(lm_head["dense.bias"])
    of_state_dict["cls.predictions.layernorm.weight"] = convert_tensor(lm_head["layernorm.weight"])
    of_state_dict["cls.predictions.layernorm.bias"] = convert_tensor(lm_head["layernorm.bias"])
    of_state_dict["lm_logits.bias"] = convert_tensor(lm_head["bias"])
    # Binary head.
    binary_head = state_dict["binary_head"]
    of_state_dict["cls.seq_relationship.weight"] = convert_tensor(binary_head["weight"])
    of_state_dict["cls.seq_relationship.bias"] = convert_tensor((binary_head["bias"]))
    return of_state_dict
def load_tensor(tensor_lhs, tensor_rhs):
    """Copy local tensor `tensor_rhs` into global tensor `tensor_lhs` in place.

    The source is first made global with broadcast SBP on the destination's
    placement, then re-laid-out to the destination's SBP before the copy.
    """
    tensor_rhs = flow.to_global(
        tensor_rhs,
        placement=tensor_lhs.placement,
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
    )
    tensor_rhs = tensor_rhs.to_global(sbp=tensor_lhs.sbp)
    tensor_lhs.copy_(tensor_rhs)
def load_model(model: flow.nn.Module, state_dict):
    """Load `state_dict` into `model`, tolerating shape and key mismatches.

    Checkpoint entries whose shapes disagree with the model are skipped and
    logged; missing and unexpected keys are reported via the logger rather
    than raising.
    """
    model_state_dict = model.state_dict()
    # Decide shape: drop checkpoint entries whose shape differs from the model's.
    incorrect_shapes = []
    for k in list(state_dict.keys()):
        if k in model_state_dict:
            shape_model = tuple(model_state_dict[k].shape)
            shape_ckpt = tuple(state_dict[k].shape)
            if shape_model != shape_ckpt:
                incorrect_shapes.append((k, shape_ckpt, shape_model))
                state_dict.pop(k)
    unexpected_keys = []
    for key, value in state_dict.items():
        if key not in model_state_dict:
            unexpected_keys.append(key)
            continue
        # popping here leaves only the keys the checkpoint did not provide
        model_state_dict.pop(key)
        load_tensor(model.state_dict()[key], value)
    missing_keys = list(model_state_dict.keys())
    for k, shape_checkpoint, shape_model in incorrect_shapes:
        logger.warning(
            "Skip loading parameter '{}' to the model due to incompatible "
            "shapes: {} in the checkpoint but {} in the "
            "model! You might want to double check if this is expected.".format(
                k, shape_checkpoint, shape_model
            )
        )
    if missing_keys:
        logger.info(get_missing_parameters_message(missing_keys))
    if unexpected_keys:
        logger.info(get_unexpected_parameters_message(unexpected_keys))
def load_megatron_bert(model: flow.nn.Module, model_weight_path: str):
    """Load a Megatron-LM BERT checkpoint into a LiBai/OneFlow model in place.

    Args:
        model: the OneFlow model to populate.
        model_weight_path: path to the Megatron checkpoint file; its "model"
            entry holds the state dict that is converted and loaded.
    """
    # `torch` is already imported at module level; the previous function-local
    # `import torch` was redundant and has been removed.
    megatron_state_dict = torch.load(model_weight_path, map_location="cpu")["model"]
    of_state_dict = change_megatron_key(megatron_state_dict)
    load_model(model, of_state_dict)
| 4,859 | 35 | 100 | py |
libai | libai-main/projects/DALLE2/dalle2/dalle2_loader.py | import logging
import oneflow as flow
from oneflow.framework.check_point_v2 import _broadcast_py_object
import libai.utils.distributed as dist
from libai.models.build import build_model
from libai.models.utils.model_loader.base_loader import (
ModelLoaderHuggerFace,
_load_state_dict_into_model,
)
logger = logging.getLogger("libai.dalle2." + __name__)
class Dalle2ModelLoader(ModelLoaderHuggerFace):
    """Loader that converts DALL-E 2 torch checkpoints (prior + decoder)
    into a single global OneFlow/LiBai model."""

    def __init__(self, model, libai_cfg, pretrained_model_path, **kwargs):
        super().__init__(model, libai_cfg, pretrained_model_path, **kwargs)
        self.base_model_prefix_1 = ""
        self.base_model_prefix_2 = ""

    def _convert_state_dict(self, state_dict, module="prior"):
        """Rename torch checkpoint keys to LiBai names for `module`
        ("prior" or "decoder"); keys containing "clip." are dropped."""
        old_keys = []
        new_keys = []
        if module == "prior":
            for k in state_dict.keys():
                if "clip." in k:
                    continue
                old_keys.append(k)
                # ".g" (gain) parameters become ".weight"
                if k.endswith(".g"):
                    k = k[:-1] + "weight"
                elif k.startswith("net.causal_transformer"):
                    # gamma/beta norm parameters become weight/bias
                    if k.endswith("gamma"):
                        k = k[:-5] + "weight"
                    elif k.endswith("beta"):
                        k = k[:-4] + "bias"
                new_keys.append("prior." + k)
        elif module == "decoder":
            for k in state_dict.keys():
                if "clip." in k:
                    continue
                old_keys.append(k)
                if k.endswith(".g"):
                    k = k[:-1] + "weight"
                elif "cross_attn" in k:
                    if k.endswith("gamma"):
                        k = k[:-5] + "weight"
                    elif k.endswith("beta"):
                        k = k[:-4] + "bias"
                new_keys.append("decoder." + k)
        ret_state_dict = {}
        # pop from the source dict so the original tensors are moved, not copied
        for old_key, new_key in zip(old_keys, new_keys):
            ret_state_dict[new_key] = state_dict.pop(old_key)
        return ret_state_dict

    def load(self):
        """Build the LiBai model and load converted prior+decoder weights.

        Checkpoints are read and converted on rank 0 only, then made global
        and loaded on all ranks. Returns the model in eval mode.
        """
        if dist.is_main_process():
            # prior
            logger.info("loading torch model prior...")
            torch_state_dict = self._load_torch_state_dict(self.libai_cfg.model.prior_weight_path)[
                "ema_model"
            ]
            logger.info("converting torch model prior into oneflow model...")
            flow_state_dict = self._convert_tensors(torch_state_dict)
            prior_state_dict = self._convert_state_dict(flow_state_dict)
            # decoder
            logger.info("loading torch model decoder...")
            torch_state_dict = self._load_torch_state_dict(self.libai_cfg.model.decoder_weight_path)
            flow_state_dict = self._convert_tensors(torch_state_dict)
            logger.info("converting torch model decoder into oneflow model...")
            decoder_state_dict = self._convert_state_dict(flow_state_dict, module="decoder")
            flow_state_dict = {**prior_state_dict, **decoder_state_dict}
        else:
            flow_state_dict = None
        logger.info("building LiBai model...")
        # keep the config identical on every rank before building
        self.libai_cfg = _broadcast_py_object(self.libai_cfg, src=0)
        self.model = build_model(self.model)
        self.model._apply(dist.convert_to_distributed_default_setting)
        self.model = self.model.eval()
        flow.cuda.empty_cache()
        # State_dict to global
        logger.info("transfering state_dict local to global...")
        flow_state_dict = self._state_dict_to_global(flow_state_dict, mode="pytorch")  # oom
        # Load
        # (
        #     model,
        #     missing_keys,
        #     unexpected_keys,
        #     mismatched_keys,
        #     error_msgs,
        # ) = self._load_pretrained_model(self.model, flow_state_dict, self.pretrained_model_path)
        logger.info("loading model weights into LiBai...")
        _load_state_dict_into_model(self.model, flow_state_dict, "")
        return self.model
| 3,927 | 39.494845 | 100 | py |
libai | libai-main/projects/DALLE2/dalle2/vector_quantize_flow.py | # from https://github.com/lucidrains/vector_quantize_pytorch/vector_quantize_pytorch.py
import oneflow as flow
import oneflow.nn.functional as F
from einops import rearrange, repeat
from oneflow import einsum, nn
from libai.utils import distributed
def exists(val):
    """Return True when `val` is not None."""
    return not (val is None)
def default(val, d):
    """Return `val` unless it is None, in which case return the fallback `d`."""
    if val is None:
        return d
    return val
def noop(*args, **kwargs):
    """Do nothing; a stand-in callable used when a hook is not needed."""
    return None
def l2norm(t):
    """L2-normalize `t` along its last dimension."""
    return F.normalize(t, p=2, dim=-1)
def log(t, eps=1e-20):
    """Numerically safe log: clamp values below `eps` before taking the log."""
    return flow.log(t.clamp(min=eps))
def uniform_init(*shape):
    """Create a tensor of `shape` filled with Kaiming-uniform values."""
    t = flow.empty(shape)
    nn.init.kaiming_uniform_(t)
    return t
def gumbel_noise(t):
    """Sample standard Gumbel noise with the same shape as `t` (-log(-log(U)))."""
    noise = flow.zeros_like(t).uniform_(0, 1)
    return -log(-log(noise))
def gumbel_sample(t, temperature=1.0, dim=-1):
    """Sample indices from logits `t` via the Gumbel-max trick.

    temperature == 0 degenerates to a plain (greedy) argmax.
    """
    if temperature == 0:
        return t.argmax(dim=dim)
    return ((t / temperature) + gumbel_noise(t)).argmax(dim=dim)
def ema_inplace(moving_avg, new, decay):
    """In-place EMA update: moving_avg <- decay * moving_avg + (1 - decay) * new."""
    moving_avg.data.mul_(decay)
    moving_avg.data.add_(new, alpha=1.0 - decay)
def laplace_smoothing(x, n_categories, eps=1e-5):
    """Additively smooth counts `x` into a probability distribution.

    Adds `eps` to every count and renormalizes by the smoothed total.
    """
    total = x.sum() + n_categories * eps
    return (x + eps) / total
def sample_vectors(samples, num):
    """Sample `num` rows from `samples`.

    Uses a random permutation (no replacement) when there are at least `num`
    rows, otherwise samples indices with replacement.
    """
    num_samples, device = samples.shape[0], samples.device
    if num_samples >= num:
        indices = flow.randperm(num_samples, device=device)[:num]
    else:
        indices = flow.randint(0, num_samples, (num,), device=device)
    return samples[indices]
def batched_sample_vectors(samples, num):
    """Apply `sample_vectors` independently to each slice along dim 0."""
    return flow.stack([sample_vectors(sample, num) for sample in samples.unbind(dim=0)], dim=0)
def pad_shape(shape, size, dim=0):
    """Copy `shape` as a list, replacing the entry at index `dim` with `size`.

    A negative `dim` matches no index (enumerate yields non-negative indices),
    so the shape is returned unchanged in that case.
    """
    result = []
    for i, s in enumerate(shape):
        result.append(size if i == dim else s)
    return result
def sample_multinomial(total_count, probs):
    """Draw one multinomial sample with `total_count` trials and probabilities `probs`.

    Implemented as a chain of binomial draws on CPU, conditioning each draw on
    the remaining count and probability mass; the result is moved back to the
    original device of `probs`.
    """
    device = probs.device
    probs = probs.cpu()
    total_count = probs.new_full((), total_count)
    remainder = probs.new_ones(())
    sample = flow.empty_like(probs, dtype=flow.long)
    for i, p in enumerate(probs):
        s = flow.binomial(total_count, p / remainder)
        sample[i] = s
        total_count -= s
        remainder -= p
    return sample.to(device)
def all_gather_sizes(x, dim):
    """Gather the size of `x` along `dim` from every rank into a stacked tensor.

    NOTE(review): relies on `distributed` (libai.utils.distributed) exposing
    torch.distributed-style `all_gather` and `get_world_size` — confirm the API.
    """
    size = flow.tensor(x.shape[dim], dtype=flow.long, device=x.device)
    all_sizes = [flow.empty_like(size) for _ in range(distributed.get_world_size())]
    distributed.all_gather(all_sizes, size)
    return flow.stack(all_sizes)
def all_gather_variably_sized(x, sizes, dim=0):
    # Broadcast each rank's tensor to all ranks; receivers first allocate a
    # buffer shaped per that rank's size (from ``sizes``) along ``dim``.
    rank = distributed.get_rank()
    all_x = []
    for i, size in enumerate(sizes):
        t = x if i == rank else x.new_empty(pad_shape(x.shape, size, dim))
        distributed.broadcast(t, src=i, async_op=True)
        all_x.append(t)
    distributed.barrier()  # wait for the asynchronous broadcasts to finish
    return all_x
def sample_vectors_distributed(local_samples, num):
    # Sample ``num`` vectors across all ranks, proportionally to how many
    # samples each rank holds; rank 0 decides the split and broadcasts it.
    rank = distributed.get_rank()
    all_num_samples = all_gather_sizes(local_samples, dim=0)
    if rank == 0:
        samples_per_rank = sample_multinomial(num, all_num_samples / all_num_samples.sum())
    else:
        samples_per_rank = flow.empty_like(all_num_samples)
    distributed.broadcast(samples_per_rank, src=0)
    samples_per_rank = samples_per_rank.tolist()
    # Draw this rank's quota locally, then exchange with every other rank.
    local_samples = batched_sample_vectors(local_samples, samples_per_rank[rank])
    all_samples = all_gather_variably_sized(local_samples, samples_per_rank, dim=0)
    return flow.cat(all_samples, dim=0)
def batched_bincount(x, *, minlength):
    """Per-row bincount: count occurrences of each index value along the last dim."""
    counts = flow.zeros(x.shape[0], minlength, dtype=x.dtype, device=x.device)
    counts.scatter_add_(-1, x, flow.ones_like(x))
    return counts
def kmeans(
    samples,
    num_clusters,
    num_iters=10,
    use_cosine_sim=False,
    sample_fn=batched_sample_vectors,
    all_reduce_fn=noop,
):
    # Lloyd's k-means over batched samples of shape (num_codebooks, n, dim).
    # Returns (means, bins): final centroids and the last iteration's counts.
    num_codebooks, dim, dtype, _ = (
        samples.shape[0],
        samples.shape[-1],
        samples.dtype,
        samples.device,
    )
    # Initialize centroids by (possibly distributed) random sampling.
    means = sample_fn(samples, num_clusters)
    for _ in range(num_iters):
        if use_cosine_sim:
            # Dot-product similarity (callers normalize inputs beforehand —
            # see CosineSimCodebook, which l2-normalizes before calling).
            dists = samples @ rearrange(means, "h n d -> h d n")
        else:
            # Negative euclidean distance so argmax selects the nearest mean.
            dists = -flow.cdist(samples, means, p=2)
        buckets = flow.argmax(dists, dim=-1)
        bins = batched_bincount(buckets, minlength=num_clusters)
        all_reduce_fn(bins)
        zero_mask = bins == 0
        bins_min_clamped = bins.masked_fill(zero_mask, 1)  # avoid divide-by-zero
        # buckets is integer-typed; request the samples' dtype for the accumulator.
        new_means = buckets.new_zeros(num_codebooks, num_clusters, dim, dtype=dtype)
        new_means.scatter_add_(1, repeat(buckets, "h n -> h n d", d=dim), samples)
        new_means = new_means / rearrange(bins_min_clamped, "... -> ... 1")
        all_reduce_fn(new_means)
        if use_cosine_sim:
            new_means = l2norm(new_means)
        # Keep the previous centroid wherever a cluster received no samples.
        means = flow.where(rearrange(zero_mask, "... -> ... 1"), means, new_means)
    return means, bins
def batched_embedding(indices, embeds):
    # Per-codebook embedding lookup:
    # indices (h, b, n) of code ids, embeds (h, c, d) -> output (h, b, n, d).
    batch, dim = indices.shape[1], embeds.shape[-1]
    indices = repeat(indices, "h b n -> h b n d", d=dim)
    embeds = repeat(embeds, "h c d -> h b c d", b=batch)
    return embeds.gather(2, indices)
# regularization losses
def orthgonal_loss_fn(t):
    # eq (2) from https://arxiv.org/abs/2112.00384
    # Penalizes pairwise cosine similarity between codes (off-diagonal terms)
    # so the codebook stays near-orthonormal; normalized by h * n^2.
    # (Name keeps the upstream typo "orthgonal" — callers depend on it.)
    h, n = t.shape[:2]
    normed_codes = l2norm(t)
    identity = repeat(flow.eye(n, device=t.device), "i j -> h i j", h=h)
    cosine_sim = einsum("h i d, h j d -> h i j", normed_codes, normed_codes)
    return ((cosine_sim - identity) ** 2).sum() / (h * n ** 2)
# distance types
class EuclideanCodebook(nn.Module):
    """Codebook with euclidean-distance assignment and EMA updates.

    Codes live in ``embed`` of shape (num_codebooks, codebook_size, dim).
    During training the codebook is maintained as an exponential moving
    average of the assigned input vectors (VQ-VAE style) rather than by
    gradient descent, unless ``learnable_codebook`` is set.
    """

    def __init__(
        self,
        dim,
        codebook_size,
        num_codebooks=1,
        kmeans_init=False,
        kmeans_iters=10,
        decay=0.8,
        eps=1e-5,
        threshold_ema_dead_code=2,
        use_ddp=False,
        learnable_codebook=False,
        sample_codebook_temp=0,
    ):
        super().__init__()
        self.decay = decay
        # With kmeans init the codebook starts at zeros and is filled lazily
        # from the first batch (see init_embed_).
        init_fn = uniform_init if not kmeans_init else flow.zeros
        embed = init_fn(num_codebooks, codebook_size, dim)
        self.codebook_size = codebook_size
        self.num_codebooks = num_codebooks
        self.kmeans_iters = kmeans_iters
        self.eps = eps
        self.threshold_ema_dead_code = threshold_ema_dead_code
        self.sample_codebook_temp = sample_codebook_temp
        # Distributed variants are only needed when the codebook is synced.
        self.sample_fn = sample_vectors_distributed if use_ddp else batched_sample_vectors
        self.all_reduce_fn = distributed.all_reduce if use_ddp else noop
        self.register_buffer("initted", flow.Tensor([not kmeans_init]))
        self.register_buffer("cluster_size", flow.zeros(num_codebooks, codebook_size))
        self.register_buffer("embed_avg", embed.clone())
        self.learnable_codebook = learnable_codebook
        if learnable_codebook:
            self.embed = nn.Parameter(embed)
        else:
            self.register_buffer("embed", embed)

    def init_embed_(self, data):
        """One-time k-means initialization of the codebook from real data."""
        if self.initted:
            return
        embed, cluster_size = kmeans(
            data,
            self.codebook_size,
            self.kmeans_iters,
            sample_fn=self.sample_fn,
            all_reduce_fn=self.all_reduce_fn,
        )
        self.embed.data.copy_(embed)
        self.embed_avg.data.copy_(embed.clone())
        self.cluster_size.data.copy_(cluster_size)
        self.initted.data.copy_(flow.Tensor([True]))

    def replace(self, batch_samples, batch_mask):
        """Re-seed the masked (dead) codes of each codebook with random samples."""
        batch_samples = l2norm(batch_samples)
        for ind, (samples, mask) in enumerate(
            zip(batch_samples.unbind(dim=0), batch_mask.unbind(dim=0))
        ):
            if not flow.any(mask):
                continue
            sampled = self.sample_fn(rearrange(samples, "... -> 1 ..."), mask.sum().item())
            self.embed.data[ind][mask] = rearrange(sampled, "1 ... -> ...")

    def expire_codes_(self, batch_samples):
        """Replace codes whose EMA usage fell below the dead-code threshold."""
        if self.threshold_ema_dead_code == 0:
            return
        expired_codes = self.cluster_size < self.threshold_ema_dead_code
        if not flow.any(expired_codes):
            return
        batch_samples = rearrange(batch_samples, "h ... d -> h (...) d")
        self.replace(batch_samples, batch_mask=expired_codes)

    def forward(self, x):
        """Quantize ``x``; returns (quantized vectors, code indices)."""
        needs_codebook_dim = x.ndim < 4
        x = x.float()
        if needs_codebook_dim:
            x = rearrange(x, "... -> 1 ...")
        shape, dtype = x.shape, x.dtype
        flatten = rearrange(x, "h ... d -> h (...) d")
        self.init_embed_(flatten)
        embed = self.embed if not self.learnable_codebook else self.embed.detach()
        # Negative distances so the (gumbel-)argmax picks the nearest code.
        dist = -flow.cdist(flatten, embed, p=2)
        embed_ind = gumbel_sample(dist, dim=-1, temperature=self.sample_codebook_temp)
        embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype)
        embed_ind = embed_ind.view(*shape[:-1])
        quantize = batched_embedding(embed_ind, self.embed)
        if self.training:
            # EMA update of per-code usage counts.
            cluster_size = embed_onehot.sum(dim=1)
            self.all_reduce_fn(cluster_size)
            ema_inplace(self.cluster_size, cluster_size, self.decay)
            embed_sum = einsum("h n d, h n c -> h c d", flatten, embed_onehot)
            self.all_reduce_fn(embed_sum)
            # BUGFIX: embed_avg was never updated (embed_sum was computed and
            # all-reduced but unused), so the codebook stayed frozen at its
            # initial value. Fold the new assignment sums into the EMA before
            # renormalizing, matching the reference implementation and the
            # CosineSimCodebook, which does perform its EMA update.
            ema_inplace(self.embed_avg, embed_sum, self.decay)
            cluster_size = (
                laplace_smoothing(self.cluster_size, self.codebook_size, self.eps)
                * self.cluster_size.sum()
            )
            embed_normalized = self.embed_avg / rearrange(cluster_size, "... -> ... 1")
            self.embed.data.copy_(embed_normalized)
            self.expire_codes_(x)
        if needs_codebook_dim:
            quantize, embed_ind = map(lambda t: rearrange(t, "1 ... -> ..."), (quantize, embed_ind))
        return quantize, embed_ind
class CosineSimCodebook(nn.Module):
    """Codebook with cosine-similarity assignment and EMA updates.

    Both inputs and codes are l2-normalized, so nearest-code lookup is a
    plain dot product; the codebook itself is maintained directly as an
    exponential moving average of the (normalized) assigned vectors.
    """
    def __init__(
        self,
        dim,
        codebook_size,
        num_codebooks=1,
        kmeans_init=False,
        kmeans_iters=10,
        decay=0.8,
        eps=1e-5,
        threshold_ema_dead_code=2,
        use_ddp=False,
        learnable_codebook=False,
        sample_codebook_temp=0.0,
    ):
        super().__init__()
        self.decay = decay
        # Random init is normalized onto the unit sphere; kmeans init starts
        # at zeros and is filled lazily from the first batch (init_embed_).
        if not kmeans_init:
            embed = l2norm(uniform_init(num_codebooks, codebook_size, dim))
        else:
            embed = flow.zeros(num_codebooks, codebook_size, dim)
        self.codebook_size = codebook_size
        self.num_codebooks = num_codebooks
        self.kmeans_iters = kmeans_iters
        self.eps = eps
        self.threshold_ema_dead_code = threshold_ema_dead_code
        self.sample_codebook_temp = sample_codebook_temp
        # Distributed variants are only needed when the codebook is synced.
        self.sample_fn = sample_vectors_distributed if use_ddp else batched_sample_vectors
        self.all_reduce_fn = distributed.all_reduce if use_ddp else noop
        self.register_buffer("initted", flow.Tensor([not kmeans_init]))
        self.register_buffer("cluster_size", flow.zeros(num_codebooks, codebook_size))
        self.learnable_codebook = learnable_codebook
        if learnable_codebook:
            self.embed = nn.Parameter(embed)
        else:
            self.register_buffer("embed", embed)
    def init_embed_(self, data):
        # One-time spherical k-means initialization of the codebook.
        if self.initted:
            return
        embed, cluster_size = kmeans(
            data,
            self.codebook_size,
            self.kmeans_iters,
            use_cosine_sim=True,
            sample_fn=self.sample_fn,
            all_reduce_fn=self.all_reduce_fn,
        )
        self.embed.data.copy_(embed)
        self.cluster_size.data.copy_(cluster_size)
        self.initted.data.copy_(flow.Tensor([True]))
    def replace(self, batch_samples, batch_mask):
        # Re-seed the masked (dead) codes of each codebook with random samples.
        batch_samples = l2norm(batch_samples)
        for ind, (samples, mask) in enumerate(
            zip(batch_samples.unbind(dim=0), batch_mask.unbind(dim=0))
        ):
            if not flow.any(mask):
                continue
            sampled = self.sample_fn(rearrange(samples, "... -> 1 ..."), mask.sum().item())
            self.embed.data[ind][mask] = rearrange(sampled, "1 ... -> ...")
    def expire_codes_(self, batch_samples):
        # Replace codes whose EMA usage fell below the dead-code threshold.
        if self.threshold_ema_dead_code == 0:
            return
        expired_codes = self.cluster_size < self.threshold_ema_dead_code
        if not flow.any(expired_codes):
            return
        batch_samples = rearrange(batch_samples, "h ... d -> h (...) d")
        self.replace(batch_samples, batch_mask=expired_codes)
    def forward(self, x):
        # Quantize ``x``; returns (quantized vectors, code indices).
        needs_codebook_dim = x.ndim < 4
        x = x.float()
        if needs_codebook_dim:
            x = rearrange(x, "... -> 1 ...")
        shape, dtype = x.shape, x.dtype
        flatten = rearrange(x, "h ... d -> h (...) d")
        flatten = l2norm(flatten)
        self.init_embed_(flatten)
        embed = self.embed if not self.learnable_codebook else self.embed.detach()
        embed = l2norm(embed)
        # Cosine similarity; (gumbel-)argmax picks the most similar code.
        dist = einsum("h n d, h c d -> h n c", flatten, embed)
        embed_ind = gumbel_sample(dist, dim=-1, temperature=self.sample_codebook_temp)
        embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype)
        embed_ind = embed_ind.view(*shape[:-1])
        quantize = batched_embedding(embed_ind, self.embed)
        if self.training:
            bins = embed_onehot.sum(dim=1)
            self.all_reduce_fn(bins)
            ema_inplace(self.cluster_size, bins, self.decay)
            zero_mask = bins == 0
            bins = bins.masked_fill(zero_mask, 1.0)  # avoid divide-by-zero
            embed_sum = einsum("h n d, h n c -> h c d", flatten, embed_onehot)
            self.all_reduce_fn(embed_sum)
            embed_normalized = embed_sum / rearrange(bins, "... -> ... 1")
            embed_normalized = l2norm(embed_normalized)
            # Codes with no assignments this step keep their current value.
            embed_normalized = flow.where(
                rearrange(zero_mask, "... -> ... 1"), embed, embed_normalized
            )
            ema_inplace(self.embed, embed_normalized, self.decay)
            self.expire_codes_(x)
        if needs_codebook_dim:
            quantize, embed_ind = map(lambda t: rearrange(t, "1 ... -> ..."), (quantize, embed_ind))
        return quantize, embed_ind
# main class
class VectorQuantize(nn.Module):
    """Vector quantization layer (VQ-VAE style).

    Projects inputs into the codebook space, snaps each vector to its nearest
    code (euclidean or cosine codebook), and returns the quantized output, the
    code indices, and an auxiliary loss (commitment + optional orthogonal
    regularization). Gradients pass through via the straight-through estimator.
    """
    def __init__(
        self,
        dim,
        codebook_size,
        codebook_dim=None,
        heads=1,
        separate_codebook_per_head=False,
        decay=0.8,
        eps=1e-5,
        kmeans_init=False,
        kmeans_iters=10,
        use_cosine_sim=False,
        threshold_ema_dead_code=0,
        channel_last=True,
        accept_image_fmap=False,
        commitment_weight=1.0,
        orthogonal_reg_weight=0.0,
        orthogonal_reg_active_codes_only=False,
        orthogonal_reg_max_codes=None,
        sample_codebook_temp=0.0,
        sync_codebook=False,
    ):
        super().__init__()
        self.heads = heads
        self.separate_codebook_per_head = separate_codebook_per_head
        codebook_dim = default(codebook_dim, dim)
        codebook_input_dim = codebook_dim * heads
        # Project in/out only when the (per-head) codebook dim differs from
        # the model dim; otherwise identity.
        requires_projection = codebook_input_dim != dim
        self.project_in = (
            nn.Linear(dim, codebook_input_dim) if requires_projection else nn.Identity()
        )
        self.project_out = (
            nn.Linear(codebook_input_dim, dim) if requires_projection else nn.Identity()
        )
        self.eps = eps
        self.commitment_weight = commitment_weight
        # Orthogonal regularization requires gradients on the codebook.
        has_codebook_orthogonal_loss = orthogonal_reg_weight > 0
        self.orthogonal_reg_weight = orthogonal_reg_weight
        self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only
        self.orthogonal_reg_max_codes = orthogonal_reg_max_codes
        codebook_class = EuclideanCodebook if not use_cosine_sim else CosineSimCodebook
        self._codebook = codebook_class(
            dim=codebook_dim,
            num_codebooks=heads if separate_codebook_per_head else 1,
            codebook_size=codebook_size,
            kmeans_init=kmeans_init,
            kmeans_iters=kmeans_iters,
            decay=decay,
            eps=eps,
            threshold_ema_dead_code=threshold_ema_dead_code,
            use_ddp=sync_codebook,
            learnable_codebook=has_codebook_orthogonal_loss,
            sample_codebook_temp=sample_codebook_temp,
        )
        self.codebook_size = codebook_size
        self.accept_image_fmap = accept_image_fmap
        self.channel_last = channel_last
    @property
    def codebook(self):
        # The raw code embeddings (num_codebooks, codebook_size, codebook_dim).
        return self._codebook.embed
    def forward(self, x):
        # Returns (quantize, embed_ind, loss); shapes mirror the input layout
        # (sequence, channel-first, or image feature map).
        _, device, heads, is_multiheaded, _ = (
            x.shape,
            x.device,
            self.heads,
            self.heads > 1,
            self.codebook_size,
        )
        need_transpose = not self.channel_last and not self.accept_image_fmap
        if self.accept_image_fmap:
            height, width = x.shape[-2:]
            x = rearrange(x, "b c h w -> b (h w) c")
        if need_transpose:
            x = rearrange(x, "b d n -> b n d")
        x = self.project_in(x)
        if is_multiheaded:
            # Separate codebooks get a leading head axis; a shared codebook
            # folds heads into the batch instead.
            ein_rhs_eq = "h b n d" if self.separate_codebook_per_head else "1 (b h) n d"
            x = rearrange(x, f"b n (h d) -> {ein_rhs_eq}", h=heads)
        quantize, embed_ind = self._codebook(x)
        if self.training:
            # Straight-through estimator: forward uses the quantized value,
            # backward passes gradients to x unchanged.
            quantize = x + (quantize - x).detach()
        loss = flow.tensor([0.0], device=device, requires_grad=self.training)
        if self.training:
            if self.commitment_weight > 0:
                commit_loss = F.mse_loss(quantize.detach(), x)
                loss = loss + commit_loss * self.commitment_weight
            if self.orthogonal_reg_weight > 0:
                codebook = self.codebook
                if self.orthogonal_reg_active_codes_only:
                    # only calculate orthogonal loss for the activated codes for this batch
                    # NOTE(review): codebook has a leading codebook axis here, so
                    # indexing dim 0 with code ids looks suspect when
                    # separate_codebook_per_head is set — confirm against upstream.
                    unique_code_ids = flow.unique(embed_ind)
                    codebook = codebook[unique_code_ids]
                num_codes = codebook.shape[0]
                if (
                    exists(self.orthogonal_reg_max_codes)
                    and num_codes > self.orthogonal_reg_max_codes
                ):
                    # Cap the cost of the regularizer by subsampling codes.
                    rand_ids = flow.randperm(num_codes, device=device)[
                        : self.orthogonal_reg_max_codes
                    ]
                    codebook = codebook[rand_ids]
                orthogonal_reg_loss = orthgonal_loss_fn(codebook)
                loss = loss + orthogonal_reg_loss * self.orthogonal_reg_weight
        if is_multiheaded:
            if self.separate_codebook_per_head:
                quantize = rearrange(quantize, "h b n d -> b n (h d)", h=heads)
                embed_ind = rearrange(embed_ind, "h b n -> b n h", h=heads)
            else:
                quantize = rearrange(quantize, "1 (b h) n d -> b n (h d)", h=heads)
                embed_ind = rearrange(embed_ind, "1 (b h) n -> b n h", h=heads)
        quantize = self.project_out(quantize)
        if need_transpose:
            quantize = rearrange(quantize, "b n d -> b d n")
        if self.accept_image_fmap:
            quantize = rearrange(quantize, "b (h w) c -> b c h w", h=height, w=width)
            embed_ind = rearrange(embed_ind, "b (h w) ... -> b h w ...", h=height, w=width)
        return quantize, embed_ind, loss
| 19,209 | 30.033926 | 100 | py |
libai | libai-main/projects/DALLE2/swinir/utils.py | # -----------------------------------------------------------------------------------
# from
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
# -----------------------------------------------------------------------------------
import collections.abc
import math
import warnings
from itertools import repeat
import oneflow as flow
import oneflow.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    """Fill ``tensor`` in-place from N(mean, std^2) truncated to [a, b], without grad."""
    # Cut & paste from Pytorch official master until it's in a few official releases - RW
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn(
            "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
            "The distribution of values may be incorrect.",
            stacklevel=2,
        )
    with flow.no_grad():
        # Values are generated by using a truncated uniform distribution and
        # then using the inverse CDF for the normal distribution.
        # Get upper and lower cdf values
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)
        # Uniformly fill tensor with values from [l, u], then translate to
        # [2l-1, 2u-1].
        tensor.uniform_(2 * l - 1, 2 * u - 1)
        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()
        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.0))
        tensor.add_(mean)
        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
        return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
    # type: (flow.Tensor, float, float, float, float) -> flow.Tensor
    r"""Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.
    NOTE: this impl is similar to the Pytorch trunc_normal_, the bounds [a, b] are
    applied while sampling the normal with mean/std applied, therefore a, b args
    should be adjusted to match the range of mean, std args.
    Args:
        tensor: an n-dimensional `flow.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value
    Examples:
        >>> w = flow.empty(3, 5)
        >>> trunc_normal_(w)
    """
    # Delegates to the no-grad helper above, which mutates ``tensor`` in place.
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
# -----------------------------------------------------------------------------------
# from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/helpers.py
# -----------------------------------------------------------------------------------
# From Pytorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
return x
return tuple(repeat(x, n))
return parse
# Convenience converters for the common arities: scalars become n-tuples,
# iterables pass through unchanged (e.g. to_2tuple(7) -> (7, 7)).
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
# -----------------------------------------------------------------------------------
# from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
# -----------------------------------------------------------------------------------
def drop_path(x, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True):
    """Stochastic depth: zero out whole samples of ``x`` with probability ``drop_prob``.

    Applied in the main path of residual blocks; each sample in the batch is
    independently kept or dropped, and kept samples are rescaled by
    ``1 / keep_prob`` when ``scale_by_keep`` so the expectation is preserved.
    No-op when not training or when ``drop_prob`` is zero.
    """
    if not training or drop_prob == 0.0:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast across all remaining dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = x.new_empty(mask_shape).bernoulli_(keep_prob)
    if scale_by_keep and keep_prob > 0.0:
        mask = mask.div_(keep_prob)
    return x * mask
class DropPath(nn.Module):
    """Per-sample stochastic depth for residual main paths (module wrapper
    around the functional ``drop_path``)."""
    def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True):
        super().__init__()
        self.scale_by_keep = scale_by_keep
        self.drop_prob = drop_prob
    def forward(self, x):
        # nn.Module supplies self.training, so eval mode is a no-op.
        return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
    def extra_repr(self):
        return f"drop_prob={round(self.drop_prob,3):0.3f}"
| 5,374 | 39.413534 | 99 | py |
libai | libai-main/projects/DALLE2/swinir/models.py | # -----------------------------------------------------------------------------------
# SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257
# Originally Written by Ze Liu, Modified by Jingyun Liang.
# -----------------------------------------------------------------------------------
# code from https://github.com/JingyunLiang/SwinIR/blob/main/models/network_swinir.py
import math
import oneflow as flow
import oneflow.nn as nn
import oneflow.nn.functional as F
from oneflow.utils import checkpoint
from .utils import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
    """Two-layer feed-forward block: linear -> activation -> dropout ->
    linear -> dropout, with hidden/output widths defaulting to the input width."""
    def __init__(
        self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0
    ):
        super().__init__()
        out_features = out_features if out_features else in_features
        hidden_features = hidden_features if hidden_features else in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)
    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
def window_partition(x, window_size):
    """Split a (B, H, W, C) feature map into non-overlapping square windows.

    Args:
        x: (B, H, W, C) tensor; H and W must be divisible by ``window_size``.
        window_size (int): side length of each window.
    Returns:
        (num_windows * B, window_size, window_size, C) tensor, windows ordered
        row-major over the window grid, batch-major overall.
    """
    B, H, W, C = x.shape
    tiled = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    tiled = tiled.permute(0, 1, 3, 2, 4, 5).contiguous()
    return tiled.view(-1, window_size, window_size, C)
def window_reverse(windows, window_size, H, W):
    """Inverse of ``window_partition``: reassemble windows into a feature map.

    Args:
        windows: (num_windows * B, window_size, window_size, C) tensor.
        window_size (int): side length of each window.
        H (int): height of the target image.
        W (int): width of the target image.
    Returns:
        (B, H, W, C) tensor.
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    grid = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    grid = grid.permute(0, 1, 3, 2, 4, 5).contiguous()
    return grid.view(B, H, W, -1)
class WindowAttention(nn.Module):
    r"""Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.
    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional):
            If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """
    def __init__(
        self,
        dim,
        window_size,
        num_heads,
        qkv_bias=True,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
    ):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        # define a parameter table of relative position bias
        self.relative_position_bias_table = nn.Parameter(
            flow.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
        )  # 2*Wh-1 * 2*Ww-1, nH
        # get pair-wise relative position index for each token inside the window
        coords_h = flow.arange(self.window_size[0])
        coords_w = flow.arange(self.window_size[1])
        coords = flow.stack(flow.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = flow.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        # Buffer (not a parameter): a fixed lookup from token pair -> bias-table row.
        self.register_buffer("relative_position_index", relative_position_index)
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        trunc_normal_(self.relative_position_bias_table, std=0.02)
        self.softmax = nn.Softmax(dim=-1)
    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        # Single projection produces q, k, v; reshape to (3, B_, nH, N, head_dim).
        qkv = (
            self.qkv(x)
            .reshape(B_, N, 3, self.num_heads, C // self.num_heads)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)
        q = q * self.scale
        attn = q @ k.transpose(-2, -1)
        # Look up the learned bias for every token pair and add it to the logits.
        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)
        ].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
        )  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(
            2, 0, 1
        ).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)
        if mask is not None:
            # Shifted-window mask: add -100 to cross-window pairs before softmax.
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
    def extra_repr(self) -> str:
        return f"dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}"
    def flops(self, N):
        # calculate flops for 1 window with token length of N
        flops = 0
        # qkv = self.qkv(x)
        flops += N * self.dim * 3 * self.dim
        # attn = (q @ k.transpose(-2, -1))
        flops += self.num_heads * N * (self.dim // self.num_heads) * N
        #  x = (attn @ v)
        flops += self.num_heads * N * N * (self.dim // self.num_heads)
        # x = self.proj(x)
        flops += N * self.dim * self.dim
        return flops
class SwinTransformerBlock(nn.Module):
    r"""Swin Transformer Block.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resulotion.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """
    def __init__(
        self,
        dim,
        input_resolution,
        num_heads,
        window_size=7,
        shift_size=0,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim,
            window_size=to_2tuple(self.window_size),
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
        )
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop
        )
        if self.shift_size > 0:
            # Mask for the training resolution can be precomputed once; other
            # resolutions recompute it on the fly in forward().
            attn_mask = self.calculate_mask(self.input_resolution)
        else:
            attn_mask = None
        self.register_buffer("attn_mask", attn_mask)
    def calculate_mask(self, x_size):
        # calculate attention mask for SW-MSA
        # Tokens are labeled by which shifted region they came from; pairs with
        # different labels get -100 so softmax suppresses cross-region attention.
        H, W = x_size
        img_mask = flow.zeros((1, H, W, 1))  # 1 H W 1
        h_slices = (
            slice(0, -self.window_size),
            slice(-self.window_size, -self.shift_size),
            slice(-self.shift_size, None),
        )
        w_slices = (
            slice(0, -self.window_size),
            slice(-self.window_size, -self.shift_size),
            slice(-self.shift_size, None),
        )
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1
        mask_windows = window_partition(
            img_mask, self.window_size
        )  # nW, window_size, window_size, 1
        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(
            attn_mask == 0, float(0.0)
        )
        return attn_mask
    def forward(self, x, x_size):
        # x: (B, H*W, C) tokens; x_size: actual (H, W) of this input.
        H, W = x_size
        B, L, C = x.shape
        # assert L == H * W, "input feature has wrong size"
        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)
        # cyclic shift
        if self.shift_size > 0:
            shifted_x = flow.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x
        # partition windows
        x_windows = window_partition(
            shifted_x, self.window_size
        )  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(
            -1, self.window_size * self.window_size, C
        )  # nW*B, window_size*window_size, C
        # W-MSA/SW-MSA (to be compatible for testing on images
        # whose shapes are the multiple of window size
        if self.input_resolution == x_size:
            attn_windows = self.attn(
                x_windows, mask=self.attn_mask
            )  # nW*B, window_size*window_size, C
        else:
            # Resolution differs from the precomputed one: rebuild the mask.
            attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C
        # reverse cyclic shift
        if self.shift_size > 0:
            x = flow.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)
        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
    def extra_repr(self) -> str:
        return (
            f"dim={self.dim}, input_resolution={self.input_resolution}, "
            f"num_heads={self.num_heads}, window_size={self.window_size},"
            f"shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
        )
    def flops(self):
        flops = 0
        H, W = self.input_resolution
        # norm1
        flops += self.dim * H * W
        # W-MSA/SW-MSA
        nW = H * W / self.window_size / self.window_size
        flops += nW * self.attn.flops(self.window_size * self.window_size)
        # mlp
        flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
        # norm2
        flops += self.dim * H * W
        return flops
class PatchMerging(nn.Module):
    r"""Patch Merging Layer.

    Downsamples by 2x in each spatial dim: concatenates each 2x2 patch
    neighborhood channel-wise (4C), normalizes, then projects to 2C.
    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm
    """
    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)
    def forward(self, x):
        """
        x: B, H*W, C
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
        x = x.view(B, H, W, C)
        # Gather the four corners of every 2x2 patch (order matters: it fixes
        # the channel layout consumed by self.reduction).
        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
        x = flow.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
        x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C
        x = self.norm(x)
        x = self.reduction(x)
        return x
    def extra_repr(self) -> str:
        return f"input_resolution={self.input_resolution}, dim={self.dim}"
    def flops(self):
        H, W = self.input_resolution
        flops = H * W * self.dim
        flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
        return flops
class BasicLayer(nn.Module):
    """A basic Swin Transformer layer for one stage.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional):
            Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """
    def __init__(
        self,
        dim,
        input_resolution,
        depth,
        num_heads,
        window_size,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        norm_layer=nn.LayerNorm,
        downsample=None,
        use_checkpoint=False,
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        # build blocks
        # Blocks alternate between plain windows (shift 0) and shifted windows.
        self.blocks = nn.ModuleList(
            [
                SwinTransformerBlock(
                    dim=dim,
                    input_resolution=input_resolution,
                    num_heads=num_heads,
                    window_size=window_size,
                    shift_size=0 if (i % 2 == 0) else window_size // 2,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop,
                    attn_drop=attn_drop,
                    drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                    norm_layer=norm_layer,
                )
                for i in range(depth)
            ]
        )
        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None
    def forward(self, x, x_size):
        # Run all blocks (optionally under activation checkpointing), then
        # the optional downsample.
        for blk in self.blocks:
            if self.use_checkpoint:
                x = checkpoint.checkpoint(blk, x, x_size)
            else:
                x = blk(x, x_size)
        if self.downsample is not None:
            x = self.downsample(x)
        return x
    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
    def flops(self):
        flops = 0
        for blk in self.blocks:
            flops += blk.flops()
        if self.downsample is not None:
            flops += self.downsample.flops()
        return flops
class RSTB(nn.Module):
    """Residual Swin Transformer Block (RSTB).

    A ``BasicLayer`` followed by a convolution, wrapped in a residual
    connection: ``out = x + patch_embed(conv(patch_unembed(layer(x))))``.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional):
            If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional):
            Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional):
            Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool):
            Whether to use checkpointing to save memory. Default: False.
        img_size: Input image size.
        patch_size: Patch size.
        resi_connection: The convolutional block before residual connection.
    """

    def __init__(
        self,
        dim,
        input_resolution,
        depth,
        num_heads,
        window_size,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        norm_layer=nn.LayerNorm,
        downsample=None,
        use_checkpoint=False,
        img_size=224,
        patch_size=4,
        resi_connection="1conv",
    ):
        super(RSTB, self).__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.residual_group = BasicLayer(
            dim=dim,
            input_resolution=input_resolution,
            depth=depth,
            num_heads=num_heads,
            window_size=window_size,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            drop=drop,
            attn_drop=attn_drop,
            drop_path=drop_path,
            norm_layer=norm_layer,
            downsample=downsample,
            use_checkpoint=use_checkpoint,
        )
        if resi_connection == "1conv":
            self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
        elif resi_connection == "3conv":
            # to save parameters and memory: 3x3 -> 1x1 -> 3x3 bottleneck
            self.conv = nn.Sequential(
                nn.Conv2d(dim, dim // 4, 3, 1, 1),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Conv2d(dim // 4, dim, 3, 1, 1),
            )
        # NOTE(review): in_chans=0 appears unused by these helpers (they only
        # reshape between tokens and feature maps) — confirm before relying on it.
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, norm_layer=None
        )
        self.patch_unembed = PatchUnEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, norm_layer=None
        )

    def forward(self, x, x_size):
        # tokens -> feature map -> conv -> tokens, then add the residual.
        return (
            self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size)))
            + x
        )

    def flops(self):
        flops = 0
        flops += self.residual_group.flops()
        H, W = self.input_resolution
        # One 3x3 conv at full resolution (dim -> dim channels).
        flops += H * W * self.dim * self.dim * 9
        flops += self.patch_embed.flops()
        flops += self.patch_unembed.flops()
        return flops
class PatchEmbed(nn.Module):
    r"""Image to Patch Embedding.

    Flattens a ``(B, C, Ph, Pw)`` feature map into a ``(B, Ph*Pw, C)`` token
    sequence and optionally applies a normalization layer on the tokens.

    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        grid = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = grid
        self.num_patches = grid[0] * grid[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        # Normalization is optional; `None` leaves the tokens untouched.
        self.norm = norm_layer(embed_dim) if norm_layer is not None else None

    def forward(self, x):
        # (B, C, Ph, Pw) -> (B, Ph*Pw, C)
        tokens = x.flatten(2).transpose(1, 2)
        if self.norm is not None:
            tokens = self.norm(tokens)
        return tokens

    def flops(self):
        # Only the optional normalization layer contributes any FLOPs.
        H, W = self.img_size
        return H * W * self.embed_dim if self.norm is not None else 0
class PatchUnEmbed(nn.Module):
    r"""Image to Patch Unembedding.

    Inverse of :class:`PatchEmbed`: reshapes a ``(B, H*W, C)`` token sequence
    back into a ``(B, C, H, W)`` feature map.

    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        grid = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = grid
        self.num_patches = grid[0] * grid[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim

    def forward(self, x, x_size):
        # (B, Ph*Pw, C) -> (B, C, H, W) where (H, W) = x_size
        batch = x.shape[0]
        return x.transpose(1, 2).view(batch, self.embed_dim, x_size[0], x_size[1])

    def flops(self):
        # Pure reshape: no multiply-adds.
        return 0
class Upsample(nn.Sequential):
    """Upsample module built from (conv, PixelShuffle) stages.

    For ``scale == 2**n`` it stacks ``n`` stages of a 3x3 conv (expanding
    channels 4x) followed by a x2 PixelShuffle; for ``scale == 3`` a single
    (9x conv, x3 PixelShuffle) pair is used.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
    """

    def __init__(self, scale, num_feat):
        stages = []
        if (scale & (scale - 1)) == 0:
            # scale = 2^n: one (conv, shuffle) pair per factor of two
            for _ in range(int(math.log(scale, 2))):
                stages.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
                stages.append(nn.PixelShuffle(2))
        elif scale == 3:
            stages.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
            stages.append(nn.PixelShuffle(3))
        else:
            raise ValueError(f"scale {scale} is not supported. " "Supported scales: 2^n and 3.")
        super(Upsample, self).__init__(*stages)
class UpsampleOneStep(nn.Sequential):
    """UpsampleOneStep module.

    Lightweight-SR upsampler: always a single 3x3 conv followed by one
    PixelShuffle (unlike :class:`Upsample`, which stacks several stages).
    Used in lightweight SR to save parameters.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
    """

    def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
        layers = [
            nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1),
            nn.PixelShuffle(scale),
        ]
        super(UpsampleOneStep, self).__init__(*layers)
        self.num_feat = num_feat
        self.input_resolution = input_resolution

    def flops(self):
        # One 3x3 conv over the full input resolution.
        H, W = self.input_resolution
        return H * W * self.num_feat * 3 * 9
class SwinIR(nn.Module):
    r"""SwinIR
    A PyTorch impl of : `SwinIR: Image Restoration Using Swin Transformer`,
    based on Swin Transformer.

    Args:
        img_size (int | tuple(int)): Input image size. Default 64
        patch_size (int | tuple(int)): Patch size. Default: 1
        in_chans (int): Number of input image channels. Default: 3
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each Swin Transformer layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 7
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float):
            Override default qk scale of head_dim ** -0.5 if set. Default: None
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool):
            If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool):
            Whether to use checkpointing to save memory. Default: False
        upscale: Upscale factor.
            2/3/4/8 for image SR, 1 for denoising and compress artifact reduction
        img_range: Image range. 1. or 255.
        upsampler: The reconstruction module.
            'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
        resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
    """

    def __init__(
        self,
        img_size=64,
        patch_size=1,
        in_chans=3,
        embed_dim=96,
        depths=[6, 6, 6, 6],
        num_heads=[6, 6, 6, 6],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.1,
        norm_layer=nn.LayerNorm,
        ape=False,
        patch_norm=True,
        use_checkpoint=False,
        upscale=2,
        img_range=1.0,
        upsampler="",
        resi_connection="1conv",
        **kwargs,
    ):
        super(SwinIR, self).__init__()
        num_in_ch = in_chans
        num_out_ch = in_chans
        num_feat = 64
        self.img_range = img_range
        # Per-channel mean used to center the input before the network.
        if in_chans == 3:
            rgb_mean = (0.4488, 0.4371, 0.4040)
            self.mean = flow.Tensor(rgb_mean).view(1, 3, 1, 1)
        else:
            self.mean = flow.zeros(1, 1, 1, 1)
        self.upscale = upscale
        self.upsampler = upsampler
        self.window_size = window_size
        # 1, shallow feature extraction
        self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
        # 2, deep feature extraction
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.num_features = embed_dim
        self.mlp_ratio = mlp_ratio
        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=embed_dim,
            embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None,
        )
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution
        # merge non-overlapping patches into image
        self.patch_unembed = PatchUnEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=embed_dim,
            embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None,
        )
        # absolute position embedding (optional; SwinIR usually relies on
        # the relative position bias inside the attention blocks instead)
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(flow.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=0.02)
        self.pos_drop = nn.Dropout(p=drop_rate)
        # stochastic depth: drop-path rate grows linearly over all blocks
        dpr = [
            x.item() for x in flow.linspace(0, drop_path_rate, sum(depths))
        ]  # stochastic depth decay rule
        # build Residual Swin Transformer blocks (RSTB)
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = RSTB(
                dim=embed_dim,
                input_resolution=(patches_resolution[0], patches_resolution[1]),
                depth=depths[i_layer],
                num_heads=num_heads[i_layer],
                window_size=window_size,
                mlp_ratio=self.mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[
                    sum(depths[:i_layer]) : sum(depths[: i_layer + 1])
                ],  # no impact on SR results
                norm_layer=norm_layer,
                downsample=None,
                use_checkpoint=use_checkpoint,
                img_size=img_size,
                patch_size=patch_size,
                resi_connection=resi_connection,
            )
            self.layers.append(layer)
        self.norm = norm_layer(self.num_features)
        # build the last conv layer in deep feature extraction
        if resi_connection == "1conv":
            self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
        elif resi_connection == "3conv":
            # to save parameters and memory
            self.conv_after_body = nn.Sequential(
                nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1),
            )
        # 3, high quality image reconstruction
        if self.upsampler == "pixelshuffle":
            # for classical SR
            self.conv_before_upsample = nn.Sequential(
                nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)
            )
            self.upsample = Upsample(upscale, num_feat)
            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
        elif self.upsampler == "pixelshuffledirect":
            # for lightweight SR (to save parameters)
            self.upsample = UpsampleOneStep(
                upscale, embed_dim, num_out_ch, (patches_resolution[0], patches_resolution[1])
            )
        elif self.upsampler == "nearest+conv":
            # for real-world SR (less artifacts)
            self.conv_before_upsample = nn.Sequential(
                nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)
            )
            self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            if self.upscale == 4:
                self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
            self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        else:
            # for image denoising and JPEG compression artifact reduction
            self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for Linear weights, constants for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def no_weight_decay(self):
        # Parameter names excluded from weight decay by the optimizer setup.
        return {"absolute_pos_embed"}

    def no_weight_decay_keywords(self):
        return {"relative_position_bias_table"}

    def check_image_size(self, x):
        # Reflect-pad H and W up to the next multiple of the window size.
        _, _, h, w = x.size()
        mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
        mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
        x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), "reflect")
        return x

    def forward_features(self, x):
        # Deep feature extraction: embed -> RSTB stack -> norm -> unembed.
        x_size = (x.shape[2], x.shape[3])
        x = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)
        for layer in self.layers:
            x = layer(x, x_size)
        x = self.norm(x)  # B L C
        x = self.patch_unembed(x, x_size)
        return x

    def forward(self, x):
        H, W = x.shape[2:]
        x = self.check_image_size(x)
        # Normalize: subtract channel mean, scale into the training range.
        self.mean = self.mean.type_as(x).to(x.device)
        x = (x - self.mean) * self.img_range
        if self.upsampler == "pixelshuffle":
            # for classical SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.conv_last(self.upsample(x))
        elif self.upsampler == "pixelshuffledirect":
            # for lightweight SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.upsample(x)
        elif self.upsampler == "nearest+conv":
            # for real-world SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.lrelu(
                self.conv_up1(flow.nn.functional.interpolate(x, scale_factor=2, mode="nearest"))
            )
            if self.upscale == 4:
                x = self.lrelu(
                    self.conv_up2(flow.nn.functional.interpolate(x, scale_factor=2, mode="nearest"))
                )
            x = self.conv_last(self.lrelu(self.conv_hr(x)))
        else:
            # for image denoising and JPEG compression artifact reduction
            x_first = self.conv_first(x)
            res = self.conv_after_body(self.forward_features(x_first)) + x_first
            x = x + self.conv_last(res)
        # Undo the normalization and crop away the window padding.
        x = x / self.img_range + self.mean
        return x[:, :, : H * self.upscale, : W * self.upscale]

    def flops(self):
        flops = 0
        H, W = self.patches_resolution
        # Shallow feature extraction conv (3 -> embed_dim, 3x3 kernel).
        flops += H * W * 3 * self.embed_dim * 9
        flops += self.patch_embed.flops()
        for i, layer in enumerate(self.layers):
            flops += layer.flops()
        flops += H * W * 3 * self.embed_dim * self.embed_dim
        # NOTE(review): assumes self.upsample exists and defines flops() —
        # only true for the 'pixelshuffledirect' upsampler; other modes would
        # raise AttributeError here. TODO confirm intended usage.
        flops += self.upsample.flops()
        return flops
if __name__ == "__main__":
    # Quick smoke test: build a lightweight-SR SwinIR and run one forward pass.
    upscale = 4
    window_size = 8
    # Round the nominal LR resolution up to a multiple of the window size.
    height = (1024 // upscale // window_size + 1) * window_size
    width = (720 // upscale // window_size + 1) * window_size
    # NOTE(review): the model is built with upscale=2 while `upscale` above is 4
    # (used only for sizing) — confirm this mismatch is intentional.
    model = SwinIR(
        upscale=2,
        img_size=(height, width),
        window_size=window_size,
        img_range=1.0,
        depths=[6, 6, 6, 6],
        embed_dim=60,
        num_heads=[6, 6, 6, 6],
        mlp_ratio=2,
        upsampler="pixelshuffledirect",
    )
    print(model)
    print(height, width, model.flops() / 1e9)
    x = flow.randn((1, 3, height, width))
    x = model(x)
    print(x.shape)
| 37,112 | 34.823359 | 100 | py |
libai | libai-main/projects/DALLE2/swinir/upsample.py | import os
import oneflow as flow
import requests
from .models import SwinIR as net
def load_torch_weight(model, model_path):
    """Copy a PyTorch SwinIR checkpoint into the given OneFlow model.

    The checkpoint may either be a bare state dict or wrap one under the
    ``params_ema`` key; every tensor is converted to OneFlow through numpy.

    Args:
        model: module whose state dict matches the checkpoint layout.
        model_path: path to the ``.pth`` checkpoint on disk.

    Returns:
        The same ``model`` with the converted weights loaded (strict mode).
    """
    import torch

    checkpoint = torch.load(model_path, map_location="cpu")
    # Some releases nest the EMA weights one level deeper.
    if "params_ema" in checkpoint.keys():
        checkpoint = checkpoint["params_ema"]
    converted = {name: flow.tensor(value.numpy()) for name, value in checkpoint.items()}
    model.load_state_dict(converted, strict=True)
    return model
def load_model(model_path=None):
    """Build the real-world 4x SwinIR model and load its weights.

    If ``model_path`` does not exist, the checkpoint is first downloaded
    from the official SwinIR release page into that location.

    Args:
        model_path: where the ``.pth`` checkpoint lives (or should be saved).

    Returns:
        The SwinIR network with the pretrained weights loaded.
    """
    if not os.path.exists(model_path):
        # Fetch the official checkpoint before the first use.
        os.makedirs(os.path.dirname(model_path), exist_ok=True)
        url = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/{}".format(
            os.path.basename(model_path)
        )
        response = requests.get(url, allow_redirects=True)
        print(f"downloading model {model_path}")
        open(model_path, "wb").write(response.content)
    else:
        print(f"loading model from {model_path}")
    # Real-world SR configuration ("nearest+conv" upsampler, 3conv residual).
    swinir = net(
        upscale=4,
        in_chans=3,
        img_size=64,
        window_size=8,
        img_range=1.0,
        depths=[6, 6, 6, 6, 6, 6, 6, 6, 6],
        embed_dim=240,
        num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],
        mlp_ratio=2,
        upsampler="nearest+conv",
        resi_connection="3conv",
    )
    return load_torch_weight(swinir, model_path)
def upsample4x(img_lq, model):
    """Upsample an image batch from h*w to (4h) * (4w) with a SwinIR model.

    The input is padded (by mirroring the tail rows/columns) so both spatial
    sizes become multiples of the model window size, run through the model
    under ``no_grad``, cropped back to 4x the original size, and clamped to
    the [0, 1] intensity range.
    """
    device = flow.device("cuda" if flow.cuda.is_available() else "cpu")
    model.eval()
    model = model.to(device)
    img_lq = img_lq.to(device)
    window_size = 8
    scale = 4
    # inference
    with flow.no_grad():
        _, _, h_old, w_old = img_lq.size()
        # Pad height/width up to the next multiple of window_size by
        # concatenating a flipped copy and slicing off the excess.
        pad_h = (h_old // window_size + 1) * window_size - h_old
        pad_w = (w_old // window_size + 1) * window_size - w_old
        img_lq = flow.cat([img_lq, flow.flip(img_lq, [2])], 2)[:, :, : h_old + pad_h, :]
        img_lq = flow.cat([img_lq, flow.flip(img_lq, [3])], 3)[:, :, :, : w_old + pad_w]
        sr = model(img_lq)
        # Drop the padded border and clamp to the valid range.
        sr = sr[..., : h_old * scale, : w_old * scale]
        sr = sr.clamp_(0, 1)
    return sr
def upsample16x(imgs, model):
    """Upsample images 16x by applying the 4x SwinIR model twice."""
    quadrupled = upsample4x(imgs, model)
    return upsample4x(quadrupled, model)
| 2,486 | 28.607143 | 88 | py |
libai | libai-main/projects/BLOOM/modeling/activation.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
def bloom_gelu_forward(x):
    """
    Tanh-based GELU approximation (inference path).

    Adapted from Megatron-DeepSpeed code; kept as a simple expression so the
    inference graph stays jitable.

    Args:
        x (`torch.tensor`, *required*):
            input hidden states
    """
    inner = 0.79788456 * x * (1 + 0.044715 * x * x)
    return 0.5 * x * (1.0 + flow.tanh(inner))
def bloom_gelu_back(g, x):
    """
    Gradient of the tanh approximation of gelu. The gradient of actual gelu is:
    0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)

    Args:
        g (`torch.tensor`, *required*):
            gradient output tensor
        x (`torch.tensor`, *required*):
            one-tuple holding the forward input tensor (as saved by autograd)
    """
    inp = x[0]  # autograd hands the saved tensors over as a tuple
    tanh_out = flow.tanh(0.79788456 * inp * (1 + 0.044715 * inp * inp))
    sech2 = 1 - tanh_out * tanh_out  # derivative of tanh
    local_grad = 0.5 * inp * (sech2 * (0.79788456 + 0.1070322243 * inp * inp)) + 0.5 * (
        1 + tanh_out
    )
    return local_grad * g
class GeLUFunction(flow.autograd.Function):
    """Custom autograd Function pairing the tanh-GELU forward with its
    hand-derived backward (used on the training path)."""

    @staticmethod
    def forward(ctx, input):
        # Stash the input; the backward formula needs it.
        ctx.save_for_backward(input)
        return bloom_gelu_forward(input)

    @staticmethod
    def backward(ctx, grad_output):
        saved = ctx.saved_tensors
        return bloom_gelu_back(grad_output, saved)
class BloomGelu(nn.Module):
    """
    GELU wrapper that picks the right implementation per mode: the simple
    jitable function at inference time, and the autograd function (with the
    exact hand-written gradient) while training.

    Partly copied from Megatron-DeepSpeed code and adapted for our needs.
    See here why autograd functions are not torchscriptable:
    https://github.com/pytorch/pytorch/issues/22329
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # nn.Module.training flips with model.train()/model.eval().
        return GeLUFunction.apply(x) if self.training else bloom_gelu_forward(x)
| 2,621 | 30.590361 | 97 | py |
libai | libai-main/projects/BLOOM/modeling/bloom_model.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.config import configurable
from libai.inference.generator.generation_utils import Generator
from libai.layers import Embedding, LayerNorm, LMLogits
from libai.models.utils import init_method_normal, scaled_init_method_normal
from libai.utils import distributed as dist
from projects.BLOOM.modeling.mask import _expand_mask, _make_causal_mask, build_alibi_tensor
from projects.BLOOM.modeling.transformers import BloomBlock
class BloomModel(nn.Module):
    """BLOOM transformer backbone: embeddings, decoder blocks, final norm.

    Produces the final hidden states; the language-modeling head lives in
    ``BloomForCausalLM``. Positional information comes from ALiBi attention
    biases rather than position embeddings.
    """

    @configurable
    def __init__(
        self,
        vocab_size,
        hidden_size,
        hidden_layers,
        n_head,
        padding_idx,
        pretraining_tp=1,
        slow_but_exact=False,
        initializer_range=0.02,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0,
        attention_dropout=0,
        amp_enabled=False,
        layer_norm_epsilon=1e-12,
        cfg=None,
    ):
        super().__init__()
        self.cfg = cfg
        self.embed_dim = hidden_size
        self.num_heads = n_head
        self.hidden_layers = hidden_layers
        init_method = init_method_normal(initializer_range)
        # Output projections use the depth-scaled init (Megatron convention).
        scaled_init_method = scaled_init_method_normal(initializer_range, hidden_layers)
        self.word_embeddings = Embedding(
            vocab_size,
            self.embed_dim,
            padding_idx=padding_idx,
            init_method=init_method,
            amp_enabled=amp_enabled,
            layer_idx=0,
        )
        # BLOOM normalizes the embeddings before the first block.
        self.word_embeddings_layernorm = LayerNorm(
            self.embed_dim, eps=layer_norm_epsilon, layer_idx=0
        )
        self.h = flow.nn.ModuleList(
            [
                BloomBlock(
                    hidden_size=hidden_size,
                    n_head=n_head,
                    layer_norm_epsilon=layer_norm_epsilon,
                    hidden_dropout=hidden_dropout,
                    attention_dropout=attention_dropout,
                    pretraining_tp=pretraining_tp,
                    slow_but_exact=slow_but_exact,
                    init_method=init_method,
                    output_layer_init_method=scaled_init_method,
                    apply_residual_connection_post_layernorm=apply_residual_connection_post_layernorm,  # noqa
                    layer_idx=i,
                )
                for i in range(hidden_layers)
            ]
        )
        # Final Layer Norm
        self.ln_f = LayerNorm(self.embed_dim, eps=layer_norm_epsilon, layer_idx=hidden_layers - 1)

    @classmethod
    def from_config(cls, cfg):
        """Map a config object onto the constructor keyword arguments."""
        return {
            "vocab_size": cfg.vocab_size,
            "hidden_size": cfg.hidden_size,
            "hidden_layers": cfg.hidden_layers,
            "n_head": cfg.n_head,
            "padding_idx": cfg.padding_idx,
            "pretraining_tp": cfg.pretraining_tp,
            "slow_but_exact": cfg.slow_but_exact,
            "apply_residual_connection_post_layernorm": cfg.apply_residual_connection_post_layernorm,  # noqa
            "hidden_dropout": cfg.hidden_dropout,
            "attention_dropout": cfg.attention_dropout,
            "amp_enabled": cfg.amp_enabled,
            "layer_norm_epsilon": cfg.layer_norm_epsilon,
            "cfg": cfg,
        }

    def _prepare_attn_mask(
        self,
        attention_mask,
        input_shape,
        past_key_values_length,
    ):
        # Combine the causal (lower-triangular) mask with the padding mask.
        combined_attention_mask = None
        _, src_length = input_shape
        if src_length > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape, past_key_values_length=past_key_values_length
            )
        # [batch_size, seq_length] -> [batch_size, 1, tgt_length, src_length]
        expanded_attn_mask = _expand_mask(attention_mask, tgt_length=src_length)
        combined_attention_mask = (
            expanded_attn_mask
            if combined_attention_mask is None
            else expanded_attn_mask | combined_attention_mask
        )
        return combined_attention_mask

    def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
        """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
        if head_mask.dim() == 1:
            head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
            head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
        elif head_mask.dim() == 2:
            head_mask = (
                head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
            )  # We can specify head_mask for each layer
        assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
        # NOTE(review): nn.Module defines no `.dtype` attribute by default —
        # this line looks like it would raise unless a subclass/mixin sets it.
        # Only reached when head_mask is not None; confirm.
        head_mask = head_mask.to(dtype=self.dtype)  # switch to float if need + fp16 compatibility
        return head_mask

    def get_head_mask(self, head_mask, num_hidden_layers, is_attention_chunked=False):
        """
        Prepare the head mask if needed.

        Args:
            head_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`,
            *optional*):
                The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for
                discard).
            num_hidden_layers (`int`):
                The number of hidden layers in the model.
            is_attention_chunked: (`bool`, *optional*, defaults to `False`):
                Whether or not the attentions scores are computed by chunks or not.

        Returns:
            `torch.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x
            seq_length]` or list with `[None]` for each layer.
        """
        if head_mask is not None:
            head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
            if is_attention_chunked is True:
                head_mask = head_mask.unsqueeze(-1)
        else:
            head_mask = [None] * num_hidden_layers
        return head_mask

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        past_key_values=None,
        head_mask=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
    ):
        # Move every provided input onto the placement of the first layer.
        input_ids = (
            input_ids.to_global(placement=dist.get_layer_placement(0))
            if input_ids is not None
            else input_ids
        )
        attention_mask = (
            attention_mask.to_global(placement=dist.get_layer_placement(0))
            if attention_mask is not None
            else attention_mask
        )
        head_mask = (
            head_mask.to_global(placement=dist.get_layer_placement(0))
            if head_mask is not None
            else head_mask
        )
        inputs_embeds = (
            inputs_embeds.to_global(placement=dist.get_layer_placement(0))
            if inputs_embeds is not None
            else inputs_embeds
        )
        # NOTE(review): batch_size/seq_length are only assigned when input_ids
        # is given; an inputs_embeds-only call would hit a NameError below.
        if input_ids is not None:
            batch_size, seq_length = input_ids.shape
        if past_key_values is None:
            past_key_values = tuple([None] * len(self.h))
        head_mask = self.get_head_mask(head_mask, self.hidden_layers)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        hidden_states = self.word_embeddings_layernorm(inputs_embeds)
        presents = () if use_cache else None
        # Account for cached keys/values from previous decoding steps.
        seq_length_with_past = seq_length
        past_key_values_length = 0
        if past_key_values[0] is not None:
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length
        if attention_mask is None:
            attention_mask = flow.ones(
                (batch_size, seq_length_with_past),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                placement=dist.get_layer_placement(0),
            )
        # ALiBi: per-head linear attention biases derived from the mask.
        alibi = build_alibi_tensor(attention_mask, self.num_heads, hidden_states.dtype)
        causal_mask = self._prepare_attn_mask(
            attention_mask,
            input_shape=(batch_size, seq_length),
            past_key_values_length=past_key_values_length,
        )
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            outputs = block(
                hidden_states,
                layer_past=layer_past,
                attention_mask=causal_mask,
                head_mask=head_mask[i],
                use_cache=use_cache,
                output_attentions=output_attentions,
                alibi=alibi,
            )
            hidden_states = outputs[0]
            if use_cache is True:
                # outputs[1] is this layer's (key, value) cache entry.
                presents = presents + (outputs[1],)
        hidden_states = self.ln_f(hidden_states)
        return {"last_hidden_state": hidden_states, "past_key_values": presents}
class BloomForCausalLM(nn.Module, Generator):
    """BLOOM transformer with a tied language-modeling head.

    Wraps :class:`BloomModel` and projects its final hidden states onto the
    vocabulary with :class:`LMLogits`, sharing the word-embedding weights.
    Also implements the cache plumbing (`prepare_inputs_for_generation`,
    `_reorder_cache` and the two layout converters) used by
    :class:`Generator` for autoregressive decoding.
    """

    @configurable
    def __init__(
        self,
        vocab_size,
        hidden_size,
        hidden_layers,
        n_head,
        padding_idx,
        pretraining_tp=1,
        slow_but_exact=False,
        initializer_range=0.02,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0,
        attention_dropout=0,
        amp_enabled=False,
        layer_norm_epsilon=1e-12,
        cfg=None,
    ):
        super().__init__()
        self.cfg = cfg
        self.transformer = BloomModel(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            hidden_layers=hidden_layers,
            n_head=n_head,
            padding_idx=padding_idx,
            pretraining_tp=pretraining_tp,
            slow_but_exact=slow_but_exact,
            initializer_range=initializer_range,
            apply_residual_connection_post_layernorm=apply_residual_connection_post_layernorm,
            hidden_dropout=hidden_dropout,
            attention_dropout=attention_dropout,
            amp_enabled=amp_enabled,
            layer_norm_epsilon=layer_norm_epsilon,
            cfg=cfg,
        )
        # Tied head: forward() feeds it the word-embedding weight matrix.
        self.lm_head = LMLogits(vocab_size, bias=False)

    @classmethod
    def from_config(cls, cfg):
        """Map a config object onto the constructor keyword arguments."""
        return {
            "vocab_size": cfg.vocab_size,
            "hidden_size": cfg.hidden_size,
            "hidden_layers": cfg.hidden_layers,
            "n_head": cfg.n_head,
            "padding_idx": cfg.padding_idx,
            "pretraining_tp": cfg.pretraining_tp,
            "slow_but_exact": cfg.slow_but_exact,
            "apply_residual_connection_post_layernorm": cfg.apply_residual_connection_post_layernorm,  # noqa
            "hidden_dropout": cfg.hidden_dropout,
            "attention_dropout": cfg.attention_dropout,
            "amp_enabled": cfg.amp_enabled,
            "layer_norm_epsilon": cfg.layer_norm_epsilon,
            "cfg": cfg,
        }

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        past_key_values=None,
        head_mask=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
    ):
        """Run the backbone and project to vocabulary logits.

        Returns a dict with ``logits``, ``past_key_values`` (``None`` unless
        ``use_cache``) and the final ``hidden_states``.
        """
        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        hidden_states = transformer_outputs["last_hidden_state"]
        # Tied projection: reuse the input embedding weights as the output matrix.
        lm_logits = self.lm_head(hidden_states, self.transformer.word_embeddings.weight)
        return {
            "logits": lm_logits,
            "past_key_values": transformer_outputs["past_key_values"],
            "hidden_states": transformer_outputs["last_hidden_state"],
            # "attentions": transformer_outputs.attentions,
        }

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        **kwargs,
    ) -> dict:
        """Assemble the model inputs for one decoding step."""
        # only last token for input_ids if past is not None
        if past_key_values:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            # A leading dim equal to batch_size means the cache still has the
            # standard (batch, heads, ...) layout; fold it to BLOOM's layout.
            if past_key_values[0][0].shape[0] == input_ids.shape[0]:
                past_key_values = self._convert_to_bloom_cache(past_key_values)
        if inputs_embeds is not None and past_key_values is None:
            # Embedding input is only usable on the first (cache-less) step.
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}
        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    def _reorder_cache(self, past, beam_idx):
        """Reorder the KV cache along the batch axis for beam search."""
        standardized_past = self._convert_to_standard_cache(past, batch_size=len(beam_idx))
        # Get a copy of `beam_idx` on each device the cache lives on.
        device_to_beam_idx = {
            past_state.device: beam_idx.to(past_state.device)
            for layer_past in past
            for past_state in layer_past
        }
        reordered_past = tuple(
            (
                layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]),
                layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device]),
            )
            for layer_past in standardized_past
        )
        return self._convert_to_bloom_cache(reordered_past)

    # FIX: these two converters take the cache itself as their first argument
    # (no `self`), yet are invoked through `self._convert_to_*_cache(...)`.
    # Without @staticmethod those calls raised TypeError because the bound
    # instance was passed as an extra positional argument. Upstream
    # HuggingFace Bloom declares them as staticmethods as well.
    @staticmethod
    def _convert_to_standard_cache(
        past_key_value,
        batch_size,
    ):
        """
        Standardizes the format of the cache so as to match most implementations,
        i.e. to tuple(tuple([batch_size, num_heads, ...]))
        """
        batch_size_times_num_heads, head_dim, seq_length = past_key_value[0][0].shape
        num_heads = batch_size_times_num_heads // batch_size
        return tuple(
            (
                # key: (B*H, head_dim, seq) -> (B, H, head_dim, seq)
                layer_past[0].view(batch_size, num_heads, head_dim, seq_length),
                # value: (B*H, seq, head_dim) -> (B, H, seq, head_dim)
                layer_past[1].view(batch_size, num_heads, seq_length, head_dim),
            )
            for layer_past in past_key_value
        )

    @staticmethod
    def _convert_to_bloom_cache(past_key_value):
        """
        Converts the cache to the format expected by Bloom,
        i.e. to tuple(tuple([batch_size * num_heads, ...]))
        """
        batch_size, num_heads, head_dim, seq_length = past_key_value[0][0].shape
        batch_size_times_num_heads = batch_size * num_heads
        return tuple(
            (
                layer_past[0].view(batch_size_times_num_heads, head_dim, seq_length),
                layer_past[1].view(batch_size_times_num_heads, seq_length, head_dim),
            )
            for layer_past in past_key_value
        )
| 15,297 | 35.080189 | 110 | py |
libai | libai-main/projects/BLOOM/modeling/attention.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import oneflow as flow
from oneflow import nn
from oneflow.nn import functional as F
from libai.layers import Linear
def dropout_add(x, residual, prob, training):
    """Apply dropout to ``x`` and add the residual branch.

    Args:
        x (`torch.tensor`, *required*):
            input tensor the dropout is applied to
        residual (`torch.tensor`, *required*):
            residual tensor added to the (possibly dropped-out) input
        prob (`float`, *required*):
            dropout probability
        training (`bool`, *required*):
            training mode; dropout is a no-op when ``False``
    """
    dropped = F.dropout(x, p=prob, training=training)
    return residual + dropped
class BloomAttention(nn.Module):
    """Multi-head self-attention block used by Bloom.

    Position information is injected through the additive ``alibi`` bias that
    ``forward`` folds into the attention scores via ``flow.baddbmm`` instead of
    positional embeddings.
    """

    def __init__(
        self,
        hidden_size,
        n_head,
        hidden_dropout,
        attention_dropout,
        pretraining_tp,
        slow_but_exact,
        init_method=nn.init.xavier_normal_,
        output_layer_init_method=None,
        layer_idx=0,
    ):
        super().__init__()
        self.pretraining_tp = pretraining_tp
        self.slow_but_exact = slow_but_exact
        self.hidden_size = hidden_size
        self.num_heads = n_head
        self.head_dim = self.hidden_size // self.num_heads
        self.split_size = self.hidden_size
        self.hidden_dropout = hidden_dropout
        # The output projection falls back to the generic initializer when no
        # dedicated one is supplied.
        if output_layer_init_method is None:
            output_layer_init_method = init_method
        if self.head_dim * self.num_heads != self.hidden_size:
            raise ValueError(
                f"`hidden_size` must be divisible by num_heads "
                f"(got `hidden_size`: {self.hidden_size} and `num_heads`:"
                f" {self.num_heads})."
            )
        # Layer-wise attention scaling
        self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim)
        self.beta = 1.0
        # Fused projection producing Q, K and V in one column-parallel matmul.
        self.query_key_value = Linear(
            self.hidden_size,
            3 * self.hidden_size,
            bias=True,
            parallel="col",
            init_method=init_method,
            layer_idx=layer_idx,
        )
        self.dense = Linear(
            self.hidden_size,
            self.hidden_size,
            parallel="row",
            init_method=output_layer_init_method,
            layer_idx=layer_idx,
        )
        self.attention_dropout = nn.Dropout(attention_dropout)

    def _split_heads(self, fused_qkv):
        """
        Split the last dimension into (num_heads, head_dim) without making any copies, results share
        same memory storage as `fused_qkv`
        Args:
            fused_qkv (`torch.tensor`, *required*):
                [batch_size, seq_length, num_heads * 3 * head_dim]
        Returns:
            query: [batch_size, seq_length, num_heads, head_dim]
            key: [batch_size, seq_length, num_heads, head_dim]
            value: [batch_size, seq_length, num_heads, head_dim]
        """
        batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
        fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)
        return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]

    def _merge_heads(self, x):
        """
        Merge heads together over the last dimension
        Args:
            x: (`torch.tensor`, *required*): [batch_size * num_heads, seq_length, head_dim]
        Returns:
            torch.tensor: [batch_size, seq_length, num_heads * head_dim]
        """
        # What we want to achieve is:
        # batch_size * num_heads, seq_len, head_dim -> batch_size, seq_len, num_heads * head_dim
        batch_size_and_num_heads, seq_length, _ = x.shape
        batch_size = batch_size_and_num_heads // self.num_heads
        # First view to decompose the batch size
        # batch_size * num_heads, seq_len, head_dim -> batch_size, num_heads, seq_len, head_dim
        x = x.view(batch_size, self.num_heads, seq_length, self.head_dim)
        # batch_size, num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads, head_dim
        x = x.permute(0, 2, 1, 3)
        # batch_size, seq_len, num_heads, head_dim -> batch_size, seq_len, num_heads * head_dim
        return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim)

    def forward(
        self,
        hidden_states,
        residual,
        alibi,
        attention_mask,
        layer_past=None,
        head_mask=None,
        use_cache=False,
        output_attentions=False,
    ):
        """Run attention over ``hidden_states`` and add the ``residual`` branch.

        Args:
            hidden_states: [batch_size, seq_length, hidden_size] input.
            residual: tensor added to the projected output (after dropout).
            alibi: additive position bias fed to ``flow.baddbmm``.
            attention_mask: boolean mask; ``True`` positions are masked out.
            layer_past: optional (key, value) cache from previous decode steps.
            head_mask: optional multiplicative mask over attention probs.
            use_cache: when True, also return the updated (key, value) pair.
            output_attentions: when True, append attention probs to the output.

        Returns:
            Tuple ``(output_tensor, present[, attention_probs])``.
        """
        fused_qkv = self.query_key_value(hidden_states)  # [batch_size, seq_length, 3 x hidden_size]
        # 3 x [batch_size, seq_length, num_heads, head_dim]
        (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv)
        batch_size, q_length, _, _ = query_layer.shape
        query_layer = query_layer.transpose(1, 2).reshape(
            batch_size * self.num_heads, q_length, self.head_dim
        )
        # Keys are laid out [*, head_dim, kv_length] so baddbmm computes Q @ K^T
        # without an explicit transpose.
        key_layer = key_layer.permute(0, 2, 3, 1).reshape(
            batch_size * self.num_heads, self.head_dim, q_length
        )
        value_layer = value_layer.transpose(1, 2).reshape(
            batch_size * self.num_heads, q_length, self.head_dim
        )
        if layer_past is not None:
            past_key, past_value = layer_past
            # Keys concatenate on their last (sequence) axis, values on dim 1.
            key_layer = flow.cat((past_key, key_layer), dim=2)
            value_layer = flow.cat((past_value, value_layer), dim=1)
        _, _, kv_length = key_layer.shape
        if use_cache is True:
            present = (key_layer, value_layer)
        else:
            present = None
        # scores = beta * alibi + inv_norm_factor * (Q @ K)
        matmul_result = flow.baddbmm(
            alibi,
            batch1=query_layer,
            batch2=key_layer,
            beta=self.beta,
            alpha=self.inv_norm_factor,
        )
        attention_scores = matmul_result.view(batch_size, self.num_heads, q_length, kv_length)
        input_dtype = attention_scores.dtype
        # Masked positions are pushed to the dtype's minimum so softmax gives
        # them ~zero probability.
        attn_weights = flow.masked_fill(
            attention_scores, attention_mask, flow.finfo(attention_scores.dtype).min
        )
        attention_probs = F.softmax(attn_weights, dim=-1).to(input_dtype)
        attention_probs = self.attention_dropout(attention_probs)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        attention_probs_reshaped = attention_probs.view(
            batch_size * self.num_heads, q_length, kv_length
        )
        context_layer = flow.bmm(attention_probs_reshaped, value_layer)
        context_layer = self._merge_heads(context_layer)
        if self.pretraining_tp > 1 and self.slow_but_exact:
            # Slice the output projection into `pretraining_tp` chunks —
            # presumably to reproduce Megatron's tensor-parallel matmul
            # bitwise; TODO confirm against the upstream HF implementation.
            slices = self.hidden_size / self.pretraining_tp
            output_tensor = flow.zeros_like(context_layer)
            for i in range(self.pretraining_tp):
                output_tensor = output_tensor + F.linear(
                    context_layer[:, :, int(i * slices) : int((i + 1) * slices)],
                    self.dense.weight[:, int(i * slices) : int((i + 1) * slices)],
                )
        else:
            output_tensor = self.dense(context_layer)
        output_tensor = dropout_add(output_tensor, residual, self.hidden_dropout, self.training)
        outputs = (output_tensor, present)
        if output_attentions:
            outputs += (attention_probs,)
        return outputs
| 7,869 | 33.669604 | 100 | py |
libai | libai-main/projects/BLOOM/modeling/mask.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import oneflow as flow
from libai.utils import distributed as dist
def _make_causal_mask(input_ids_shape, past_key_values_length):
    """
    Make causal mask used for self-attention.

    Returns a boolean tensor of shape
    [batch_size, 1, target_length, target_length + past_key_values_length]
    where ``True`` marks positions a query must NOT attend to (the mask is
    consumed by ``flow.masked_fill`` in the attention layer).
    """
    batch_size, target_length = input_ids_shape
    # Start from an all-True (all-masked) global tensor and carve out the
    # visible region below.
    mask = flow.ones(
        (target_length, target_length + past_key_values_length),
        dtype=flow.bool,
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=dist.get_layer_placement(0),
    )
    # ONNX doesn't support `torch.Tensor.triu` properly, thus we use this workaround
    seq_ids = flow.arange(
        target_length,
        dtype=flow.long,
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=dist.get_layer_placement(0),
    )
    # True strictly above the diagonal: a query may not see later keys.
    mask[:, past_key_values_length:] = seq_ids[:, None] < seq_ids[None, :]
    # Cached (past) positions are always visible to every query.
    if past_key_values_length > 0:
        mask[:, :past_key_values_length] = False
    expanded_mask = mask[None, None, :, :].expand(
        batch_size, 1, target_length, target_length + past_key_values_length
    )
    return expanded_mask
def _expand_mask(mask, tgt_length):
    """
    Expands attention_mask from `[batch_size, src_length]` to
    `[batch_size, 1, tgt_length, src_length]`, inverting it so that
    ``True`` marks padded positions.
    """
    batch_size, src_length = mask.shape
    if tgt_length is None:
        tgt_length = src_length
    inverted_mask = ~mask[:, None, None, :].to(flow.bool)
    return inverted_mask.expand(batch_size, 1, tgt_length, src_length)
def build_alibi_tensor(attention_mask, num_heads, dtype):
    """Build the additive ALiBi attention bias for a batch.

    One slope per head is computed from a geometric schedule based on the
    closest power of two below ``num_heads`` (extra heads, when ``num_heads``
    is not a power of two, use a second schedule), then multiplied by each
    token's 0-based position among the non-padding tokens.

    Args:
        attention_mask: [batch_size, seq_length] tensor of 1s (keep) / 0s (pad).
        num_heads: number of attention heads.
        dtype: dtype of the returned tensor.

    Returns:
        Tensor of shape [batch_size * num_heads, 1, seq_length].
    """
    batch_size, seq_length = attention_mask.shape
    closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
    base = flow.tensor(
        2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))),
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=attention_mask.placement,
    )
    powers = flow.arange(
        1,
        1 + closest_power_of_2,
        sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
        placement=attention_mask.placement,
    )
    # Slopes for the first `closest_power_of_2` heads: base ** (1..p).
    slopes = flow.pow(base, powers)
    if closest_power_of_2 != num_heads:
        # Heads beyond the power of two get slopes from a finer schedule
        # (odd powers of a base derived from 2 * closest_power_of_2).
        extra_base = flow.tensor(
            2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))),
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=attention_mask.placement,
        )
        num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
        extra_powers = flow.arange(
            1,
            1 + 2 * num_remaining_heads,
            2,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=attention_mask.placement,
        )
        slopes = flow.cat([slopes, flow.pow(extra_base, extra_powers)], dim=0)
    # (cumsum - 1) * mask gives each token its 0-based index among the
    # non-padding tokens (padding positions stay 0).
    arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :]
    alibi = slopes[..., None] * arange_tensor
    return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype)
| 3,662 | 35.63 | 86 | py |
libai | libai-main/projects/T5/utils/weight_convert.py | import argparse
import oneflow as flow
import torch
from libai.config import LazyConfig
def parse_args():
    """Define and parse the command-line options of the weight convertor."""
    parser = argparse.ArgumentParser(description="MT5 Weight Convertor")
    parser.add_argument(
        "--oneflow_state_dict_path",
        type=str,
        help="The path of mt5's checkpoint in LiBai",
    )
    parser.add_argument(
        "--config_path",
        type=str,
        default="projects/T5/configs/mt5_pretrain.py",
        help="The path of the training config",
    )
    parser.add_argument(
        "--save_path",
        type=str,
        default="projects/T5/pytorch_model.bin",
    )
    return parser.parse_args()
def fix_qkv_ordering(qkv, head_size, num_heads, hidden_size=None):
    """Reorder a fused QKV weight from head-major to projection-major layout.

    The input stacks rows per head; the output groups all Q rows first, then
    all K rows, then all V rows, matching HuggingFace's separate q/k/v
    projection weights.
    """
    if hidden_size is None:
        hidden_size = head_size * num_heads
    num_of_qkv = qkv.shape[0] // (head_size * num_heads)
    reordered = qkv.reshape(num_heads, num_of_qkv, head_size, hidden_size)
    reordered = reordered.permute(1, 0, 2, 3).contiguous()
    return reordered.view(num_of_qkv * head_size * num_heads, hidden_size)
def convert_tensor(tensor):
    """Copy a OneFlow tensor into a detached float32 PyTorch tensor."""
    host_array = tensor.detach().to_numpy()
    return torch.tensor(host_array, dtype=torch.float32)
def convert_state_dict(oneflow_state_dict_path, libai_cfg, prefix="t5_model."):
    """Map a LiBai (OneFlow) T5/MT5 checkpoint onto HuggingFace parameter names.

    Args:
        oneflow_state_dict_path: path of the OneFlow checkpoint to load.
        libai_cfg: LiBai model config; reads `num_attention_heads`,
            `hidden_size`, optional `head_size`, and `model_type`
            ("t5" or "mt5") to decide the MLP key layout.
        prefix: key prefix used by the LiBai checkpoint (an empty string when
            the checkpoint was saved without the wrapping `t5_model.` namespace).

    Returns:
        dict mapping HuggingFace parameter names to float32 torch tensors.
    """
    oneflow_state_dict = flow.load(oneflow_state_dict_path)
    torch_state_dict = {}
    # Get configs
    num_heads = libai_cfg.get("num_attention_heads")
    hidden_size = libai_cfg.get("hidden_size")
    head_size = libai_cfg.get("head_size", None)
    if head_size is None:
        head_size = int(hidden_size / num_heads)
    # Positional indices into the dot-split key; they shift by one when the
    # checkpoint keys carry the `t5_model.` prefix.
    layer_idx = 3 if len(prefix) > 1 else 2
    enc_dec_idx = 1 if len(prefix) > 1 else 0
    op_idx = 4 if len(prefix) > 1 else 3
    # Convert T5's Embedding layers.
    # The same word-embedding tensor is shared by encoder, decoder and `shared`.
    x = convert_tensor(oneflow_state_dict.pop(prefix + "embedding.word_embeddings.weight"))
    new_key = "shared.weight"
    torch_state_dict[new_key] = x
    new_key = "encoder.embed_tokens.weight"
    torch_state_dict[new_key] = x
    new_key = "decoder.embed_tokens.weight"
    torch_state_dict[new_key] = x
    # Convert T5's final_layer_norm
    new_key = "encoder.final_layer_norm.weight"
    torch_state_dict[new_key] = convert_tensor(
        oneflow_state_dict.pop(prefix + "encoder.final_layernorm.weight")
    )
    new_key = "decoder.final_layer_norm.weight"
    torch_state_dict[new_key] = convert_tensor(
        oneflow_state_dict.pop(prefix + "decoder.final_layernorm.weight")
    )
    old_keys = list(oneflow_state_dict.keys())
    # Convert T5's lm_head
    new_key = "lm_head.weight"
    if prefix + new_key in old_keys:
        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(prefix + new_key))
    for key in old_keys:
        keys = key.split(".")
        # Skip keys too short to hold a per-layer operator path.
        if op_idx > len(keys):
            continue
        layers = keys[layer_idx]
        enc_dec = keys[enc_dec_idx]
        op_name = keys[op_idx]
        # Relative position bias lives only in block 0 on the HF side.
        if keys[op_idx + 1] == "relative_attention_bias":
            new_key = enc_dec + ".block.0.layer.0.SelfAttention.relative_attention_bias.weight"
            torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
        # Convert T5's Encoder layers.
        if enc_dec == "encoder":
            if op_name == "self_attention":
                if keys[op_idx + 1] == "query_key_value":
                    # Fused QKV: reorder to projection-major and split into q/k/v.
                    x = oneflow_state_dict.pop(key)
                    x = fix_qkv_ordering(x, head_size, num_heads, hidden_size)
                    q, k, v = flow.chunk(x, chunks=3, dim=0)
                    new_key = "encoder.block." + layers + ".layer.0.SelfAttention.q.weight"
                    torch_state_dict[new_key] = convert_tensor(q)
                    new_key = new_key.replace(".q", ".k")
                    torch_state_dict[new_key] = convert_tensor(k)
                    new_key = new_key.replace(".k", ".v")
                    torch_state_dict[new_key] = convert_tensor(v)
                if keys[op_idx + 1] == "dense":
                    new_key = "encoder.block." + layers + ".layer.0.SelfAttention.o.weight"
                    torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
            elif op_name == "input_layernorm":
                new_key = "encoder.block." + layers + ".layer.0.layer_norm.weight"
                torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
            elif op_name == "post_attention_layernorm":
                new_key = "encoder.block." + layers + ".layer.1.layer_norm.weight"
                torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
            elif op_name == "mlp":
                # mt5 uses a gated MLP (wi_0/wi_1); t5 uses a single wi.
                if libai_cfg.get("model_type") == "mt5":
                    if keys[op_idx + 1] == "wi_0":
                        new_key = "encoder.block." + layers + ".layer.1.DenseReluDense.wi_0.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
                    if keys[op_idx + 1] == "wi_1":
                        new_key = "encoder.block." + layers + ".layer.1.DenseReluDense.wi_1.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
                    if keys[op_idx + 1] == "wo":
                        new_key = "encoder.block." + layers + ".layer.1.DenseReluDense.wo.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
                elif libai_cfg.get("model_type") == "t5":
                    if keys[op_idx + 1] == "dense_h_to_4h":
                        new_key = "encoder.block." + layers + ".layer.1.DenseReluDense.wi.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
                    if keys[op_idx + 1] == "dense_4h_to_h":
                        new_key = "encoder.block." + layers + ".layer.1.DenseReluDense.wo.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
        # Convert T5's decoder Layers.
        elif enc_dec == "decoder":
            if op_name == "self_attention":
                if keys[op_idx + 1] == "query_key_value":
                    x = oneflow_state_dict.pop(key)
                    x = fix_qkv_ordering(x, head_size, num_heads, hidden_size)
                    q, k, v = flow.chunk(x, chunks=3, dim=0)
                    new_key = "decoder.block." + layers + ".layer.0.SelfAttention.q.weight"
                    torch_state_dict[new_key] = convert_tensor(q)
                    new_key = new_key.replace(".q", ".k")
                    torch_state_dict[new_key] = convert_tensor(k)
                    new_key = new_key.replace(".k", ".v")
                    torch_state_dict[new_key] = convert_tensor(v)
                if keys[op_idx + 1] == "dense":
                    new_key = "decoder.block." + layers + ".layer.0.SelfAttention.o.weight"
                    torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
            elif op_name == "input_layernorm":
                new_key = "decoder.block." + layers + ".layer.0.layer_norm.weight"
                torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
            elif op_name == "post_attention_layernorm":
                new_key = "decoder.block." + layers + ".layer.1.layer_norm.weight"
                torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
            elif op_name == "post_cross_attention_layernorm":
                new_key = "decoder.block." + layers + ".layer.2.layer_norm.weight"
                torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
            elif op_name == "cross_attention":
                # Cross attention keeps Q separate from the fused KV pair.
                if keys[op_idx + 1] == "query":
                    x = oneflow_state_dict.pop(key)
                    x = fix_qkv_ordering(x, head_size, num_heads, hidden_size)
                    new_key = "decoder.block." + layers + ".layer.1.EncDecAttention.q.weight"
                    torch_state_dict[new_key] = convert_tensor(x)
                if keys[op_idx + 1] == "key_value":
                    x = oneflow_state_dict.pop(key)
                    x = fix_qkv_ordering(x, head_size, num_heads, hidden_size)
                    k, v = flow.chunk(x, chunks=2, dim=0)
                    new_key = "decoder.block." + layers + ".layer.1.EncDecAttention.k.weight"
                    torch_state_dict[new_key] = convert_tensor(k)
                    new_key = new_key.replace(".k", ".v")
                    torch_state_dict[new_key] = convert_tensor(v)
                if keys[op_idx + 1] == "dense":
                    new_key = "decoder.block." + layers + ".layer.1.EncDecAttention.o.weight"
                    torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
            elif op_name == "mlp":
                if libai_cfg.get("model_type") == "mt5":
                    if keys[op_idx + 1] == "wi_0":
                        new_key = "decoder.block." + layers + ".layer.2.DenseReluDense.wi_0.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
                    if keys[op_idx + 1] == "wi_1":
                        new_key = "decoder.block." + layers + ".layer.2.DenseReluDense.wi_1.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
                    if keys[op_idx + 1] == "wo":
                        new_key = "decoder.block." + layers + ".layer.2.DenseReluDense.wo.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
                elif libai_cfg.get("model_type") == "t5":
                    if keys[op_idx + 1] == "dense_h_to_4h":
                        new_key = "decoder.block." + layers + ".layer.2.DenseReluDense.wi.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
                    if keys[op_idx + 1] == "dense_4h_to_h":
                        new_key = "decoder.block." + layers + ".layer.2.DenseReluDense.wo.weight"
                        torch_state_dict[new_key] = convert_tensor(oneflow_state_dict.pop(key))
    return torch_state_dict
if __name__ == "__main__":
    # CLI entry point: convert a LiBai OneFlow checkpoint into a PyTorch
    # state dict and save it at the requested location.
    cli_args = parse_args()
    training_config = LazyConfig.load(cli_args.config_path)
    state_dict = convert_state_dict(cli_args.oneflow_state_dict_path, training_config.model.cfg)
    torch.save(state_dict, cli_args.save_path)
libai | libai-main/projects/Stable_Diffusion/generate_prior_image.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import hashlib
from pathlib import Path
import oneflow as flow
from diffusers import OneFlowStableDiffusionPipeline
from tqdm import tqdm
from projects.Stable_Diffusion.dataset import PromptDataset
def parse_args(input_args=None):
    """Parse command-line options for prior-image generation.

    Args:
        input_args: optional explicit argument list; ``sys.argv`` is used
            when ``None``.
    """
    arg_parser = argparse.ArgumentParser(description="Simple example of a training script.")
    arg_parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    arg_parser.add_argument(
        "--revision",
        type=str,
        default=None,
        required=False,
        help=(
            "Revision of pretrained model identifier from huggingface.co/models. "
            "Trainable model components should be float32 precision."
        ),
    )
    arg_parser.add_argument(
        "--class_data_dir",
        type=str,
        default=None,
        required=True,
        help="A folder containing the training data of class images.",
    )
    arg_parser.add_argument(
        "--class_prompt",
        type=str,
        default=None,
        required=True,
        help="The prompt to specify images in the same class as provided instance images.",
    )
    arg_parser.add_argument(
        "--num_class_images",
        type=int,
        default=100,
        required=False,
        help=(
            "Minimal class images for prior preservation loss. "
            "If there are not enough images already present in "
            "class_data_dir, additional images will be sampled with class_prompt."
        ),
    )
    arg_parser.add_argument(
        "--sample_batch_size",
        type=int,
        default=4,
        help="Batch size (per device) for sampling images.",
    )
    arg_parser.add_argument(
        "--prior_generation_precision",
        type=str,
        default="fp16",
        choices=["no", "fp32", "fp16", "bf16"],
        help=(
            "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16)."
            " Bf16 requires PyTorch >=1.10.and an Nvidia Ampere GPU. "
            " Default to fp16 if a GPU is available else fp32."
        ),
    )
    # argparse treats a None argument list the same as omitting it, so a
    # single call covers both the explicit-list and sys.argv cases.
    return arg_parser.parse_args(input_args)
def main(args):
    """Sample class (prior-preservation) images with Stable Diffusion.

    Counts the images already present in ``args.class_data_dir`` and, when
    fewer than ``args.num_class_images`` exist, generates the remainder from
    ``args.class_prompt`` and saves them into that directory with
    index-plus-content-hash filenames.
    """
    class_images_dir = Path(args.class_data_dir)
    if not class_images_dir.exists():
        class_images_dir.mkdir(parents=True)
    cur_class_images = len(list(class_images_dir.iterdir()))
    # Default mirrors the CLI help text ("fp16 if a GPU is available else
    # fp32"). This also covers --prior_generation_precision "no", which is an
    # accepted choice but previously left `torch_dtype` unbound and crashed
    # with UnboundLocalError at the from_pretrained() call below.
    torch_dtype = flow.float16 if flow.cuda.is_available() else flow.float32
    if args.prior_generation_precision == "fp32":
        torch_dtype = flow.float32
    elif args.prior_generation_precision == "fp16":
        torch_dtype = flow.float16
    elif args.prior_generation_precision == "bf16":
        torch_dtype = flow.bfloat16
    if cur_class_images < args.num_class_images:
        pipeline = OneFlowStableDiffusionPipeline.from_pretrained(
            args.pretrained_model_name_or_path,
            use_auth_token=True,
            revision=args.revision,
            torch_dtype=torch_dtype,
        ).to("cuda")
        pipeline.set_progress_bar_config(disable=True)
        num_new_images = args.num_class_images - cur_class_images
        print(f"Number of class images to sample: {num_new_images}.")
        sample_dataset = PromptDataset(args.class_prompt, num_new_images)
        sample_dataloader = flow.utils.data.DataLoader(
            sample_dataset, batch_size=args.sample_batch_size
        )
        for example in tqdm(sample_dataloader, desc="Generating class images"):
            images = pipeline(example["prompt"]).images
            for i, image in enumerate(images):
                # Hash the pixel data so re-runs never overwrite distinct images.
                hash_image = hashlib.sha1(image.tobytes()).hexdigest()
                image_filename = (
                    class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
                )
                image.save(image_filename)
        # Free the pipeline's GPU memory before returning.
        del pipeline
        if flow.cuda.is_available():
            flow.cuda.empty_cache()
    return
if __name__ == "__main__":
    # Script entry point: parse CLI options, then generate the prior images.
    main(parse_args())
| 4,744 | 31.951389 | 99 | py |
libai | libai-main/tests/config/test_instantiate_config.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unittests followed
https://github.com/facebookresearch/detectron2/blob/main/tests/config/test_instantiate_config.py
"""
from collections import namedtuple
import os
import unittest
import yaml
import tempfile
from libai.config import instantiate, LazyCall
from omegaconf import OmegaConf
from dataclasses import dataclass
from omegaconf import __version__ as oc_version
# (major, minor) of the installed omegaconf; used below to skip this test
# suite on versions older than 2.1.
OC_VERSION = tuple(int(x) for x in oc_version.split(".")[:2])
class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "width"])):
    """Basic shape specification for a tensor.

    Serves as lightweight auxiliary input/output metadata for models, since
    pytorch modules do not infer shapes among themselves.

    Attributes:
        channels: channel count, or ``None`` when unspecified.
        width: width, or ``None`` when unspecified.
    """

    def __new__(cls, channels=None, width=None):
        # Route through the namedtuple constructor so both fields default to None.
        return super(ShapeSpec, cls).__new__(cls, channels, width)
class TestClass:
    """Configurable callable target used by the instantiation tests."""

    def __init__(self, int_arg, list_arg=None, dict_arg=None, extra_arg=None) -> None:
        # Record every constructor argument verbatim for later inspection.
        self.int_arg = int_arg
        self.list_arg = list_arg
        self.dict_arg = dict_arg
        self.extra_arg = extra_arg

    def __call__(self, call_arg):
        # Shift the call argument by the stored integer.
        return self.int_arg + call_arg
@dataclass
class TestDataClass:
    """Minimal dataclass target used to verify dataclass instantiation."""

    x: int
    y: str
@unittest.skipIf(OC_VERSION < (2, 1), "omegaconf version too old")
class TestConstruction(unittest.TestCase):
    """Unit tests for LazyCall/instantiate config construction."""

    def test_basic_construct(self):
        # Nested LazyCall with an omegaconf interpolation back to the parent.
        objconf = LazyCall(TestClass)(
            int_arg=3,
            list_arg=[10],
            dict_arg={},
            extra_arg=LazyCall(TestClass)(int_arg=4, list_arg="${..list_arg}"),
        )
        obj = instantiate(objconf)
        self.assertIsInstance(obj, TestClass)
        self.assertEqual(obj.int_arg, 3)
        self.assertEqual(obj.extra_arg.int_arg, 4)
        self.assertEqual(obj.extra_arg.list_arg, obj.list_arg)
        # Overriding the nested field breaks the interpolation link.
        objconf.extra_arg.list_arg = [5]
        obj = instantiate(objconf)
        self.assertIsInstance(obj, TestClass)
        self.assertEqual(obj.extra_arg.list_arg, [5])

    def test_instantiate_other_obj(self):
        # do nothing for other obj
        self.assertEqual(instantiate(5), 5)
        x = [3, 4, 5]
        self.assertEqual(instantiate(x), x)
        x = TestClass(1)
        self.assertIs(instantiate(x), x)
        x = {"xx": "yy"}
        self.assertEqual(instantiate(x), x)

    def test_instantiate_lazy_target(self):
        # _target_ is result of instantiate
        objconf = LazyCall(LazyCall(len)(int_arg=3))(call_arg=4)
        objconf._target_._target_ = TestClass
        self.assertEqual(instantiate(objconf), 7)

    def test_instantiate_lst(self):
        lst = [1, 2, LazyCall(TestClass)(int_arg=1)]
        x = LazyCall(TestClass)(
            int_arg=lst
        )  # list as an argument should be recursively instantiated
        x = instantiate(x).int_arg
        self.assertEqual(x[:2], [1, 2])
        self.assertIsInstance(x[2], TestClass)
        self.assertEqual(x[2].int_arg, 1)

    def test_instantiate_namedtuple(self):
        x = LazyCall(TestClass)(int_arg=ShapeSpec(channels=1, width=3))
        # test serialization
        with tempfile.TemporaryDirectory() as d:
            fname = os.path.join(d, "lb_test.yaml")
            OmegaConf.save(x, fname)
            with open(fname) as f:
                x = yaml.unsafe_load(f)
        x = instantiate(x)
        self.assertIsInstance(x.int_arg, ShapeSpec)
        self.assertEqual(x.int_arg.channels, 1)

    def test_bad_lazycall(self):
        # LazyCall requires a callable target.
        with self.assertRaises(Exception):
            LazyCall(3)

    def test_instantiate_dataclass(self):
        a = LazyCall(TestDataClass)(x=1, y="s")
        a = instantiate(a)
        self.assertEqual(a.x, 1)
        self.assertEqual(a.y, "s")

    def test_instantiate_no_recursive(self):
        # With _recursive_=False the nested LazyCall arrives uninstantiated.
        def helper_func(obj):
            self.assertNotIsInstance(obj, TestClass)
            obj = instantiate(obj)
            self.assertIsInstance(obj, TestClass)
            return obj.int_arg

        objconf = LazyCall(helper_func)(obj=LazyCall(TestClass)(int_arg=4))
        self.assertEqual(instantiate(objconf, _recursive_=False), 4)
# Allow running this test module directly with `python`.
if __name__ == "__main__":
    unittest.main()
| 4,865 | 31.657718 | 96 | py |
libai | libai-main/tests/model_loader/test_mt5_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
from projects.MT5.configs.mt5_base import cfg as libai_cfg
from projects.MT5.mt5_model import MT5Model
from projects.MT5.utils.mt5_loader import T5LoaderHuggerFace
# Remote fixtures: MT5 checkpoint weights and config in HuggingFace format.
PRETRAINED_MODEL_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/mt5_utils/pytorch_model.bin"  # noqa
PRETRAINED_MODEL_CONFIG_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/mt5_utils/config.json"  # noqa
# MD5 checksums used to validate the cached downloads.
PRETRAINED_MODEL_MD5 = "4c9c0be541b89de9b01c597ec4cc371a"
PRETRAINED_MODEL_CONFIG_MD5 = "b159e41603b7eeaf9a9c489165bbcaca"
# Scratch directory for test artifacts; removed again in tearDownClass.
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_mt5_utils")
# Configure the LiBai logger once for this process.
setup_logger(distributed_rank=dist.get_rank())
class TestMT5Loader(flow.unittest.TestCase):
    """Checks loading a HuggingFace MT5 checkpoint under different parallelisms.

    Each test loads the same pretrained checkpoint, runs one forward pass on a
    fixed batch, and compares the summed logits against a recorded reference.
    """

    def setUp(self) -> None:
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "mt5_utils_data"
        )
        self.pretrained_model_path = cache_dir
        # prepare dataset
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(PRETRAINED_MODEL_URL, cache_dir, md5=PRETRAINED_MODEL_MD5)
            get_data_from_cache(
                PRETRAINED_MODEL_CONFIG_URL, cache_dir, md5=PRETRAINED_MODEL_CONFIG_MD5
            )
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        dist.synchronize()
        # prepare input data
        self.encoder_input_ids = [
            [101, 2009, 1005, 1055, 2986, 2651, 1012, 102],
            [101, 2028, 12314, 3377, 102, 0, 0, 0],
            [101, 2064, 2017, 3305, 2009, 102, 0, 0],
        ]
        self.encoder_att_mask = [
            [1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1],
        ]
        self.decoder_input_ids = [
            [101, 2009, 1005, 1055, 2986],
            [101, 2028, 12314, 3377, 102],
            [101, 2064, 2017, 3305, 2009],
        ]
        self.decoder_att_mask = [[1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]

    @classmethod
    def tearDownClass(cls) -> None:
        # Remove the scratch output directory once per node.
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)

    @flow.unittest.skip_unless_1n4d()
    def test_mt5_loader_with_data_tensor_parallel(self):
        """2-way data x 2-way tensor parallelism; dropout disabled for determinism."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = T5LoaderHuggerFace(
            model=MT5Model,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            hidden_dropout_prob=0.0,
            attention_probs_dropout_prob=0.0,
            embedding_dropout_prob=0.0,
            model_type="mt5",
        )
        model = load_func.load()
        model.eval()
        encoder_input_ids = flow.tensor(
            self.encoder_input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        decoder_input_ids = flow.tensor(
            self.decoder_input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        encode_att_mask = flow.tensor(
            self.encoder_att_mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        decoder_att_mask = flow.tensor(
            self.decoder_att_mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        logits = model(
            encoder_input_ids, decoder_input_ids, encode_att_mask, decoder_att_mask, encode_att_mask
        )["logits"]
        # Compare against the recorded reference value for this checkpoint.
        self.assertTrue(
            np.allclose(
                np.array(-83584720.0),
                logits.sum().data.numpy(),
            )
        )

    @flow.unittest.skip_unless_1n4d()
    def test_mt5_loader_with_data_tensor_pipeline_parallel(self):
        """2-way data x 2-stage pipeline parallelism; same reference logits sum."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=16,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = T5LoaderHuggerFace(
            model=MT5Model,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            hidden_dropout_prob=0.0,
            attention_probs_dropout_prob=0.0,
            embedding_dropout_prob=0.0,
            model_type="mt5",
        )
        model = load_func.load()
        model.eval()
        encoder_input_ids = flow.tensor(
            self.encoder_input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        decoder_input_ids = flow.tensor(
            self.decoder_input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        encode_att_mask = flow.tensor(
            self.encoder_att_mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        decoder_att_mask = flow.tensor(
            self.decoder_att_mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        logits = model(
            encoder_input_ids, decoder_input_ids, encode_att_mask, decoder_att_mask, encode_att_mask
        )["logits"]
        # Same reference as the tensor-parallel case: parallelism must not
        # change the numerical result.
        self.assertTrue(
            np.allclose(
                np.array(-83584720.0),
                logits.sum().data.numpy(),
            )
        )
# Allow running this test module directly with `python`.
if __name__ == "__main__":
    unittest.main()
| 7,293 | 33.899522 | 151 | py |
libai | libai-main/tests/model_loader/test_t5_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
from projects.MT5.configs.mt5_base import cfg as libai_cfg
from projects.MT5.mt5_model import MT5Model
from projects.MT5.utils.mt5_loader import T5LoaderHuggerFace
# Remote fixture checkpoint + HF config for the T5 loader tests; md5 sums guard
# against corrupted/partial downloads.
PRETRAINED_MODEL_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/t5_utils/pytorch_model.bin" # noqa
PRETRAINED_MODEL_CONFIG_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/t5_utils/config.json" # noqa
PRETRAINED_MODEL_MD5 = "952862a8ba425a25739a69e5f33b0df8"
PRETRAINED_MODEL_CONFIG_MD5 = "7ebc91dc4377c01190f4116c3c1ac6cd"
# Scratch output directory for this test module; removed in tearDownClass.
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_t5_utils")
# Configure one logger per rank so multi-process output stays attributable.
setup_logger(distributed_rank=dist.get_rank())
class TestT5Loader(flow.unittest.TestCase):
    """Regression tests for loading a HuggingFace T5 checkpoint into LiBai's MT5Model.

    Each test sets up a 4-device parallelism layout (data+tensor, or
    data+pipeline), loads the pretrained weights via ``T5LoaderHuggerFace``,
    runs a fixed batch through the model, and compares the summed logits
    against a golden value recorded from a reference run.
    """

    def setUp(self) -> None:
        """Download checkpoint/config (local rank 0 only) and build fixed input batches."""
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "t5_utils_data"
        )
        self.pretrained_model_path = cache_dir
        # prepare dataset
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(PRETRAINED_MODEL_URL, cache_dir, md5=PRETRAINED_MODEL_MD5)
            get_data_from_cache(
                PRETRAINED_MODEL_CONFIG_URL, cache_dir, md5=PRETRAINED_MODEL_CONFIG_MD5
            )
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        # Barrier: all ranks wait here until the files exist on disk.
        dist.synchronize()
        # prepare input data
        # Three token-id sequences, zero-padded to a common length.
        self.encoder_input_ids = [
            [101, 2009, 1005, 1055, 2986, 2651, 1012, 102],
            [101, 2028, 12314, 3377, 102, 0, 0, 0],
            [101, 2064, 2017, 3305, 2009, 102, 0, 0],
        ]
        # NOTE(review): the mask is all-ones even over padded positions — this is
        # intentional for reproducing the golden value, but differs from a true
        # padding mask.
        self.encoder_att_mask = [
            [1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1],
        ]
        self.decoder_input_ids = [
            [101, 2009, 1005, 1055, 2986],
            [101, 2028, 12314, 3377, 102],
            [101, 2064, 2017, 3305, 2009],
        ]
        self.decoder_att_mask = [[1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]

    @classmethod
    def tearDownClass(cls) -> None:
        # Clean up the scratch directory once per node (local rank 0 only).
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)

    @flow.unittest.skip_unless_1n4d()
    def test_t5_loader_with_data_tensor_parallel(self):
        """2-way data x 2-way tensor parallelism: logits sum must match the golden value."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        # All dropout probabilities are zeroed so the forward pass is deterministic.
        load_func = T5LoaderHuggerFace(
            model=MT5Model,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            hidden_dropout_prob=0.0,
            attention_probs_dropout_prob=0.0,
            embedding_dropout_prob=0.0,
            model_type="t5",
        )
        model = load_func.load()
        model.eval()
        # Build broadcast global tensors placed on the first pipeline stage.
        encoder_input_ids = flow.tensor(
            self.encoder_input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        decoder_input_ids = flow.tensor(
            self.decoder_input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        encode_att_mask = flow.tensor(
            self.encoder_att_mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        decoder_att_mask = flow.tensor(
            self.decoder_att_mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        # The encoder mask is also passed as the encoder-decoder cross mask.
        logits = model(
            encoder_input_ids, decoder_input_ids, encode_att_mask, decoder_att_mask, encode_att_mask
        )["logits"]
        # Golden value from a reference run; any weight-loading regression shifts it.
        self.assertTrue(
            np.allclose(
                np.array(-9836561.0),
                logits.sum().data.numpy(),
            )
        )

    @flow.unittest.skip_unless_1n4d()
    def test_t5_loader_with_data_tensor_pipeline_parallel(self):
        """2-way data x 2-stage pipeline parallelism: same golden value as above."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=24,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = T5LoaderHuggerFace(
            model=MT5Model,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            hidden_dropout_prob=0.0,
            attention_probs_dropout_prob=0.0,
            embedding_dropout_prob=0.0,
            model_type="t5",
        )
        model = load_func.load()
        model.eval()
        encoder_input_ids = flow.tensor(
            self.encoder_input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        decoder_input_ids = flow.tensor(
            self.decoder_input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        encode_att_mask = flow.tensor(
            self.encoder_att_mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        decoder_att_mask = flow.tensor(
            self.decoder_att_mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        )
        logits = model(
            encoder_input_ids, decoder_input_ids, encode_att_mask, decoder_att_mask, encode_att_mask
        )["logits"]
        # Must match the tensor-parallel layout exactly: same weights, same inputs.
        self.assertTrue(
            np.allclose(
                np.array(-9836561.0),
                logits.sum().data.numpy(),
            )
        )
# Allow running this test file directly (e.g. `python test_t5_loader.py`).
if __name__ == "__main__":
    unittest.main()
| 7,282 | 33.84689 | 150 | py |
libai | libai-main/tests/model_loader/test_roberta_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
import libai
from configs.common.models.roberta import cfg as libai_cfg
from libai.models.utils import RobertaLoaderHuggerFace
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
# Remote fixture checkpoint + HF config for the RoBERTa loader tests; md5 sums
# guard against corrupted/partial downloads.
PRETRAINED_MODEL_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/roberta_utils/pytorch_model.bin" # noqa
PRETRAINED_MODEL_CONFIG_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/roberta_utils/config.json" # noqa
PRETRAINED_MODEL_MD5 = "73db58b6c51b028e0ee031f12261b51d" # noqa
PRETRAINED_MODEL_CONFIG_MD5 = "a53c22291c7f25d5077260ad5ca4d5fa" # noqa
# Scratch output directory for this test module; removed in tearDownClass.
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_roberta_utils")
# Configure one logger per rank so multi-process output stays attributable.
setup_logger(distributed_rank=dist.get_rank())
class TestRobertaLoader(flow.unittest.TestCase):
    """Regression tests for loading a HuggingFace RoBERTa checkpoint into LiBai.

    Each test sets up a 4-device parallelism layout, loads the weights via
    ``RobertaLoaderHuggerFace``, and checks the summed last hidden state of a
    fixed batch against a golden value from a reference run.
    """

    def setUp(self) -> None:
        """Download checkpoint/config (local rank 0 only) and build fixed inputs."""
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "roberta_utils_data"
        )
        self.pretrained_model_path = cache_dir
        # prepare dataset
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(PRETRAINED_MODEL_URL, cache_dir, md5=PRETRAINED_MODEL_MD5)
            get_data_from_cache(
                PRETRAINED_MODEL_CONFIG_URL, cache_dir, md5=PRETRAINED_MODEL_CONFIG_MD5
            )
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        # Barrier: all ranks wait until the files exist on disk.
        dist.synchronize()
        # prepare input data
        # Three zero-padded token-id sequences; the mask is all-ones (also over padding).
        self.input_ids = [
            [101, 2009, 1005, 1055, 2986, 2651, 1012, 102],
            [101, 2028, 12314, 3377, 102, 0, 0, 0],
            [101, 2064, 2017, 3305, 2009, 102, 0, 0],
        ]
        self.mask = [[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1]]

    @classmethod
    def tearDownClass(cls) -> None:
        # Clean up the scratch directory once per node (local rank 0 only).
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)

    @flow.unittest.skip_unless_1n4d()
    def test_roberta_loader_with_data_tensor_parallel(self):
        """2-way data x 2-way tensor parallelism: hidden-state sum must match golden value."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        # Fusion/scaling knobs are disabled so LiBai's math matches HuggingFace's.
        load_func = RobertaLoaderHuggerFace(
            model=libai.models.RobertaModel,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            bias_gelu_fusion=False,
            bias_dropout_fusion=False,
            scale_mask_softmax_fusion=False,
            apply_query_key_layer_scaling=False,
            apply_residual_post_layernorm=True,
            amp_enabled=False,
        )
        model = load_func.load()
        model.eval()
        # Inputs are placed alongside the vocab embedding weight (first stage).
        input_ids = flow.tensor(
            self.input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.vocab_embeddings.weight.placement,
        )
        mask = flow.tensor(
            self.mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.vocab_embeddings.weight.placement,
        )
        last_hidden_state, _ = model(input_ids, mask)
        # rtol=1e-4, atol=1e-4: small float tolerance for cross-device reductions.
        self.assertTrue(
            np.allclose(np.array(341.5831), last_hidden_state.sum().data.numpy(), 1e-4, 1e-4)
        )

    @flow.unittest.skip_unless_1n4d()
    def test_roberta_loader_with_data_tensor_pipeline_parallel(self):
        """2-way data x 2-stage pipeline parallelism: same golden value as above."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=12,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = RobertaLoaderHuggerFace(
            model=libai.models.RobertaModel,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            bias_gelu_fusion=False,
            bias_dropout_fusion=False,
            scale_mask_softmax_fusion=False,
            apply_query_key_layer_scaling=False,
            apply_residual_post_layernorm=True,
            amp_enabled=False,
        )
        model = load_func.load()
        model.eval()
        input_ids = flow.tensor(
            self.input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.vocab_embeddings.weight.placement,
        )
        mask = flow.tensor(
            self.mask,
            dtype=flow.bool,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.vocab_embeddings.weight.placement,
        )
        last_hidden_state, _ = model(input_ids, mask)
        self.assertTrue(
            np.allclose(np.array(341.5831), last_hidden_state.sum().data.numpy(), 1e-4, 1e-4)
        )
# Allow running this test file directly (e.g. `python test_roberta_loader.py`).
if __name__ == "__main__":
    unittest.main()
| 6,023 | 34.64497 | 155 | py |
libai | libai-main/tests/model_loader/test_gpt_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
import libai
from configs.common.models.gpt import cfg as libai_cfg
from libai.models.utils import GPT2LoaderHuggerFace
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
# Remote fixture checkpoint + HF config for the GPT-2 loader tests; md5 sums
# guard against corrupted/partial downloads.
PRETRAINED_MODEL_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/gpt_utils/pytorch_model.bin" # noqa
PRETRAINED_MODEL_CONFIG_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/gpt_utils/config.json" # noqa
PRETRAINED_MODEL_MD5 = "c086214036308afc71896da17ca0442a"
PRETRAINED_MODEL_CONFIG_MD5 = "6e1dba197b511b8759d6ad4551095a29"
# Scratch output directory for this test module; removed in tearDownClass.
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_gpt_utils")
# Configure one logger per rank so multi-process output stays attributable.
setup_logger(distributed_rank=dist.get_rank())
class TestGPT2Loader(flow.unittest.TestCase):
    """The activation function of gpt2 in LiBai is GELU, so the result here is to
    replace the activation function of gpt2 in huggingface from gelu_new to gelu.

    Forward tests check the summed logits of a fixed batch against a golden
    value; backward tests additionally check gradient sums on the final
    layernorm and the token embedding, under two 4-device parallelism layouts.
    """

    def setUp(self) -> None:
        """Download checkpoint/config (local rank 0 only) and build fixed inputs."""
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "gpt_utils_data"
        )
        self.pretrained_model_path = cache_dir
        # prepare dataset
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(PRETRAINED_MODEL_URL, cache_dir, md5=PRETRAINED_MODEL_MD5)
            get_data_from_cache(
                PRETRAINED_MODEL_CONFIG_URL, cache_dir, md5=PRETRAINED_MODEL_CONFIG_MD5
            )
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        # Barrier: all ranks wait until the files exist on disk.
        dist.synchronize()
        # prepare input data
        # Three zero-padded token-id sequences.
        self.input_ids = [
            [101, 2009, 1005, 1055, 2986, 2651, 1012, 102],
            [101, 2028, 12314, 3377, 102, 0, 0, 0],
            [101, 2064, 2017, 3305, 2009, 102, 0, 0],
        ]

    @classmethod
    def tearDownClass(cls) -> None:
        # Clean up the scratch directory once per node (local rank 0 only).
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)

    @flow.unittest.skip_unless_1n4d()
    def test_gpt_loader_with_data_tensor_parallel(self):
        """Forward pass, 2-way data x 2-way tensor parallelism."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        # Dropout probabilities are zeroed so the forward pass is deterministic.
        load_func = GPT2LoaderHuggerFace(
            model=libai.models.GPTModel,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            bias_gelu_fusion=False,
            bias_dropout_fusion=False,
            scale_mask_softmax_fusion=True,
            apply_query_key_layer_scaling=True,
            apply_residual_post_layernorm=False,
            amp_enabled=False,
            attention_dropout_prob=0,
            output_dropout_prob=0,
        )
        model = load_func.load()
        model.eval()
        # Inputs placed alongside the token embedding weight (first stage).
        input_ids = flow.tensor(
            self.input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.token_embeddings.weight.placement,
        )
        logits = model(input_ids)
        # Golden value from a reference run.
        self.assertTrue(
            np.allclose(
                np.array(-93505050.0),
                logits.sum().data.numpy(),
            )
        )

    @flow.unittest.skip_unless_1n4d()
    def test_gpt_loader_with_data_tensor_pipeline_parallel(self):
        """Forward pass, 2-way data x 2-stage pipeline parallelism: same golden value."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=12,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = GPT2LoaderHuggerFace(
            model=libai.models.GPTModel,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            bias_gelu_fusion=False,
            bias_dropout_fusion=False,
            scale_mask_softmax_fusion=True,
            apply_query_key_layer_scaling=True,
            apply_residual_post_layernorm=False,
            amp_enabled=False,
            attention_dropout_prob=0,
            output_dropout_prob=0,
        )
        model = load_func.load()
        model.eval()
        input_ids = flow.tensor(
            self.input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.token_embeddings.weight.placement,
        )
        logits = model(input_ids)
        self.assertTrue(
            np.allclose(
                np.array(-93505050.0),
                logits.sum().data.numpy(),
            )
        )

    @flow.unittest.skip_unless_1n4d()
    def test_gpt_loader_with_data_tensor_parallel_backward(self):
        """Backward pass, 2-way data x 2-way tensor parallelism: check gradient sums."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        # Note: no model.eval() here — this test exercises training-mode backward,
        # with all dropouts (including embedding dropout) explicitly zeroed.
        load_func = GPT2LoaderHuggerFace(
            model=libai.models.GPTModel,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            bias_gelu_fusion=False,
            bias_dropout_fusion=False,
            scale_mask_softmax_fusion=True,
            apply_query_key_layer_scaling=True,
            apply_residual_post_layernorm=False,
            amp_enabled=False,
            attention_dropout_prob=0,
            output_dropout_prob=0,
            embedding_dropout_prob=0,
        )
        model = load_func.load()
        input_ids = flow.tensor(
            self.input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.token_embeddings.weight.placement,
        )
        logits = model(input_ids)
        # Use the logits sum as a scalar loss so gradients are well-defined.
        loss = logits.sum()
        loss.backward()
        # Golden gradient sums: final layernorm weight and token embedding weight.
        self.assertTrue(
            np.allclose(-24882176.0, model.transformer.layernorm_f.weight.grad.sum().numpy())
        )
        self.assertTrue(
            np.allclose(
                3.1779e08, model.embeddings.token_embeddings.weight.grad.sum().numpy(), 1e-3
            )
        )

    @flow.unittest.skip_unless_1n4d()
    def test_gpt_loader_with_data_tensor_pipeline_parallel_backward(self):
        """Backward pass, 2-way data x 2-stage pipeline parallelism: same gradient sums."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=12,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = GPT2LoaderHuggerFace(
            model=libai.models.GPTModel,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            bias_gelu_fusion=False,
            bias_dropout_fusion=False,
            scale_mask_softmax_fusion=True,
            apply_query_key_layer_scaling=True,
            apply_residual_post_layernorm=False,
            amp_enabled=False,
            attention_dropout_prob=0,
            output_dropout_prob=0,
            embedding_dropout_prob=0,
        )
        model = load_func.load()
        input_ids = flow.tensor(
            self.input_ids,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.embeddings.token_embeddings.weight.placement,
        )
        logits = model(input_ids)
        loss = logits.sum()
        loss.backward()
        self.assertTrue(
            np.allclose(-24882176.0, model.transformer.layernorm_f.weight.grad.sum().numpy())
        )
        self.assertTrue(
            np.allclose(317785760.0, model.embeddings.token_embeddings.weight.grad.sum().numpy())
        )
# Allow running this test file directly (e.g. `python test_gpt_loader.py`).
if __name__ == "__main__":
    unittest.main()
| 8,936 | 32.724528 | 151 | py |
libai | libai-main/tests/model_loader/test_swin_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
import libai
from configs.common.models.swin.swin_tiny_patch4_window7_224 import cfg as libai_cfg
from libai.models.utils import SwinLoaderHuggerFace
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
# Remote fixture checkpoint, HF config, and a pre-generated input image batch
# (init_data.npz) for the Swin loader tests; md5 sums guard downloads.
PRETRAINED_MODEL_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/swin_utils/pytorch_model.bin" # noqa
PRETRAINED_MODEL_CONFIG_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/swin_utils/config.json" # noqa
INIT_DATA = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/swin_utils/init_data.npz" # noqa
PRETRAINED_MODEL_MD5 = "cd8c03d9cd4a9c536a5a245f663035b6"
PRETRAINED_MODEL_CONFIG_MD5 = "a8a71ed22b99323edd6a1457bede5819"
INIT_DATA_MD5 = "5fecdcd8d46bfefa310d19e084bd4815"
# Scratch output directory for this test module; removed in tearDownClass.
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_swin_utils")
# Configure one logger per rank so multi-process output stays attributable.
setup_logger(distributed_rank=dist.get_rank())
class TestSwinLoder(flow.unittest.TestCase):
    """Regression tests for loading a HuggingFace Swin checkpoint into LiBai.

    Forward tests check the summed prediction scores of a fixed image batch
    against a golden value; backward tests check gradient sums on the head and
    patch-embedding norm, under two 4-device parallelism layouts.
    """

    def setUp(self) -> None:
        """Download checkpoint/config/input batch (local rank 0 only) and load the fixture."""
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "swin_utils_data"
        )
        self.pretrained_model_path = cache_dir
        self.init_data_path = os.path.join(cache_dir, "init_data.npz")
        # download model and data
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(PRETRAINED_MODEL_URL, cache_dir, md5=PRETRAINED_MODEL_MD5)
            get_data_from_cache(
                PRETRAINED_MODEL_CONFIG_URL, cache_dir, md5=PRETRAINED_MODEL_CONFIG_MD5
            )
            get_data_from_cache(INIT_DATA, cache_dir, md5=INIT_DATA_MD5)
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        # Barrier: all ranks wait until the files exist on disk.
        dist.synchronize()
        # prepare input data
        # Fixed image batch stored as the first array of the npz archive.
        self.input_image = np.load(self.init_data_path)["arr_0"]

    @classmethod
    def tearDownClass(cls) -> None:
        # Clean up the scratch directory once per node (local rank 0 only).
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)

    @flow.unittest.skip_unless_1n4d()
    def test_swin_loader_with_data_tensor_parallel(self):
        """Forward pass, 2-way data x 2-way tensor parallelism."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = SwinLoaderHuggerFace(
            model=libai.models.SwinTransformer,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
        )
        model = load_func.load()
        model.eval()
        # Pass the numpy array directly (as the other tests in this class do);
        # the previous .tolist() conversion was an inconsistent, needless copy.
        input_image = flow.tensor(
            self.input_image,
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        # Golden value from a reference run (rtol=1e-3).
        self.assertTrue(np.allclose(np.array(80.9373), prediction_scores.sum().data.numpy(), 1e-3))

    @flow.unittest.skip_unless_1n4d()
    def test_swin_loader_with_data_tensor_pipeline_parallel(self):
        """Forward pass, 2-way data x 2-stage pipeline parallelism: same golden value."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=12,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = SwinLoaderHuggerFace(
            model=libai.models.SwinTransformer,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
        )
        model = load_func.load()
        model.eval()
        input_image = flow.tensor(
            self.input_image,
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        self.assertTrue(np.allclose(np.array(80.9373), prediction_scores.sum().data.numpy(), 1e-3))

    @flow.unittest.skip_unless_1n4d()
    def test_swin_loader_with_data_tensor_parallel_backward(self):
        """Backward pass, 2-way data x 2-way tensor parallelism: check gradient sums."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        # No model.eval(): training-mode backward, with dropout/drop-path zeroed
        # so gradients are deterministic.
        load_func = SwinLoaderHuggerFace(
            model=libai.models.SwinTransformer,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            drop_rate=0.0,
            drop_path_rate=0.0,
        )
        model = load_func.load()
        input_image = flow.tensor(
            self.input_image,
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        # Use the score sum as a scalar loss so gradients are well-defined.
        loss = prediction_scores.sum()
        loss.backward()
        self.assertTrue(np.allclose(108775.88, model.head.weight.grad.sum().numpy(), 1e-3))
        self.assertTrue(
            np.allclose(24.320518, model.patch_embed.norm.weight.grad.sum().numpy(), 1e-2)
        )

    @flow.unittest.skip_unless_1n4d()
    def test_swin_loader_with_data_tensor_pipeline_parallel_backward(self):
        """Backward pass, 2-way data x 2-stage pipeline parallelism: same gradient sums."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=12,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = SwinLoaderHuggerFace(
            model=libai.models.SwinTransformer,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            drop_rate=0.0,
            drop_path_rate=0.0,
        )
        model = load_func.load()
        input_image = flow.tensor(
            self.input_image,
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        loss = prediction_scores.sum()
        loss.backward()
        self.assertTrue(np.allclose(108775.88, model.head.weight.grad.sum().numpy(), 1e-3))
        self.assertTrue(
            np.allclose(24.320518, model.patch_embed.norm.weight.grad.sum().numpy(), 1e-2)
        )
# Allow running this test file directly (e.g. `python test_swin_loader.py`).
if __name__ == "__main__":
    unittest.main()
| 7,713 | 34.223744 | 152 | py |
libai | libai-main/tests/model_loader/test_vit_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
import libai
from configs.common.models.vit.vit_tiny_patch16_224 import cfg as libai_cfg
from libai.models.utils import ViTLoaderHuggerFace
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
# Remote fixture checkpoint, HF config, and a pre-generated input image batch
# (init_data.npz) for the ViT loader tests; md5 sums guard downloads.
PRETRAINED_MODEL_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/vit_utils/pytorch_model.bin" # noqa
PRETRAINED_MODEL_CONFIG_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/vit_utils/config.json" # noqa
INIT_DATA = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/vit_utils/init_data.npz" # noqa
PRETRAINED_MODEL_MD5 = "c587693e5e312064c56f27aa2d4f1e81"
PRETRAINED_MODEL_CONFIG_MD5 = "9ea94d9e5bc3543b1de7d12956321c50"
INIT_DATA_MD5 = "5fecdcd8d46bfefa310d19e084bd4815"
# Scratch output directory for this test module; removed in tearDownClass.
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_vit_utils")
# Configure one logger per rank so multi-process output stays attributable.
setup_logger(distributed_rank=dist.get_rank())
class TestViTLoder(flow.unittest.TestCase):
    """Regression tests for loading a HuggingFace ViT checkpoint into LiBai.

    Forward tests check the summed prediction scores of a fixed image batch
    against a golden value; backward tests check the head-weight gradient sum,
    under two 4-device parallelism layouts.
    """

    def setUp(self) -> None:
        """Download checkpoint/config/input batch (local rank 0 only) and load the fixture."""
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "vit_utils_data"
        )
        self.pretrained_model_path = cache_dir
        self.init_data_path = os.path.join(cache_dir, "init_data.npz")
        # download model and data
        if dist.get_local_rank() == 0:
            # download dataset on main process of each node
            get_data_from_cache(PRETRAINED_MODEL_URL, cache_dir, md5=PRETRAINED_MODEL_MD5)
            get_data_from_cache(
                PRETRAINED_MODEL_CONFIG_URL, cache_dir, md5=PRETRAINED_MODEL_CONFIG_MD5
            )
            get_data_from_cache(INIT_DATA, cache_dir, md5=INIT_DATA_MD5)
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        # Barrier: all ranks wait until the files exist on disk.
        dist.synchronize()
        # prepare input data
        # Fixed image batch stored as the first array of the npz archive.
        self.input_image = np.load(self.init_data_path)["arr_0"]

    @classmethod
    def tearDownClass(cls) -> None:
        # Clean up the scratch directory once per node (local rank 0 only).
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)

    @flow.unittest.skip_unless_1n4d()
    def test_vit_loader_with_data_tensor_parallel(self):
        """Forward pass, 2-way data x 2-way tensor parallelism."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = ViTLoaderHuggerFace(
            model=libai.models.VisionTransformer,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
        )
        model = load_func.load()
        model.eval()
        # Pass the numpy array directly (as the other tests in this class do);
        # the previous .tolist() conversion was an inconsistent, needless copy.
        input_image = flow.tensor(
            self.input_image,
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        # Golden value from a reference run (rtol=1e-4, atol=1e-4).
        self.assertTrue(
            np.allclose(np.array(3.1374), prediction_scores.sum().data.numpy(), 1e-4, 1e-4)
        )

    @flow.unittest.skip_unless_1n4d()
    def test_vit_loader_with_data_tensor_pipeline_parallel(self):
        """Forward pass, 2-way data x 2-stage pipeline parallelism: same golden value."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=12,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = ViTLoaderHuggerFace(
            model=libai.models.VisionTransformer,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
        )
        model = load_func.load()
        model.eval()
        input_image = flow.tensor(
            self.input_image,
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        self.assertTrue(
            np.allclose(np.array(3.1374), prediction_scores.sum().data.numpy(), 1e-4, 1e-4)
        )

    @flow.unittest.skip_unless_1n4d()
    def test_vit_loader_with_data_tensor_parallel_backward(self):
        """Backward pass, 2-way data x 2-way tensor parallelism: check the head gradient."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        # No model.eval(): training-mode backward, with all drop rates zeroed
        # so gradients are deterministic.
        load_func = ViTLoaderHuggerFace(
            model=libai.models.VisionTransformer,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            drop_rate=0,
            attn_drop_rate=0,
            drop_path_rate=0,
        )
        model = load_func.load()
        input_image = flow.tensor(
            self.input_image,
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        # Use the score sum as a scalar loss so gradients are well-defined.
        loss = prediction_scores.sum()
        loss.backward()
        self.assertTrue(np.allclose(-173459.77, model.head.weight.grad.sum().numpy(), 1e-3))

    @flow.unittest.skip_unless_1n4d()
    def test_vit_loader_with_data_tensor_pipeline_parallel_backward(self):
        """Backward pass, 2-way data x 2-stage pipeline parallelism: same head gradient."""
        # set distributed config
        dist_cfg = DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=12,
            )
        )
        dist.setup_dist_util(dist_cfg)
        # load model
        load_func = ViTLoaderHuggerFace(
            model=libai.models.VisionTransformer,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            drop_rate=0,
            attn_drop_rate=0,
            drop_path_rate=0,
        )
        model = load_func.load()
        input_image = flow.tensor(
            self.input_image,
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )
        prediction_scores = model(input_image)["prediction_scores"]
        loss = prediction_scores.sum()
        loss.backward()
        self.assertTrue(np.allclose(-173459.77, model.head.weight.grad.sum().numpy(), 1e-3))
# Allow running this test file directly (e.g. `python test_vit_loader.py`).
if __name__ == "__main__":
    unittest.main()
| 7,553 | 33.493151 | 151 | py |
libai | libai-main/tests/model_loader/test_bert_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
import libai
from configs.common.models.bert import cfg as libai_cfg
from libai.models.utils import BertLoaderHuggerFace
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
# Remote fixture checkpoint + HF config for the BERT loader tests; md5 sums
# guard against corrupted/partial downloads.
PRETRAINED_MODEL_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/bert_utils/pytorch_model.bin" # noqa
PRETRAINED_MODEL_CONFIG_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/bert_utils/config.json" # noqa
PRETRAINED_MODEL_MD5 = "ea97b42698d3b5f6d8e8011eba3d1611"
PRETRAINED_MODEL_CONFIG_MD5 = "0939b914fc32135f6c12d8ef281dbd7a"
# Scratch output directory for this test module; removed in tearDownClass.
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_bert_utils")
# Configure one logger per rank so multi-process output stays attributable.
setup_logger(distributed_rank=dist.get_rank())
class TestBertLoder(flow.unittest.TestCase):
    """Checks that ``BertLoaderHuggerFace`` can load a HuggingFace BERT
    checkpoint and reproduce a reference forward checksum under different
    parallelism layouts.

    Both tests require 1 node with 4 devices (``skip_unless_1n4d``); the
    loading/forward logic is shared via :meth:`_load_and_check`.
    """

    def setUp(self) -> None:
        """Download the pretrained checkpoint/config and prepare toy inputs."""
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "bert_utils_data"
        )
        self.pretrained_model_path = cache_dir
        # Download on the main process of each node only; other ranks wait at
        # the synchronize() barrier below.
        if dist.get_local_rank() == 0:
            get_data_from_cache(PRETRAINED_MODEL_URL, cache_dir, md5=PRETRAINED_MODEL_MD5)
            get_data_from_cache(
                PRETRAINED_MODEL_CONFIG_URL, cache_dir, md5=PRETRAINED_MODEL_CONFIG_MD5
            )
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        dist.synchronize()
        # Three short tokenized sentences, zero-padded to length 8.
        self.input_ids = [
            [101, 2009, 1005, 1055, 2986, 2651, 1012, 102],
            [101, 2028, 12314, 3377, 102, 0, 0, 0],
            [101, 2064, 2017, 3305, 2009, 102, 0, 0],
        ]
        self.mask = [[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1]]

    @classmethod
    def tearDownClass(cls) -> None:
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)

    def _load_and_check(self, dist_cfg):
        """Load the model under ``dist_cfg`` and assert the forward checksum.

        Shared by all parallelism layouts; the expected sum (-214.9335) was
        recorded from a reference run, so every layout must reproduce it.
        """
        dist.setup_dist_util(dist_cfg)
        load_func = BertLoaderHuggerFace(
            model=libai.models.BertModel,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            bias_gelu_fusion=False,
            bias_dropout_fusion=False,
            scale_mask_softmax_fusion=False,
            apply_query_key_layer_scaling=False,
            apply_residual_post_layernorm=True,
            amp_enabled=False,
        )
        model = load_func.load()
        model.eval()
        sbp = dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast])
        placement = model.embeddings.vocab_embeddings.weight.placement
        input_ids = flow.tensor(self.input_ids, dtype=flow.long, sbp=sbp, placement=placement)
        mask = flow.tensor(self.mask, dtype=flow.bool, sbp=sbp, placement=placement)
        last_hidden_state, _ = model(input_ids, mask)
        self.assertTrue(
            np.allclose(np.array(-214.9335), last_hidden_state.sum().data.numpy(), 1e-4, 1e-4)
        )

    @flow.unittest.skip_unless_1n4d()
    def test_bert_loader_with_data_tensor_parallel(self):
        self._load_and_check(
            DictConfig(
                dict(
                    data_parallel_size=2,
                    tensor_parallel_size=2,
                    pipeline_parallel_size=1,
                )
            )
        )

    @flow.unittest.skip_unless_1n4d()
    def test_bert_loader_with_data_tensor_pipeline_parallel(self):
        self._load_and_check(
            DictConfig(
                dict(
                    data_parallel_size=2,
                    tensor_parallel_size=1,
                    pipeline_parallel_size=2,
                    pipeline_num_layers=12,
                )
            )
        )
# Allow invoking this test file directly; discovery via pytest also works.
if __name__ == "__main__":
    unittest.main()
| 5,969 | 34.325444 | 152 | py |
libai | libai-main/tests/model_loader/test_swinv2_loader.py | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from omegaconf import DictConfig
import libai
from configs.common.models.swinv2.swinv2_tiny_patch4_window8_256 import cfg as libai_cfg
from libai.models.utils import SwinV2LoaderHuggerFace
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
# CI fixtures: a HuggingFace SwinV2 checkpoint, its config, and a frozen input
# image (npz) mirrored on OSS, plus MD5 checksums for the cached downloads.
PRETRAINED_MODEL_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/swinv2_utils/pytorch_model.bin"  # noqa
PRETRAINED_MODEL_CONFIG_URL = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/swinv2_utils/config.json"  # noqa
INIT_DATA = "http://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/model_utils_test/swinv2_utils/init_data.npz"  # noqa
PRETRAINED_MODEL_MD5 = "40f085f8916974dcb5d86fc6e03aa0df"
PRETRAINED_MODEL_CONFIG_MD5 = "2d3874d58f3d5684f51f70ca29a7de9f"
INIT_DATA_MD5 = "c19b2ad8afe9a708aac9d2a0ff15f7bd"
# Scratch directory for test artifacts; removed in TestSwinV2Loder.tearDownClass.
TEST_OUTPUT = os.path.join(os.getenv("TEST_OUTPUT", "output_unittest"), "test_swinv2_utils")
setup_logger(distributed_rank=dist.get_rank())
class TestSwinV2Loder(flow.unittest.TestCase):
    """Checks that ``SwinV2LoaderHuggerFace`` loads a HuggingFace checkpoint
    and matches reference forward/backward checksums under different
    parallelism layouts (requires 1 node with 4 devices).

    The four tests only differ in the distributed layout and whether a
    backward pass is verified, so the shared logic lives in
    :meth:`_check_forward` / :meth:`_check_backward`.
    """

    def setUp(self) -> None:
        """Download the checkpoint/config/input fixture and load the image."""
        cache_dir = os.path.join(
            os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "swinv2_utils_data"
        )
        self.pretrained_model_path = cache_dir
        self.init_data_path = os.path.join(cache_dir, "init_data.npz")
        # Download on the main process of each node only; other ranks wait at
        # the synchronize() barrier below.
        if dist.get_local_rank() == 0:
            get_data_from_cache(PRETRAINED_MODEL_URL, cache_dir, md5=PRETRAINED_MODEL_MD5)
            get_data_from_cache(
                PRETRAINED_MODEL_CONFIG_URL, cache_dir, md5=PRETRAINED_MODEL_CONFIG_MD5
            )
            get_data_from_cache(INIT_DATA, cache_dir, md5=INIT_DATA_MD5)
            os.makedirs(TEST_OUTPUT, exist_ok=True)
        dist.synchronize()
        # Frozen input image used by every test.
        self.input_image = np.load(self.init_data_path)["arr_0"]

    @classmethod
    def tearDownClass(cls) -> None:
        if os.path.isdir(TEST_OUTPUT) and dist.get_local_rank() == 0:
            shutil.rmtree(TEST_OUTPUT)

    def _load_model(self, dist_cfg, **loader_kwargs):
        """Configure the distributed environment and load the model."""
        dist.setup_dist_util(dist_cfg)
        load_func = SwinV2LoaderHuggerFace(
            model=libai.models.SwinTransformerV2,
            libai_cfg=libai_cfg,
            pretrained_model_path=self.pretrained_model_path,
            **loader_kwargs,
        )
        return load_func.load()

    def _input_tensor(self, model):
        """Wrap the fixture image as a global tensor on the model's placement."""
        return flow.tensor(
            self.input_image.tolist(),
            dtype=flow.float32,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=model.patch_embed.proj.weight.placement,
        )

    def _check_forward(self, dist_cfg):
        """Forward pass; the expected sum was recorded from a reference run."""
        model = self._load_model(dist_cfg)
        model.eval()
        prediction_scores = model(self._input_tensor(model))["prediction_scores"]
        self.assertTrue(
            np.allclose(np.array(221.7827), prediction_scores.sum().data.numpy(), 1e-4, 1e-4)
        )

    def _check_backward(self, dist_cfg):
        """Forward + backward with dropout disabled so gradients are deterministic."""
        model = self._load_model(dist_cfg, drop_rate=0, drop_path_rate=0)
        prediction_scores = model(self._input_tensor(model))["prediction_scores"]
        loss = prediction_scores.sum()
        loss.backward()
        self.assertTrue(np.allclose(373520.47, model.head.weight.grad.sum().numpy(), 1e-3))
        self.assertTrue(
            np.allclose(259.379, model.patch_embed.norm.weight.grad.sum().numpy(), 1e-3)
        )

    @staticmethod
    def _tensor_parallel_cfg():
        # data parallel x tensor parallel over the 4 devices.
        return DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=2,
                pipeline_parallel_size=1,
            )
        )

    @staticmethod
    def _pipeline_parallel_cfg():
        # data parallel x pipeline parallel over the 4 devices.
        return DictConfig(
            dict(
                data_parallel_size=2,
                tensor_parallel_size=1,
                pipeline_parallel_size=2,
                pipeline_num_layers=12,
            )
        )

    @flow.unittest.skip_unless_1n4d()
    def test_swinv2_loader_with_data_tensor_parallel(self):
        self._check_forward(self._tensor_parallel_cfg())

    @flow.unittest.skip_unless_1n4d()
    def test_swinv2_loader_with_data_tensor_pipeline_parallel(self):
        self._check_forward(self._pipeline_parallel_cfg())

    @flow.unittest.skip_unless_1n4d()
    def test_swinv2_loader_with_data_tensor_parallel_backward(self):
        self._check_backward(self._tensor_parallel_cfg())

    @flow.unittest.skip_unless_1n4d()
    def test_swinv2_loader_with_data_tensor_pipeline_parallel_backward(self):
        self._check_backward(self._pipeline_parallel_cfg())
# Allow invoking this test file directly; discovery via pytest also works.
if __name__ == "__main__":
    unittest.main()
| 7,819 | 34.067265 | 154 | py |
libai | libai-main/configs/swinv2_imagenet.py | from libai.config import LazyCall
from .common.models.swinv2.swinv2_tiny_patch4_window8_256 import model
from .common.models.graph import graph
from .common.train import train
from .common.optim import optim
from .common.data.imagenet import dataloader
from flowvision import transforms
from flowvision.data import Mixup
from flowvision.loss.cross_entropy import SoftTargetCrossEntropy
from flowvision.transforms import InterpolationMode
from flowvision.transforms.functional import str_to_interp_mode
from flowvision.data.constants import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
)
from flowvision.data.auto_augment import rand_augment_transform
from flowvision.data.random_erasing import RandomErasing
# Refine data path to imagenet
dataloader.train.dataset[0].root = "/path/to/imagenet"
dataloader.test[0].dataset.root = "/path/to/imagenet"
# Mixup/CutMix augmentation applied batch-wise inside the train dataloader.
dataloader.train.mixup_func = LazyCall(Mixup)(
    mixup_alpha=0.8,
    cutmix_alpha=1.0,
    prob=1.0,
    switch_prob=0.5,
    mode="batch",
    num_classes=1000,
)
# Train-time pipeline: random crop/flip -> RandAugment -> normalize -> RandomErasing.
dataloader.train.dataset[0].transform = LazyCall(transforms.Compose)(
    transforms=[
        LazyCall(transforms.RandomResizedCrop)(
            size=256,
            scale=(0.08, 1.0),
            ratio=(3.0 / 4.0, 4.0 / 3.0),
            interpolation=InterpolationMode.BICUBIC,
        ),
        LazyCall(transforms.RandomHorizontalFlip)(p=0.5),
        LazyCall(rand_augment_transform)(
            config_str="rand-m9-mstd0.5-inc1",
            hparams=dict(
                translate_const=int(256 * 0.45),
                img_mean=tuple([min(255, round(255 * x)) for x in IMAGENET_DEFAULT_MEAN]),
                interpolation=str_to_interp_mode("bicubic"),
            ),
        ),
        LazyCall(transforms.ToTensor)(),
        LazyCall(transforms.Normalize)(
            mean=IMAGENET_DEFAULT_MEAN,
            std=IMAGENET_DEFAULT_STD,
        ),
        LazyCall(RandomErasing)(
            probability=0.25,
            mode="pixel",
            max_count=1,
            num_splits=0,
            device="cpu",
        ),
    ]
)
# Test-time pipeline: deterministic resize -> center crop -> normalize.
dataloader.test[0].dataset.transform = LazyCall(transforms.Compose)(
    transforms=[
        LazyCall(transforms.Resize)(
            size=256,
            interpolation=InterpolationMode.BICUBIC,
        ),
        LazyCall(transforms.CenterCrop)(
            size=256,
        ),
        LazyCall(transforms.ToTensor)(),
        LazyCall(transforms.Normalize)(
            mean=IMAGENET_DEFAULT_MEAN,
            std=IMAGENET_DEFAULT_STD,
        ),
    ]
)
# Refine model cfg for training on ImageNet-1k; soft-target loss pairs with Mixup.
model.cfg.num_classes = 1000
model.cfg.loss_func = SoftTargetCrossEntropy()
# Refine optimizer cfg for vit model
optim.lr = 1e-3 # The pytorch version is 1024 as the total batch size, 1e-3 as the learning rate
optim.eps = 1e-8
optim.weight_decay = 0.05
def check_keywords_in_name(name, keywords=()):
    """Return True if any of *keywords* occurs as a substring of *name*.

    Args:
        name: parameter name (e.g. ``"layers.0.cpb_mlp.weight"``).
        keywords: iterable of substrings to look for.

    Returns:
        bool: whether at least one keyword matched.
    """
    # any() short-circuits on the first match, unlike the original flag loop.
    return any(keyword in name for keyword in keywords)
def set_weight_decay(model, skip_list=(), skip_keywords=()):
    """Split trainable parameters into decay / no-decay optimizer groups.

    Weight decay is disabled for 1-D parameters (norms, scales), biases,
    exact names listed in *skip_list*, and names containing any of
    *skip_keywords*. Frozen parameters are ignored entirely.

    Returns:
        list[dict]: two parameter groups; the second carries ``weight_decay=0.0``.
    """
    decay_params = []
    no_decay_params = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            # frozen weights
            continue
        skip_decay = (
            param.ndim == 1
            or name.endswith(".bias")
            or name in skip_list
            or any(keyword in name for keyword in skip_keywords)
        )
        (no_decay_params if skip_decay else decay_params).append(param)
    return [{"params": decay_params}, {"params": no_decay_params, "weight_decay": 0.0}]
# Parameter groups: no weight decay for 1-D params, biases, and the
# Swin-specific tables matched below.
optim.params = LazyCall(set_weight_decay)(
    model=model,
    # NOTE: must be a 1-tuple — a bare ("absolute_pos_embed") is just a string,
    # which turned the `name in skip_list` membership test into substring matching.
    skip_list=("absolute_pos_embed",),
    skip_keywords=("cpb_mlp", "logit_scale", "relative_position_bias_table"),
)
# Refine train cfg for vit model
train.train_micro_batch_size = 128
train.test_micro_batch_size = 128
train.train_epoch = 300
# 20 warmup epochs out of 300 total.
train.warmup_ratio = 20 / 300
train.eval_period = 1562
train.log_period = 100
# Run in eager mode (graph compilation disabled); keep RDMA transfers on.
graph.enabled = False
train.rdma_enabled = True
# Scheduler
train.scheduler.warmup_factor = 0.001
train.scheduler.alpha = 0.01
train.scheduler.warmup_method = "linear"
# Set fp16 ON
train.amp.enabled = True
| 4,195 | 28.549296 | 97 | py |
spring | spring-main/spring_amr/optim.py | # taken from
import math
import torch
from torch.optim.optimizer import Optimizer, required
class RAdam(Optimizer):
    """Rectified Adam (RAdam) optimizer.

    Implements the variance rectification of Liu et al., "On the Variance of
    the Adaptive Learning Rate and Beyond". While the approximated simple
    moving average length (``N_sma``) is below 5 the rectification term is
    undefined; the step then either degenerates to SGD-with-momentum
    (``degenerated_to_sgd=True``) or is skipped.

    Args:
        params: iterable of parameters or parameter-group dicts.
        lr: learning rate.
        betas: coefficients for the first/second moment running averages.
        eps: term added to the denominator for numerical stability.
        weight_decay: L2 penalty applied directly to the parameters.
        degenerated_to_sgd: fall back to an SGD-style step while the
            rectification term is not yet defined.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        self.degenerated_to_sgd = degenerated_to_sgd
        if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
            # Step-size buffers depend on betas, so groups with different betas
            # get a private buffer instead of sharing the default one.
            for param in params:
                if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
                    param['buffer'] = [[None, None, None] for _ in range(10)]
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        buffer=[[None, None, None] for _ in range(10)])
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.

        Returns:
            The loss returned by ``closure``, or ``None``.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:

            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')

                # Moments are kept in fp32 regardless of the parameter dtype.
                p_data_fp32 = p.data.float()

                state = self.state[p]

                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # NOTE: use the keyword `value=`/`alpha=` overloads; the old
                # positional-scalar forms are deprecated and removed in recent torch.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                state['step'] += 1
                # The rectification term depends only on `step`, so cache it in
                # a small ring buffer keyed by step % 10.
                buffered = group['buffer'][int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma

                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        step_size = math.sqrt(
                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (
                                    N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    elif self.degenerated_to_sgd:
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    else:
                        step_size = -1  # sentinel: skip the update entirely
                    buffered[2] = step_size

                # more conservative since it's an approximated value
                if N_sma >= 5:
                    if group['weight_decay'] != 0:
                        p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr'])
                    p.data.copy_(p_data_fp32)
                elif step_size > 0:
                    if group['weight_decay'] != 0:
                        p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                    p_data_fp32.add_(exp_avg, alpha=-step_size * group['lr'])
                    p.data.copy_(p_data_fp32)

        return loss
spring | spring-main/spring_amr/utils.py | from glob import glob
from pathlib import Path
import torch
from transformers import AutoConfig
from spring_amr.dataset import AMRDataset, AMRDatasetTokenBatcherAndLoader
from spring_amr.modeling_bart import AMRBartForConditionalGeneration
from spring_amr.tokenization_bart import AMRBartTokenizer, PENMANBartTokenizer
def instantiate_model_and_tokenizer(
        name=None,
        checkpoint=None,
        additional_tokens_smart_init=True,
        dropout = 0.15,
        attention_dropout = 0.15,
        from_pretrained = True,
        init_reverse = False,
        collapse_name_ops = False,
        penman_linearization = False,
        use_pointer_tokens = False,
        raw_graph = False,
):
    """Build the AMR BART model and matching tokenizer.

    Args:
        name: HuggingFace model name; defaults to 'facebook/bart-large'.
        checkpoint: optional path to a saved state dict ({'model': ...}).
        additional_tokens_smart_init: initialize embeddings of the added AMR
            tokens from the mean of their sub-token embeddings (plus noise).
        dropout / attention_dropout: dropout rates written into the config.
        from_pretrained: load pretrained BART weights vs. random init.
        init_reverse: also initialize the graph-to-text reverse model.
        collapse_name_ops / penman_linearization / use_pointer_tokens /
            raw_graph: tokenizer linearization options.

    Returns:
        (model, tokenizer) tuple.
    """
    if raw_graph:
        assert penman_linearization
    skip_relations = False
    if name is None:
        name = 'facebook/bart-large'
    # bart-base shares bart-large's vocabulary, so reuse its tokenizer files.
    if name == 'facebook/bart-base':
        tokenizer_name = 'facebook/bart-large'
    else:
        tokenizer_name = name
    config = AutoConfig.from_pretrained(name)
    config.output_past = False
    config.no_repeat_ngram_size = 0
    config.prefix = " "
    config.output_attentions = True
    config.dropout = dropout
    config.attention_dropout = attention_dropout
    if penman_linearization:
        tokenizer = PENMANBartTokenizer.from_pretrained(
            tokenizer_name,
            collapse_name_ops=collapse_name_ops,
            use_pointer_tokens=use_pointer_tokens,
            raw_graph=raw_graph,
            config=config,
        )
    else:
        tokenizer = AMRBartTokenizer.from_pretrained(
            tokenizer_name,
            collapse_name_ops=collapse_name_ops,
            use_pointer_tokens=use_pointer_tokens,
            config=config,
        )
    if from_pretrained:
        model = AMRBartForConditionalGeneration.from_pretrained(name, config=config)
    else:
        model = AMRBartForConditionalGeneration(config)
    # Grow the embedding matrix to cover the AMR-specific tokens just added.
    model.resize_token_embeddings(len(tokenizer.encoder))
    if additional_tokens_smart_init:
        # "Smart init": each new token's embedding is seeded from the mean of
        # the embeddings of its semantically meaningful pieces, plus noise.
        modified = 0
        for tok, idx in tokenizer.encoder.items():
            tok = tok.lstrip(tokenizer.INIT)
            # Tokens below old_enc_size are original BART vocab: keep as-is.
            if idx < tokenizer.old_enc_size:
                continue
            elif tok.startswith('<pointer:') and tok.endswith('>'):
                tok_split = ['pointer', str(tok.split(':')[1].strip('>'))]
            elif tok.startswith('<'):
                continue
            elif tok.startswith(':'):
                if skip_relations:
                    continue
                elif tok.startswith(':op'):
                    tok_split = ['relation', 'operator', str(int(tok[3:]))]
                elif tok.startswith(':snt'):
                    tok_split = ['relation', 'sentence', str(int(tok[4:]))]
                elif tok.startswith(':ARG'):
                    tok_split = ['relation', 'argument', str(int(tok[4:]))]
                else:
                    tok_split = ['relation'] + tok.lstrip(':').split('-')
            else:
                tok_split = tok.split('-')
            # Map each piece to an existing vocab entry if possible,
            # otherwise fall back to BPE sub-tokens.
            tok_split_ = tok_split
            tok_split = []
            for s in tok_split_:
                s_ = s + tokenizer.INIT
                if s_ in tokenizer.encoder:
                    tok_split.append(s_)
                else:
                    tok_split.extend(tokenizer._tok_bpe(s))

            vecs = []
            for s in tok_split:
                idx_split = tokenizer.encoder.get(s, -1)
                if idx_split > -1:
                    vec_split = model.model.shared.weight.data[idx_split].clone()
                    vecs.append(vec_split)

            if vecs:
                vec = torch.stack(vecs, 0).mean(0)
                noise = torch.empty_like(vec)
                noise.uniform_(-0.1, +0.1)
                model.model.shared.weight.data[idx] = vec + noise
                modified += 1
    if init_reverse:
        model.init_reverse_model()
    if checkpoint is not None:
        model.load_state_dict(torch.load(checkpoint, map_location='cpu')['model'])
    return model, tokenizer
def instantiate_loader(
        glob_pattn,
        tokenizer,
        batch_size=500,
        evaluation=True,
        out=None,
        use_recategorization=False,
        remove_longer_than=None,
        remove_wiki=False,
        dereify=True,
):
    """Build an :class:`AMRDatasetTokenBatcherAndLoader` from AMR file globs.

    Args:
        glob_pattn: a glob pattern (str/Path) or an iterable of patterns.
        tokenizer: tokenizer used to linearize/encode the graphs.
        batch_size: token budget per batch (see the token batcher).
        evaluation: if True, batches are not shuffled and the matched gold
            files are concatenated into ``out`` (required in that case).
        out: path of the concatenated gold file written during evaluation.
        use_recategorization / remove_longer_than / remove_wiki / dereify:
            forwarded to :class:`AMRDataset`.

    Returns:
        An iterable loader over token-budgeted batches.
    """
    if isinstance(glob_pattn, (str, Path)):
        glob_pattn = [glob_pattn]
    # Expand every pattern; order follows the pattern list, then glob order.
    paths = [Path(p) for gpattn in glob_pattn for p in glob(gpattn)]
    if evaluation:
        # Real validation, not `assert` (which is stripped under -O).
        if out is None:
            raise ValueError("`out` must be provided when evaluation=True")
        # Concatenate the gold AMR files so evaluation scripts can diff
        # predictions against a single reference file.
        Path(out).write_text(
            '\n\n'.join(p.read_text() for p in paths))
    dataset = AMRDataset(
        paths,
        tokenizer,
        use_recategorization=use_recategorization,
        remove_longer_than=remove_longer_than,
        remove_wiki=remove_wiki,
        dereify=dereify,
    )
    loader = AMRDatasetTokenBatcherAndLoader(
        dataset,
        batch_size=batch_size,
        shuffle=not evaluation,
    )
    return loader
| 5,027 | 28.751479 | 84 | py |
spring | spring-main/spring_amr/dataset.py | import logging
import random
import torch
from cached_property import cached_property
from torch.utils.data import Dataset
from spring_amr.IO import read_raw_amr_data
def reverse_direction(x, y, pad_token_id=1):
    """Swap source and target so the model trains graph -> text.

    The graph side (`y`) becomes the encoder input: its decoder inputs are
    re-joined with the final label token to recover the full sequence, and
    pad positions are masked out. The sentence side (`x`) becomes the
    shifted decoder input / label pair.
    """
    graph_ids = torch.cat((y['decoder_input_ids'], y['lm_labels'][:, -1:]), dim=1)
    # 1 everywhere except pad positions, matching the ids' dtype.
    graph_mask = (graph_ids != pad_token_id).to(graph_ids.dtype)
    sentence_ids = x['input_ids']
    new_x = {'input_ids': graph_ids, 'attention_mask': graph_mask}
    new_y = {
        'decoder_input_ids': sentence_ids[:, :-1],
        'lm_labels': sentence_ids[:, 1:],
    }
    return new_x, new_y
class AMRDataset(Dataset):
    """Dataset of AMR graphs paired with their source sentences.

    Each graph is linearized once at construction time with the provided
    tokenizer. Graphs whose sentence cannot be encoded are skipped, as are
    graphs whose linearization exceeds ``remove_longer_than`` tokens;
    sequences longer than 1024 are kept but logged (BART cannot handle them).
    """

    def __init__(
        self,
        paths,
        tokenizer,
        device=torch.device('cpu'),
        use_recategorization=False,
        remove_longer_than=None,
        remove_wiki=False,
        dereify=True,
    ):
        self.paths = paths
        self.tokenizer = tokenizer
        self.device = device
        graphs = read_raw_amr_data(paths, use_recategorization, remove_wiki=remove_wiki, dereify=dereify)
        self.graphs = []
        self.sentences = []
        self.linearized = []
        self.linearized_extra = []
        self.remove_longer_than = remove_longer_than
        for g in graphs:
            l, e = self.tokenizer.linearize(g)

            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only tokenizer failures should skip.
            try:
                self.tokenizer.batch_encode_sentences([g.metadata['snt']])
            except Exception:
                logging.warning('Invalid sentence!')
                continue

            if remove_longer_than and len(l) > remove_longer_than:
                continue
            if len(l) > 1024:
                logging.warning('Sequence longer than 1024 included. BART does not support it!')

            self.sentences.append(g.metadata['snt'])
            self.graphs.append(g)
            self.linearized.append(l)
            self.linearized_extra.append(e)

    def __len__(self):
        return len(self.sentences)

    def __getitem__(self, idx):
        sample = {}
        sample['id'] = idx
        sample['sentences'] = self.sentences[idx]
        if self.linearized is not None:
            sample['linearized_graphs_ids'] = self.linearized[idx]
            sample.update(self.linearized_extra[idx])
        return sample

    def size(self, sample):
        # Target-side token count; used by the token-budget batch sampler.
        return len(sample['linearized_graphs_ids'])

    def collate_fn(self, samples, device=torch.device('cpu')):
        """Encode a list of samples into (x, y, extra) model batches."""
        x = [s['sentences'] for s in samples]
        x, extra = self.tokenizer.batch_encode_sentences(x, device=device)
        if 'linearized_graphs_ids' in samples[0]:
            y = [s['linearized_graphs_ids'] for s in samples]
            y, extra_y = self.tokenizer.batch_encode_graphs_from_linearized(y, samples, device=device)
            extra.update(extra_y)
        else:
            y = None
        extra['ids'] = [s['id'] for s in samples]
        return x, y, extra
class AMRDatasetTokenBatcherAndLoader:
    """Iterates an :class:`AMRDataset` in batches bounded by a token budget.

    A batch's cost is ``longest_sample * num_samples`` (padding-aware), and a
    new batch is started whenever adding the next sample would exceed
    ``batch_size`` tokens.
    """

    def __init__(self, dataset, batch_size=800 ,device=torch.device('cpu'), shuffle=False, sort=False):
        # Shuffling and length-sorting are mutually exclusive orderings.
        assert not (shuffle and sort)
        self.batch_size = batch_size
        self.tokenizer = dataset.tokenizer
        self.dataset = dataset
        self.device = device
        self.shuffle = shuffle
        self.sort = sort

    def __iter__(self):
        # ids -> samples -> collated (x, y, extra) batches, all lazily.
        it = self.sampler()
        it = ([[self.dataset[s] for s in b] for b in it])
        it = (self.dataset.collate_fn(b, device=self.device) for b in it)
        return it

    @cached_property
    def sort_ids(self):
        # Dataset indices ordered by sentence length (whitespace tokens),
        # longest first; computed once and cached.
        lengths = [len(s.split()) for s in self.dataset.sentences]
        ids, _ = zip(*sorted(enumerate(lengths), reverse=True))
        ids = list(ids)
        return ids

    def sampler(self):
        """Yield lists of dataset indices, each within the token budget."""
        # Reversed so that ids.pop() consumes the dataset in original order.
        ids = list(range(len(self.dataset)))[::-1]
        if self.shuffle:
            random.shuffle(ids)
        if self.sort:
            ids = self.sort_ids.copy()

        batch_longest = 0
        batch_nexamps = 0
        batch_ntokens = 0
        batch_ids = []

        def discharge():
            # Emit the current batch and reset the accumulators in place.
            nonlocal batch_longest
            nonlocal batch_nexamps
            nonlocal batch_ntokens
            ret = batch_ids.copy()
            batch_longest *= 0
            batch_nexamps *= 0
            batch_ntokens *= 0
            batch_ids[:] = []
            return ret

        while ids:
            idx = ids.pop()
            size = self.dataset.size(self.dataset[idx])
            # Cost if this sample were added (batch pads to its longest item).
            cand_batch_ntokens = max(size, batch_longest) * (batch_nexamps + 1)
            if cand_batch_ntokens > self.batch_size and batch_ids:
                yield discharge()
            batch_longest = max(batch_longest, size)
            batch_nexamps += 1
            batch_ntokens = batch_longest * batch_nexamps
            batch_ids.append(idx)

            # A single over-budget sample still forms its own batch.
            if len(batch_ids) == 1 and batch_ntokens > self.batch_size:
                yield discharge()

        if batch_ids:
            yield discharge()
| 4,991 | 32.503356 | 105 | py |
spring | spring-main/spring_amr/modeling_bart.py | import copy
import math
import random
from typing import *
import torch
from torch import Tensor
from torch import nn
from torch.nn import functional as F
from transformers import modeling_bart as bart
from transformers.modeling_utils import BeamHypotheses, calc_banned_ngram_tokens, calc_banned_bad_words_ids, \
top_k_top_p_filtering
def extract_backreferences(ids, num_embeddings, backpointer_idx):
    """Separate pointer tokens from regular vocabulary tokens.

    Ids >= ``num_embeddings`` are pointer tokens: in the returned ids they
    are replaced by ``backpointer_idx``, and their backreference is the
    pointed-to offset (``id - num_embeddings``). Non-pointer positions keep
    their own sequence position as a (self-)backreference. The input tensor
    is not modified.
    """
    pointer_mask = ids >= num_embeddings
    positions = torch.arange(ids.size(1), dtype=ids.dtype, device=ids.device)
    backreferences = torch.where(
        pointer_mask,
        ids - num_embeddings,
        positions.expand_as(ids),
    )
    plain_ids = ids.masked_fill(pointer_mask, backpointer_idx)
    return plain_ids, backreferences
class AMRBartEncoder(nn.Module):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer
    is a :class:`EncoderLayer`.

    Args:
        config: BartConfig
        embed_tokens: shared input embedding table
        backpointer_idx: vocabulary index used to embed pointer tokens
    """

    def __init__(self, config: bart.BartConfig, embed_tokens, backpointer_idx):
        super().__init__()
        self.backpointer_idx = backpointer_idx

        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states

        embed_dim = embed_tokens.embedding_dim
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
        self.padding_idx = embed_tokens.padding_idx
        self.max_source_positions = config.max_position_embeddings

        self.embed_tokens = embed_tokens

        if config.static_position_embeddings:
            self.embed_positions = bart.SinusoidalPositionalEmbedding(
                config.max_position_embeddings, embed_dim, self.padding_idx
            )
        else:
            self.embed_positions = bart.LearnedPositionalEmbedding(
                config.max_position_embeddings, embed_dim, self.padding_idx, #config.extra_pos_embeddings,
            )
        self.layers = nn.ModuleList([bart.EncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layernorm_embedding = bart.LayerNorm(embed_dim) if config.normalize_embedding else nn.Identity()
        # mbart has one extra layer_norm
        self.layer_norm = bart.LayerNorm(config.d_model) if config.normalize_before else None

    def forward(
        self, input_ids, embedded=None, attention_mask=None,
    ):
        """
        Args:
            input_ids (LongTensor): tokens in the source language of shape
                `(batch, src_len)`; ids >= vocab size are pointer tokens.
            embedded (Tensor, optional): extra embeddings added to the
                token + position embeddings before the first layer.
            attention_mask (torch.LongTensor): indicating which indices are padding tokens.
        Returns:
            Tuple comprised of:
                - **x** (Tensor): the last encoder layer's output of
                  shape `(batch, src_len, embed_dim)` (transposed back below)
                - **encoder_states** (List[Tensor]): all intermediate
                  hidden states, `(batch, src_len, embed_dim)` each.
                  Only populated if *self.output_hidden_states:* is True.
                - **all_attentions** (List[Tensor]): Attention weights for each layer.
                During training might not be of length n_layers because of layer dropout.
        """
        # check attention mask and invert
        if attention_mask is not None:
            attention_mask = bart.invert_mask(attention_mask)

        # Pointer tokens are embedded as `backpointer_idx`; the extracted
        # backreferences themselves are not used on the encoder side.
        input_ids, backreferences = extract_backreferences(
            input_ids, self.embed_tokens.num_embeddings, self.backpointer_idx)
        inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        embed_pos = self.embed_positions(input_ids)
        x = inputs_embeds + embed_pos
        if embedded is not None:
            x += embedded
        x = self.layernorm_embedding(x)
        x = F.dropout(x, p=self.dropout, training=self.training)

        # B x T x C -> T x B x C
        x = x.transpose(0, 1)

        encoder_states, all_attentions = [], []
        for encoder_layer in self.layers:
            if self.output_hidden_states:
                encoder_states.append(x)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):  # skip the layer
                attn = None
            else:
                x, attn = encoder_layer(x, attention_mask)

            if self.output_attentions:
                all_attentions.append(attn)

        if self.layer_norm:
            x = self.layer_norm(x)
        if self.output_hidden_states:
            encoder_states.append(x)

        # T x B x C -> B x T x C
        encoder_states = [hidden_state.transpose(0, 1) for hidden_state in encoder_states]
        x = x.transpose(0, 1)
        return x, encoder_states, all_attentions
class AMRBartDecoder(nn.Module):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer
    is a :class:`DecoderLayer`.

    On top of the standard BART decoder this adds a pointer mechanism:
    learned query/key projections over the decoder states produce per-step
    scores against previous positions (only active when ``amr_mode``).

    Args:
        config: BartConfig
        embed_tokens (torch.nn.Embedding): output embedding
        backpointer_idx: vocabulary index used to embed pointer tokens
        amr_mode: whether pointer scores are computed (else filled with -inf)
    """

    def __init__(self, config: bart.BartConfig, embed_tokens: nn.Embedding, backpointer_idx, amr_mode=True):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = embed_tokens.padding_idx
        self.max_target_positions = config.max_position_embeddings
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        self.backpointer_idx = backpointer_idx

        embed_dim = embed_tokens.embedding_dim

        self.embed_tokens = embed_tokens
        if config.static_position_embeddings:
            self.embed_positions = bart.SinusoidalPositionalEmbedding(
                config.max_position_embeddings, embed_dim, self.padding_idx
            )
        else:
            self.embed_positions = bart.LearnedPositionalEmbedding(
                config.max_position_embeddings, embed_dim, self.padding_idx, #config.extra_pos_embeddings,
            )
        self.layers = nn.ModuleList(
            [bart.DecoderLayer(config) for _ in range(config.decoder_layers)]
        )  # type: List[DecoderLayer]
        self.layernorm_embedding = bart.LayerNorm(config.d_model) if config.normalize_embedding else nn.Identity()
        self.layer_norm = bart.LayerNorm(config.d_model) if config.add_final_layer_norm else None

        # Pointer network projections (queries/keys over decoder states).
        self.pointer_k = nn.Linear(config.d_model, config.d_model)
        # self.pointer_k.weight.data = self.layers[-1].self_attn.k_proj.weight.data.clone()

        self.pointer_q = nn.Linear(config.d_model, config.d_model)
        # self.pointer_q.weight.data = self.layers[-1].self_attn.q_proj.weight.data.clone()

        # self.pointer_k = nn.Sequential(
        #     nn.Linear(config.d_model, config.decoder_ffn_dim),
        #     nn.GELU(),
        #     nn.Linear(config.decoder_ffn_dim, config.d_model),
        # )
        # self.pointer_q = nn.Sequential(
        #     nn.Linear(config.d_model, config.decoder_ffn_dim),
        #     nn.GELU(),
        #     nn.Linear(config.decoder_ffn_dim, config.d_model),
        # )

        self.amr_mode = amr_mode

    def forward(
        self,
        input_ids,
        encoder_hidden_states,
        encoder_padding_mask,
        decoder_padding_mask,
        decoder_causal_mask,
        decoder_cached_states=None,
        use_cache=False,
        **unused
    ):
        """
        Includes several features from "Jointly Learning to Align and
        Translate with Transformer Models" (Garg et al., EMNLP 2019).

        Args:
            input_ids (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for teacher forcing
            encoder_hidden_states: output from the encoder, used for
                encoder-side attention
            encoder_padding_mask: for ignoring pad tokens
            decoder_cached_states (dict or None): dictionary used for storing state during generation
        Returns:
            tuple:
                - ``(x, scores)``: decoder features `(batch, tgt_len, embed_dim)`
                  and pointer scores `(batch, tgt_len, k_len)`
                - next cache (or None when ``use_cache`` is False)
                - hidden states
                - attentions
        """
        # check attention mask and invert
        if encoder_padding_mask is not None:
            encoder_padding_mask = bart.invert_mask(encoder_padding_mask)

        # Pointer tokens are embedded as `backpointer_idx`.
        input_ids, backreferences = extract_backreferences(
            input_ids,
            self.embed_tokens.num_embeddings,
            self.backpointer_idx)
        # embed positions
        embed_pos = self.embed_positions(input_ids, use_cache=use_cache)
        positions = embed_pos

        # to do this during prediction the old positions should be removed
        if use_cache:
            input_ids = input_ids[:, -1:]
            positions = positions[:, -1:]  # happens after we embed them
            # assert input_ids.ne(self.padding_idx).any()

        x = self.embed_tokens(input_ids) * self.embed_scale
        x += positions
        x = self.layernorm_embedding(x)
        x = F.dropout(x, p=self.dropout, training=self.training)

        # Convert to Bart output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)
        x = x.transpose(0, 1)
        encoder_hidden_states = encoder_hidden_states.transpose(0, 1)

        # decoder layers
        all_hidden_states = ()
        all_self_attns = ()
        next_decoder_cache = []
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if self.output_hidden_states:
                all_hidden_states += (x,)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):
                continue

            layer_state = decoder_cached_states[idx] if decoder_cached_states is not None else None

            x, layer_self_attn, layer_past = decoder_layer(
                x,
                encoder_hidden_states,
                encoder_attn_mask=encoder_padding_mask,
                decoder_padding_mask=decoder_padding_mask,
                layer_state=layer_state,
                causal_mask=decoder_causal_mask,
            )

            if use_cache:
                next_decoder_cache.append(layer_past.copy())

            if self.layer_norm and (idx == len(self.layers) - 1):  # last layer of mbart
                x = self.layer_norm(x)
            if self.output_attentions:
                all_self_attns += (layer_self_attn,)

        # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)
        all_hidden_states = [hidden_state.transpose(0, 1) for hidden_state in all_hidden_states]
        x = x.transpose(0, 1)
        encoder_hidden_states = encoder_hidden_states.transpose(0, 1)

        # Pointer scores: queries of the current steps against keys of all
        # steps seen so far (previous keys come from the cache when decoding).
        xq = self.pointer_q(x)
        xk = self.pointer_k(x)

        if decoder_cached_states is not None:
            if 'prev_key' in decoder_cached_states[-1].get('pointer', {}):
                last_state = decoder_cached_states[-1]['pointer']
                xk = torch.cat([last_state['prev_key'], xk], dim=1)
        next_state = {'pointer': {'prev_key': xk}}
        if use_cache:
            next_decoder_cache.append(next_state)

        if self.amr_mode:
            scores = torch.einsum('bqh,bkh->bqk', xq, xk)

            # Upper-triangular -inf mask keeps pointers strictly backward.
            if decoder_cached_states:
                mask = torch.full_like(scores[0], float('-inf'))
                mask = mask.triu(diagonal=xk.size(1) - 1)
            else:
                mask = torch.full_like(scores[0], float('-inf'))
                mask = mask.triu()

            scores += mask.unsqueeze(0)
        else:
            # Sentence mode: pointer mechanism disabled.
            scores = torch.full((xq.size(0), xq.size(1), xk.size(1)), float('-inf'), device=xq.device)

        if use_cache:
            next_cache = ((encoder_hidden_states, encoder_padding_mask), next_decoder_cache)
        else:
            next_cache = None
        return (x, scores), next_cache, all_hidden_states, list(all_self_attns)
class AMRBartModel(bart.PretrainedBartModel):
    """BART encoder-decoder whose decoder additionally emits pointer scores.

    Encoder and decoder share one token-embedding table. A special
    "backpointer" token index is handed to both sub-modules; when not given
    it defaults to the last row of the shared embedding matrix.
    """

    def __init__(self, config: bart.BartConfig, backpointer_idx=None):
        super().__init__(config)
        # Attentions are always produced: the pointer mechanism consumes them.
        self.output_attentions = True
        self.output_hidden_states = config.output_hidden_states
        self.padding_idx = config.pad_token_id
        self.shared = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
        # Default backpointer token: the last embedding row.
        self.backpointer_idx = (
            backpointer_idx if backpointer_idx is not None else self.shared.num_embeddings - 1
        )
        self.encoder = AMRBartEncoder(config, self.shared, backpointer_idx=self.backpointer_idx)
        self.decoder = AMRBartDecoder(config, self.shared, backpointer_idx=self.backpointer_idx)
        self.init_weights()

    @property
    def sentence_mode(self):
        # NOTE(review): this proxies the decoder's `amr_mode` flag despite the
        # different name — looks intentional, confirm against callers.
        return self.decoder.amr_mode

    @sentence_mode.setter
    def sentence_mode(self, value):
        assert isinstance(value, bool)
        self.decoder.amr_mode = value

    def forward(
        self,
        input_ids,
        attention_mask=None,
        decoder_input_ids=None,
        encoder_outputs: Optional[Tuple] = None,
        decoder_attention_mask=None,
        decoder_cached_states=None,
        use_cache=False,
    ):
        """Run the encoder (unless precomputed) and decoder; return their outputs.

        Returns ``decoder_outputs + encoder_outputs`` where the decoder's first
        element is a ``(hidden_states, pointer_scores)`` pair.
        """
        if use_cache:
            # Incremental decoding: masking is carried by the cached states.
            decoder_padding_mask = causal_mask = None
            assert decoder_input_ids is not None
        else:
            # Build decoder inputs/masks if the caller did not supply them.
            decoder_input_ids, decoder_padding_mask, causal_mask = bart._prepare_bart_decoder_inputs(
                self.config,
                input_ids,
                decoder_input_ids=decoder_input_ids,
                decoder_padding_mask=decoder_attention_mask,
                causal_mask_dtype=self.shared.weight.dtype,
            )
        if encoder_outputs is None:
            encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask)
        assert isinstance(encoder_outputs, tuple)
        decoder_outputs = self.decoder(
            decoder_input_ids,
            encoder_outputs[0],
            attention_mask,
            decoder_padding_mask,
            decoder_causal_mask=causal_mask,
            decoder_cached_states=decoder_cached_states,
            use_cache=use_cache,
        )
        # First decoder element holds (hidden states, pointer scores).
        assert isinstance(decoder_outputs[0][0], torch.Tensor)
        assert isinstance(decoder_outputs[0][1], torch.Tensor)
        encoder_outputs: Tuple = bart._filter_out_falsey_values(encoder_outputs)
        return decoder_outputs + encoder_outputs

    def get_input_embeddings(self):
        return self.shared

    def set_input_embeddings(self, value):
        # Keep encoder/decoder embedding references in sync with the new table.
        self.shared = value
        self.encoder.embed_tokens = value
        self.decoder.embed_tokens = value

    def get_output_embeddings(self):
        # Built on the fly so it always reflects the current shared weights.
        return bart._make_linear_from_emb(self.shared)
class AMRBartForConditionalGeneration(bart.PretrainedBartModel):
base_model_prefix = "model"
def __init__(self, config: bart.BartConfig, backpointer_idx=None):
super().__init__(config)
base_model = AMRBartModel(config, backpointer_idx)
self.model = base_model
self.pad_index = base_model.shared.padding_idx
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
self.backpointer_idx = backpointer_idx
self._rev = None
def init_reverse_model(self):
rev = AMRBartForConditionalGeneration(self.model.config, self.backpointer_idx)
rev.model.shared = self.model.shared
rev.model.encoder = self.model.encoder
rev.model.decoder.embed_tokens = self.model.decoder.embed_tokens
rev.model.decoder.embed_positions = self.model.decoder.embed_positions
self.amr_mode = True
rev.amr_mode = False
self._rev = rev
@property
def rev(self):
if self._rev is None:
return self
else:
return self._rev
@property
def amr_mode(self):
return self.model.decoder.amr_mode
@amr_mode.setter
def amr_mode(self, value):
assert isinstance(value, bool)
self.model.decoder.amr_mode = value
def forward(
self,
input_ids,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=None,
decoder_attention_mask=None,
decoder_cached_states=None,
lm_labels=None,
use_cache=False,
**unused
):
r"""
lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should either be in ``[0, ..., config.vocab_size]`` or -100 (see ``input_ids`` docstring).
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens
with labels
in ``[0, ..., config.vocab_size]``.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
masked_lm_loss (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
# Mask filling only works for bart-large
from transformers import BartTokenizer, BartForConditionalGeneration
tokenizer = BartTokenizer.from_pretrained('bart-large')
TXT = "My friends are <mask> but they eat too many carbs."
model = BartForConditionalGeneration.from_pretrained('bart-large')
input_ids = tokenizer.batch_encode_plus([TXT], return_tensors='pt')['input_ids']
logits = model(input_ids)[0]
masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
probs = logits[0, masked_index].softmax(dim=0)
values, predictions = probs.topk(5)
tokenizer.decode(predictions).split()
# ['good', 'great', 'all', 'really', 'very']
"""
# outputs = self.model(
# input_ids,
# attention_mask=attention_mask,
# decoder_input_ids=decoder_input_ids,
# encoder_outputs=encoder_outputs,
# decoder_attention_mask=decoder_attention_mask,
# decoder_cached_states=decoder_cached_states,
# use_cache=use_cache,
# )
# lm_logits = F.linear(outputs[0][0], self.model.shared.weight, bias=self.final_logits_bias)
# po_logits = outputs[0][1]
# po_padding = torch.full_like(po_logits[:, :, 0:1], float('-inf'))
# po_padding = po_padding.repeat(1, 1, 1024 - po_logits.size(-1))
# po_logits = torch.cat([po_logits, po_padding], -1)
# uni_logits = torch.cat([lm_logits, po_logits], -1)
#
# outputs = (uni_logits,) + outputs[1:] # Add cache, hidden states and attention if they are here
outputs = self.compute_logits(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
decoder_cached_states=decoder_cached_states,
use_cache=use_cache,
)
if lm_labels is not None:
uni_logits = outputs[0]
masked_lm_loss = F.nll_loss(
uni_logits.log_softmax(-1).contiguous().view(-1, uni_logits.size(-1)),
lm_labels.contiguous().view(-1),
ignore_index=self.pad_index)
outputs = (masked_lm_loss,) + outputs
return outputs
def compute_logits(
self,
input_ids,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=None,
decoder_attention_mask=None,
decoder_cached_states=None,
use_cache=False,
):
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
decoder_cached_states=decoder_cached_states,
use_cache=use_cache,
)
lm_logits = F.linear(outputs[0][0], self.model.shared.weight, bias=self.final_logits_bias)
po_logits = outputs[0][1]
po_padding = torch.full_like(po_logits[:, :, 0:1], float('-inf'))
po_padding = po_padding.repeat(1, 1, 1024 - po_logits.size(-1))
po_logits = torch.cat([po_logits, po_padding], -1)
uni_logits = torch.cat([lm_logits, po_logits], -1)
outputs = (uni_logits,) + outputs[1:] # Add cache, hidden states and attention if they are here
return outputs
    @torch.no_grad()
    def generate(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        max_length: Optional[int] = None,
        min_length: Optional[int] = None,
        do_sample: Optional[bool] = None,
        early_stopping: Optional[bool] = None,
        num_beams: Optional[int] = None,
        temperature: Optional[float] = None,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        repetition_penalty: Optional[float] = None,
        bad_words_ids: Optional[Iterable[int]] = None,
        bos_token_id: Optional[int] = None,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[int] = None,
        length_penalty: Optional[float] = None,
        no_repeat_ngram_size: Optional[int] = None,
        num_return_sequences: Optional[int] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        decoder_start_token_id: Optional[int] = None,
        use_cache: Optional[bool] = None,
        **model_specific_kwargs
    ) -> torch.LongTensor:
        r"""Generate sequences by greedy decoding, beam search, or sampling.

        This is a copy of the ``transformers`` ``generate`` implementation with
        one local modification: the effective vocabulary size is enlarged by
        1024 entries so the search space also covers the pointer logits that
        ``compute_logits`` concatenates after the LM logits.

        Every argument left as ``None`` falls back to the matching attribute of
        ``self.config``; see the upstream ``PreTrainedModel.generate``
        documentation for the meaning of each knob.

        Returns:
            ``torch.LongTensor`` of shape
            ``(batch_size * num_return_sequences, sequence_length)``;
            ``sequence_length`` is at most ``max_length``, shorter if all
            batches finished early on ``eos_token_id``.
        """
        # We cannot generate if the model does not have a LM head
        if self.get_output_embeddings() is None:
            raise AttributeError(
                "You tried to generate sequences with a model that does not have a LM Head."
                "Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )"
            )
        # Resolve every unspecified generation knob from the model config.
        max_length = max_length if max_length is not None else self.config.max_length
        min_length = min_length if min_length is not None else self.config.min_length
        do_sample = do_sample if do_sample is not None else self.config.do_sample
        early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        num_beams = num_beams if num_beams is not None else self.config.num_beams
        temperature = temperature if temperature is not None else self.config.temperature
        top_k = top_k if top_k is not None else self.config.top_k
        top_p = top_p if top_p is not None else self.config.top_p
        repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
        bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
        pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
        eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
        length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
        no_repeat_ngram_size = (
            no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
        )
        bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
        num_return_sequences = (
            num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
        )
        decoder_start_token_id = (
            decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
        )
        if input_ids is not None:
            batch_size = input_ids.shape[0]  # overriden by the input batch_size
        else:
            batch_size = 1
        # Validate the resolved settings before doing any work.
        assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
        assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer."
        assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
        assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
        assert isinstance(use_cache, bool), "`use_cache` should be a boolean."
        assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
        assert temperature > 0, "`temperature` should be strictly positive."
        assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
        assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
        assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
        assert input_ids is not None or (
            isinstance(bos_token_id, int) and bos_token_id >= 0
        ), "If input_ids is not defined, `bos_token_id` should be a positive integer."
        assert pad_token_id is None or (
            isinstance(pad_token_id, int) and (pad_token_id >= 0)
        ), "`pad_token_id` should be a positive integer."
        assert (eos_token_id is None) or (
            isinstance(eos_token_id, int) and (eos_token_id >= 0)
        ), "`eos_token_id` should be a positive integer."
        assert length_penalty > 0, "`length_penalty` should be strictly positive."
        assert (
            isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0
        ), "`no_repeat_ngram_size` should be a positive integer."
        assert (
            isinstance(num_return_sequences, int) and num_return_sequences > 0
        ), "`num_return_sequences` should be a strictly positive integer."
        assert (
            bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)
        ), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
        if input_ids is None:
            assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
                "you should either supply a context to complete as `input_ids` input "
                "or a `bos_token_id` (integer >= 0) as a first token to start the generation."
            )
            input_ids = torch.full(
                (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device,
            )
        else:
            assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
        # not allow to duplicate outputs when greedy decoding
        if do_sample is False:
            if num_beams == 1:
                # no_beam_search greedy generation conditions
                assert (
                    num_return_sequences == 1
                ), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
            else:
                # beam_search greedy generation conditions
                assert (
                    num_beams >= num_return_sequences
                ), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
        # create attention mask if necessary
        # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140
        if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):
            attention_mask = input_ids.ne(pad_token_id).long()
        elif attention_mask is None:
            attention_mask = input_ids.new_ones(input_ids.shape)
        # set pad_token_id to eos_token_id if not set. Important that this is done after
        # attention_mask is created
        if pad_token_id is None and eos_token_id is not None:
            logger.warning(
                "Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(eos_token_id)
            )
            pad_token_id = eos_token_id
        # current position and vocab size
        if hasattr(self.config, "vocab_size"):
            vocab_size = self.config.vocab_size
        elif (
            self.config.is_encoder_decoder
            and hasattr(self.config, "decoder")
            and hasattr(self.config.decoder, "vocab_size")
        ):
            vocab_size = self.config.decoder.vocab_size
        # NOTE(review): if the config provided neither `vocab_size` nor
        # `decoder.vocab_size`, `vocab_size` is unbound below (same shape as
        # the upstream implementation) — confirm this cannot happen here.
        # Reserve 1024 extra ids for pointer positions, matching the -inf
        # padding appended to the pointer logits by `compute_logits`.
        vocab_size += 1024
        # set effective batch size and effective batch multiplier according to do_sample
        if do_sample:
            effective_batch_size = batch_size * num_return_sequences
            effective_batch_mult = num_return_sequences
        else:
            effective_batch_size = batch_size
            effective_batch_mult = 1
        if self.config.is_encoder_decoder:
            if decoder_start_token_id is None:
                decoder_start_token_id = bos_token_id
            assert (
                decoder_start_token_id is not None
            ), "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
            assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self)
            assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder)
            # get encoder and store encoder outputs
            encoder = self.get_encoder()
            encoder_outputs: tuple = encoder(input_ids, attention_mask=attention_mask)
        # Expand input ids if num_beams > 1 or num_return_sequences > 1
        if num_return_sequences > 1 or num_beams > 1:
            input_ids_len = input_ids.shape[-1]
            input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)
            attention_mask = attention_mask.unsqueeze(1).expand(
                batch_size, effective_batch_mult * num_beams, input_ids_len
            )
            input_ids = input_ids.contiguous().view(
                effective_batch_size * num_beams, input_ids_len
            )  # shape: (batch_size * num_return_sequences * num_beams, cur_len)
            attention_mask = attention_mask.contiguous().view(
                effective_batch_size * num_beams, input_ids_len
            )  # shape: (batch_size * num_return_sequences * num_beams, cur_len)
        if self.config.is_encoder_decoder:
            # create empty decoder_input_ids
            input_ids = torch.full(
                (effective_batch_size * num_beams, 1),
                decoder_start_token_id,
                dtype=torch.long,
                device=next(self.parameters()).device,
            )
            cur_len = 1
            assert (
                batch_size == encoder_outputs[0].shape[0]
            ), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} "
            # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
            expanded_batch_idxs = (
                torch.arange(batch_size)
                .view(-1, 1)
                .repeat(1, num_beams * effective_batch_mult)
                .view(-1)
                .to(input_ids.device)
            )
            # expand encoder_outputs
            encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])
        else:
            encoder_outputs = None
            cur_len = input_ids.shape[-1]
        # Dispatch to beam search or single-sequence decoding.
        if num_beams > 1:
            output = self._generate_beam_search(
                input_ids,
                cur_len=cur_len,
                max_length=max_length,
                min_length=min_length,
                do_sample=do_sample,
                early_stopping=early_stopping,
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                repetition_penalty=repetition_penalty,
                no_repeat_ngram_size=no_repeat_ngram_size,
                bad_words_ids=bad_words_ids,
                bos_token_id=bos_token_id,
                pad_token_id=pad_token_id,
                decoder_start_token_id=decoder_start_token_id,
                eos_token_id=eos_token_id,
                batch_size=effective_batch_size,
                num_return_sequences=num_return_sequences,
                length_penalty=length_penalty,
                num_beams=num_beams,
                vocab_size=vocab_size,
                encoder_outputs=encoder_outputs,
                attention_mask=attention_mask,
                use_cache=use_cache,
                model_specific_kwargs=model_specific_kwargs,
            )
        else:
            output = self._generate_no_beam_search(
                input_ids,
                cur_len=cur_len,
                max_length=max_length,
                min_length=min_length,
                do_sample=do_sample,
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                repetition_penalty=repetition_penalty,
                no_repeat_ngram_size=no_repeat_ngram_size,
                bad_words_ids=bad_words_ids,
                bos_token_id=bos_token_id,
                pad_token_id=pad_token_id,
                decoder_start_token_id=decoder_start_token_id,
                eos_token_id=eos_token_id,
                batch_size=effective_batch_size,
                encoder_outputs=encoder_outputs,
                attention_mask=attention_mask,
                use_cache=use_cache,
                model_specific_kwargs=model_specific_kwargs,
            )
        return output
def _generate_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
early_stopping,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
bos_token_id,
pad_token_id,
eos_token_id,
decoder_start_token_id,
batch_size,
num_return_sequences,
length_penalty,
num_beams,
vocab_size,
encoder_outputs,
attention_mask,
use_cache,
model_specific_kwargs,
):
""" Generate sequences for each example with beam search.
"""
# generated hypotheses
generated_hyps = [
BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping)
for _ in range(batch_size)
]
# scores for each sentence in the beam
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
# for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times
if do_sample is False:
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)
# cache compute states
past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models
# done sentences
done = [False for _ in range(batch_size)]
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs
)
outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size)
next_token_logits = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size)
# if model has past, then set the past variable to speed up decoding
if self._use_cache(outputs, use_cache):
past = outputs[1]
# repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
if repetition_penalty != 1.0:
self.enforce_repetition_penalty_(
next_token_logits, batch_size, num_beams, input_ids, repetition_penalty,
)
if temperature != 1.0:
next_token_logits = next_token_logits / temperature
if self.config.is_encoder_decoder and do_sample is False:
# TODO (PVP) still a bit hacky here - there might be a better solution
next_token_logits = self.prepare_logits_for_generation(
next_token_logits, cur_len=cur_len, max_length=max_length
)
scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)
# set eos token prob to zero if min_length is not reached
if eos_token_id is not None and cur_len < min_length:
scores[:, eos_token_id] = -float("inf")
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
num_batch_hypotheses = batch_size * num_beams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
banned_batch_tokens = calc_banned_ngram_tokens(
input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len
)
for i, banned_tokens in enumerate(banned_batch_tokens):
scores[i, banned_tokens] = -float("inf")
if bad_words_ids is not None:
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
for i, banned_tokens in enumerate(banned_tokens):
scores[i, banned_tokens] = -float("inf")
assert scores.shape == (batch_size * num_beams, vocab_size), "Shapes of scores: {} != {}".format(
scores.shape, (batch_size * num_beams, vocab_size)
)
if do_sample:
_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# Top-p/top-k filtering
_scores = top_k_top_p_filtering(
_scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together to sample from all beam_idxs
_scores = _scores.contiguous().view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
# Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)
probs = F.softmax(_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2)
# Compute next scores
next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2)
# sort the sampled vector to make sure that the first num_beams samples are the best
next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1)
next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2)
else:
next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together (we are keeping top hypothesis accross beams)
next_scores = next_scores.view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True)
assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)
# next batch beam content
next_batch_beam = []
# for each sentence
for batch_idx in range(batch_size):
# if we are done with this sentence
if done[batch_idx]:
assert (
len(generated_hyps[batch_idx]) >= num_beams
), "Batch can only be done if at least {} beams have been generated".format(num_beams)
assert (
eos_token_id is not None and pad_token_id is not None
), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined"
next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch
continue
# next sentence beam content
next_sent_beam = []
# next tokens for this sentence
for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(
zip(next_tokens[batch_idx], next_scores[batch_idx])
):
# get beam and token IDs
beam_id = beam_token_id // vocab_size
token_id = beam_token_id % vocab_size
effective_beam_id = batch_idx * num_beams + beam_id
# add to generated hypotheses if end of sentence or last iteration
if (eos_token_id is not None) and (token_id.item() == eos_token_id):
# if beam_token does not belong to top num_beams tokens, it should not be added
is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams
if is_beam_token_worse_than_top_num_beams:
continue
generated_hyps[batch_idx].add(
input_ids[effective_beam_id].clone(), beam_token_score.item(),
)
else:
# add next predicted token if it is not eos_token
next_sent_beam.append((beam_token_score, token_id, effective_beam_id))
# the beam for next step is full
if len(next_sent_beam) == num_beams:
break
# Check if were done so that we can save a pad step if all(done)
done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(
next_scores[batch_idx].max().item(), cur_len=cur_len
)
# update next beam content
assert len(next_sent_beam) == num_beams, "Beam should always be full"
next_batch_beam.extend(next_sent_beam)
assert len(next_batch_beam) == num_beams * (batch_idx + 1)
# stop when we are done with each sentence
if all(done):
break
# sanity check / prepare next batch
assert len(next_batch_beam) == batch_size * num_beams
beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
beam_tokens = input_ids.new([x[1] for x in next_batch_beam])
beam_idx = input_ids.new([x[2] for x in next_batch_beam])
# re-order batch and update current length
input_ids = input_ids[beam_idx, :]
input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)
cur_len = cur_len + 1
# re-order internal states
if past is not None:
past = self._reorder_cache(past, beam_idx)
# extend attention_mask for new generated input if only decoder
if self.config.is_encoder_decoder is False:
attention_mask = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
# finalize all open beam hypotheses and end to generated hypotheses
for batch_idx in range(batch_size):
if done[batch_idx]:
continue
# test that beam scores match previously calculated scores if not eos and batch_idx not done
if eos_token_id is not None and all(
(token_id % vocab_size).item() is not eos_token_id for token_id in next_tokens[batch_idx]
):
assert torch.all(
next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx]
), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format(
next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[batch_idx],
)
# need to add best num_beams hypotheses to generated hyps
for beam_id in range(num_beams):
effective_beam_id = batch_idx * num_beams + beam_id
final_score = beam_scores[effective_beam_id].item()
final_tokens = input_ids[effective_beam_id]
generated_hyps[batch_idx].add(final_tokens, final_score)
# depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch
output_batch_size = batch_size if do_sample else batch_size * num_return_sequences
output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences
# select the best hypotheses
sent_lengths = input_ids.new(output_batch_size)
best = []
# retrieve best hypotheses
for i, hypotheses in enumerate(generated_hyps):
sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])
for j in range(output_num_return_sequences_per_batch):
effective_batch_idx = output_num_return_sequences_per_batch * i + j
best_hyp = sorted_hyps.pop()[1]
sent_lengths[effective_batch_idx] = len(best_hyp)
best.append(best_hyp)
# shorter batches are filled with pad_token
if sent_lengths.min().item() != sent_lengths.max().item():
assert pad_token_id is not None, "`Pad_token_id` has to be defined"
sent_max_len = min(sent_lengths.max().item() + 1, max_length)
decoded = input_ids.new(output_batch_size, sent_max_len).fill_(pad_token_id)
# fill with hypothesis and eos_token_id if necessary
for i, hypo in enumerate(best):
decoded[i, : sent_lengths[i]] = hypo
if sent_lengths[i] < max_length:
decoded[i, sent_lengths[i]] = eos_token_id
else:
# none of the hypotheses have an eos_token
assert (len(hypo) == max_length for hypo in best)
decoded = torch.stack(best).type(torch.long).to(next(self.parameters()).device)
return decoded
@staticmethod
def _reorder_cache(past: Tuple, beam_idx: Tensor) -> Tuple[Tensor]:
return tuple(layer_past.index_select(1, beam_idx) for layer_past in past)
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
    """Resize the shared token embeddings and keep the logits bias in sync.

    Delegates the actual resizing to the parent class, re-attaches the new
    embedding matrix as the shared one, and pads/truncates the final logits
    bias to match the new vocabulary size.
    """
    previous_size = self.model.shared.num_embeddings
    resized = super().resize_token_embeddings(new_num_tokens)
    self.model.shared = resized
    self._resize_final_logits_bias(new_num_tokens, previous_size)
    return resized
def _resize_final_logits_bias(self, new_num_tokens: int, old_num_tokens: int) -> None:
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
def prepare_inputs_for_generation(self, decoder_input_ids, past, attention_mask, use_cache, **kwargs):
    """Assemble the keyword arguments for one decoding step during generation."""
    assert past is not None, "past has to be defined for encoder_outputs"
    if past[1]:
        encoder_outputs, decoder_cached_states = past
    else:
        # First decoding step: the decoder cache is still empty and `past`
        # carries only the encoder outputs.
        encoder_outputs, decoder_cached_states = past, None
    step_inputs = {
        "input_ids": None,  # encoder outputs are available, so raw input_ids are not needed
        "encoder_outputs": encoder_outputs,
        "decoder_cached_states": decoder_cached_states,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "use_cache": use_cache,  # set False here to disable caching when debugging
    }
    return step_inputs
def prepare_logits_for_generation(self, logits, cur_len, max_length):
    """Force EOS on the last generation step; otherwise pass logits through.

    (Forcing BOS on the first step is intentionally disabled.)
    """
    last_step = cur_len == max_length - 1
    if last_step and self.config.eos_token_id is not None:
        self._force_token_ids_generation(logits, self.config.eos_token_id)
    return logits
def _force_token_ids_generation(self, scores, token_ids) -> None:
"""force one of token_ids to be generated by setting prob of all other tokens to 0"""
if isinstance(token_ids, int):
token_ids = [token_ids]
all_but_token_ids_mask = torch.tensor(
[x for x in range(self.config.vocab_size) if x not in token_ids],
dtype=torch.long,
device=next(self.parameters()).device,
)
assert len(scores.shape) == 2, "scores should be of rank 2 with shape: [batch_size, vocab_size]"
scores[:, all_but_token_ids_mask] = -float("inf")
@staticmethod
def _reorder_cache(past, beam_idx):
    """Reorder the encoder outputs and per-layer decoder caches to follow `beam_idx`."""
    (enc_out, enc_mask), decoder_cached_states = past
    # Remap every attention cache (self- and cross-attention) onto the selected beams.
    reordered_past = [
        {attn_key: bart._reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()}
        for layer_past in decoder_cached_states
    ]
    new_enc_out = None if enc_out is None else enc_out.index_select(0, beam_idx)
    new_enc_mask = None if enc_mask is None else enc_mask.index_select(0, beam_idx)
    return ((new_enc_out, new_enc_mask), reordered_past)
def get_encoder(self):
    """Return the seq2seq encoder submodule."""
    encoder = self.model.encoder
    return encoder
def get_output_embeddings(self):
    """Build the output projection from the shared embeddings.

    A fresh linear layer tied to `model.shared` weights is created on the fly
    at every call rather than stored on the module.
    """
    return bart._make_linear_from_emb(self.model.shared)
| 60,795 | 46.055728 | 236 | py |
spring | spring-main/spring_amr/evaluation.py | import datetime
from pathlib import Path
import penman
from sacrebleu import corpus_bleu
import torch
from tqdm import tqdm
import smatch
from spring_amr.dataset import reverse_direction
def predict_amrs(
        loader, model, tokenizer, beam_size=1, tokens=None, restore_name_ops=False, return_all=False):
    """Run the parser over `loader` and decode predicted AMR graphs.

    Returns one graph per input (the best-status hypothesis), or all
    `beam_size` hypotheses per input when `return_all` is True. If `tokens`
    is given, generation is skipped and those token sequences are decoded
    instead (they must already be grouped per source and in dataset order).
    """
    # Force a deterministic pass over the data; restored before returning.
    shuffle_orig = loader.shuffle
    sort_orig = loader.sort
    loader.shuffle = False
    loader.sort = True
    total = len(loader.dataset)
    model.eval()
    model.amr_mode = True
    if tokens is None:
        ids = []
        tokens = []
        with tqdm(total=total) as bar:
            for x, y, extra in loader:
                ii = extra['ids']
                ids.extend(ii)
                with torch.no_grad():
                    out = model.generate(
                        **x,
                        max_length=1024,
                        decoder_start_token_id=0,
                        num_beams=beam_size,
                        num_return_sequences=beam_size)
                nseq = len(ii)
                # `generate` returns beam_size consecutive rows per input:
                # group them so tokens[k] holds all hypotheses of source k.
                for i1 in range(0, out.size(0), beam_size):
                    tokens_same_source = []
                    tokens.append(tokens_same_source)
                    for i2 in range(i1, i1+beam_size):
                        tokk = out[i2].tolist()
                        tokens_same_source.append(tokk)
                bar.update(nseq)
        # reorder: the loader iterates sorted by length, `ids` maps back to dataset order
        tokens = [tokens[i] for i in ids]
        tokens = [t for tt in tokens for t in tt]
    graphs = []
    for i1 in range(0, len(tokens), beam_size):
        graphs_same_source = []
        graphs.append(graphs_same_source)
        for i2 in range(i1, i1+beam_size):
            tokk = tokens[i2]
            graph, status, (lin, backr) = tokenizer.decode_amr(tokk, restore_name_ops=restore_name_ops)
            # Attach decoding artifacts for downstream inspection.
            graph.status = status
            graph.nodes = lin
            graph.backreferences = backr
            graph.tokens = tokk
            graphs_same_source.append(graph)
        # Rank hypotheses by parse status first (OK < fixed < backoff), then by
        # original beam order as a stable tie-break.
        graphs_same_source[:] = tuple(zip(*sorted(enumerate(graphs_same_source), key=lambda x: (x[1].status.value, x[0]))))[1]
    # Copy gold metadata onto every predicted graph, stamping the annotator.
    for gps, gg in zip(graphs, loader.dataset.graphs):
        for gp in gps:
            metadata = gg.metadata.copy()
            metadata['annotator'] = 'bart-amr'
            metadata['date'] = str(datetime.datetime.now())
            if 'save-date' in metadata:
                del metadata['save-date']
            gp.metadata = metadata
    # Restore the loader's original iteration settings.
    loader.shuffle = shuffle_orig
    loader.sort = sort_orig
    if not return_all:
        graphs = [gg[0] for gg in graphs]
    return graphs
def predict_sentences(loader, model, tokenizer, beam_size=1, tokens=None, return_all=False):
    """Generate sentences from AMR graphs (the reverse, graph-to-text direction).

    Returns one decoded sentence per input, or a list of `beam_size`
    sentences per input when `return_all` is True. If `tokens` is given,
    generation is skipped and those token sequences are decoded instead.
    """
    # Force a deterministic pass over the data; restored before returning.
    shuffle_orig = loader.shuffle
    sort_orig = loader.sort
    loader.shuffle = False
    loader.sort = True
    total = len(loader.dataset)
    model.eval()
    model.amr_mode = False
    if tokens is None:
        ids = []
        tokens = []
        with tqdm(total=total) as bar:
            for x, y, extra in loader:
                ids.extend(extra['ids'])
                # Swap source/target so the graph becomes the encoder input.
                x, y = reverse_direction(x, y)
                x['input_ids'] = x['input_ids'][:, :1024]
                x['attention_mask'] = x['attention_mask'][:, :1024]
                with torch.no_grad():
                    out = model.generate(
                        **x,
                        max_length=350,
                        decoder_start_token_id=0,
                        num_beams=beam_size,
                        num_return_sequences=beam_size)
                # Group the beam_size consecutive hypotheses of each source.
                for i1 in range(0, len(out), beam_size):
                    tokens_same_source = []
                    tokens.append(tokens_same_source)
                    for i2 in range(i1, i1+beam_size):
                        tokk = out[i2]
                        # Drop special ids (<s>, <pad>, </s> occupy ids 0..2).
                        tokk = [t for t in tokk.tolist() if t > 2]
                        tokens_same_source.append(tokk)
                bar.update(out.size(0) // beam_size)
        # reorder back to dataset order (loader iterates sorted by length)
        tokens = [tokens[i] for i in ids]
    sentences = []
    for tokens_same_source in tokens:
        if return_all:
            sentences.append([tokenizer.decode(tokk).strip() for tokk in tokens_same_source])
        else:
            sentences.append(tokenizer.decode(tokens_same_source[0]).strip())
    # Restore the loader's original iteration settings.
    loader.shuffle = shuffle_orig
    loader.sort = sort_orig
    return sentences
def write_predictions(predictions_path, tokenizer, graphs):
    """Serialize `graphs` in PENMAN notation to `predictions_path`.

    Graphs are separated by blank lines; the tokenizer's BPE marker
    (`tokenizer.INIT`) is stripped from the output text.
    """
    encoded = [penman.encode(g) for g in graphs]
    text = '\n\n'.join(encoded).replace(tokenizer.INIT, '')
    Path(predictions_path).write_text(text)
    return predictions_path
def compute_smatch(test_path, predictions_path):
    """Return the Smatch F1 between the predicted and gold AMR files."""
    with Path(predictions_path).open() as p, Path(test_path).open() as g:
        # score_amr_pairs yields (precision, recall, f1) for the corpus.
        precision, recall, f1 = next(smatch.score_amr_pairs(p, g))
    return f1
def compute_bleu(gold_sentences, pred_sentences):
    """Corpus-level BLEU of the predictions against a single reference set."""
    references = [gold_sentences]
    return corpus_bleu(pred_sentences, references)
| 4,920 | 32.937931 | 126 | py |
spring | spring-main/spring_amr/tokenization_bart.py | import copy
import sys
from pathlib import Path
import penman
import regex as re
import torch
from transformers import BartTokenizer
from spring_amr import ROOT, postprocessing
from spring_amr.linearization import AMRTokens, AMRLinearizer
from spring_amr.penman import encode
class AMRBartTokenizer(BartTokenizer):
    """BART tokenizer extended with an AMR vocabulary and graph (de)linearization.

    Adds AMR-specific tokens (relations, frames, pointers, recategorization
    symbols) on top of the pretrained BPE vocabulary and converts between
    penman graphs and token-id sequences via an `AMRLinearizer`.
    """

    # Byte-level BPE word-initial marker used by the BART/GPT-2 vocabulary.
    INIT = 'Ġ'

    # Special linearization tokens appended to the vocabulary.
    ADDITIONAL = [
        AMRTokens.PNTR_N,
        AMRTokens.STOP_N,
        AMRTokens.LIT_START,
        AMRTokens.LIT_END,
        AMRTokens.BACKR_SRC_N,
        AMRTokens.BACKR_TRG_N,]

    def __init__(self, *args, use_pointer_tokens=False, collapse_name_ops=False, **kwargs):
        super().__init__(*args, **kwargs)
        # Pre-tokenization pattern: like the GPT-2 pattern, but it also keeps
        # special tokens (<xyz:1>) and AMR :role edges as single pieces.
        self.patterns = re.compile(
            r""" ?<[a-z]+:?\d*>| ?:[^\s]+|'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
        self.linearizer = AMRLinearizer(use_pointer_tokens=use_pointer_tokens, collapse_name_ops=collapse_name_ops)
        self.use_pointer_tokens = use_pointer_tokens
        self.collapse_name_ops = collapse_name_ops
        # Recategorization symbols (e.g. entity placeholders) loaded by init_amr_vocabulary.
        self.recategorizations = set()
        # Number of tokens added on top of the pretrained vocabulary.
        self.modified = 0

    @classmethod
    def from_pretrained(cls, pretrained_model_path, pred_min=5, *args, **kwargs):
        """Load the pretrained tokenizer and immediately add the AMR vocabulary."""
        inst = super().from_pretrained(pretrained_model_path, *args, **kwargs)
        inst.init_amr_vocabulary(pred_min=pred_min)
        return inst

    def init_amr_vocabulary(self, pred_min=5):
        """Extend the vocabulary with AMR predicates, additions and pointers.

        Predicates occurring fewer than `pred_min` times are skipped.
        Existing special tokens are re-keyed with the INIT prefix so they
        survive the custom pre-tokenization.
        """
        # Re-key the special tokens under their INIT-prefixed form, keeping ids.
        for tok in [self.bos_token, self.eos_token, self.pad_token, '<mask>', '<unk>']:
            ntok = self.INIT + tok
            i = self.encoder[tok]
            self.decoder[i] = ntok
            del self.encoder[tok]
            self.encoder[ntok] = i
        tokens = []
        # Frequent AMR predicates (frames) from the precomputed vocab files.
        for line in Path(ROOT/'data/vocab/predicates.txt').read_text().strip().splitlines():
            tok, count = line.split()
            if int(count) >= pred_min:
                tokens.append(tok)
        for tok in Path(ROOT/'data/vocab/additions.txt').read_text().strip().splitlines():
            tokens.append(tok)
        # Recategorization symbols; those not starting with '_' are also
        # remembered so _tokenize can split "<name>_<id>" pairs.
        for tok in Path(ROOT/'data/vocab/recategorizations.txt').read_text().strip().splitlines():
            if not tok.startswith('_'):
                self.recategorizations.add(tok)
            tokens.append(tok)
        if self.use_pointer_tokens:
            for cnt in range(512):
                tokens.append(f"<pointer:{cnt}>")
        tokens += self.ADDITIONAL
        # Prefix with INIT unless the token is a suffix piece ('_...', '-...').
        tokens = [self.INIT + t if t[0] not in ('_', '-') else t for t in tokens]
        tokens = [t for t in tokens if t not in self.encoder]
        self.old_enc_size = old_enc_size = len(self.encoder)
        for i, t in enumerate(tokens, start= old_enc_size):
            self.encoder[t] = i
        # Re-densify ids after the insertions and rebuild the reverse map.
        self.encoder = {k: i for i, (k,v) in enumerate(sorted(self.encoder.items(), key=lambda x: x[1]))}
        self.decoder = {v: k for k, v in sorted(self.encoder.items(), key=lambda x: x[1])}
        self.modified = len(tokens)
        self.bos_token = self.INIT + '<s>'
        self.pad_token = self.INIT + '<pad>'
        self.eos_token = self.INIT + '</s>'
        self.unk_token = self.INIT + '<unk>'

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one (or a pair of) sequence(s) with BOS/EOS special ids."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def _tokenize(self, text):
        """ Tokenize a string. Modified in order to handle sentences with recategorization pointers"""
        bpe_tokens = []
        for tok_span in text.lstrip().split(' '):
            tok_span = tok_span.strip()
            recats = tok_span.rsplit('_', 1)
            # "<recat>_<suffix>" is kept as two known tokens instead of raw BPE.
            if len(recats) == 2 and recats[0] in self.recategorizations and ('_' + recats[1]) in self.encoder:
                bpe_tokens.extend([self.INIT + recats[0], '_' + recats[1]])
            else:
                for token in re.findall(self.pat, ' ' + tok_span):
                    token = "".join(
                        self.byte_encoder[b] for b in token.encode("utf-8")
                    )  # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case)
                    bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _tok_bpe(self, token, add_space=True):
        """BPE-encode a single linearization token (recat-aware).

        NOTE(review): `add_space` is currently unused — the space is always
        prepended below; confirm whether the parameter should be honored.
        """
        # if add_space:
        #     token = ' ' + token.lstrip()
        tokk = []
        tok = token.strip()
        recats = tok.rsplit('_', 1)
        if len(recats) == 2 and recats[0] in self.recategorizations and ('_' + recats[1]) in self.encoder:
            tokk.extend([self.INIT + recats[0], '_' + recats[1]])
        else:
            for tok in self.patterns.findall(' ' + token):
                tok = "".join(
                    self.byte_encoder[b] for b in tok.encode("utf-8"))
                toks = self.bpe(tok).split(' ')
                tokk.extend(toks)
        return tokk

    def _get_nodes_and_backreferences(self, graph):
        """Linearize a penman graph into (node tokens, backreference indices)."""
        lin = self.linearizer.linearize(graph)
        linearized_nodes, backreferences = lin.nodes, lin.backreferences
        return linearized_nodes, backreferences

    def tokenize_amr(self, graph):
        """Convert a graph into BPE tokens, token ids and BPE-level backreferences."""
        linearized_nodes, backreferences = self._get_nodes_and_backreferences(graph)
        bpe_tokens = []
        bpe_backreferences = []
        counter = 0
        for i, (backr, tokk) in enumerate(zip(backreferences, linearized_nodes)):
            is_in_enc = self.INIT + tokk in self.encoder
            is_rel = tokk.startswith(':') and len(tokk) > 1
            is_spc = tokk.startswith('<') and tokk.endswith('>')
            is_of = tokk.startswith(':') and tokk.endswith('-of')
            is_frame = re.match(r'.+-\d\d', tokk) is not None
            if tokk.startswith('"') and tokk.endswith('"'):
                # String literal: wrap in LIT_START/LIT_END and BPE the content.
                tokk = tokk[1:-1].replace('_', ' ')
                bpe_toks = [self.INIT + AMRTokens.LIT_START]
                bpe_toks += self._tok_bpe(tokk, add_space=True)
                bpe_toks.append(self.INIT + AMRTokens.LIT_END)
            elif (is_rel or is_spc or is_frame or is_of):
                if is_in_enc:
                    bpe_toks = [self.INIT + tokk]
                elif is_frame:
                    # Unknown frame: BPE the lemma, keep the "-NN" sense suffix whole.
                    bpe_toks = self._tok_bpe(tokk[:-3], add_space=True) + [tokk[-3:]]
                elif is_of:
                    # Unknown inverse relation: reuse the base relation + '-of'.
                    rel = tokk[:-3]
                    if self.INIT + rel in self.encoder:
                        bpe_toks = [self.INIT + rel, '-of']
                    else:
                        bpe_toks = [self.INIT + ':'] + self._tok_bpe(rel[1:], add_space=True) + ['-of']
                elif is_rel:
                    bpe_toks = [self.INIT + ':'] + self._tok_bpe(tokk[1:], add_space=True)
                else:
                    raise
            else:
                if is_in_enc:
                    bpe_toks = [self.INIT + tokk]
                else:
                    bpe_toks = self._tok_bpe(tokk, add_space=True)
            bpe_tokens.append(bpe_toks)
            if i == backr:
                # Not a backreference: this node owns its BPE positions.
                bpe_backr = list(range(counter, counter + len(bpe_toks)))
                counter += len(bpe_toks)
                bpe_backreferences.append(bpe_backr)
            else:
                # Backreference: point at the first BPE position of the target.
                bpe_backreferences.append(bpe_backreferences[backr][0:1])
                counter += 1
        bpe_tokens = [b for bb in bpe_tokens for b in bb]
        bpe_token_ids = [self.encoder.get(b, self.unk_token_id) for b in bpe_tokens]
        bpe_backreferences = [b for bb in bpe_backreferences for b in bb]
        return bpe_tokens, bpe_token_ids, bpe_backreferences

    def batch_encode_sentences(self, sentences, device=torch.device('cpu')):
        """Encode a batch of sentences into padded tensors on `device`."""
        sentences = [s for s in sentences]
        extra = {'sentences': sentences}
        # NOTE(review): `pad_to_max_length` is deprecated in newer transformers
        # versions (`padding=True`) — confirm the pinned library version.
        batch = super().batch_encode_plus(sentences, return_tensors='pt', pad_to_max_length=True)
        batch = {k: v.to(device) for k, v in batch.items()}
        return batch, extra

    def linearize(self, graph):
        """Return (unified token ids, extra dict) for a graph.

        Backreferenced positions are encoded as `backreference + len(encoder)`
        so they live in a disjoint id range.
        """
        shift = len(self.encoder)
        tokens, token_ids, backreferences = self.tokenize_amr(graph)
        extra = {'linearized_graphs': tokens, 'graphs': graph}
        token_uni_ids = \
            [idx if i == b else b + shift for i, (idx, b) in enumerate(zip(token_ids, backreferences))]
        # NOTE(review): token_uni_ids[-1] is an int while the RHS is a string,
        # so this comparison is always True and EOS is always appended —
        # possibly `tokens[-1]` was intended; confirm before changing.
        if token_uni_ids[-1] != (self.INIT + AMRTokens.EOS_N):
            tokens.append(self.INIT + AMRTokens.EOS_N)
            token_ids.append(self.eos_token_id)
            token_uni_ids.append(self.eos_token_id)
            backreferences.append(len(backreferences))
        return token_uni_ids, extra

    def batch_encode_graphs(self, graphs, device=torch.device('cpu')):
        """Linearize and batch-encode a list of graphs."""
        linearized, extras = zip(*[self.linearize(g) for g in graphs])
        return self.batch_encode_graphs_from_linearized(linearized, extras, device=device)

    def batch_encode_graphs_from_linearized(self, linearized, extras=None, device=torch.device('cpu')):
        """Pad pre-linearized id sequences and build decoder inputs/labels.

        Labels are the inputs shifted left by one position.
        """
        if extras is not None:
            batch_extra = {'linearized_graphs': [], 'graphs': []}
            for extra in extras:
                batch_extra['graphs'].append(extra['graphs'])
                batch_extra['linearized_graphs'].append(extra['linearized_graphs'])
        else:
            batch_extra = {}
        maxlen = 0
        batch = []
        for token_uni_ids in linearized:
            maxlen = max(len(token_uni_ids), maxlen)
            batch.append(token_uni_ids)
        batch = [x + [self.pad_token_id] * (maxlen - len(x)) for x in batch]
        batch = torch.tensor(batch).to(device)
        batch = {'decoder_input_ids': batch[:, :-1], 'lm_labels': batch[:, 1:]}
        return batch, batch_extra

    def decode_amr(self, tokens, restore_name_ops=False):
        """Decode generated token ids into a penman graph.

        Returns (graph, ParsedStatus, (nodes, backreferences)); falls back to
        the postprocessing BACKOFF graph whenever a stage fails.
        """
        try:
            nodes, backreferences = postprocessing.decode_into_node_and_backreferences(tokens, self)
        except Exception as e:
            print('Decoding failure:', file=sys.stderr)
            print(e, file=sys.stderr)
            return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (None, None)
        if self.use_pointer_tokens:
            nodes, backreferences = postprocessing.restore_backreferences_from_pointers(nodes)
        try:
            graph_ = graph = postprocessing.build_graph(nodes, backreferences, restore_name_ops=restore_name_ops)
        except Exception as e:
            print('Building failure:', file=sys.stderr)
            print(nodes, file=sys.stderr)
            print(backreferences, file=sys.stderr)
            print(e, file=sys.stderr)
            return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (None, None)
        try:
            graph, status = postprocessing.connect_graph_if_not_connected(graph)
            if status == postprocessing.ParsedStatus.BACKOFF:
                print('Reconnection 1 failure:')
                print(nodes, file=sys.stderr)
                print(backreferences, file=sys.stderr)
                print(graph_, file=sys.stderr)
            return graph, status, (nodes, backreferences)
        except Exception as e:
            # NOTE(review): "Reconnction" typo is preserved (runtime string).
            print('Reconnction 2 failure:', file=sys.stderr)
            print(e, file=sys.stderr)
            print(nodes, file=sys.stderr)
            print(backreferences, file=sys.stderr)
            print(graph_, file=sys.stderr)
            return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (nodes, backreferences)
class PENMANBartTokenizer(AMRBartTokenizer):
    """AMR tokenizer that linearizes graphs directly in PENMAN notation.

    Instead of the custom DFS linearization, graphs are encoded with
    `penman.encode` and split into parenthesis/edge/variable tokens; decoding
    repairs the (possibly malformed) generated sequence back into a graph.
    """

    def __init__(self, *args, raw_graph=False, **kwargs):
        super().__init__(*args, **kwargs)
        # The AMRLinearizer is not used in PENMAN mode.
        self.linearizer = None
        self.remove_pars = False
        # raw_graph=True: feed the raw PENMAN string through plain BPE.
        self.raw_graph = raw_graph

    def _tokenize_encoded_graph(self, encoded):
        """Split a PENMAN string into tokens, keeping quoted literals intact."""
        # Isolate quoted literals so they survive the bracket splitting below.
        linearized = re.sub(r"(\".+?\")", r' \1 ', encoded)
        pieces = []
        for piece in linearized.split():
            if piece.startswith('"') and piece.endswith('"'):
                pieces.append(piece)
            else:
                piece = piece.replace('(', ' ( ')
                piece = piece.replace(')', ' ) ')
                piece = piece.replace(':', ' :')
                piece = piece.replace('/', ' / ')
                piece = piece.strip()
                pieces.append(piece)
        linearized = re.sub(r'\s+', ' ', ' '.join(pieces)).strip()
        linearized_nodes = [AMRTokens.BOS_N] + linearized.split(' ')
        return linearized_nodes

    def tokenize_amr(self, graph):
        """Tokenize a graph; in raw mode just BPE the PENMAN string (truncated)."""
        if self.raw_graph:
            graph_ = copy.deepcopy(graph)
            graph_.metadata = {}
            linearized = penman.encode(graph_)
            linearized = re.sub(r"\s+", ' ', linearized)
            # 1022 content tokens + BOS keeps us within the 1024 positions.
            bpe_tokens = [self.bos_token] + self._tokenize(linearized)[:1022]
            bpe_token_ids = [self.encoder.get(b, self.unk_token_id) for b in bpe_tokens]
            bpe_backreferences = list(range(len(bpe_token_ids)))
            return bpe_tokens, bpe_token_ids, bpe_backreferences
        else:
            return super().tokenize_amr(graph)

    def _get_nodes_and_backreferences(self, graph):
        """PENMAN linearization; optionally replace variables with pointer tokens."""
        graph_ = copy.deepcopy(graph)
        graph_.metadata = {}
        linearized = penman.encode(graph_)
        linearized_nodes = self._tokenize_encoded_graph(linearized)
        if self.use_pointer_tokens:
            # A token followed by '/' is a variable definition; number them
            # in order of first appearance.
            remap = {}
            for i in range(1, len(linearized_nodes)):
                nxt = linearized_nodes[i]
                lst = linearized_nodes[i-1]
                if nxt == '/':
                    remap[lst] = f'<pointer:{len(remap)}>'
            i = 1
            linearized_nodes_ = [linearized_nodes[0]]
            while i < (len(linearized_nodes)):
                nxt = linearized_nodes[i]
                lst = linearized_nodes_[-1]
                if nxt in remap:
                    if lst == '(' and linearized_nodes[i+1] == '/':
                        # Definition site: replace var and drop the '/'.
                        nxt = remap[nxt]
                        i += 1
                    elif lst.startswith(':'):
                        # Re-entrancy after an edge: replace var with pointer.
                        nxt = remap[nxt]
                linearized_nodes_.append(nxt)
                i += 1
            linearized_nodes = linearized_nodes_
        if self.remove_pars:
            linearized_nodes = [n for n in linearized_nodes if n != '(']
        backreferences = list(range(len(linearized_nodes)))
        return linearized_nodes, backreferences

    def _classify(self, node):
        """Classify a linearization token for the grammar repair in _fix_and_make_graph."""
        if not isinstance(node, str):
            return "CONST"
        elif node == 'i':
            # 'i' is ambiguous (pronoun concept vs. variable); handled specially.
            return "I"
        elif re.match(r'^[a-z]\d*$', node) is not None:
            return "VAR"
        elif node[0].isdigit():
            return "CONST"
        elif node.startswith('"') and node.endswith('"'):
            return "CONST"
        elif node in ('+', '-'):
            return "CONST"
        elif node == ':mode':
            return 'MODE'
        elif node.startswith(':'):
            return "EDGE"
        elif node in ['/', '(', ')']:
            return node
        elif node[0].isalpha():
            for char in (',', ':', '/', '(', ')', '.', '!', '?', '\\'):
                if char in node:
                    return "CONST"
            return "INST"
        else:
            return 'CONST'

    def _fix_and_make_graph(self, nodes):
        """Repair a generated PENMAN token sequence and parse it into a graph.

        Runs a series of passes: drop special tokens, expand pointers back to
        variables, merge split edges, deduplicate '/', rename duplicate
        variables, rebalance parentheses, and filter token transitions that
        would violate the PENMAN grammar. Raises if penman still cannot parse
        the result (callers catch and back off).
        """
        # Pass 1: drop special '<...>' tokens except pointers.
        nodes_ = []
        for n in nodes:
            if isinstance(n, str):
                if n.startswith('<') and n.endswith('>') and (not n.startswith('<pointer:')):
                    pass
                else:
                    nodes_.append(n)
            else:
                nodes_.append(n)
        nodes = nodes_
        if self.use_pointer_tokens:
            # Pass 2a: split text glued to a pointer token ("<pointer:3>foo").
            i = 0
            nodes_ = []
            while i < len(nodes):
                nxt = nodes[i]
                pst = None
                if isinstance(nxt, str) and nxt.startswith('<pointer:'):
                    e = nxt.find('>')
                    if e != len(nxt) -1:
                        pst = nxt[e+1:]
                        nxt = nxt[:e+1]
                    nodes_.append(nxt)
                    if pst is not None:
                        nodes_.append(pst)
                else:
                    nodes_.append(nxt)
                i += 1
            nodes = nodes_
            # Pass 2b: turn pointers back into variables; definition sites
            # also get '(' and '/' restored around them.
            i = 1
            nodes_ = [nodes[0]]
            while i < len(nodes):
                nxt = nodes[i]
                if isinstance(nxt, str) and nxt.startswith('<pointer:'):
                    nxt = 'z' + nxt[9:-1]
                    fol = nodes[i+1]
                    # is not expansion
                    if isinstance(fol, str) and (fol.startswith(':') or (fol == ')')):
                        nodes_.append(nxt)
                    else:
                        if self.remove_pars:
                            nodes_.append('(')
                        else:
                            if nodes_[-1] != '(':
                                nodes_.append('(')
                                #pass
                        nodes_.append(nxt)
                        nodes_.append('/')
                else:
                    nodes_.append(nxt)
                i += 1
            nodes = nodes_
        # Pass 3: merge a bare ':' with the following token into one edge.
        # NOTE(review): `last` is unbound when the loop body never runs
        # (len(nodes) <= 1) — confirm inputs always have >= 2 tokens.
        i = 0
        nodes_ = []
        while i < (len(nodes) - 1):
            if nodes[i] == ':':
                nodes_.append(nodes[i] + nodes[i+1])
                i += 2
                last = False
            else:
                nodes_.append(nodes[i])
                i += 1
                last = True
        if last:
            nodes_.append(nodes[-1])
        nodes = nodes_
        # Pass 4: collapse repeated "/ x /" artifacts.
        i = 0
        nodes_ = []
        while i < (len(nodes)):
            if i < 2:
                nodes_.append(nodes[i])
                i += 1
            elif nodes_[-2] == '/' and nodes[i] == '/':
                i += 2
            else:
                nodes_.append(nodes[i])
                i += 1
        nodes = nodes_
        # Pass 5: rename variables that are (re)defined twice; later uses of
        # the old name (not followed by '/') are remapped too.
        i = 0
        newvars = 0
        variables = set()
        remap = {}
        nodes_ = []
        while i < (len(nodes)):
            next = nodes[i]
            if next == '/':
                last = nodes_[-1]
                if last in variables:
                    last_remap = f"z{newvars+1000}"
                    newvars += 1
                    nodes_[-1] = last_remap
                    remap[last] = last_remap
                variables.add(last)
                nodes_.append(next)
            elif self._classify(next) == 'VAR' and next in remap and (i < len(nodes) - 1) and nodes[i+1] != '/':
                next = remap[next]
                nodes_.append(next)
            else:
                nodes_.append(next)
            i += 1
        nodes = nodes_
        # Pass 6: truncate at the first balanced point and pad missing ')'.
        pieces_ = []
        open_cnt = 0
        closed_cnt = 0
        if nodes[0] != '(':
            pieces_.append('(')
            open_cnt += 1
        for p in nodes:
            if p == '(':
                open_cnt += 1
            elif p == ')':
                closed_cnt += 1
            pieces_.append(p)
            if open_cnt == closed_cnt:
                break
        nodes = pieces_ + [')'] * (open_cnt - closed_cnt)
        # Pass 7: grammar filter — keep a token only if the transition from
        # the previous token's class is legal in PENMAN; quote bare constants
        # containing reserved characters.
        pieces = []
        for piece in nodes:
            if not pieces:
                pieces.append('(')
            else:
                piece = str(piece)
                # NOTE(review): the first two startswith('"') conditions are
                # identical — one may have been intended as endswith('"').
                if piece.startswith('"') or piece.startswith('"') or '"' in piece.strip('"'):
                    piece = '"' + piece.replace('"', '') + '"'
                prev = self._classify(pieces[-1])
                next = self._classify(piece)
                if next == 'CONST':
                    quote = False
                    for char in (',', ':', '/', '(', ')', '.', '!', '?', '\\', '_', '='):
                        if char in piece:
                            quote = True
                            break
                    if quote:
                        piece = '"' + piece.strip('"') + '"'
                if prev == '(':
                    if next in ('VAR', 'I'):
                        pieces.append(piece)
                elif prev == ')':
                    if next in (')', 'EDGE', 'MODE'):
                        pieces.append(piece)
                elif prev == 'VAR':
                    if next in ('/', 'EDGE', 'MODE', ')'):
                        pieces.append(piece)
                elif prev == '/':
                    if next in ('INST', 'I'):
                        pieces.append(piece)
                elif prev == 'INST':
                    if next in (')', 'EDGE', 'MODE'):
                        pieces.append(piece)
                elif prev == 'I':
                    if next in ('/', ')', 'EDGE', 'MODE'):
                        pieces.append(piece)
                elif prev == 'EDGE':
                    if next in ('(', 'VAR', 'CONST', 'I'):
                        pieces.append(piece)
                    elif next == ')':
                        pieces[-1] = piece
                    elif next in ('EDGE', 'MODE'):
                        pieces[-1] = piece
                elif prev == 'MODE':
                    if next == 'INST':
                        pieces.append(piece)
                elif prev == 'CONST':
                    if next in (')', 'EDGE', 'MODE'):
                        pieces.append(piece)
        # Pass 8: rebalance parentheses again after the filtering.
        pieces_ = []
        open_cnt = 0
        closed_cnt = 0
        if pieces[0] != '(':
            pieces_.append('(')
            open_cnt += 1
        for p in pieces:
            if p == '(':
                open_cnt += 1
            elif p == ')':
                closed_cnt += 1
            pieces_.append(p)
            if open_cnt == closed_cnt:
                break
        pieces = pieces_ + [')'] * (open_cnt - closed_cnt)
        linearized = re.sub(r'\s+', ' ', ' '.join(pieces)).strip()
        """
        line = linearized
        # make sure parentheses match
        # copied from https://github.com/RikVN/AMR/blob/master/restoreAMR/restore_amr.py
        open_count = 0
        close_count = 0
        for i, c in enumerate(line):
            if c == '(':
                open_count += 1
            elif c == ')':
                close_count += 1
            if open_count == close_count and open_count > 0:
                line = line[:i].strip()
                break
        old_line = line
        while True:
            open_count = len(re.findall(r'\(', line))
            close_count = len(re.findall(r'\)', line))
            if open_count > close_count:
                line += ')' * (open_count - close_count)
            elif close_count > open_count:
                for i in range(close_count - open_count):
                    line = line.rstrip(')')
                    line = line.rstrip(' ')
            if old_line == line:
                break
            old_line = line
        """
        graph = penman.decode(linearized + ' ')
        # Replace missing concepts/targets with fresh 'thing' nodes.
        triples = []
        newvars = 2000
        for triple in graph.triples:
            x, rel, y = triple
            if x is None:
                pass
            elif rel == ':instance' and y is None:
                triples.append(penman.Triple(x, rel, 'thing'))
            elif y is None:
                var = f'z{newvars}'
                newvars += 1
                triples.append(penman.Triple(x, rel, var))
                triples.append(penman.Triple(var, ':instance', 'thing'))
            else:
                triples.append(triple)
        graph = penman.Graph(triples)
        linearized = encode(graph)

        def fix_text(linearized=linearized):
            """Regex cleanup of the re-encoded string before the final parse."""
            n = 0

            def _repl1(match):
                nonlocal n
                out = match.group(1) + match.group(2) + str(3000 + n) + ' / ' + match.group(2) + match.group(3)
                n += 1
                return out
            # Give a variable + instance to bare concepts right after '('.
            linearized = re.sub(r'(\(\s?)([a-z])([^\/:\)]+[:\)])', _repl1, linearized,
                                flags=re.IGNORECASE | re.MULTILINE)

            def _repl2(match):
                return match.group(1)
            # Drop duplicated "/ concept" runs after a proper instance.
            linearized = re.sub(r'(\(\s*[a-z][\d+]\s*\/\s*[^\s\)\(:\/]+\s*)((?:/\s*[^\s\)\(:\/]+\s*)+)', _repl2,
                                linearized,
                                flags=re.IGNORECASE | re.MULTILINE)

            # adds a ':' to args w/o it
            linearized = re.sub(r'([^:])(ARG)', r'\1 :\2', linearized)

            # removes edges with no node
            # linearized = re.sub(r':[^\s\)\(:\/]+?\s*\)', ')', linearized, flags=re.MULTILINE)

            return linearized

        linearized = fix_text(linearized)
        g = penman.decode(linearized)
        return g

    def decode_amr(self, tokens, restore_name_ops=None):
        """Decode generated ids into a graph via the PENMAN repair pipeline.

        Returns (graph, ParsedStatus, (nodes, backreferences)); falls back to
        the postprocessing BACKOFF graph whenever a stage fails.
        """
        try:
            if self.raw_graph:
                nodes = self._tokenize_encoded_graph(self.decode(tokens))
                backreferences = list(range(len(nodes)))
            else:
                nodes, backreferences = postprocessing.decode_into_node_and_backreferences(tokens, self)
            nodes_ = nodes
        except Exception as e:
            print('Decoding failure:', file=sys.stderr)
            print(e, file=sys.stderr)
            return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (None, None)
        try:
            graph_ = graph = self._fix_and_make_graph(nodes)
            if self.collapse_name_ops:
                # unsplit name ops that were collapsed during preprocessing
                graph_ = graph = postprocessing._split_name_ops(graph)
        except Exception as e:
            print('Building failure:', file=sys.stderr)
            print(nodes, file=sys.stderr)
            print(backreferences, file=sys.stderr)
            print(e, file=sys.stderr)
            return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (None, None)
        try:
            graph, status = postprocessing.connect_graph_if_not_connected(graph)
            if status == postprocessing.ParsedStatus.BACKOFF:
                print('Reconnection 1 failure:')
                print(nodes, file=sys.stderr)
                print(backreferences, file=sys.stderr)
                print(graph_, file=sys.stderr)
            return graph, status, (nodes_, backreferences)
        except Exception as e:
            # NOTE(review): "Reconnction" typo is preserved (runtime string).
            print('Reconnction 2 failure:', file=sys.stderr)
            print(e, file=sys.stderr)
            print(nodes, file=sys.stderr)
            print(backreferences, file=sys.stderr)
            print(graph_, file=sys.stderr)
            return postprocessing.BACKOFF, postprocessing.ParsedStatus.BACKOFF, (nodes_, backreferences)
| 26,484 | 38.412202 | 120 | py |
spring | spring-main/bin/predict_sentences.py | from pathlib import Path
import penman
import torch
from spring_amr import ROOT
from spring_amr.evaluation import predict_amrs, compute_smatch, predict_sentences, compute_bleu
from spring_amr.penman import encode
from spring_amr.utils import instantiate_loader, instantiate_model_and_tokenizer
if __name__ == '__main__':
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

    # NOTE(review): the description mentions predicting AMR graphs, but this
    # script runs the reverse (graph-to-text) model — confirm intended wording.
    parser = ArgumentParser(
        description="Script to predict AMR graphs given sentences. LDC format as input.",
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('--datasets', type=str, required=True, nargs='+',
        help="Required. One or more glob patterns to use to load amr files.")
    parser.add_argument('--checkpoint', type=str, required=True,
        help="Required. Checkpoint to restore.")
    parser.add_argument('--model', type=str, default='facebook/bart-large',
        help="Model config to use to load the model class.")
    parser.add_argument('--beam-size', type=int, default=1,
        help="Beam size.")
    parser.add_argument('--batch-size', type=int, default=1000,
        help="Batch size (as number of linearized graph tokens per batch).")
    parser.add_argument('--device', type=str, default='cuda',
        help="Device. 'cpu', 'cuda', 'cuda:<n>'.")
    parser.add_argument('--pred-path', type=Path, default=ROOT / 'data/tmp/inf-pred-sentences.txt',
        help="Where to write predictions.")
    parser.add_argument('--gold-path', type=Path, default=ROOT / 'data/tmp/inf-gold-sentences.txt',
        help="Where to write the gold file.")
    parser.add_argument('--add-to-graph-file', action='store_true')
    parser.add_argument('--use-reverse-decoder', action='store_true')
    parser.add_argument('--deinvert', action='store_true')
    parser.add_argument('--penman-linearization', action='store_true',
        help="Predict using PENMAN linearization instead of ours.")
    parser.add_argument('--collapse-name-ops', action='store_true')
    parser.add_argument('--use-pointer-tokens', action='store_true')
    parser.add_argument('--raw-graph', action='store_true')
    parser.add_argument('--return-all', action='store_true')

    args = parser.parse_args()

    device = torch.device(args.device)
    # Build the model/tokenizer with dropout disabled for inference.
    model, tokenizer = instantiate_model_and_tokenizer(
        args.model,
        dropout=0.,
        attention_dropout=0.,
        penman_linearization=args.penman_linearization,
        use_pointer_tokens=args.use_pointer_tokens,
        collapse_name_ops=args.collapse_name_ops,
        init_reverse=args.use_reverse_decoder,
        raw_graph=args.raw_graph,
    )
    model.load_state_dict(torch.load(args.checkpoint, map_location='cpu')['model'])
    model.to(device)
    # The reverse decoder produces text, not AMR.
    model.rev.amr_mode = False
    loader = instantiate_loader(
        args.datasets,
        tokenizer,
        batch_size=args.batch_size,
        evaluation=True, out='/tmp/a.txt',
        dereify=args.deinvert)
    loader.device = device

    pred_sentences = predict_sentences(loader, model.rev, tokenizer, beam_size=args.beam_size, return_all=args.return_all)
    if args.add_to_graph_file:
        # Attach predictions to the graphs' metadata and rewrite the AMR file.
        graphs = loader.dataset.graphs
        for ss, g in zip(pred_sentences, graphs):
            if args.return_all:
                g.metadata['snt-pred'] = '\t\t'.join(ss)
            else:
                g.metadata['snt-pred'] = ss
        args.pred_path.write_text('\n\n'.join([encode(g) for g in graphs]))
    else:
        # Write plain-text gold/prediction files; BLEU only for single outputs.
        if args.return_all:
            pred_sentences = [s for ss in pred_sentences for s in ss]
        args.gold_path.write_text('\n'.join(loader.dataset.sentences))
        args.pred_path.write_text('\n'.join(pred_sentences))
        if not args.return_all:
            score = compute_bleu(loader.dataset.sentences, pred_sentences)
            print(f'BLEU: {score.score:.2f}')
| 3,988 | 45.383721 | 122 | py |
spring | spring-main/bin/patch_legacy_checkpoint.py | if __name__ == '__main__':
from argparse import ArgumentParser
import torch
parser = ArgumentParser()
parser.add_argument('legacy_checkpoint')
parser.add_argument('patched_checkpoint')
parser.parse_args()
args = parser.parse_args()
to_remove = []
fixed = False
w = torch.load(args.legacy_checkpoint, map_location='cpu')
for name in w['model']:
if 'backreferences' in name:
fixed = True
to_remove.append(name)
print('Deleting parameters:', name)
if not fixed:
print('The checkpoint was fine as it was!')
else:
for name in to_remove:
del w['model'][name]
torch.save(w, args.patched_checkpoint)
| 730 | 24.206897 | 62 | py |
spring | spring-main/bin/predict_amrs_from_plaintext.py | from pathlib import Path
import penman
import torch
from tqdm import tqdm
from spring_amr.penman import encode
from spring_amr.utils import instantiate_model_and_tokenizer
def read_file_in_batches(path, batch_size=1000, max_length=100):
    """Read sentences from `path` and return (batch_iterator, n_sentences).

    Blank lines are skipped and sentences longer than `max_length`
    whitespace tokens are dropped. Each sample is (index, sentence,
    n_tokens). Batches are yielded longest-first, capped so that
    max_len_in_batch * batch_len stays within `batch_size` tokens; an
    over-long sample becomes a singleton batch.
    """
    samples = []
    for raw_line in Path(path).read_text().strip().splitlines():
        sentence = raw_line.strip()
        if not sentence:
            continue
        n_tokens = len(sentence.split())
        if n_tokens > max_length:
            continue
        samples.append((len(samples), sentence, n_tokens))

    def _iter_batches(items):
        pending = []
        longest = 0
        for sample in sorted(items, key=lambda s: s[2], reverse=True):
            length = sample[2]
            if length > batch_size:
                # Flush whatever is pending, then emit the oversized sample alone.
                if pending:
                    yield pending
                    pending = []
                    longest = 0
                yield [sample]
                continue
            current_cost = longest * len(pending)
            next_cost = max(longest, length) * (len(pending) + 1)
            if 0 < current_cost <= batch_size and next_cost > batch_size:
                yield pending
                pending = []
                longest = 0
            longest = max(longest, length)
            pending.append(sample)
        if pending:
            yield pending

    return _iter_batches(samples), len(samples)
if __name__ == '__main__':
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    parser = ArgumentParser(
        description="Script to predict AMR graphs given sentences. LDC format as input.",
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('--texts', type=str, required=True, nargs='+',
                        help="Required. One or more files containing \\n-separated sentences.")
    parser.add_argument('--checkpoint', type=str, required=True,
                        help="Required. Checkpoint to restore.")
    parser.add_argument('--model', type=str, default='facebook/bart-large',
                        help="Model config to use to load the model class.")
    parser.add_argument('--beam-size', type=int, default=1,
                        help="Beam size.")
    parser.add_argument('--batch-size', type=int, default=1000,
                        help="Batch size (as number of linearized graph tokens per batch).")
    parser.add_argument('--penman-linearization', action='store_true',
                        help="Predict using PENMAN linearization instead of ours.")
    parser.add_argument('--use-pointer-tokens', action='store_true')
    parser.add_argument('--restore-name-ops', action='store_true')
    parser.add_argument('--device', type=str, default='cuda',
                        help="Device. 'cpu', 'cuda', 'cuda:<n>'.")
    parser.add_argument('--only-ok', action='store_true',
                        help="Only print graphs whose decoding status contains OK.")
    args = parser.parse_args()

    device = torch.device(args.device)
    # Dropout is zeroed: the model is used strictly for inference here.
    model, tokenizer = instantiate_model_and_tokenizer(
        args.model,
        dropout=0.,
        attention_dropout=0,
        penman_linearization=args.penman_linearization,
        use_pointer_tokens=args.use_pointer_tokens,
    )
    model.load_state_dict(torch.load(args.checkpoint, map_location='cpu')['model'])
    model.to(device)
    model.eval()

    for path in tqdm(args.texts, desc='Files:'):
        iterator, nsent = read_file_in_batches(path, args.batch_size)
        with tqdm(desc=path, total=nsent) as bar:
            for batch in iterator:
                if not batch:
                    continue
                ids, sentences, _ = zip(*batch)
                x, _ = tokenizer.batch_encode_sentences(sentences, device=device)
                with torch.no_grad():
                    model.amr_mode = True
                    out = model.generate(**x, max_length=512, decoder_start_token_id=0, num_beams=args.beam_size)

                # Decode each prediction into a PENMAN graph, attach
                # provenance metadata, and print it to stdout.
                bgraphs = []
                for idx, sent, tokk in zip(ids, sentences, out):
                    graph, status, (lin, backr) = tokenizer.decode_amr(tokk.tolist(), restore_name_ops=args.restore_name_ops)
                    if args.only_ok and ('OK' not in str(status)):
                        continue
                    graph.metadata['status'] = str(status)
                    graph.metadata['source'] = path
                    graph.metadata['nsent'] = str(idx)
                    graph.metadata['snt'] = sent
                    bgraphs.append((idx, graph))

                for i, g in bgraphs:
                    print(encode(g))
                    print()

                bar.update(len(sentences))
    # BUG FIX: removed the unreachable code that followed an exit(0) call and
    # referenced an undefined name `results`, plus a stale commented-out
    # reverse-generation block.
| 5,603 | 36.610738 | 125 | py |
spring | spring-main/bin/predict_amrs.py | from pathlib import Path
import penman
import torch
from spring_amr import ROOT
from spring_amr.evaluation import predict_amrs, compute_smatch
from spring_amr.penman import encode
from spring_amr.utils import instantiate_loader, instantiate_model_and_tokenizer
# Entry point: parse AMR graphs for one or more annotated datasets, write
# gold and predicted graphs to disk, and (optionally) report Smatch.
if __name__ == '__main__':
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    parser = ArgumentParser(
        description="Script to predict AMR graphs given sentences. LDC format as input.",
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('--datasets', type=str, required=True, nargs='+',
                        help="Required. One or more glob patterns to use to load amr files.")
    parser.add_argument('--checkpoint', type=str, required=True,
                        help="Required. Checkpoint to restore.")
    parser.add_argument('--model', type=str, default='facebook/bart-large',
                        help="Model config to use to load the model class.")
    parser.add_argument('--beam-size', type=int, default=1,
                        help="Beam size.")
    parser.add_argument('--batch-size', type=int, default=1000,
                        help="Batch size (as number of linearized graph tokens per batch).")
    parser.add_argument('--device', type=str, default='cuda',
                        help="Device. 'cpu', 'cuda', 'cuda:<n>'.")
    parser.add_argument('--pred-path', type=Path, default=ROOT / 'data/tmp/inf-pred.txt',
                        help="Where to write predictions.")
    parser.add_argument('--gold-path', type=Path, default=ROOT / 'data/tmp/inf-gold.txt',
                        help="Where to write the gold file.")
    parser.add_argument('--use-recategorization', action='store_true',
                        help="Predict using Zhang recategorization on top of our linearization (requires recategorized sentences in input).")
    parser.add_argument('--penman-linearization', action='store_true',
                        help="Predict using PENMAN linearization instead of ours.")
    parser.add_argument('--use-pointer-tokens', action='store_true')
    parser.add_argument('--raw-graph', action='store_true')
    parser.add_argument('--restore-name-ops', action='store_true')
    parser.add_argument('--return-all', action='store_true')
    args = parser.parse_args()
    device = torch.device(args.device)
    # Dropout is zeroed: the model is only used for inference in this script.
    model, tokenizer = instantiate_model_and_tokenizer(
        args.model,
        dropout=0.,
        attention_dropout=0.,
        penman_linearization=args.penman_linearization,
        use_pointer_tokens=args.use_pointer_tokens,
        raw_graph=args.raw_graph,
    )
    model.amr_mode = True
    model.load_state_dict(torch.load(args.checkpoint, map_location='cpu')['model'])
    model.to(device)
    gold_path = args.gold_path
    pred_path = args.pred_path
    # The loader also writes the gold graphs to `gold_path` (via out=...),
    # so gold and predictions stay aligned for Smatch below.
    loader = instantiate_loader(
        args.datasets,
        tokenizer,
        batch_size=args.batch_size,
        evaluation=True, out=gold_path,
        use_recategorization=args.use_recategorization,
    )
    loader.device = device
    graphs = predict_amrs(
        loader,
        model,
        tokenizer,
        beam_size=args.beam_size,
        restore_name_ops=args.restore_name_ops,
        return_all=args.return_all,
    )
    if args.return_all:
        # With --return-all, predict_amrs returns a list of candidate graphs
        # per sentence; flatten before serializing.
        graphs = [g for gg in graphs for g in gg]
    pieces = [encode(g) for g in graphs]
    pred_path.write_text('\n\n'.join(pieces))
    if not args.return_all:
        # Smatch is only meaningful for a single prediction per sentence.
        score = compute_smatch(gold_path, pred_path)
        print(f'Smatch: {score:.3f}')
| 3,415 | 38.264368 | 125 | py |
spring | spring-main/bin/inspect_.py | import torch
import penman
from spring_amr.utils import instantiate_model_and_tokenizer
# Interactive inspection loop: read one sentence at a time from stdin,
# parse it with the restored model, and print status, graph and
# linearization. Runs until interrupted.
if __name__ == '__main__':
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--checkpoint', type=str, required=True)
    parser.add_argument('--beam-size', type=int, default=1)
    parser.add_argument('--device', type=str, default='cpu')
    parser.add_argument('--penman-linearization', action='store_true',
                        help="Predict using PENMAN linearization instead of ours.")
    parser.add_argument('--use-pointer-tokens', action='store_true')
    parser.add_argument('--restore-name-ops', action='store_true')
    args = parser.parse_args()
    device = torch.device(args.device)
    # Checkpoint restoration happens inside the helper (checkpoint=...).
    model, tokenizer = instantiate_model_and_tokenizer(
        name='facebook/bart-large',
        checkpoint=args.checkpoint,
        dropout=0., attention_dropout=0.,
        penman_linearization=args.penman_linearization,
        use_pointer_tokens=args.use_pointer_tokens,
    )
    model.eval().to(device)
    while True:
        # Batch of one: the tokenizer API expects a list of sentences.
        sentence = [input('Sentence to parse:\n')]
        x, extra = tokenizer.batch_encode_sentences(sentence, device)
        with torch.no_grad():
            out = model.generate(**x, max_length=1024, decoder_start_token_id=0, num_beams=args.beam_size)
        out = out[0].tolist()
        graph, status, (lin, backr) = tokenizer.decode_amr(out, restore_name_ops=args.restore_name_ops)
        print('-' * 5)
        print('Status:', status)
        print('-' * 5)
        print('Graph:')
        print(penman.encode(graph))
        print('-' * 5)
        print('Linearization:')
        print(lin)
        print('\n')
| 1,673 | 37.045455 | 106 | py |
spring | spring-main/bin/train.py | from pathlib import Path
import torch
# torch.cuda.amp.autocast only exists on newer PyTorch versions; fall back
# to a no-op context manager so the rest of the script still imports.
# `autocast_available` records which case we are in (checked at startup).
try:
    from torch.cuda.amp import autocast
    autocast_available = True
except ImportError:
    class autocast:
        # Drop-in stand-in: accepts the same `enabled` flag but does nothing.
        def __init__(self, enabled=True): pass
        def __enter__(self): return self
        def __exit__(self, exc_type, exc_value, exc_traceback): pass
    autocast_available = False
from torch.cuda.amp.grad_scaler import GradScaler
import transformers
from spring_amr import ROOT
from spring_amr.dataset import reverse_direction
from spring_amr.optim import RAdam
from spring_amr.evaluation import write_predictions, compute_smatch, predict_amrs, predict_sentences, compute_bleu
from spring_amr.utils import instantiate_model_and_tokenizer, instantiate_loader
from ignite.engine import Engine, Events
from ignite.metrics import RunningAverage
from ignite.handlers import ModelCheckpoint, global_step_from_engine
def do_train(checkpoint=None, direction='amr', split_both_decoder=False, fp16=False):
assert direction in ('amr', 'text', 'both')
model, tokenizer = instantiate_model_and_tokenizer(
config['model'],
checkpoint=checkpoint,
additional_tokens_smart_init=config['smart_init'],
dropout=config['dropout'],
attention_dropout=config['attention_dropout'],
from_pretrained=config['warm_start'],
init_reverse=split_both_decoder,
penman_linearization=config['penman_linearization'],
collapse_name_ops=config['collapse_name_ops'],
use_pointer_tokens=config['use_pointer_tokens'],
raw_graph=config.get('raw_graph', False)
)
print(model)
print(model.config)
if checkpoint is not None:
print(f'Checkpoint restored ({checkpoint})!')
if direction == 'both' and split_both_decoder:
params_dir_enc = list(model.model.encoder.parameters())
params_dir_enc_check = {id(p) for p in params_dir_enc}
params_dir_dec = set()
params_dir_dec |= {p for p in model.model.decoder.parameters() if id(p) not in params_dir_enc_check}
params_dir_dec |= {p for p in model.rev.model.decoder.parameters() if id(p) not in params_dir_enc_check}
params_dir_dec = list(params_dir_dec)
optimizer = RAdam(
[{'params': params_dir_enc, 'lr': config['learning_rate']},
{'params': params_dir_dec, 'lr': config['learning_rate'] * 2},],
weight_decay=config['weight_decay'])
else:
optimizer = RAdam(
model.parameters(),
lr=config['learning_rate'],
weight_decay=config['weight_decay'])
if checkpoint is not None:
optimizer.load_state_dict(torch.load(checkpoint)['optimizer'])
if config['scheduler'] == 'cosine':
scheduler = transformers.get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=config['warmup_steps'],
num_training_steps=config['training_steps'])
elif config['scheduler'] == 'constant':
scheduler = transformers.get_constant_schedule_with_warmup(
optimizer,
num_warmup_steps=config['warmup_steps'])
else:
raise ValueError
scaler = GradScaler(enabled=fp16)
train_loader = instantiate_loader(
config['train'],
tokenizer,
batch_size=config['batch_size'],
evaluation=False,
use_recategorization=config['use_recategorization'],
remove_longer_than=config['remove_longer_than'],
remove_wiki=config['remove_wiki'],
dereify=config['dereify'],
)
dev_gold_path = ROOT / 'data/tmp/dev-gold.txt'
dev_pred_path = ROOT / 'data/tmp/dev-pred.txt'
dev_loader = instantiate_loader(
config['dev'],
tokenizer,
batch_size=config['batch_size'],
evaluation=True, out=dev_gold_path,
use_recategorization=config['use_recategorization'],
remove_wiki=config['remove_wiki'],
dereify=config['dereify'],
)
if direction == 'amr':
def train_step(engine, batch):
model.train()
x, y, extra = batch
model.amr_mode = True
with autocast(enabled=fp16):
loss, *_ = model(**x, **y)
scaler.scale((loss / config['accum_steps'])).backward()
return loss.item()
@torch.no_grad()
def eval_step(engine, batch):
model.eval()
x, y, extra = batch
model.amr_mode = True
loss, *_ = model(**x, **y)
return loss.item()
elif direction == 'text':
def train_step(engine, batch):
model.train()
x, y, extra = batch
x, y = reverse_direction(x, y)
model.rev.amr_mode = False
with autocast(enabled=fp16):
loss, *_ = model.rev(**x, **y)
scaler.scale((loss / config['accum_steps'])).backward()
return loss.item()
@torch.no_grad()
def eval_step(engine, batch):
model.eval()
x, y, extra = batch
x, y = reverse_direction(x, y)
model.rev.amr_mode = False
loss, *_ = model(**x, **y)
return loss.item()
elif direction == 'both':
def train_step(engine, batch):
model.train()
x, y, extra = batch
model.amr_mode = True
with autocast(enabled=fp16):
loss1, *_ = model(**x, **y)
scaler.scale((loss1 / config['accum_steps'] * 0.5)).backward()
loss1 = loss1.item()
x, y = reverse_direction(x, y)
model.rev.amr_mode = False
with autocast(enabled=fp16):
loss2, *_ = model.rev(**x, **y)
scaler.scale((loss2 / config['accum_steps'] * 0.5)).backward()
return loss1, loss2.item()
@torch.no_grad()
def eval_step(engine, batch):
model.eval()
x, y, extra = batch
model.amr_mode = True
loss1, *_ = model(**x, **y)
x, y = reverse_direction(x, y)
model.rev.amr_mode = False
loss2, *_ = model.rev(**x, **y)
return loss1.item(), loss2.item()
else:
raise ValueError
trainer = Engine(train_step)
evaluator = Engine(eval_step)
@trainer.on(Events.STARTED)
def update(engine):
print('training started!')
@trainer.on(Events.EPOCH_COMPLETED)
@trainer.on(Events.ITERATION_COMPLETED(every=config['accum_steps']))
def update(engine):
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), config['grad_norm'])
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
scheduler.step()
@trainer.on(Events.EPOCH_COMPLETED)
def log_trn_loss(engine):
log_msg = f"training epoch: {engine.state.epoch}"
if direction in ('amr', 'both'):
log_msg += f" | loss_amr: {engine.state.metrics['trn_amr_loss']:.3f}"
if direction in ('text', 'both'):
log_msg += f" | loss_text: {engine.state.metrics['trn_text_loss']:.3f}"
print(log_msg)
@trainer.on(Events.EPOCH_COMPLETED)
def run_dev_eval(engine):
dev_loader.batch_size = config['batch_size']
dev_loader.device = next(model.parameters()).device
evaluator.run(dev_loader)
if not config['best_loss']:
if direction in ('amr', 'both'):
@evaluator.on(Events.EPOCH_COMPLETED)
def smatch_eval(engine):
device = next(model.parameters()).device
dev_loader.device = device
graphs = predict_amrs(dev_loader, model, tokenizer, restore_name_ops=config['collapse_name_ops'])
write_predictions(dev_pred_path, tokenizer, graphs)
try:
smatch = compute_smatch(dev_gold_path, dev_pred_path)
except:
smatch = 0.
engine.state.metrics['dev_smatch'] = smatch
if direction in ('text', 'both'):
@evaluator.on(Events.EPOCH_COMPLETED)
def smatch_eval(engine):
device = next(model.parameters()).device
dev_loader.device = device
pred_sentences = predict_sentences(dev_loader, model.rev, tokenizer, beam_size=config['beam_size'])
bleu = compute_bleu(dev_loader.dataset.sentences, pred_sentences)
engine.state.metrics['dev_bleu'] = bleu.score
@evaluator.on(Events.EPOCH_COMPLETED)
def log_dev_loss(engine):
log_msg = f"dev epoch: {trainer.state.epoch}"
if direction in ('amr', 'both'):
log_msg += f" | loss_amr: {engine.state.metrics['dev_amr_loss']:.3f}"
if not config['best_loss']:
log_msg += f" | smatch: {engine.state.metrics['dev_smatch']:.3f}"
if direction in ('text', 'both'):
log_msg += f" | loss_text: {engine.state.metrics['dev_text_loss']:.3f}"
if not config['best_loss']:
log_msg += f" | bleu: {engine.state.metrics['dev_bleu']:.3f}"
print(log_msg)
if direction == 'amr':
RunningAverage(output_transform=lambda out: out).attach(trainer, 'trn_amr_loss')
RunningAverage(output_transform=lambda out: out).attach(evaluator, 'dev_amr_loss')
elif direction == 'text':
RunningAverage(output_transform=lambda out: out).attach(trainer, 'trn_text_loss')
RunningAverage(output_transform=lambda out: out).attach(evaluator, 'dev_text_loss')
elif direction == 'both':
RunningAverage(output_transform=lambda out: out[0]).attach(trainer, 'trn_amr_loss')
RunningAverage(output_transform=lambda out: out[1]).attach(trainer, 'trn_text_loss')
RunningAverage(output_transform=lambda out: out[0]).attach(evaluator, 'dev_amr_loss')
RunningAverage(output_transform=lambda out: out[1]).attach(evaluator, 'dev_text_loss')
if config['log_wandb']:
from ignite.contrib.handlers.wandb_logger import WandBLogger
wandb_logger = WandBLogger(init=False)
if direction == 'amr':
wandb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="iterations/trn_amr_loss",
output_transform=lambda loss: loss
)
elif direction == 'text':
wandb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="iterations/trn_text_loss",
output_transform=lambda loss: loss
)
if direction == 'both':
wandb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="iterations/trn_amr_loss",
output_transform=lambda loss: loss[0]
)
wandb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="iterations/trn_text_loss",
output_transform=lambda loss: loss[1]
)
if direction == 'amr':
metric_names_trn = ['trn_amr_loss']
metric_names_dev = ['dev_amr_loss']
if not config['best_loss']:
metric_names_dev.append('dev_smatch')
elif direction == 'text':
metric_names_trn = ['trn_text_loss']
metric_names_dev = ['dev_text_loss']
if not config['best_loss']:
metric_names_dev.append('dev_bleu')
elif direction == 'both':
metric_names_trn = ['trn_amr_loss', 'trn_text_loss']
metric_names_dev = ['dev_amr_loss', 'dev_smatch']
if not config['best_loss']:
metric_names_dev.extend(['dev_text_loss', 'dev_bleu'])
wandb_logger.attach_output_handler(
trainer,
event_name=Events.EPOCH_COMPLETED,
tag="epochs",
metric_names=metric_names_trn,
global_step_transform=lambda *_: trainer.state.iteration,
)
wandb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="epochs",
metric_names=metric_names_dev,
global_step_transform=lambda *_: trainer.state.iteration,
)
@trainer.on(Events.ITERATION_COMPLETED)
def wandb_log_lr(engine):
wandb.log({'lr': scheduler.get_last_lr()[0]}, step=engine.state.iteration)
if config['save_checkpoints']:
if direction in ('amr', 'both'):
if config['best_loss']:
prefix = 'best-loss-amr'
score_function = lambda x: 1 / evaluator.state.metrics['dev_amr_loss']
else:
prefix = 'best-smatch'
score_function = lambda x: evaluator.state.metrics['dev_smatch']
else:
if config['best_loss']:
prefix = 'best-loss-text'
score_function = lambda x: 1 / evaluator.state.metrics['dev_amr_loss']
else:
prefix = 'best-bleu'
score_function = lambda x: evaluator.state.metrics['dev_bleu']
to_save = {'model': model, 'optimizer': optimizer}
if config['log_wandb']:
where_checkpoints = str(wandb_logger.run.dir)
else:
root = ROOT/'runs'
try:
root.mkdir()
except:
pass
where_checkpoints = root/str(len(list(root.iterdir())))
try:
where_checkpoints.mkdir()
except:
pass
where_checkpoints = str(where_checkpoints)
print(where_checkpoints)
handler = ModelCheckpoint(
where_checkpoints,
prefix,
n_saved=1,
create_dir=True,
score_function=score_function,
global_step_transform=global_step_from_engine(trainer),
)
evaluator.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)
model.cuda()
device = next(model.parameters()).device
train_loader.device = device
trainer.run(train_loader, max_epochs=config['max_epochs'])
if __name__ == '__main__':
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    import yaml
    import wandb

    parser = ArgumentParser(
        description="Trainer script",
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('--direction', type=str, default='amr', choices=['amr', 'text', 'both'],
                        help='Train a uni- (amr, text) or bidirectional (both).')
    parser.add_argument('--split-both-decoder', action='store_true')
    parser.add_argument('--config', type=Path, default=ROOT/'configs/sweeped.yaml',
                        help='Use the following config for hparams.')
    parser.add_argument('--checkpoint', type=str,
                        help='Warm-start from a previous fine-tuned checkpoint.')
    parser.add_argument('--fp16', action='store_true')
    # parse_known_args: tolerate extra flags (e.g. injected by sweep tooling).
    args, unknown = parser.parse_known_args()

    # BUG FIX: the original check was `args.fp16 and autocast_available`,
    # which raised exactly when mixed precision WAS supported. fp16 must be
    # rejected only when autocast is NOT available.
    if args.fp16 and not autocast_available:
        raise ValueError('You\'ll need a newer PyTorch version to enable fp16 training.')

    with args.config.open() as y:
        config = yaml.load(y, Loader=yaml.FullLoader)

    if config['log_wandb']:
        wandb.init(
            entity="SOME-RUNS",
            project="SOME-PROJECT",
            config=config,
            dir=str(ROOT / 'runs/'))
        # From here on, hyperparameters are read through the wandb proxy so
        # sweep overrides take effect.
        config = wandb.config
    print(config)

    checkpoint = args.checkpoint if args.checkpoint else None

    do_train(
        checkpoint=checkpoint,
        direction=args.direction,
        split_both_decoder=args.split_both_decoder,
        fp16=args.fp16,
    )
AOE-Net | AOE-Net-main/main.py | import sys
import os
import argparse
from tqdm import tqdm
import pandas as pd
import torch
import torch.nn.parallel
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from models.model import EventDetection
from dataset import VideoDataSet, Collator
from loss_function import bmn_loss_func, get_mask
from post_processing import PostProcessor, getDatasetDict
from utils import ProposalGenerator
from eval_anet import evaluate_proposals as anet_evaluate_prop
from eval_thumos import evaluate_proposals as thumos_evaluate_prop
from eval_det_anet import evaluate_detections as anet_evaluate_det
from eval_det_thumos import evaluate_detections as thumos_evaluate_det
from config.defaults import get_cfg
sys.dont_write_bytecode = True
class Solver:
def __init__(self, cfg):
self.cfg = cfg
self.model = EventDetection(cfg).cuda()
self.model = torch.nn.DataParallel(self.model, device_ids=cfg.GPU_IDS)
if cfg.MODE not in ['train', 'training']: # TODO: add condition for resume feature.
checkpoint = torch.load(cfg.TEST.CHECKPOINT_PATH)
print('Loaded model at epoch %d.' % checkpoint['epoch'])
self.model.load_state_dict(checkpoint['state_dict'])
if cfg.MODE in ['train', 'training']:
self.optimizer = optim.AdamW(
filter(lambda p: p.requires_grad, self.model.parameters()),
lr=cfg.TRAIN.LR, weight_decay=cfg.TRAIN.WEIGHT_DECAY)
#self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=10, gamma=0.1)
self.train_collator = Collator(cfg, 'train')
self.test_collator = Collator(cfg, 'test')
self.temporal_dim = cfg.DATA.TEMPORAL_DIM
self.max_duration = cfg.DATA.MAX_DURATION
self.evaluate_func = None
if cfg.DATASET == 'anet':
if cfg.EVAL_TYPE == 'proposal':
self.evaluate_func = anet_evaluate_prop
elif cfg.EVAL_TYPE == 'detection':
self.evaluate_func = anet_evaluate_det
elif cfg.DATASET == 'thumos':
if cfg.EVAL_TYPE == 'proposal':
self.evaluate_func = thumos_evaluate_prop
elif cfg.EVAL_TYPE == 'detection':
self.evaluate_func = thumos_evaluate_det
if self.evaluate_func is None:
print('Evaluation function [{}] of dataset [{}] is not implemented'.format(cfg.EVAL_TYPE, cfg.DATASET))
def train_epoch(self, data_loader, bm_mask, epoch, writer):
cfg = self.cfg
self.model.train()
self.optimizer.zero_grad()
loss_names = ['Loss', 'TemLoss', 'PemLoss Regression', 'PemLoss Classification']
epoch_losses = [0] * 4
period_losses = [0] * 4
last_period_size = len(data_loader) % cfg.TRAIN.STEP_PERIOD
last_period_start = cfg.TRAIN.STEP_PERIOD * (len(data_loader) // cfg.TRAIN.STEP_PERIOD)
for n_iter, (env_features, agent_features, agent_masks, obj_features, obj_masks, label_confidence, label_start, label_end) in enumerate(tqdm(data_loader)):
env_features = env_features.cuda() if cfg.USE_ENV else None
agent_features = agent_features.cuda() if cfg.USE_AGENT else None
agent_masks = agent_masks.cuda() if cfg.USE_AGENT else None
obj_features = obj_features.cuda() if cfg.USE_OBJ else None
obj_masks = obj_masks.cuda() if cfg.USE_OBJ else None
label_start = label_start.cuda()
label_end = label_end.cuda()
label_confidence = label_confidence.cuda()
confidence_map, start, end = self.model(env_features, agent_features, agent_masks, obj_features, obj_masks)
losses = bmn_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask)
period_size = cfg.TRAIN.STEP_PERIOD if n_iter < last_period_start else last_period_size
total_loss = losses[0] / period_size
total_loss.backward()
losses = [l.cpu().detach().numpy() / cfg.TRAIN.STEP_PERIOD for l in losses]
period_losses = [l + pl for l, pl in zip(losses, period_losses)]
if (n_iter + 1) % cfg.TRAIN.STEP_PERIOD != 0 and n_iter != (len(data_loader) - 1):
continue
self.optimizer.step()
self.optimizer.zero_grad()
epoch_losses = [el + pl for el, pl in zip(epoch_losses, period_losses)]
write_step = epoch * len(data_loader) + n_iter
for i, loss_name in enumerate(loss_names):
writer.add_scalar(loss_name, period_losses[i], write_step)
period_losses = [0] * 4
print(
"BMN training loss(epoch %d): tem_loss: %.03f, pem reg_loss: %.03f, pem cls_loss: %.03f, total_loss: %.03f" % (
epoch, epoch_losses[1] / (n_iter + 1),
epoch_losses[2] / (n_iter + 1),
epoch_losses[3] / (n_iter + 1),
epoch_losses[0] / (n_iter + 1)))
def train(self, n_epochs):
exp_id = max([0] + [int(run.split('_')[-1]) for run in os.listdir(self.cfg.TRAIN.LOG_DIR)]) + 1
log_dir = os.path.join(self.cfg.TRAIN.LOG_DIR, 'run_' + str(exp_id))
if not os.path.isdir(os.path.dirname(log_dir)):
os.makedirs(os.path.dirname(log_dir))
writer = SummaryWriter(log_dir)
checkpoint_dir = os.path.join(self.cfg.MODEL.CHECKPOINT_DIR, 'checkpoint_' + str(exp_id))
assert not os.path.isdir(checkpoint_dir), 'Checkpoint directory %s has already been created.' % checkpoint_dir
os.makedirs(checkpoint_dir)
train_loader = torch.utils.data.DataLoader(
VideoDataSet(self.cfg, split=self.cfg.TRAIN.SPLIT),
batch_size=self.cfg.TRAIN.BATCH_SIZE, shuffle=True,
num_workers=12, pin_memory=True, collate_fn=self.train_collator)
eval_loader = torch.utils.data.DataLoader(
VideoDataSet(self.cfg, split=self.cfg.VAL.SPLIT),
batch_size=self.cfg.VAL.BATCH_SIZE, shuffle=False,
num_workers=12, pin_memory=True, drop_last=False, collate_fn=self.test_collator)
bm_mask = get_mask(self.temporal_dim, self.max_duration).cuda()
scores = []
for epoch in range(n_epochs):
#print('Current LR: {}'.format(self.scheduler.get_last_lr()[0]))
self.train_epoch(train_loader, bm_mask, epoch, writer)
#self.scheduler.step()
score = self.evaluate(eval_loader, self.cfg.VAL.SPLIT)
state = {
'epoch': epoch + 1,
'score': score,
'state_dict': self.model.state_dict()
}
if len(scores) == 0 or score > max(scores):
torch.save(state, os.path.join(checkpoint_dir, "best_{}.pth".format(self.cfg.EVAL_SCORE)))
torch.save(state, os.path.join(checkpoint_dir, "model_{}.pth".format(epoch + 1)))
writer.add_scalar(self.cfg.EVAL_SCORE, score, epoch)
scores.append(score)
def evaluate(self, data_loader=None, split=None):
self.inference(data_loader, split, self.cfg.VAL.BATCH_SIZE)
score = self.evaluate_func(self.cfg) # AUC if dataset=anet, AR@100 if dataset=thumos
return score
def inference(self, data_loader=None, split=None, batch_size=None):
if not os.path.isdir('results/outputs/'):
os.makedirs('results/outputs/')
annotations = getDatasetDict(self.cfg.DATA.ANNOTATION_FILE, split) if self.cfg.DATASET == 'thumos' else None
self.prop_gen = ProposalGenerator(self.temporal_dim, self.max_duration, annotations)
self.post_processing = PostProcessor(self.cfg, split)
if data_loader is None:
data_loader = torch.utils.data.DataLoader(
VideoDataSet(self.cfg, split=split),
batch_size=batch_size, shuffle=False,
num_workers=12, pin_memory=True, drop_last=False, collate_fn=self.test_collator)
col_name = ["xmin", "xmax", "xmin_score", "xmax_score", "clr_score", "reg_score", "score"]
self.model.eval()
with torch.no_grad():
for video_names, env_features, agent_features, agent_masks, obj_features, obj_masks in tqdm(data_loader):
env_features = env_features.cuda() if self.cfg.USE_ENV else None
agent_features = agent_features.cuda() if self.cfg.USE_AGENT else None
agent_masks = agent_masks.cuda() if self.cfg.USE_AGENT else None
obj_features = obj_features.cuda() if self.cfg.USE_OBJ else None
obj_masks = obj_masks.cuda() if self.cfg.USE_OBJ else None
confidence_map, start_map, end_map = self.model(env_features, agent_features, agent_masks, obj_features, obj_masks)
confidence_map = confidence_map.cpu().numpy()
start_map = start_map.cpu().numpy()
end_map = end_map.cpu().numpy()
batch_props = self.prop_gen(start_map, end_map, confidence_map, video_names)
for video_name, new_props in zip(video_names, batch_props):
new_df = pd.DataFrame(new_props, columns=col_name)
new_df.to_feather("./results/outputs/" + video_name + ".feather")
self.post_processing()
def get_args():
    """Build the command-line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg-file', type=str, default=None,
                        help='Path to YAML config file.')
    parser.add_argument('opts', nargs=argparse.REMAINDER, default=None,
                        help="See slowfast/config/defaults.py for all options")
    return parser.parse_args()
def main(args):
    """Load the config, apply CLI overrides, then dispatch on cfg.MODE."""
    cfg = get_cfg()
    if args.cfg_file:
        cfg.merge_from_file(args.cfg_file)
    if args.opts is not None:
        cfg.merge_from_list(args.opts)
    cfg.freeze()

    solver = Solver(cfg)
    mode = cfg.MODE
    if mode in ('train', 'training'):
        solver.train(cfg.TRAIN.NUM_EPOCHS)
    elif mode in ('validate', 'validation'):
        solver.evaluate(split=cfg.VAL.SPLIT)
    elif mode in ('test', 'testing'):
        solver.inference(split=cfg.TEST.SPLIT, batch_size=cfg.TEST.BATCH_SIZE)
# Script entry point: parse CLI arguments and run the requested mode.
if __name__ == '__main__':
    args = get_args()
    main(args)
| 10,325 | 43.317597 | 163 | py |
AOE-Net | AOE-Net-main/dataset.py | # -*- coding: utf-8 -*-
import os
import json
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
from utils import ioa_with_anchors, iou_with_anchors
def load_json(file):
    """Open *file* and return its parsed JSON content."""
    with open(file) as handle:
        return json.load(handle)
class Collator(object):
    """Batch-collation callable for VideoDataSet.

    Pads the variable-length agent/object box features of each sample to a
    common per-batch box dimension and builds the matching boolean attention
    masks. In training mode the ground-truth label tensors are stacked as
    well; in eval mode the video ids are passed through instead.
    """
    def __init__(self, cfg, mode):
        # Training batches carry labels; eval batches carry video ids instead.
        self.is_train = mode in ['train', 'training']
        if self.is_train:
            self.batch_names = ['env_feats', 'agent_feats', 'box_lens', 'obj_feats', 'obj_box_lens', 'conf_labels', 'start_labels', 'end_labels']
            self.label_names = ['conf_labels', 'start_labels', 'end_labels']
        else:
            self.batch_names = ['video_ids', 'env_feats', 'agent_feats', 'box_lens', 'obj_feats', 'obj_box_lens']
            self.label_names = []
        self.feat_names = ['env_feats', 'agent_feats', 'box_lens', 'obj_feats', 'obj_box_lens']
        self.tmp_dim = cfg.DATA.TEMPORAL_DIM
        self.feat_dim = cfg.MODEL.AGENT_DIM
        self.obj_feat_dim = cfg.MODEL.OBJ_DIM  # object feature dimensionality

    def process_features(self, bsz, env_feats, agent_feats, box_lens, obj_feats, obj_box_lens):
        """Stack env features and zero-pad agent/object box features.

        Returns (env_feats, padded_agent_feats, agent_mask, padded_obj_feats,
        obj_mask); each element is None when the corresponding modality is
        absent from the batch (signalled by a None first sample).
        """
        if env_feats[0] is not None:
            env_feats = torch.stack(env_feats)
        else:
            env_feats = None
        # Make new order to inputs by their lengths (long-to-short)
        if agent_feats[0] is not None:
            box_lens = torch.stack(box_lens, dim=0)
            max_box_dim = torch.max(box_lens).item()
            # Make padding mask for self-attention
            agent_mask = torch.arange(max_box_dim)[None, None, :] < box_lens[:, :, None]
            # Pad agent features at temporal and box dimension
            pad_agent_feats = torch.zeros(bsz, self.tmp_dim, max_box_dim, self.feat_dim)
            for i, temporal_features in enumerate(agent_feats):
                for j, box_features in enumerate(temporal_features):
                    if len(box_features) > 0:
                        pad_agent_feats[i, j, :len(box_features)] = torch.tensor(box_features)
        else:
            pad_agent_feats = None
            agent_mask = None
        # Make new order to inputs by their lengths (long-to-short)
        if obj_feats[0] is not None:
            obj_box_lens = torch.stack(obj_box_lens, dim=0)
            max_box_dim = torch.max(obj_box_lens).item()
            # Make padding mask for self-attention
            obj_mask = torch.arange(max_box_dim)[None, None, :] < obj_box_lens[:, :, None]
            # Pad agent features at temporal and box dimension
            pad_obj_feats = torch.zeros(bsz, self.tmp_dim, max_box_dim, self.obj_feat_dim)
            for i, temporal_features in enumerate(obj_feats):
                for j, box_features in enumerate(temporal_features):
                    if len(box_features) > 0:
                        pad_obj_feats[i, j, :len(box_features)] = torch.tensor(box_features)
        else:
            pad_obj_feats = None
            obj_mask = None
        return env_feats, pad_agent_feats, agent_mask, pad_obj_feats, obj_mask

    def __call__(self, batch):
        """Collate a list of dataset samples into model-ready tensors."""
        # Transpose list-of-samples into name -> tuple-of-values.
        input_batch = dict(zip(self.batch_names, zip(*batch)))
        bsz = len(input_batch['env_feats'])
        output_batch = [] if self.is_train else [input_batch['video_ids']]
        # Process environment and agent features
        input_feats = [input_batch[feat_name] for feat_name in self.feat_names]
        output_batch.extend(self.process_features(bsz, *input_feats))
        for label_name in self.label_names:
            output_batch.append(torch.stack(input_batch[label_name]))
        return output_batch
class VideoDataSet(Dataset):
    """Videos with pre-extracted environment/agent/object features plus
    BMN-style boundary and confidence training targets.

    Annotations come from ``cfg.DATA.ANNOTATION_FILE`` (ActivityNet-style
    JSON); each enabled feature stream is read from a per-video JSON file in
    its configured directory.
    """

    def __init__(self, cfg, split='training'):
        self.split = split
        self.dataset_name = cfg.DATASET
        self.video_anno_path = cfg.DATA.ANNOTATION_FILE
        self.temporal_dim = cfg.DATA.TEMPORAL_DIM
        self.max_duration = cfg.DATA.MAX_DURATION
        # Width of one temporal cell in normalized [0, 1] video time.
        self.temporal_gap = 1. / self.temporal_dim
        self.env_feature_dir = cfg.DATA.ENV_FEATURE_DIR
        self.agent_feature_dir = cfg.DATA.AGENT_FEATURE_DIR
        self.obj_feature_dir = cfg.DATA.OBJ_FEATURE_DIR
        self.use_env = cfg.USE_ENV
        self.use_agent = cfg.USE_AGENT
        self.use_obj = cfg.USE_OBJ
        if split in ['train', 'training']:
            # The anchor match map is only needed to build training targets.
            self._get_match_map()
        # ActivityNet feature files carry a 'v_' prefix.
        self.video_prefix = 'v_' if cfg.DATASET == 'anet' else ''
        self._get_dataset()

    def _get_match_map(self):
        """Precompute the (start, end) window of every BM-map cell and the
        per-timestep anchor boundaries used for start/end supervision."""
        match_map = []
        for idx in range(self.temporal_dim):
            tmp_match_window = []
            xmin = self.temporal_gap * idx
            for jdx in range(1, self.max_duration + 1):
                xmax = xmin + self.temporal_gap * jdx
                tmp_match_window.append([xmin, xmax])
            match_map.append(tmp_match_window)
        match_map = np.array(match_map)  # temporal_dim x max_duration x 2
        match_map = np.transpose(match_map, [1, 0, 2])  # [0,1] [1,2] [2,3].....[99,100]
        match_map = np.reshape(match_map, [-1, 2])  # [0,2] [1,3] [2,4].....[99,101] # duration x start
        self.match_map = match_map
        # Anchors are half a temporal cell wide on each side of a timestep.
        self.anchor_xmin = [self.temporal_gap * (i - 0.5) for i in range(self.temporal_dim)]
        self.anchor_xmax = [self.temporal_gap * (i + 0.5) for i in range(1, self.temporal_dim + 1)]

    def get_filter_video_names(self, json_data, upper_thresh=.98, lower_thresh=.3):
        """Partition training videos by mean ground-truth segment length.

        Videos whose environment feature file is missing, or whose mean GT
        length (normalized by duration) is >= ``upper_thresh``, are filtered
        out; videos below ``lower_thresh`` are returned for duplication
        (data augmentation).

        :param json_data: annotation database mapping video name -> info dict
        :param upper_thresh: filter videos with mean GT length above this
        :param lower_thresh: augment videos with mean GT length below this
        :return: (filter_video_names, augment_video_names)
        """
        filter_video_names, augment_video_names = [], []
        video_lists = list(json_data)
        for video_name in video_lists:
            video_info = json_data[video_name]
            # NOTE(review): the 'v_' prefix is hard-coded here while __init__
            # derives it from the dataset name — confirm for non-anet datasets.
            if not os.path.isfile(os.path.join(self.env_feature_dir, 'v_' + video_name + '.json')):
                filter_video_names.append(video_name)
                continue
            if video_info['subset'] != "training":
                continue
            video_second = video_info["duration"]
            gt_lens = []
            video_labels = video_info["annotations"]
            for j in range(len(video_labels)):
                tmp_info = video_labels[j]
                tmp_start = tmp_info["segment"][0]
                tmp_end = tmp_info["segment"][1]
                # Clamp to [0, 1] normalized video time.
                tmp_start = max(min(1, tmp_start / video_second), 0)
                tmp_end = max(min(1, tmp_end / video_second), 0)
                gt_lens.append(tmp_end - tmp_start)
            if len(gt_lens):
                mean_len = np.mean(gt_lens)
                if mean_len >= upper_thresh:
                    filter_video_names.append(video_name)
                if mean_len < lower_thresh:
                    augment_video_names.append(video_name)
        return filter_video_names, augment_video_names

    def _get_dataset(self):
        """Load annotations, apply filtering/augmentation, and build the list
        of video ids for this split."""
        annotations = load_json(self.video_anno_path)['database']
        if self.dataset_name == 'anet':
            filter_video_names, augment_video_names = self.get_filter_video_names(annotations)
        else:
            filter_video_names, augment_video_names = [], []
        # Read event segments
        self.event_dict = {}
        self.video_ids = []
        for video_id, annotation in annotations.items():
            if annotation['subset'] != self.split or video_id in filter_video_names:
                continue
            self.event_dict[video_id] = {
                'duration': annotation['duration'],
                'events': annotation['annotations']
            }
            self.video_ids.append(video_id)
        if self.split in ['train', 'training']:
            # Short-event videos appear a second time to augment training.
            self.video_ids.extend(augment_video_names)
        print("Split: %s. Dataset size: %d" % (self.split, len(self.video_ids)))

    def __getitem__(self, index):
        """Return feature streams, plus training targets when training."""
        env_features, agent_features, box_lengths, obj_features, obj_box_lengths = self._load_item(index)
        if self.split == 'training':
            match_score_start, match_score_end, confidence_score = self._get_train_label(index)
            return env_features, agent_features, box_lengths, obj_features, obj_box_lengths, confidence_score, match_score_start, match_score_end
        else:
            return self.video_ids[index], env_features, agent_features, box_lengths, obj_features, obj_box_lengths

    def _load_item(self, index):
        """Load the enabled feature streams for one video.

        Environment features: float tensor of shape (T, F).
        Agent/object features: per-timestep lists of per-box feature vectors
        plus a tensor of box counts per timestep. Disabled streams are None.
        """
        video_name = self.video_prefix + self.video_ids[index]
        # Environment features at every timestamp: T x F.
        if self.use_env is True:
            env_features = load_json(os.path.join(self.env_feature_dir, video_name + '.json'))['video_features']
            env_features = torch.tensor([feature['features'] for feature in env_features]).float().squeeze(1)
        else:
            env_features = None
        # Agent features at every timestamp: T x B x F (B varies per timestep).
        if self.use_agent is True:
            agent_features = load_json(os.path.join(self.agent_feature_dir, video_name + '.json'))['video_features']
            agent_features = [feature['features'] for feature in agent_features]
            box_lengths = torch.tensor([len(x) for x in agent_features])
        else:
            agent_features = None
            box_lengths = None
        # Object features at every timestamp: T x B x F (B varies per timestep).
        if self.use_obj is True:
            try:
                obj_features = load_json(os.path.join(self.obj_feature_dir, video_name + '.json'))['video_features']
            except Exception:
                # The previous bare `except: pass` swallowed the error and then
                # crashed with a NameError on the next line; log the offending
                # video and re-raise the real failure instead.
                print('error', video_name)
                raise
            obj_features = [feature['features'] for feature in obj_features]
            obj_box_lengths = torch.tensor([len(x) for x in obj_features])
        else:
            obj_features = None
            obj_box_lengths = None
        return env_features, agent_features, box_lengths, obj_features, obj_box_lengths

    def _get_train_label(self, index):
        """Build BMN training targets for one video.

        Returns per-timestep start/end match scores and the
        (max_duration x temporal_dim) GT IoU confidence map.
        """
        video_id = self.video_ids[index]
        video_info = self.event_dict[video_id]
        video_labels = video_info['events']  # the measurement is second, not frame
        duration = video_info['duration']
        # Change the measurement from seconds to normalized percentage and
        # compute, for each BM cell, its max IoU with any GT segment.
        gt_bbox = []
        gt_iou_map = []
        for j in range(len(video_labels)):
            tmp_info = video_labels[j]
            tmp_start = max(min(1, tmp_info['segment'][0] / duration), 0)
            tmp_end = max(min(1, tmp_info['segment'][1] / duration), 0)
            gt_bbox.append([tmp_start, tmp_end])
            tmp_gt_iou_map = iou_with_anchors(
                self.match_map[:, 0], self.match_map[:, 1], tmp_start, tmp_end)
            tmp_gt_iou_map = np.reshape(tmp_gt_iou_map,
                                        [self.max_duration, self.temporal_dim])
            gt_iou_map.append(tmp_gt_iou_map)
        gt_iou_map = np.array(gt_iou_map)
        gt_iou_map = np.max(gt_iou_map, axis=0)
        gt_iou_map = torch.Tensor(gt_iou_map)
        # Generate the start/end regions R_s and R_e around each GT boundary
        # (three temporal cells wide, centered on the boundary).
        gt_bbox = np.array(gt_bbox)
        gt_xmins = gt_bbox[:, 0]
        gt_xmaxs = gt_bbox[:, 1]
        gt_len_small = 3 * self.temporal_gap
        gt_start_bboxs = np.stack((gt_xmins - gt_len_small / 2, gt_xmins + gt_len_small / 2), axis=1)
        gt_end_bboxs = np.stack((gt_xmaxs - gt_len_small / 2, gt_xmaxs + gt_len_small / 2), axis=1)
        # Per-timestep supervision: max IoA of each anchor with any boundary region.
        match_score_start = []
        for jdx in range(len(self.anchor_xmin)):
            match_score_start.append(np.max(
                ioa_with_anchors(self.anchor_xmin[jdx], self.anchor_xmax[jdx], gt_start_bboxs[:, 0], gt_start_bboxs[:, 1])))
        match_score_end = []
        for jdx in range(len(self.anchor_xmin)):
            match_score_end.append(np.max(
                ioa_with_anchors(self.anchor_xmin[jdx], self.anchor_xmax[jdx], gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
        match_score_start = torch.tensor(match_score_start)
        match_score_end = torch.tensor(match_score_end)
        return match_score_start, match_score_end, gt_iou_map

    def __len__(self):
        return len(self.video_ids)
| 14,047 | 44.170418 | 145 | py |
AOE-Net | AOE-Net-main/loss_function.py | # -*- coding: utf-8 -*-
import torch
import numpy as np
import torch.nn.functional as F
def get_mask(tscale, duration):
    """Build the valid-proposal mask for the BM confidence map.

    Row ``d`` (duration index) has ones in its first ``tscale - d`` positions
    and zeros elsewhere, masking proposals that would extend past the video.

    Args:
        tscale: Temporal dimension (number of start positions).
        duration: Maximum proposal duration (number of rows).

    Returns:
        A float tensor of shape (duration, tscale).
    """
    rows = [[1] * (tscale - d) + [0] * d for d in range(duration)]
    return torch.Tensor(np.array(rows, dtype=np.float32))
def bmn_loss_func(pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end, bm_mask):
    """Combined BMN training loss.

    Splits the predicted BM map into its regression (channel 0) and
    classification (channel 1) heads, masks the GT IoU map to valid
    proposals, and sums the temporal boundary loss with the two proposal
    losses (regression weighted by 10).

    Returns:
        (total_loss, tem_loss, pem_reg_loss, pem_cls_loss)
    """
    reg_map = pred_bm[:, 0].contiguous()
    cls_map = pred_bm[:, 1].contiguous()
    valid_gt_iou = gt_iou_map * bm_mask
    boundary_loss = tem_loss_func(pred_start, pred_end, gt_start, gt_end)
    reg_loss = pem_reg_loss_func(reg_map, valid_gt_iou, bm_mask)
    cls_loss = pem_cls_loss_func(cls_map, valid_gt_iou, bm_mask)
    total = boundary_loss + 10 * reg_loss + cls_loss
    return total, boundary_loss, reg_loss, cls_loss
def tem_loss_func(pred_start, pred_end, gt_start, gt_end):
    """Temporal evaluation loss: class-balanced binary log-likelihood on the
    predicted start and end probability sequences.

    Positions with gt > 0.5 are positives; positives and negatives are
    re-weighted by inverse frequency so sparse boundary labels are not
    swamped by the background.
    """
    def weighted_bce(scores, labels):
        scores = scores.view(-1)
        labels = labels.view(-1)
        positives = (labels > 0.5).float()
        total = len(positives)
        pos_count = torch.sum(positives)
        balance = total / pos_count
        neg_weight = 0.5 * balance / (balance - 1)
        pos_weight = 0.5 * balance
        eps = 0.000001
        pos_term = pos_weight * torch.log(scores + eps) * positives
        neg_term = neg_weight * torch.log(1.0 - scores + eps) * (1.0 - positives)
        return -1 * torch.mean(pos_term + neg_term)

    return weighted_bce(pred_start, gt_start) + weighted_bce(pred_end, gt_end)
def pem_reg_loss_func(pred_score, gt_iou_map, mask):
    """Proposal regression loss on the BM confidence map.

    Cells are bucketed by GT IoU into high (>0.7), medium (0.3, 0.7] and
    low (0, 0.3] groups; medium and low buckets are randomly subsampled to
    roughly the size of the high bucket before an MSE loss over the selected
    cells is computed.
    """
    high = (gt_iou_map > 0.7).float()
    medium = ((gt_iou_map <= 0.7) & (gt_iou_map > 0.3)).float()
    low = ((gt_iou_map <= 0.3) & (gt_iou_map > 0.)).float()
    low = low * mask

    n_high = torch.sum(high)
    n_medium = torch.sum(medium)
    n_low = torch.sum(low)

    # Keep each medium cell with probability n_high / n_medium.
    keep_ratio_m = n_high / n_medium
    sampled_medium = torch.Tensor(np.random.rand(*gt_iou_map.shape)).cuda()
    sampled_medium = medium * sampled_medium
    sampled_medium = (sampled_medium > (1. - keep_ratio_m)).float()

    # Keep each low cell with probability n_high / n_low.
    keep_ratio_l = n_high / n_low
    sampled_low = torch.Tensor(np.random.rand(*gt_iou_map.shape)).cuda()
    sampled_low = low * sampled_low
    sampled_low = (sampled_low > (1. - keep_ratio_l)).float()

    weights = high + sampled_medium + sampled_low
    # F.mse_loss averages over all entries; the sum over a ones tensor then
    # rescales by numel / sum(weights), matching the reference BMN code.
    loss = F.mse_loss(pred_score * weights, gt_iou_map * weights)
    loss = 0.5 * torch.sum(loss * torch.ones(*weights.shape).cuda()) / torch.sum(weights)
    return loss
def pem_cls_loss_func(pred_score, gt_iou_map, mask):
    """Proposal classification loss on the BM confidence map.

    Cells with GT IoU > 0.9 are positives; every other cell inside the valid
    mask is negative. Both groups are re-weighted by inverse frequency in a
    balanced binary log-likelihood.
    """
    positives = (gt_iou_map > 0.9).float()
    negatives = (gt_iou_map <= 0.9).float() * mask
    pos_count = torch.sum(positives)
    total = pos_count + torch.sum(negatives)
    balance = total / pos_count
    coef_neg = 0.5 * balance / (balance - 1)
    coef_pos = 0.5 * balance
    eps = 0.000001
    pos_term = coef_pos * torch.log(pred_score + eps) * positives
    neg_term = coef_neg * torch.log(1.0 - pred_score + eps) * negatives
    return -1 * torch.sum(pos_term + neg_term) / total
| 3,233 | 32 | 89 | py |
AOE-Net | AOE-Net-main/models/utils.py | import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
def masked_softmax(vector, mask, dim=-1, memory_efficient=False, mask_fill_value=-1e32):
    """A masked softmax module to correctly implement attention in Pytorch.
    Implementation adapted from: https://github.com/allenai/allennlp/blob/master/allennlp/nn/util.py
    ``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be
    masked. This performs a softmax on just the non-masked portions of ``vector``. Passing
    ``None`` in for the mask is also acceptable; you'll just get a regular softmax.
    ``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is
    broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will
    unsqueeze on dimension 1 until they match.
    If ``memory_efficient`` is set to true, masked positions are filled with a very large negative
    number so their probabilities are approximately 0. This is not exact, but works for most cases
    and consumes less memory. With ``memory_efficient`` false, a fully-masked input returns all
    zeros (which may produce NaN under cross-entropy); with it true, all elements are treated as
    equal instead.
    Args:
        vector (torch.tensor): The tensor to softmax.
        mask (torch.tensor): Nonzero entries mark positions to KEEP in the softmax.
        dim (int, optional): The dimension to softmax over. Defaults to -1.
        memory_efficient (bool, optional): Use the fill-based implementation. Defaults to False.
        mask_fill_value (optional): Fill value for masked positions when
            `memory_efficient` is True. Defaults to -1e32.
    Returns:
        torch.tensor: The masked softmaxed output
    """
    if mask is None:
        result = F.softmax(vector, dim=dim)
    else:
        mask = mask.float()
        while mask.dim() < vector.dim():
            mask = mask.unsqueeze(1)
        if not memory_efficient:
            # To limit numerical errors from large vector elements outside the mask, we zero these out.
            result = F.softmax(vector * mask, dim=dim)
            result = result * mask
            result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
        else:
            # masked_fill requires a boolean mask in current PyTorch; the old
            # `(1 - mask).byte()` form is rejected. `~mask.bool()` is True
            # exactly where the position should be masked out.
            masked_vector = vector.masked_fill(~mask.bool(), mask_fill_value)
            result = F.softmax(masked_vector, dim=dim)
    return result
def _get_clones(module, N):
    """Return an ``nn.ModuleList`` holding N independent deep copies of *module*."""
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))
def _get_activation_fn(activation):
    """Map an activation name to its functional form.

    Args:
        activation: Either "relu" or "gelu".

    Raises:
        RuntimeError: If the name is not a supported activation.
    """
    fn = {"relu": F.relu, "gelu": F.gelu}.get(activation)
    if fn is None:
        raise RuntimeError("activation should be relu/gelu, not %s." % activation)
    return fn
class TransformerEncoder(nn.Module):
    """A stack of N identical transformer encoder layers.

    Layer count, model width, feedforward width and head count are read from
    the ``cfg.MODEL`` section.

    Args:
        cfg: Config object providing ``MODEL.FEAT_DIM``, ``MODEL.TRANSFORMER_DIM``,
            ``MODEL.ATTENTION_HEADS`` and ``MODEL.ATTENTION_LAYERS``.
        drop_out: Dropout probability for every layer.
        activation: Feedforward activation name ("relu" or "gelu").
        norm: Optional normalization module applied to the final output.
    """

    def __init__(self, cfg, drop_out=0.1, activation='relu', norm=None):
        super(TransformerEncoder, self).__init__()
        d_model = cfg.MODEL.FEAT_DIM
        feedforward_dim = cfg.MODEL.TRANSFORMER_DIM
        n_heads = cfg.MODEL.ATTENTION_HEADS
        n_layers = cfg.MODEL.ATTENTION_LAYERS
        template = TransformerEncoderLayer(d_model, n_heads, feedforward_dim, drop_out, activation)
        self.layers = _get_clones(template, n_layers)
        self.num_layers = n_layers
        self.norm = norm

    def forward(self, src, mask=None, key_padding_mask=None):
        """Pass *src* through every encoder layer in turn.

        Args:
            src: The sequence to encode.
            mask: Optional attention mask for the src sequence.
            key_padding_mask: Optional per-batch mask of padded keys.
        """
        output = src
        for layer in self.layers:
            output = layer(output, src_mask=mask, key_padding_mask=key_padding_mask)
        if self.norm:
            output = self.norm(output)
        return output
class TransformerEncoderLayer(nn.Module):
    """One transformer encoder layer: self-attention plus a feedforward block.

    Based on "Attention Is All You Need" (Vaswani et al., 2017), with the
    addition that positions flagged by *key_padding_mask* are zeroed after
    both the attention and feedforward sub-blocks.

    Args:
        d_model: Number of expected input features.
        nhead: Number of attention heads.
        dim_feedforward: Hidden size of the feedforward sub-network.
        dropout: Dropout probability.
        activation: Feedforward activation, "relu" or "gelu".
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu"):
        super(TransformerEncoderLayer, self).__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Position-wise feedforward sub-network.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)

    def forward(self, src, src_mask=None, key_padding_mask=None):
        """Apply self-attention and feedforward with residual connections.

        Args:
            src: Input sequence.
            src_mask: Optional attention mask.
            key_padding_mask: Optional (batch, seq) mask; masked positions
                are zeroed in the intermediate and final outputs.
        """
        attn_out = self.self_attn(src, src, src, attn_mask=src_mask,
                                  key_padding_mask=key_padding_mask)[0]
        if key_padding_mask is not None:
            attn_out = attn_out.masked_fill(key_padding_mask.permute(1, 0).unsqueeze(-1), 0)
        src = self.norm1(src + self.dropout1(attn_out))
        if hasattr(self, "activation"):
            ff_out = self.linear2(self.dropout(self.activation(self.linear1(src))))
        else:  # for backward compatibility with states lacking `activation`
            ff_out = self.linear2(self.dropout(F.relu(self.linear1(src))))
        src = self.norm2(src + self.dropout2(ff_out))
        if key_padding_mask is not None:
            src = src.masked_fill(key_padding_mask.permute(1, 0).unsqueeze(-1), 0)
        return src
| 8,205 | 44.588889 | 133 | py |
AOE-Net | AOE-Net-main/models/model.py | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import *
from .bmn import BoundaryMatchingNetwork
class EventDetection(nn.Module):
    """AOE-Net event detection model.

    Optionally projects the environment / agent / object streams to hidden
    sizes, fuses per-box agent and object features with transformer encoders,
    merges the available streams into one context sequence per timestep, and
    feeds it to a boundary-matching module that outputs a confidence map and
    start/end probability sequences.
    """

    def __init__(self, cfg):
        super(EventDetection, self).__init__()
        # A stream gets a linear projection only when its hidden dim is configured.
        self.use_env_linear = cfg.MODEL.ENV_HIDDEN_DIM is not None
        self.use_agent_linear = cfg.MODEL.AGENT_HIDDEN_DIM is not None
        self.use_obj_linear = cfg.MODEL.OBJ_HIDDEN_DIM is not None
        if self.use_env_linear:
            self.env_linear = nn.Linear(cfg.MODEL.ENV_DIM, cfg.MODEL.ENV_HIDDEN_DIM)
        if self.use_agent_linear:
            self.agent_linear = nn.Linear(cfg.MODEL.AGENT_DIM, cfg.MODEL.AGENT_HIDDEN_DIM)
        if self.use_obj_linear:
            self.obj_linear = nn.Linear(cfg.MODEL.OBJ_DIM, cfg.MODEL.OBJ_HIDDEN_DIM)
        # Transformers fusing boxes within a stream / streams with each other.
        self.agents_fuser = TransformerEncoder(cfg)
        self.agents_environment_fuser = TransformerEncoder(cfg)
        self.objs_fuser = TransformerEncoder(cfg)  # fuses object boxes per timestep
        self.objs_environment_fuser = TransformerEncoder(cfg)  # NOTE(review): not referenced in forward()
        self.bmm_name = cfg.MODEL.BOUNDARY_MATCHING_MODULE
        if self.bmm_name == 'bmn':
            self.event_detector = BoundaryMatchingNetwork(cfg)
        self.attention_steps = cfg.TRAIN.ATTENTION_STEPS
        self.topk_hard_attention = cfg.MODEL.TOPK_AGENTS

    def fuse_agent(self, agent_feats, agent_masks, env_feats):
        """Fuse padded per-box agent features into one vector per timestep.

        A hard-attention mask keeps only boxes whose environment-biased L2
        norm (softmaxed over boxes) reaches an adaptive threshold of
        1 / #real-boxes; surviving boxes are fused by the agents transformer
        and mean-pooled.

        Returns a (bsz, tmprl_sz, ft_sz) tensor on the GPU.
        NOTE(review): forward() unpacks TWO values from this method but it
        returns a single tensor — confirm whether a "selected agents" return
        value was dropped from this implementation.
        """
        bsz, tmprl_sz, n_boxes, ft_sz = agent_feats.size()
        step = self.attention_steps
        # Bias each box with its timestep's environment feature before scoring.
        agent_env_feats = torch.unsqueeze(env_feats, 2) + agent_feats
        # Fuse all agents together at every temporal point
        smpl_bgn = 0
        agent_fused_features = torch.zeros(bsz, tmprl_sz, ft_sz).cuda()
        if n_boxes == 0:
            return agent_fused_features
        # Process the temporal axis in chunks of `attention_steps` timesteps.
        for smpl_bgn in range(0, tmprl_sz, step):
            smpl_end = smpl_bgn + step
            ae_feats = agent_env_feats[:, smpl_bgn:smpl_end].contiguous().view(-1, n_boxes, ft_sz) # bsz x n_boxes x feat_dim
            masks = agent_masks[:, smpl_bgn:smpl_end].contiguous().view(-1, n_boxes) # bsz x n_boxes
            #hard_attn_masks = masks
            l2_norm = torch.norm(ae_feats, dim=-1) # bsz x n_boxes
            l2_norm_softmax = masked_softmax(l2_norm, masks) # bsz x n_boxes
            # Adaptive threshold is 1 / number of bounding boxes:
            ada_thresh = torch.clamp(1. / torch.sum(masks, dim=-1, keepdim=True), 0., 1.)
            # Generate hard attention masks
            hard_attn_masks = l2_norm_softmax >= ada_thresh # bsz x n_boxes
            keep_mask = (torch.sum(hard_attn_masks, dim=-1) > 0) # bsz
            keep_indices = torch.masked_select(torch.arange(hard_attn_masks.size(0)).cuda(), keep_mask) # keep_mask
            fuser_input = agent_feats[:, smpl_bgn:smpl_end].contiguous().view(-1, n_boxes, ft_sz).permute(1, 0, 2) # n_boxes x bsz x feat_dim
            if len(keep_indices) > 0:
                fuser_input = fuser_input[:, keep_indices] # n_boxes x keep_mask x feat_dim
                hard_attn_masks = hard_attn_masks[keep_indices] # keep_mask x n_boxes
                padded_output = torch.zeros(bsz * (smpl_end - smpl_bgn), ft_sz).cuda() # bsz x feat_dim
                fuser_output = self.agents_fuser(fuser_input, key_padding_mask=~hard_attn_masks) # n_boxes x keep_mask x feat_dim
                #fuser_output = fuser_input * hard_attn_masks.permute(1, 0).contiguous().unsqueeze(-1)
                # Mean-pool over the boxes that survived the hard attention.
                fuser_output = torch.sum(fuser_output, dim=0) / torch.sum(hard_attn_masks, dim=-1, keepdim=True) # keep_mask x feat_dim
                padded_output[keep_indices] = fuser_output
                agent_fused_features[:, smpl_bgn:smpl_end] = padded_output.view(bsz, -1, ft_sz)
        return agent_fused_features

    def fuse_obj(self, obj_feats, obj_masks, env_feats):
        """Fuse padded per-box object features into one vector per timestep.

        Same hard-attention scheme as fuse_agent, but using the objects
        transformer. Returns a (bsz, tmprl_sz, ft_sz) tensor on the GPU.
        NOTE(review): forward() unpacks TWO values from this method but it
        returns a single tensor — see fuse_agent.
        """
        bsz, tmprl_sz, n_boxes, ft_sz = obj_feats.size()
        step = self.attention_steps
        # Bias each box with its timestep's environment feature before scoring.
        obj_env_feats = torch.unsqueeze(env_feats, 2) + obj_feats
        # Fuse all agents together at every temporal point
        smpl_bgn = 0
        obj_fused_features = torch.zeros(bsz, tmprl_sz, ft_sz).cuda()
        if n_boxes == 0:
            return obj_fused_features
        # Process the temporal axis in chunks of `attention_steps` timesteps.
        for smpl_bgn in range(0, tmprl_sz, step):
            smpl_end = smpl_bgn + step
            ae_feats = obj_env_feats[:, smpl_bgn:smpl_end].contiguous().view(-1, n_boxes, ft_sz) # bsz x n_boxes x feat_dim
            masks = obj_masks[:, smpl_bgn:smpl_end].contiguous().view(-1, n_boxes) # bsz x n_boxes
            #hard_attn_masks = masks
            l2_norm = torch.norm(ae_feats, dim=-1) # bsz x n_boxes
            l2_norm_softmax = masked_softmax(l2_norm, masks) # bsz x n_boxes
            # Adaptive threshold is 1 / number of bounding boxes:
            ada_thresh = torch.clamp(1. / torch.sum(masks, dim=-1, keepdim=True), 0., 1.)
            # Generate hard attention masks
            hard_attn_masks = l2_norm_softmax >= ada_thresh # bsz x n_boxes
            keep_mask = (torch.sum(hard_attn_masks, dim=-1) > 0) # bsz
            keep_indices = torch.masked_select(torch.arange(hard_attn_masks.size(0)).cuda(), keep_mask) # keep_mask
            fuser_input = obj_feats[:, smpl_bgn:smpl_end].contiguous().view(-1, n_boxes, ft_sz).permute(1, 0, 2) # n_boxes x bsz x feat_dim
            if len(keep_indices) > 0:
                fuser_input = fuser_input[:, keep_indices] # n_boxes x keep_mask x feat_dim
                hard_attn_masks = hard_attn_masks[keep_indices] # keep_mask x n_boxes
                padded_output = torch.zeros(bsz * (smpl_end - smpl_bgn), ft_sz).cuda() # bsz x feat_dim
                fuser_output = self.objs_fuser(fuser_input, key_padding_mask=~hard_attn_masks) # n_boxes x keep_mask x feat_dim
                #fuser_output = fuser_input * hard_attn_masks.permute(1, 0).contiguous().unsqueeze(-1)
                # Mean-pool over the boxes that survived the hard attention.
                fuser_output = torch.sum(fuser_output, dim=0) / torch.sum(hard_attn_masks, dim=-1, keepdim=True) # keep_mask x feat_dim
                padded_output[keep_indices] = fuser_output
                obj_fused_features[:, smpl_bgn:smpl_end] = padded_output.view(bsz, -1, ft_sz)
        return obj_fused_features

    def forward(self, env_features=None, agent_features=None, agent_masks=None, obj_features=None, obj_masks=None):
        """Detect events from any enabled combination of the three streams.

        Single-stream inputs bypass cross-stream fusion; two- or three-stream
        inputs are stacked per timestep and merged by a transformer before
        the boundary-matching detector.
        """
        if self.use_env_linear and env_features is not None:
            env_features = self.env_linear(env_features)
        if self.use_agent_linear and agent_features is not None:
            agent_features = self.agent_linear(agent_features)
        if self.use_obj_linear and obj_features is not None:
            obj_features = self.obj_linear(obj_features)
        if agent_features is None and obj_features is None:
            # Environment-only: (bsz, T, F) -> (bsz, F, T) for the detector.
            return self.event_detector(env_features.permute(0, 2, 1))
        # NOTE(review): fuse_agent/fuse_obj return a single tensor, yet two
        # values are unpacked here; both are also called even when one stream
        # is None (fuse_* would fail on None.size()). Confirm intended API.
        agent_fused_features, selected_agents = self.fuse_agent(agent_features, agent_masks, env_features)
        obj_fused_features, selected_objs = self.fuse_obj(obj_features, obj_masks, env_features)
        if env_features is None and obj_features is None:
            return self.event_detector(agent_fused_features.permute(0, 2, 1))
        if env_features is None and agent_features is None:
            return self.event_detector(obj_fused_features.permute(0, 2, 1))
        ### Stack 2 fts 3 case
        if obj_features is None:
            # Environment + agents: stack the two streams per timestep and fuse.
            env_agent_obj_cat_features = torch.stack([env_features, agent_fused_features], dim=2)
            bsz, tmprl_sz, ft_sz = env_features.shape
            step = self.attention_steps
            smpl_bgn = 0
            context_features = torch.zeros(bsz, tmprl_sz, ft_sz).cuda()
            for smpl_bgn in range(0, tmprl_sz, step):
                smpl_end = smpl_bgn + step
                fuser_input = env_agent_obj_cat_features[:, smpl_bgn:smpl_end].contiguous()
                fuser_input = fuser_input.view(-1, 2, ft_sz).permute(1, 0, 2)
                fuser_output = self.agents_environment_fuser(fuser_input)
                fuser_output = torch.mean(fuser_output, dim=0)
                context_features[:, smpl_bgn:smpl_end] = fuser_output.view(bsz, -1, ft_sz)
            return self.event_detector(context_features.permute(0, 2, 1))
        if agent_features is None:
            # Environment + objects: same two-stream fusion.
            env_agent_obj_cat_features = torch.stack([env_features, obj_fused_features], dim=2)
            bsz, tmprl_sz, ft_sz = env_features.shape
            step = self.attention_steps
            smpl_bgn = 0
            context_features = torch.zeros(bsz, tmprl_sz, ft_sz).cuda()
            for smpl_bgn in range(0, tmprl_sz, step):
                smpl_end = smpl_bgn + step
                fuser_input = env_agent_obj_cat_features[:, smpl_bgn:smpl_end].contiguous()
                fuser_input = fuser_input.view(-1, 2, ft_sz).permute(1, 0, 2)
                fuser_output = self.agents_environment_fuser(fuser_input)
                fuser_output = torch.mean(fuser_output, dim=0)
                context_features[:, smpl_bgn:smpl_end] = fuser_output.view(bsz, -1, ft_sz)
            return self.event_detector(context_features.permute(0, 2, 1))
        if env_features is None:
            # Agents + objects: same two-stream fusion.
            env_agent_obj_cat_features = torch.stack([agent_fused_features, obj_fused_features], dim=2)
            bsz, tmprl_sz, ft_sz = agent_fused_features.shape
            step = self.attention_steps
            smpl_bgn = 0
            context_features = torch.zeros(bsz, tmprl_sz, ft_sz).cuda()
            for smpl_bgn in range(0, tmprl_sz, step):
                smpl_end = smpl_bgn + step
                fuser_input = env_agent_obj_cat_features[:, smpl_bgn:smpl_end].contiguous()
                fuser_input = fuser_input.view(-1, 2, ft_sz).permute(1, 0, 2)
                fuser_output = self.agents_environment_fuser(fuser_input)
                fuser_output = torch.mean(fuser_output, dim=0)
                context_features[:, smpl_bgn:smpl_end] = fuser_output.view(bsz, -1, ft_sz)
            return self.event_detector(context_features.permute(0, 2, 1))
        ### stack all 3 e a o
        # All three streams available: stack env + agents + objects per timestep.
        env_agent_obj_cat_features = torch.stack([env_features, agent_fused_features,obj_fused_features], dim=2)
        bsz, tmprl_sz, ft_sz = env_features.shape
        step = self.attention_steps
        smpl_bgn = 0
        context_features = torch.zeros(bsz, tmprl_sz, ft_sz).cuda()
        for smpl_bgn in range(0, tmprl_sz, step):
            smpl_end = smpl_bgn + step
            fuser_input = env_agent_obj_cat_features[:, smpl_bgn:smpl_end].contiguous()
            fuser_input = fuser_input.view(-1, 3, ft_sz).permute(1, 0, 2)
            fuser_output = self.agents_environment_fuser(fuser_input)
            fuser_output = torch.mean(fuser_output, dim=0)
            context_features[:, smpl_bgn:smpl_end] = fuser_output.view(bsz, -1, ft_sz)
        # NOTE(review): selected_agents/selected_objs come from the unpacking
        # issue above, and are converted with torch.tensor twice (here and in
        # the return) — confirm what these outputs are meant to carry.
        selected_agents = torch.tensor(selected_agents).cuda()
        selected_objs = torch.tensor(selected_objs).cuda()
        conf_map, start_map, end_map = self.event_detector(context_features.permute(0, 2, 1))
        return conf_map, start_map, end_map, torch.tensor(selected_agents).cuda(), torch.tensor(selected_objs).cuda()
| 11,169 | 47.146552 | 142 | py |
AOE-Net | AOE-Net-main/models/bmn.py | # -*- coding: utf-8 -*-
import math
import numpy as np
import torch
import torch.nn as nn
class BoundaryMatchingNetwork(nn.Module):
    """Boundary-Matching Network (BMN) head.

    Consumes a (batch, feat_dim, temporal_dim) feature sequence and produces
    a 2-channel confidence map over (duration, start) proposal cells plus
    per-timestep start and end probability sequences.
    """

    def __init__(self, cfg):
        super(BoundaryMatchingNetwork, self).__init__()
        # Fraction of the proposal length sampled beyond each boundary.
        self.prop_boundary_ratio = cfg.BMN.PROP_BOUNDARY_RATIO
        # Number of temporal sample points per proposal, and sub-samples per bin.
        self.num_sample = cfg.BMN.NUM_SAMPLES
        self.num_sample_perbin = cfg.BMN.NUM_SAMPLES_PER_BIN
        self.temporal_dim = cfg.DATA.TEMPORAL_DIM
        self.max_duration = cfg.DATA.MAX_DURATION
        self.feat_dim = cfg.MODEL.FEAT_DIM
        self.hidden_dim_1d = cfg.MODEL.HIDDEN_DIM_1D
        self.hidden_dim_2d = cfg.MODEL.HIDDEN_DIM_2D
        self.hidden_dim_3d = cfg.MODEL.HIDDEN_DIM_3D
        # Fixed (requires_grad=False) interpolation weights for the BM layer.
        self.sample_mask = self._get_interp1d_mask()
        # Base Module
        self.x_1d_b = nn.Sequential(
            nn.Conv1d(self.feat_dim, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
            nn.ReLU(inplace=True),
            nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
            nn.ReLU(inplace=True)
        )
        # Temporal Evaluation Module: per-timestep start/end probabilities.
        self.x_1d_s = nn.Sequential(
            nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
            nn.ReLU(inplace=True),
            nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1),
            nn.Sigmoid()
        )
        self.x_1d_e = nn.Sequential(
            nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1, groups=4),
            nn.ReLU(inplace=True),
            nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1),
            nn.Sigmoid()
        )
        # Proposal Evaluation Module: confidence map over (duration, start).
        self.x_1d_p = nn.Sequential(
            nn.Conv1d(self.hidden_dim_1d, self.hidden_dim_1d, kernel_size=3, padding=1),
            nn.ReLU(inplace=True)
        )
        self.x_3d_p = nn.Sequential(
            nn.Conv3d(self.hidden_dim_1d, self.hidden_dim_3d, kernel_size=(self.num_sample, 1, 1)),
            nn.ReLU(inplace=True)
        )
        self.x_2d_p = nn.Sequential(
            nn.Conv2d(self.hidden_dim_3d, self.hidden_dim_2d, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(self.hidden_dim_2d, self.hidden_dim_2d, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(self.hidden_dim_2d, self.hidden_dim_2d, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(self.hidden_dim_2d, 2, kernel_size=1),
            nn.Sigmoid()
        )

    def forward(self, x):
        """Run BMN on x of shape (batch, feat_dim, temporal_dim).

        Returns:
            (confidence_map, start, end): a (batch, 2, max_duration,
            temporal_dim) confidence map plus (batch, temporal_dim) start and
            end probability sequences.
        """
        base_feature = self.x_1d_b(x)
        start = self.x_1d_s(base_feature).squeeze(1)
        end = self.x_1d_e(base_feature).squeeze(1)
        confidence_map = self.x_1d_p(base_feature)
        # Sample num_sample points for every (duration, start) proposal cell.
        confidence_map = self._boundary_matching_layer(confidence_map)
        confidence_map = self.x_3d_p(confidence_map).squeeze(2)
        confidence_map = self.x_2d_p(confidence_map)
        return confidence_map, start, end

    def _boundary_matching_layer(self, x):
        """Expand (batch, C, T) features into per-proposal samples via the
        precomputed interpolation mask.

        Returns a (batch, C, num_sample, max_duration, temporal_dim) tensor.
        """
        input_size = x.size()
        out = torch.matmul(x, self.sample_mask).reshape(
            input_size[0],
            input_size[1],
            self.num_sample,
            self.max_duration,
            self.temporal_dim
        )
        return out

    def _get_interp1d_bin_mask(self, seg_xmin, seg_xmax, tscale, num_sample, num_sample_perbin):
        # generate sample mask for a boundary-matching pair
        plen = float(seg_xmax - seg_xmin)
        plen_sample = plen / (num_sample * num_sample_perbin - 1.0)
        # Evenly spaced sample positions over [seg_xmin, seg_xmax].
        total_samples = [
            seg_xmin + plen_sample * ii
            for ii in range(num_sample * num_sample_perbin)
        ]
        p_mask = []
        for idx in range(num_sample):
            bin_samples = total_samples[idx * num_sample_perbin:(idx + 1) * num_sample_perbin]
            bin_vector = np.zeros([tscale])
            for sample in bin_samples:
                # Linear interpolation: split each sample's weight between its
                # two neighbouring integer timesteps (when inside [0, tscale)).
                sample_upper = math.ceil(sample)
                sample_decimal, sample_down = math.modf(sample)
                if int(sample_down) <= (tscale - 1) and int(sample_down) >= 0:
                    bin_vector[int(sample_down)] += 1 - sample_decimal
                if int(sample_upper) <= (tscale - 1) and int(sample_upper) >= 0:
                    bin_vector[int(sample_upper)] += sample_decimal
            bin_vector = 1.0 / num_sample_perbin * bin_vector
            p_mask.append(bin_vector)
        # Shape (tscale, num_sample).
        p_mask = np.stack(p_mask, axis=1)
        return p_mask

    def _get_interp1d_mask(self):
        # generate sample mask for each point in Boundary-Matching Map
        mask_mat = []
        for start_index in range(self.temporal_dim):
            mask_mat_vector = []
            for duration_index in range(self.max_duration):
                if start_index + duration_index < self.temporal_dim:
                    p_xmin = start_index
                    p_xmax = start_index + duration_index
                    center_len = float(p_xmax - p_xmin) + 1
                    # Extend the sampled window past both boundaries by
                    # prop_boundary_ratio * proposal length.
                    sample_xmin = p_xmin - center_len * self.prop_boundary_ratio
                    sample_xmax = p_xmax + center_len * self.prop_boundary_ratio
                    p_mask = self._get_interp1d_bin_mask(
                        sample_xmin, sample_xmax, self.temporal_dim, self.num_sample,
                        self.num_sample_perbin)
                else:
                    # Proposal would run past the video end: all-zero mask.
                    p_mask = np.zeros([self.temporal_dim, self.num_sample])
                mask_mat_vector.append(p_mask)
            mask_mat_vector = np.stack(mask_mat_vector, axis=2)
            mask_mat.append(mask_mat_vector)
        mask_mat = np.stack(mask_mat, axis=3)
        mask_mat = mask_mat.astype(np.float32)
        # Flattened to (temporal_dim, num_sample*max_duration*temporal_dim)
        # so the BM layer is a single matmul; fixed, not trained.
        return nn.Parameter(torch.Tensor(mask_mat).view(self.temporal_dim, -1), requires_grad=False)
| 5,810 | 41.416058 | 100 | py |
flair | flair-master/collect_env.py | import torch
import transformers
import flair
def main():
    """Report the versions of Flair's core dependencies plus GPU availability."""
    report = [
        "#### Versions:",
        f"##### Flair\n{flair.__version__}",
        f"##### Pytorch\n{torch.__version__}",
        f"##### Transformers\n{transformers.__version__}",
        f"#### GPU\n{torch.cuda.is_available()}",
    ]
    print("\n".join(report))
if __name__ == "__main__":
main()
| 338 | 18.941176 | 60 | py |
flair | flair-master/examples/ner/run_ner.py | import inspect
import json
import logging
import os
import sys
from dataclasses import dataclass, field
import torch
from transformers import HfArgumentParser
import flair
from flair import set_seed
from flair.embeddings import TransformerWordEmbeddings
from flair.models import SequenceTagger
from flair.trainers import ModelTrainer
logger = logging.getLogger("flair")
logger.setLevel(level="INFO")
@dataclass
class ModelArguments:
    """Arguments controlling the transformer backbone and tagger architecture."""

    # Hugging Face model identifier or local checkpoint path (required).
    model_name_or_path: str = field(
        metadata={"help": "The model checkpoint for weights initialization."},
    )
    # Comma-separated transformer layer indices to use ("-1" = last layer only).
    layers: str = field(default="-1", metadata={"help": "Layers to be fine-tuned."})
    # How subtoken embeddings are pooled into one word embedding (e.g. "first").
    subtoken_pooling: str = field(
        default="first",
        metadata={"help": "Subtoken pooling strategy used for fine-tuned."},
    )
    # Hidden size of the SequenceTagger head.
    hidden_size: int = field(default=256, metadata={"help": "Hidden size for NER model."})
    # Whether to add a CRF decoding layer on top of the tagger.
    use_crf: bool = field(default=False, metadata={"help": "Whether to use a CRF on-top or not."})
@dataclass
class TrainingArguments:
    """Hyperparameters for the fine-tuning loop."""

    num_epochs: int = field(default=10, metadata={"help": "The number of training epochs."})
    batch_size: int = field(default=8, metadata={"help": "Batch size used for training."})
    # If smaller than batch_size, each mini-batch is processed in chunks of this size
    # (gradient accumulation) to reduce memory usage.
    mini_batch_chunk_size: int = field(
        default=1,
        metadata={"help": "If smaller than batch size, batches will be chunked."},
    )
    learning_rate: float = field(default=5e-05, metadata={"help": "Learning rate"})
    seed: int = field(default=42, metadata={"help": "Seed used for reproducible fine-tuning results."})
    device: str = field(default="cuda:0", metadata={"help": "CUDA device string."})
    weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for optimizer."})
    # Passed through to ModelTrainer.fine_tune (e.g. "none", "cpu", "gpu").
    embeddings_storage_mode: str = field(default="none", metadata={"help": "Defines embedding storage method."})
@dataclass
class FlertArguments:
    """Options for the FLERT (document-level context) fine-tuning approach."""

    # Number of context tokens around each sentence (0 disables FLERT context).
    context_size: int = field(default=0, metadata={"help": "Context size when using FLERT approach."})
    respect_document_boundaries: bool = field(
        default=False,
        metadata={"help": "Whether to respect document boundaries or not when using FLERT."},
    )
@dataclass
class DataArguments:
    """Dataset selection and output location."""

    # Name of a Flair sequence-labeling dataset class, e.g. "NER_ENGLISH_WNUT_2020".
    dataset_name: str = field(metadata={"help": "Flair NER dataset name."})
    # JSON-encoded keyword arguments forwarded to the dataset constructor.
    dataset_arguments: str = field(default="", metadata={"help": "Dataset arguments for Flair NER dataset."})
    output_dir: str = field(
        default="resources/taggers/ner",
        metadata={"help": "Defines output directory for final fine-tuned model."},
    )
def get_flair_corpus(data_args):
    """Resolve a Flair NER corpus class by name and instantiate it.

    The candidate classes are discovered dynamically from
    flair.datasets.sequence_labeling (names starting with NER/CONLL/WNUT);
    optional constructor arguments are read from data_args.dataset_arguments
    as a JSON object. Raises ValueError for an unknown dataset name.
    """
    ner_task_mapping = {
        name: obj
        for name, obj in inspect.getmembers(flair.datasets.sequence_labeling)
        if inspect.isclass(obj) and name.startswith(("NER", "CONLL", "WNUT"))
    }
    dataset_name = data_args.dataset_name
    dataset_args = json.loads(data_args.dataset_arguments) if data_args.dataset_arguments else {}
    if dataset_name not in ner_task_mapping:
        raise ValueError(f"Dataset name {dataset_name} is not a valid Flair datasets name!")
    return ner_task_mapping[dataset_name](**dataset_args)
def main():
    """Fine-tune a Flair SequenceTagger for NER from command-line or JSON arguments."""
    parser = HfArgumentParser((ModelArguments, TrainingArguments, FlertArguments, DataArguments))
    # a single .json argument means: read all dataclass fields from that file
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        (
            model_args,
            training_args,
            flert_args,
            data_args,
        ) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (
            model_args,
            training_args,
            flert_args,
            data_args,
        ) = parser.parse_args_into_dataclasses()
    # fix all RNG seeds and select the compute device before touching data
    set_seed(training_args.seed)
    flair.device = training_args.device
    corpus = get_flair_corpus(data_args)
    logger.info(corpus)
    tag_type: str = "ner"
    tag_dictionary = corpus.make_label_dictionary(tag_type, add_unk=False)
    logger.info(tag_dictionary)
    # transformer word embeddings, optionally with FLERT document context
    embeddings = TransformerWordEmbeddings(
        model=model_args.model_name_or_path,
        layers=model_args.layers,
        subtoken_pooling=model_args.subtoken_pooling,
        fine_tune=True,
        use_context=flert_args.context_size,
        respect_document_boundaries=flert_args.respect_document_boundaries,
    )
    # RNN and reprojection are disabled: the transformer is fine-tuned directly
    tagger = SequenceTagger(
        hidden_size=model_args.hidden_size,
        embeddings=embeddings,
        tag_dictionary=tag_dictionary,
        tag_type=tag_type,
        use_crf=model_args.use_crf,
        use_rnn=False,
        reproject_embeddings=False,
    )
    trainer = ModelTrainer(tagger, corpus)
    trainer.fine_tune(
        data_args.output_dir,
        learning_rate=training_args.learning_rate,
        mini_batch_size=training_args.batch_size,
        mini_batch_chunk_size=training_args.mini_batch_chunk_size,
        max_epochs=training_args.num_epochs,
        embeddings_storage_mode=training_args.embeddings_storage_mode,
        weight_decay=training_args.weight_decay,
    )
    # persist the argument dataclasses next to the trained model for reproducibility
    torch.save(model_args, os.path.join(data_args.output_dir, "model_args.bin"))
    torch.save(training_args, os.path.join(data_args.output_dir, "training_args.bin"))
    # finally, print model card for information
    tagger.print_model_card()
if __name__ == "__main__":
main()
| 5,261 | 32.303797 | 112 | py |
flair | flair-master/flair/optim.py | import logging
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau, _LRScheduler
from torch.optim.optimizer import required # type: ignore[attr-defined]
log = logging.getLogger("flair")
class SGDW(Optimizer):
    r"""Implements stochastic gradient descent (optionally with momentum) with decoupled weight decay.

    Implementation from the paper `Fixing Weight Decay Regularization in Adam`_.
    Unlike plain L2 regularization, the weight decay is decoupled from the
    gradient: the parameter is shrunk multiplicatively before the lr update.
    Nesterov momentum is based on the formula from
    `On the importance of initialization and momentum in deep learning`__.

    Args:
    ----
    params (iterable): iterable of parameters to optimize or dicts defining
        parameter groups
    lr (float): learning rate
    momentum (float, optional): momentum factor (default: 0)
    weight_decay (float, optional): weight decay factor (default: 0)
    dampening (float, optional): dampening for momentum (default: 0)
    nesterov (bool, optional): enables Nesterov momentum (default: False)

    .. _Fixing Weight Decay Regularization in Adam:
        https://arxiv.org/abs/1711.05101

    Example:
    -------
    >>> optimizer = torch.optim.SGDW(model.parameters(), lr=0.1, momentum=0.9,
                    weight_decay=1e-5)
    >>> optimizer.zero_grad()
    >>> loss_fn(model(input), target).backward()
    >>> optimizer.step()

    __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf

    .. note::
        The implementation of SGD with Momentum/Nesterov subtly differs from
        Sutskever et. al. and implementations in some other frameworks.
        Considering the specific case of Momentum, the update can be written as

        .. math::
            v = \rho * v + g \\
            p = p - lr * v

        where p, g, v and :math:`\rho` denote the parameters, gradient,
        velocity, and momentum respectively.
        This is in contrast to Sutskever et. al. and
        other frameworks which employ an update of the form

        .. math::
            v = \rho * v + lr * g \\
            p = p - v

        The Nesterov version is analogously modified.
    """

    def __init__(
        self,
        params,
        lr=required,
        momentum=0,
        dampening=0,
        weight_decay=0,
        nesterov=False,
    ) -> None:
        if lr is not required and lr < 0.0:
            raise ValueError(f"Invalid learning rate: {lr}")
        if momentum < 0.0:
            raise ValueError(f"Invalid momentum value: {momentum}")
        if weight_decay < 0.0:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        defaults = {
            "lr": lr,
            "momentum": momentum,
            "dampening": dampening,
            "weight_decay": weight_decay,
            "nesterov": nesterov,
        }
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super().__init__(params, defaults)

    def __setstate__(self, state):
        # older checkpoints may lack the 'nesterov' key in their param groups
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("nesterov", False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Parameters
        ----------
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.

        Returns:
        -------
        loss (float, optional): The loss if closure was set
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group["weight_decay"]
            momentum = group["momentum"]
            dampening = group["dampening"]
            nesterov = group["nesterov"]
            for p in group["params"]:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if momentum != 0:
                    param_state = self.state[p]
                    if "momentum_buffer" not in param_state:
                        # first step: buffer starts as the raw gradient (no dampening)
                        buf = param_state["momentum_buffer"] = torch.zeros_like(p.data)
                        buf.mul_(momentum).add_(d_p)
                    else:
                        buf = param_state["momentum_buffer"]
                        # NOTE: the deprecated add_(Number, Tensor) overload was
                        # replaced by the alpha= keyword form (required on modern torch)
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    d_p = d_p.add(buf, alpha=momentum) if nesterov else buf
                if weight_decay != 0:
                    # decoupled weight decay: p <- p - weight_decay * p == p * (1 - weight_decay)
                    p.data.mul_(1 - weight_decay)
                p.data.add_(d_p, alpha=-group["lr"])
        return loss
class ExpAnnealLR(_LRScheduler):
    """Exponentially anneal each parameter group's lr from its initial value to end_lr.

    Args:
    ----
    optimizer (Optimizer): Wrapped optimizer.
    end_lr (float): The final learning rate.
    iterations (int): The number of iterations over which to anneal the
        learning rate.
    last_epoch (int): The index of the last iteration. Default: -1.
    """

    def __init__(self, optimizer, end_lr, iterations, last_epoch=-1) -> None:
        self.end_lr = end_lr
        self.iterations = iterations
        super().__init__(optimizer, last_epoch=last_epoch)

    def get_lr(self):
        # geometric interpolation: lr(t) = base * (end/base) ** (t / iterations)
        progress = (self.last_epoch + 1) / self.iterations
        return [base * (self.end_lr / base) ** progress for base in self.base_lrs]
class LinearSchedulerWithWarmup(LambdaLR):
    """Linear lr warmup from 0 to the initial lr, then linear decay back to 0.

    Implemented as a LambdaLR: the initial lr is multiplied by a step-dependent
    factor after every scheduler.step() call.

    Args:
    ----
    optimizer (Optimizer): Wrapped optimizer.
    num_train_steps (int): total number of training steps (number of batches * epochs).
    num_warmup_steps (int): number of training steps for learning rate warmup.
    last_epoch (int): The index of the last iteration. Default: -1. The scheduler
        will simply restart when resuming training from a checkpoint.
    """

    def __init__(self, optimizer, num_train_steps, num_warmup_steps, last_epoch=-1) -> None:
        def schedule(step: int):
            # ramp up linearly during warmup ...
            if step < num_warmup_steps:
                return step / max(1, num_warmup_steps)
            # ... then decay linearly to zero over the remaining steps
            remaining = num_train_steps - step
            decay_span = max(1, num_train_steps - num_warmup_steps)
            return max(0.0, remaining / decay_span)

        super().__init__(optimizer, lr_lambda=schedule, last_epoch=last_epoch)
class ReduceLRWDOnPlateau(ReduceLROnPlateau):
    """Reduce learning rate and weight decay when a metric has stopped improving.

    Models often benefit from reducing the learning rate by
    a factor of 2-10 once learning stagnates. This scheduler reads a metric
    quantity and if no improvement is seen for a 'patience' number
    of epochs, the learning rate and weight decay factor is reduced for
    optimizers that implement the the weight decay method from the paper
    `Fixing Weight Decay Regularization in Adam`_.

    .. _Fixing Weight Decay Regularization in Adam:
        https://arxiv.org/abs/1711.05101

    Args:
    ----
    optimizer (Optimizer): Wrapped optimizer.
    mode (str): One of `min`, `max`. In `min` mode, lr will
        be reduced when the quantity monitored has stopped
        decreasing; in `max` mode it will be reduced when the
        quantity monitored has stopped increasing. Default: 'min'.
    factor (float): Factor by which the learning rate will be
        reduced. new_lr = lr * factor. Default: 0.1.
    patience (int): Number of epochs with no improvement after
        which learning rate will be reduced. For example, if
        `patience = 2`, then we will ignore the first 2 epochs
        with no improvement, and will only decrease the LR after the
        3rd epoch if the loss still hasn't improved then.
        Default: 10.
    verbose (bool): If ``True``, prints a message to stdout for
        each update. Default: ``False``.
    threshold (float): Threshold for measuring the new optimum,
        to only focus on significant changes. Default: 1e-4.
    threshold_mode (str): One of `rel`, `abs`. In `rel` mode,
        dynamic_threshold = best * ( 1 + threshold ) in 'max'
        mode or best * ( 1 - threshold ) in `min` mode.
        In `abs` mode, dynamic_threshold = best + threshold in
        `max` mode or best - threshold in `min` mode. Default: 'rel'.
    cooldown (int): Number of epochs to wait before resuming
        normal operation after lr has been reduced. Default: 0.
    min_lr (float or list): A scalar or a list of scalars. A
        lower bound on the learning rate of all param groups
        or each group respectively. Default: 0.
    eps (float): Minimal decay applied to lr. If the difference
        between new and old lr is smaller than eps, the update is
        ignored. Default: 1e-8.

    Example:
    -------
    >>> optimizer = AdamW(model.parameters(), lr=0.1, weight_decay=1e-3)
    >>> scheduler = ReduceLRWDOnPlateau(optimizer, 'min')
    >>> for epoch in range(10):
    >>>     train(...)
    >>>     val_loss = validate(...)
    >>>     # Note that step should be called after validate()
    >>>     scheduler.step(val_loss)
    """

    def step(self, metrics, epoch=None):
        # Same plateau logic as the parent class, but a plateau additionally
        # reduces the weight decay of every param group (via _reduce_weight_decay).
        current = metrics
        if epoch is None:
            epoch = self.last_epoch = self.last_epoch + 1
        self.last_epoch = epoch
        if self.is_better(current, self.best):
            self.best = current
            self.num_bad_epochs = 0
        else:
            self.num_bad_epochs += 1
        if self.in_cooldown:
            self.cooldown_counter -= 1
            self.num_bad_epochs = 0  # ignore any bad epochs in cooldown
        if self.num_bad_epochs > self.patience:
            # plateau detected: shrink lr AND weight decay, then enter cooldown
            self._reduce_lr(epoch)
            self._reduce_weight_decay(epoch)
            self.cooldown_counter = self.cooldown
            self.num_bad_epochs = 0

    def _reduce_weight_decay(self, epoch):
        # Shrink each group's weight decay by self.factor, bounded below by
        # min_lrs (the same lower bounds used for the learning rate), and skip
        # updates smaller than eps — mirroring the parent's _reduce_lr.
        for i, param_group in enumerate(self.optimizer.param_groups):
            if param_group["weight_decay"] != 0:
                old_weight_decay = float(param_group["weight_decay"])
                new_weight_decay = max(old_weight_decay * self.factor, self.min_lrs[i])
                if old_weight_decay - new_weight_decay > self.eps:
                    param_group["weight_decay"] = new_weight_decay
                    if self.verbose:
                        log.info(f"Epoch {epoch}: reducing weight decay factor of group {i} to {new_weight_decay:.4e}.")
| 11,041 | 38.435714 | 120 | py |
flair | flair-master/flair/inference_utils.py | import logging
import pickle
import re
import shutil
import sqlite3
from pathlib import Path
from typing import Union
import numpy as np
import torch
from tqdm import tqdm
import flair
from flair.embeddings import WordEmbeddings
# this is the default init size of a lmdb database for embeddings
DEFAULT_MAP_SIZE = 100 * 1024 * 1024 * 1024
logger = logging.getLogger("flair")
class WordEmbeddingsStore:
    """Class to simulate a WordEmbeddings class from flair.

    Run this to generate a headless (without word embeddings) model as well a stored word embeddings:

    >>> from flair.inference_utils import WordEmbeddingsStore
    >>> from flair.models import SequenceTagger
    >>> import pickle
    >>> tagger = SequenceTagger.load("multi-ner-fast")
    >>> WordEmbeddingsStore.create_stores(tagger)
    >>> pickle.dump(tagger, open("multi-ner-fast-headless.pickle", "wb"))

    The same but using LMDB as memory database:

    >>> from flair.inference_utils import WordEmbeddingsStore
    >>> from flair.models import SequenceTagger
    >>> import pickle
    >>> tagger = SequenceTagger.load("multi-ner-fast")
    >>> WordEmbeddingsStore.create_stores(tagger, backend='lmdb')
    >>> pickle.dump(tagger, open("multi-ner-fast-headless.pickle", "wb"))

    Then this can be used as follows:

    >>> from flair.data import Sentence
    >>> tagger = pickle.load(open("multi-ner-fast-headless.pickle", "rb"))
    >>> WordEmbeddingsStore.load_stores(tagger)
    >>> text = "Schade um den Ameisenbären. Lukas Bärfuss veröffentlicht Erzählungen aus zwanzig Jahren."
    >>> sentence = Sentence(text)
    >>> tagger.predict(sentence)
    >>> print(sentence.get_spans('ner'))

    The same but using LMDB as memory database:

    >>> from flair.data import Sentence
    >>> tagger = pickle.load(open("multi-ner-fast-headless.pickle", "rb"))
    >>> WordEmbeddingsStore.load_stores(tagger, backend='lmdb')
    >>> text = "Schade um den Ameisenbären. Lukas Bärfuss veröffentlicht Erzählungen aus zwanzig Jahren."
    >>> sentence = Sentence(text)
    >>> tagger.predict(sentence)
    >>> print(sentence.get_spans('ner'))
    """

    def __init__(self, embedding: WordEmbeddings, backend="sqlite", verbose=True) -> None:
        """Instantiates the WordEmbeddingsStore.

        :param embedding: Flair WordEmbeddings instance.
        :param backend: cache database backend name e.g ``'sqlite'``, ``'lmdb'``.
            Default value is ``'sqlite'``.
        :param verbose: If `True` print information on standard output
        """
        self.items = ""
        # get db filename from embedding name
        self.name = embedding.name
        self.store_path: Path = WordEmbeddingsStore._get_store_path(embedding, backend)
        if verbose:
            logger.info(f"store filename: {self.store_path!s}")
        self.backend: Union[WordEmbeddings, WordEmbeddingsStoreBackend]
        if backend == "sqlite":
            self.backend = SqliteWordEmbeddingsStoreBackend(embedding, verbose)
        elif backend == "lmdb":
            self.backend = LmdbWordEmbeddingsStoreBackend(embedding, verbose)
        else:
            raise ValueError(f'The given backend "{backend}" is not available.')
        # In case initialization of cached version failed, just fallback to the original WordEmbeddings
        if not self.backend.is_ok:
            self.backend = WordEmbeddings(embedding.embeddings)

    def _get_vector(self, word="house"):
        # delegate the lookup to whichever backend was successfully initialized
        return self.backend._get_vector(word)

    def embed(self, sentences):
        # embed each token by looking up its lowercased surface form in the store
        for sentence in sentences:
            for token in sentence:
                t = torch.tensor(self._get_vector(word=token.text.lower()))
                token.set_embedding(self.name, t)

    def get_names(self):
        # mimic the WordEmbeddings interface: a single embedding name
        return [self.name]

    @staticmethod
    def _get_store_path(embedding, backend="sqlite"):
        """Get the filename of the store."""
        # the store lives next to the cached embedding file, with the backend as suffix
        cache_dir = flair.cache_root
        embedding_filename = re.findall("/(embeddings/.*)", embedding.name)[0]
        store_path = cache_dir / (embedding_filename + "." + backend)
        return store_path

    @staticmethod
    def _word_embeddings(model):
        # collect the embedding objects of a model, for the model types flair supports
        # SequenceTagger
        if hasattr(model, "embeddings"):
            embeds = model.embeddings.embeddings
        # TextClassifier
        elif hasattr(model, "document_embeddings") and hasattr(model.document_embeddings, "embeddings"):
            embeds = model.document_embeddings.embeddings.embeddings
        else:
            embeds = []
        return embeds

    @staticmethod
    def create_stores(model, backend="sqlite"):
        """Creates database versions of all word embeddings in the model.

        Also deletes the original vectors to save memory.
        """
        for embedding in WordEmbeddingsStore._word_embeddings(model):
            # exact type check (not isinstance): only plain WordEmbeddings are convertible
            if type(embedding) == WordEmbeddings:
                WordEmbeddingsStore(embedding, backend)
                del embedding.precomputed_word_embeddings

    @staticmethod
    def load_stores(model, backend="sqlite"):
        """Loads the db versions of all word embeddings in the model."""
        embeds = WordEmbeddingsStore._word_embeddings(model)
        for i, embedding in enumerate(embeds):
            if type(embedding) == WordEmbeddings:
                # replace the headless embedding in-place with its store-backed version
                embeds[i] = WordEmbeddingsStore(embedding, backend)

    @staticmethod
    def delete_stores(model, backend="sqlite"):
        """Deletes the db versions of all word embeddings."""
        for embedding in WordEmbeddingsStore._word_embeddings(model):
            store_path: Path = WordEmbeddingsStore._get_store_path(embedding)
            logger.info(f"delete store: {store_path!s}")
            # sqlite stores are files, lmdb stores are directories
            if store_path.is_file():
                store_path.unlink()
            elif store_path.is_dir():
                shutil.rmtree(store_path, ignore_errors=False, onerror=None)
class WordEmbeddingsStoreBackend:
    """Abstract base for the database backends used by WordEmbeddingsStore."""

    def __init__(self, embedding, backend, verbose=True) -> None:
        # get db filename from embedding name
        self.name = embedding.name
        self.store_path: Path = WordEmbeddingsStore._get_store_path(embedding, backend)

    @property
    def is_ok(self):
        # subclasses set 'k' (the embedding vector size) only after a successful init
        return hasattr(self, "k")

    def _get_vector(self, word="house"):
        # implemented by concrete backends: return the embedding vector for `word`
        pass
class SqliteWordEmbeddingsStoreBackend(WordEmbeddingsStoreBackend):
    """SQLite-backed word embedding store.

    A single table ``embedding(word, v0 .. v{k-1})`` holds one row per
    vocabulary word; ``self.k`` is the vector size.
    """

    def __init__(self, embedding, verbose) -> None:
        super().__init__(embedding, "sqlite", verbose)
        # if embedding database already exists
        if self.store_path.exists() and self.store_path.is_file():
            try:
                self.db = sqlite3.connect(str(self.store_path))
                cursor = self.db.cursor()
                cursor.execute("SELECT * FROM embedding LIMIT 1;")
                result = list(cursor)
                # vector size = number of columns minus the 'word' column
                self.k = len(result[0]) - 1
                return
            except sqlite3.Error as err:
                logger.exception(f"Fail to open sqlite database {self.store_path!s}: {err!s}")
        # otherwise, push embedding to database
        if hasattr(embedding, "precomputed_word_embeddings"):
            self.db = sqlite3.connect(str(self.store_path))
            pwe = embedding.precomputed_word_embeddings
            self.k = pwe.vector_size
            self.db.execute("DROP TABLE IF EXISTS embedding;")
            self.db.execute(
                f"CREATE TABLE embedding(word text,{','.join('v' + str(i) + ' float' for i in range(self.k))});"
            )
            vectors_it = ([word, *pwe.get_vector(word).tolist()] for word in pwe.vocab)
            if verbose:
                logger.info("load vectors to store")
            self.db.executemany(
                f"INSERT INTO embedding(word,{','.join('v' + str(i) for i in range(self.k))}) \
                values ({','.join(['?'] * (1 + self.k))})",
                tqdm(vectors_it),
            )
            self.db.execute("DROP INDEX IF EXISTS embedding_index;")
            self.db.execute("CREATE INDEX embedding_index ON embedding(word);")
            self.db.commit()
            self.db.close()

    def _get_vector(self, word="house"):
        """Return the stored vector for `word`, or an all-zero vector if unknown."""
        db = sqlite3.connect(str(self.store_path))
        cursor = db.cursor()
        # FIX: use a parameterized query instead of interpolating the word into the
        # SQL text. The previous f-string query was an SQL-injection hazard and,
        # because it stripped double quotes from the word first, could never find
        # vocabulary entries that contain a double quote.
        cursor.execute("SELECT * FROM embedding WHERE word = ?;", (word,))
        result = list(cursor)
        db.close()
        if not result:
            return [0.0] * self.k
        return result[0][1:]
class LmdbWordEmbeddingsStoreBackend(WordEmbeddingsStoreBackend):
    """LMDB-backed word embedding store.

    Keys are UTF-8 encoded words; values are pickled numpy vectors.
    """

    def __init__(self, embedding, verbose) -> None:
        super().__init__(embedding, "lmdb", verbose)
        try:
            import lmdb

            # if embedding database already exists
            if self.store_path.exists() and self.store_path.is_dir():
                # open the database in read mode
                try:
                    self.env = lmdb.open(
                        str(self.store_path),
                        readonly=True,
                        max_readers=2048,
                        max_spare_txns=4,
                    )
                    if self.env:
                        # we need to set self.k: read one record to learn the vector size
                        with self.env.begin() as txn:
                            cursor = txn.cursor()
                            for _key, value in cursor:
                                vector = pickle.loads(value)
                                self.k = vector.shape[0]
                                break
                            cursor.close()
                        return
                except lmdb.Error as err:
                    logger.exception(f"Fail to open lmdb database {self.store_path!s}: {err!s}")
            # create and load the database in write mode
            if hasattr(embedding, "precomputed_word_embeddings"):
                pwe = embedding.precomputed_word_embeddings
                self.k = pwe.vector_size
                self.store_path.mkdir(parents=True, exist_ok=True)
                self.env = lmdb.open(str(self.store_path), map_size=DEFAULT_MAP_SIZE)
                if verbose:
                    logger.info("load vectors to store")
                txn = self.env.begin(write=True)
                for word in tqdm(pwe.vocab.keys()):
                    vector = pwe.get_vector(word)
                    # skip words whose encoded form exceeds LMDB's maximum key size
                    if len(word.encode(encoding="UTF-8")) < self.env.max_key_size():
                        txn.put(word.encode(encoding="UTF-8"), pickle.dumps(vector))
                txn.commit()
                return
        except ModuleNotFoundError:
            logger.warning("-" * 100)
            logger.warning('ATTENTION! The library "lmdb" is not installed!')
            logger.warning('To use LMDB, please first install with "pip install lmdb"')
            logger.warning("-" * 100)

    def _get_vector(self, word="house"):
        """Return the stored vector for `word`, or an all-zero vector if unknown."""
        try:
            import lmdb

            with self.env.begin() as txn:
                vector = txn.get(word.encode(encoding="UTF-8"))
                if vector:
                    word_vector = pickle.loads(vector)
                    vector = None
                else:
                    word_vector = np.zeros((self.k,), dtype=np.float32)
        except lmdb.Error:
            # no idea why, but we need to close and reopen the environment to avoid
            # mdb_txn_begin: MDB_BAD_RSLOT: Invalid reuse of reader locktable slot
            # when opening new transaction !
            self.env.close()
            # FIX: convert the pathlib.Path to str, as every other lmdb.open call
            # site does — the previous code passed the Path object directly.
            # NOTE(review): a persistent lmdb.Error would recurse here indefinitely;
            # left as-is to preserve the existing retry semantics.
            self.env = lmdb.open(
                str(self.store_path),
                readonly=True,
                max_readers=2048,
                max_spare_txns=2,
                lock=False,
            )
            return self._get_vector(word)
        except ModuleNotFoundError:
            logger.warning("-" * 100)
            logger.warning('ATTENTION! The library "lmdb" is not installed!')
            logger.warning('To use LMDB, please first install with "pip install lmdb"')
            logger.warning("-" * 100)
            word_vector = np.zeros((self.k,), dtype=np.float32)
        return word_vector
| 12,086 | 39.834459 | 112 | py |
flair | flair-master/flair/data.py | import bisect
import logging
import re
import typing
from abc import ABC, abstractmethod
from collections import Counter, defaultdict, namedtuple
from operator import itemgetter
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Union, cast
import torch
from deprecated import deprecated
from torch.utils.data import Dataset, IterableDataset
from torch.utils.data.dataset import ConcatDataset, Subset
import flair
from flair.file_utils import Tqdm
from flair.tokenization import SegtokTokenizer, SpaceTokenizer, Tokenizer
T_co = typing.TypeVar("T_co", covariant=True)
log = logging.getLogger("flair")
def _iter_dataset(dataset: Optional[Dataset]) -> typing.Iterable:
if dataset is None:
return []
from flair.datasets import DataLoader
return (x[0] for x in DataLoader(dataset, batch_size=1))
def _len_dataset(dataset: Optional[Dataset]) -> int:
if dataset is None:
return 0
from flair.datasets import DataLoader
loader = DataLoader(dataset, batch_size=1)
return len(loader)
# Rectangle of coordinates (left, top, right, bottom) attached to data points.
BoundingBox = namedtuple("BoundingBox", ["left", "top", "right", "bottom"])
class Dictionary:
    """This class holds a dictionary that maps strings to IDs, used to generate one-hot encodings of strings.

    Items are stored UTF-8 encoded (as bytes) so pickled dictionaries are
    platform independent. If `add_unk` is set, index 0 is reserved for the
    special '<unk>' item and unknown lookups return 0 instead of raising.
    """

    def __init__(self, add_unk=True) -> None:
        # init dictionaries
        self.item2idx: Dict[bytes, int] = {}
        self.idx2item: List[bytes] = []
        self.add_unk = add_unk
        self.multi_label = False
        self.span_labels = False
        # in order to deal with unknown tokens, add <unk>
        if add_unk:
            self.add_item("<unk>")

    def remove_item(self, item: str):
        """Remove an item and reindex the remaining items.

        FIX: the previous implementation only deleted the item from both
        containers, leaving every later item with a stale index in `item2idx`
        (and a stale `item2idx_not_encoded` cache) — which broke the
        get_idx_for_item / get_item_for_index round-trip after a removal.
        """
        bytes_item = item.encode("utf-8")
        if bytes_item in self.item2idx:
            self.idx2item.remove(bytes_item)
            # rebuild the index map so remaining items keep consistent ids
            self.item2idx = {b: i for i, b in enumerate(self.idx2item)}
            # invalidate the plain-string lookup cache used by get_idx_for_items
            if hasattr(self, "item2idx_not_encoded"):
                del self.item2idx_not_encoded

    def add_item(self, item: str) -> int:
        """Add string - if already in dictionary returns its ID. if not in dictionary, it will get a new ID.

        :param item: a string for which to assign an id.
        :return: ID of string
        """
        bytes_item = item.encode("utf-8")
        if bytes_item not in self.item2idx:
            self.idx2item.append(bytes_item)
            self.item2idx[bytes_item] = len(self.idx2item) - 1
        return self.item2idx[bytes_item]

    def get_idx_for_item(self, item: str) -> int:
        """Returns the ID of the string, otherwise 0.

        :param item: string for which ID is requested
        :return: ID of string, otherwise 0
        """
        item_encoded = item.encode("utf-8")
        if item_encoded in self.item2idx:
            return self.item2idx[item_encoded]
        elif self.add_unk:
            return 0
        else:
            log.error(f"The string '{item}' is not in dictionary! Dictionary contains only: {self.get_items()}")
            log.error(
                "You can create a Dictionary that handles unknown items with an <unk>-key by setting add_unk = True in the construction."
            )
            raise IndexError

    def get_idx_for_items(self, items: List[str]) -> List[int]:
        """Returns the IDs for each item of the list of string, otherwise 0 if not found.

        :param items: List of string for which IDs are requested
        :return: List of ID of strings
        """
        # lazily build a plain-string lookup cache (defaults to 0 for unknowns)
        if not hasattr(self, "item2idx_not_encoded"):
            d = {key.decode("UTF-8"): value for key, value in self.item2idx.items()}
            self.item2idx_not_encoded = defaultdict(int, d)
        if not items:
            return []
        results = itemgetter(*items)(self.item2idx_not_encoded)
        # itemgetter returns a scalar for a single item, a tuple otherwise
        if isinstance(results, int):
            return [results]
        return list(results)

    def get_items(self) -> List[str]:
        """Return all items as decoded strings, in index order."""
        items = []
        for item in self.idx2item:
            items.append(item.decode("UTF-8"))
        return items

    def __len__(self) -> int:
        return len(self.idx2item)

    def get_item_for_index(self, idx):
        """Return the decoded string stored at index `idx`."""
        return self.idx2item[idx].decode("UTF-8")

    def set_start_stop_tags(self):
        """Append the special <START> and <STOP> tags (used by CRF decoding)."""
        self.add_item("<START>")
        self.add_item("<STOP>")

    def is_span_prediction_problem(self) -> bool:
        """True if this dictionary encodes spans (explicit flag or BIOES-style prefixes)."""
        if self.span_labels:
            return True
        return any(item.startswith(("B-", "S-", "I-")) for item in self.get_items())

    def start_stop_tags_are_set(self) -> bool:
        """True if both <START> and <STOP> tags have been added."""
        return {b"<START>", b"<STOP>"}.issubset(self.item2idx.keys())

    def save(self, savefile):
        """Pickle the two index mappings to `savefile`."""
        import pickle

        with open(savefile, "wb") as f:
            mappings = {"idx2item": self.idx2item, "item2idx": self.item2idx}
            pickle.dump(mappings, f)

    def __setstate__(self, d):
        self.__dict__ = d
        # set 'add_unk' if the dictionary was created with a version of Flair older than 0.9
        if "add_unk" not in self.__dict__.keys():
            self.__dict__["add_unk"] = b"<unk>" in self.__dict__["idx2item"]

    @classmethod
    def load_from_file(cls, filename: Union[str, Path]):
        """Load a dictionary previously written by save()."""
        import pickle

        with Path(filename).open("rb") as f:
            mappings = pickle.load(f, encoding="latin1")
            idx2item = mappings["idx2item"]
            item2idx = mappings["item2idx"]
        # set 'add_unk' depending on whether <unk> is a key
        add_unk = b"<unk>" in idx2item
        dictionary: Dictionary = Dictionary(add_unk=add_unk)
        dictionary.item2idx = item2idx
        dictionary.idx2item = idx2item
        return dictionary

    @classmethod
    def load(cls, name: str):
        """Load a named character dictionary from the flair server, or from a local file path."""
        from flair.file_utils import cached_path

        hu_path: str = "https://flair.informatik.hu-berlin.de/resources/characters"
        if name == "chars" or name == "common-chars":
            char_dict = cached_path(f"{hu_path}/common_characters", cache_dir="datasets")
            return Dictionary.load_from_file(char_dict)
        if name == "chars-large" or name == "common-chars-large":
            char_dict = cached_path(f"{hu_path}/common_characters_large", cache_dir="datasets")
            return Dictionary.load_from_file(char_dict)
        if name == "chars-xl" or name == "common-chars-xl":
            char_dict = cached_path(f"{hu_path}/common_characters_xl", cache_dir="datasets")
            return Dictionary.load_from_file(char_dict)
        if name == "chars-lemmatizer" or name == "common-chars-lemmatizer":
            char_dict = cached_path(f"{hu_path}/common_characters_lemmatizer", cache_dir="datasets")
            return Dictionary.load_from_file(char_dict)
        return Dictionary.load_from_file(name)

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, Dictionary):
            return False
        return self.item2idx == o.item2idx and self.idx2item == o.idx2item and self.add_unk == o.add_unk

    def __str__(self) -> str:
        tags = ", ".join(self.get_item_for_index(i) for i in range(min(len(self), 50)))
        return f"Dictionary with {len(self)} tags: {tags}"
class Label:
    """A single annotation attached to a data point.

    Carries a string value and a confidence score in [0.0, 1.0];
    the score defaults to 1.0.
    """

    def __init__(self, data_point: "DataPoint", value: str, score: float = 1.0) -> None:
        super().__init__()
        self.data_point: DataPoint = data_point
        self._value = value
        self._score = score

    def set_value(self, value: str, score: float = 1.0):
        """Overwrite the label's value and confidence in one call."""
        self._value = value
        self._score = score

    @property
    def value(self) -> str:
        return self._value

    @property
    def score(self) -> float:
        return self._score

    def to_dict(self):
        """Serialize as a plain dict with 'value' and 'confidence' keys."""
        return {"value": self.value, "confidence": self.score}

    def __str__(self) -> str:
        return f"{self.data_point.unlabeled_identifier}{flair._arrow}{self._value} ({round(self._score, 4)})"

    @property
    def shortstring(self):
        return f'"{self.data_point.text}"/{self._value}'

    def __repr__(self) -> str:
        return "'{}'/'{}' ({})".format(self.data_point.unlabeled_identifier, self._value, round(self._score, 4))

    def __eq__(self, other):
        return self.value == other.value and self.score == other.score and self.data_point == other.data_point

    def __hash__(self):
        return hash(self.__repr__())

    def __lt__(self, other):
        # labels order by their underlying data points
        return self.data_point < other.data_point

    @property
    def labeled_identifier(self):
        return f"{self.data_point.unlabeled_identifier}/{self.value}"

    @property
    def unlabeled_identifier(self):
        return f"{self.data_point.unlabeled_identifier}"
class DataPoint:
"""This is the parent class of all data points in Flair.
Examples for data points are Token, Sentence, Image, etc.
Each DataPoint must be embeddable (hence the abstract property embedding() and methods to() and clear_embeddings()).
Also, each DataPoint may have Labels in several layers of annotation (hence the functions add_label(), get_labels()
and the property 'label')
"""
    def __init__(self) -> None:
        # maps a label type name (e.g. "ner") to the list of Labels of that layer
        self.annotation_layers: Dict[str, List[Label]] = {}
        # named embedding tensors attached to this data point
        self._embeddings: Dict[str, torch.Tensor] = {}
        # free-form metadata attached by datasets/models
        self._metadata: Dict[str, typing.Any] = {}
    @property
    @abstractmethod
    def embedding(self):
        """Concrete data point types must expose their embedding tensor here."""
        pass
    def set_embedding(self, name: str, vector: torch.Tensor):
        """Store (or overwrite) the embedding `vector` under the given `name`."""
        self._embeddings[name] = vector
def get_embedding(self, names: Optional[List[str]] = None) -> torch.Tensor:
# if one embedding name, directly return it
if names and len(names) == 1:
if names[0] in self._embeddings:
return self._embeddings[names[0]].to(flair.device)
else:
return torch.tensor([], device=flair.device)
# if multiple embedding names, concatenate them
embeddings = self.get_each_embedding(names)
if embeddings:
return torch.cat(embeddings, dim=0)
else:
return torch.tensor([], device=flair.device)
def get_each_embedding(self, embedding_names: Optional[List[str]] = None) -> List[torch.Tensor]:
embeddings = []
for embed_name in sorted(self._embeddings.keys()):
if embedding_names and embed_name not in embedding_names:
continue
embed = self._embeddings[embed_name].to(flair.device)
embeddings.append(embed)
return embeddings
def to(self, device: str, pin_memory: bool = False):
for name, vector in self._embeddings.items():
if str(vector.device) != str(device):
if pin_memory:
self._embeddings[name] = vector.to(device, non_blocking=True).pin_memory()
else:
self._embeddings[name] = vector.to(device, non_blocking=True)
def clear_embeddings(self, embedding_names: Optional[List[str]] = None):
if embedding_names is None:
self._embeddings = {}
else:
for name in embedding_names:
if name in self._embeddings:
del self._embeddings[name]
def has_label(self, type) -> bool:
return type in self.annotation_layers
def add_metadata(self, key: str, value: typing.Any) -> None:
self._metadata[key] = value
def get_metadata(self, key: str) -> typing.Any:
return self._metadata[key]
def has_metadata(self, key: str) -> bool:
return key in self._metadata
def add_label(self, typename: str, value: str, score: float = 1.0):
if typename not in self.annotation_layers:
self.annotation_layers[typename] = [Label(self, value, score)]
else:
self.annotation_layers[typename].append(Label(self, value, score))
return self
def set_label(self, typename: str, value: str, score: float = 1.0):
self.annotation_layers[typename] = [Label(self, value, score)]
return self
def remove_labels(self, typename: str):
if typename in self.annotation_layers:
del self.annotation_layers[typename]
def get_label(self, label_type: Optional[str] = None, zero_tag_value="O"):
if len(self.get_labels(label_type)) == 0:
return Label(self, zero_tag_value)
return self.get_labels(label_type)[0]
def get_labels(self, typename: Optional[str] = None):
if typename is None:
return self.labels
return self.annotation_layers[typename] if typename in self.annotation_layers else []
@property
def labels(self) -> List[Label]:
all_labels = []
for key in self.annotation_layers:
all_labels.extend(self.annotation_layers[key])
return all_labels
@property
@abstractmethod
def unlabeled_identifier(self):
raise NotImplementedError
def _printout_labels(self, main_label=None, add_score: bool = True):
all_labels = []
keys = [main_label] if main_label is not None else self.annotation_layers.keys()
if add_score:
for key in keys:
all_labels.extend(
[
f"{label.value} ({round(label.score, 4)})"
for label in self.get_labels(key)
if label.data_point == self
]
)
labels = "; ".join(all_labels)
if labels != "":
labels = flair._arrow + labels
else:
for key in keys:
all_labels.extend([f"{label.value}" for label in self.get_labels(key) if label.data_point == self])
labels = "/".join(all_labels)
if labels != "":
labels = "/" + labels
return labels
def __str__(self) -> str:
return self.unlabeled_identifier + self._printout_labels()
@property
@abstractmethod
def start_position(self) -> int:
raise NotImplementedError
@property
@abstractmethod
def end_position(self) -> int:
raise NotImplementedError
@property
@abstractmethod
def text(self):
raise NotImplementedError
@property
def tag(self):
return self.labels[0].value
@property
def score(self):
return self.labels[0].score
def __lt__(self, other):
return self.start_position < other.start_position
def __len__(self) -> int:
raise NotImplementedError
# Type variables over data points, used for generic containers such as DataPair.
DT = typing.TypeVar("DT", bound=DataPoint)
DT2 = typing.TypeVar("DT2", bound=DataPoint)
class _PartOfSentence(DataPoint, ABC):
    """Abstract base for data points that live inside a Sentence (Token, Span, Relation).

    Labels added to a part are mirrored into the owning Sentence's annotation
    layers, so that Sentence.get_labels() also returns them.
    """

    def __init__(self, sentence) -> None:
        super().__init__()
        self.sentence: Sentence = sentence

    def add_label(self, typename: str, value: str, score: float = 1.0):
        # register the label on this part AND on the owning sentence
        super().add_label(typename, value, score)
        self.sentence.annotation_layers.setdefault(typename, []).append(Label(self, value, score))

    def set_label(self, typename: str, value: str, score: float = 1.0):
        if len(self.annotation_layers.get(typename, [])) > 0:
            # First we remove any existing labels for this PartOfSentence in self.sentence
            self.sentence.annotation_layers[typename] = [
                label for label in self.sentence.annotation_layers.get(typename, []) if label.data_point != self
            ]
        self.sentence.annotation_layers.setdefault(typename, []).append(Label(self, value, score))
        super().set_label(typename, value, score)
        return self

    def remove_labels(self, typename: str):
        # labels also need to be deleted at Sentence object
        for label in self.get_labels(typename):
            self.sentence.annotation_layers[typename].remove(label)
        # delete labels at object itself
        super().remove_labels(typename)
class Token(_PartOfSentence):
    """This class represents one word in a tokenized sentence.

    Each token may have any number of tags. It may also point to its head in a dependency tree.
    """

    def __init__(
        self,
        text: str,
        head_id: Optional[int] = None,
        whitespace_after: int = 1,
        start_position: int = 0,
        sentence=None,
    ) -> None:
        super().__init__(sentence=sentence)
        # surface form of the token
        self.form: str = text
        # 1-based index within the owning sentence; assigned by Sentence._add_token
        self._internal_index: Optional[int] = None
        # index of this token's dependency head within the sentence (if any)
        self.head_id: Optional[int] = head_id
        # number of whitespace characters following this token in the original text
        self.whitespace_after: int = whitespace_after
        self._start_position = start_position
        self._embeddings: Dict = {}
        # per-tag-type probability distributions over candidate labels
        self.tags_proba_dist: Dict[str, List[Label]] = {}

    @property
    def idx(self) -> int:
        """1-based position in the sentence, or -1 if not attached to one."""
        if self._internal_index is not None:
            return self._internal_index
        else:
            return -1

    @property
    def text(self) -> str:
        return self.form

    @property
    def unlabeled_identifier(self) -> str:
        return f'Token[{self.idx - 1}]: "{self.text}"'

    def add_tags_proba_dist(self, tag_type: str, tags: List[Label]):
        """Store a full label probability distribution for the given tag type."""
        self.tags_proba_dist[tag_type] = tags

    def get_tags_proba_dist(self, tag_type: str) -> List[Label]:
        """Return the stored label distribution for `tag_type` (empty list if none)."""
        if tag_type in self.tags_proba_dist:
            return self.tags_proba_dist[tag_type]
        return []

    def get_head(self):
        """Return this token's dependency-head token from the owning sentence."""
        return self.sentence.get_token(self.head_id)

    @property
    def start_position(self) -> int:
        return self._start_position

    @start_position.setter
    def start_position(self, value: int) -> None:
        self._start_position = value

    @property
    def end_position(self) -> int:
        return self.start_position + len(self.text)

    @property
    def embedding(self):
        return self.get_embedding()

    def __len__(self) -> int:
        return 1

    def __repr__(self) -> str:
        return self.__str__()

    def add_label(self, typename: str, value: str, score: float = 1.0):
        # The Token is a special _PartOfSentence in that it may be initialized without a Sentence.
        # therefore, labels get added only to the Sentence if it exists
        if self.sentence:
            super().add_label(typename=typename, value=value, score=score)
        else:
            DataPoint.add_label(self, typename=typename, value=value, score=score)

    def set_label(self, typename: str, value: str, score: float = 1.0):
        # The Token is a special _PartOfSentence in that it may be initialized without a Sentence.
        # Therefore, labels get set only to the Sentence if it exists
        if self.sentence:
            super().set_label(typename=typename, value=value, score=score)
        else:
            DataPoint.set_label(self, typename=typename, value=value, score=score)
class Span(_PartOfSentence):
    """This class represents one textual span consisting of Tokens.

    Span instances are cached per sentence: constructing a Span over the same
    tokens twice returns the same object (see __new__).
    """

    def __new__(self, tokens: List[Token]):
        # check if the span already exists. If so, return it
        unlabeled_identifier = self._make_unlabeled_identifier(tokens)
        if unlabeled_identifier in tokens[0].sentence._known_spans:
            span = tokens[0].sentence._known_spans[unlabeled_identifier]
            return span
        # else make a new span
        else:
            span = super().__new__(self)
            span.initialized = False
            tokens[0].sentence._known_spans[unlabeled_identifier] = span
            return span

    def __init__(self, tokens: List[Token]) -> None:
        # __init__ also runs when a cached span is returned from __new__;
        # the `initialized` flag prevents re-initialization in that case.
        if not self.initialized:
            super().__init__(tokens[0].sentence)
            self.tokens = tokens
            self.initialized: bool = True

    @property
    def start_position(self) -> int:
        return self.tokens[0].start_position

    @property
    def end_position(self) -> int:
        return self.tokens[-1].end_position

    @property
    def text(self) -> str:
        # re-join token texts using each token's recorded trailing whitespace
        return "".join([t.text + t.whitespace_after * " " for t in self.tokens]).strip()

    @staticmethod
    def _make_unlabeled_identifier(tokens: List[Token]):
        # identifier of the form 'Span[start:stop]: "<text>"' (0-based token indices)
        text = "".join([t.text + t.whitespace_after * " " for t in tokens]).strip()
        return f'Span[{tokens[0].idx - 1}:{tokens[-1].idx}]: "{text}"'

    @property
    def unlabeled_identifier(self) -> str:
        return self._make_unlabeled_identifier(self.tokens)

    def __repr__(self) -> str:
        return self.__str__()

    def __getitem__(self, idx: int) -> Token:
        return self.tokens[idx]

    def __iter__(self):
        return iter(self.tokens)

    def __len__(self) -> int:
        return len(self.tokens)

    @property
    def embedding(self):
        return self.get_embedding()
class Relation(_PartOfSentence):
    """A directed relation between two Spans of the same sentence (first -> second).

    Like Span, Relation instances are cached in the sentence's _known_spans, so
    constructing the same relation twice returns the same object (see __new__).
    """

    def __new__(self, first: Span, second: Span):
        # check if the relation already exists. If so, return it
        unlabeled_identifier = self._make_unlabeled_identifier(first, second)
        if unlabeled_identifier in first.sentence._known_spans:
            span = first.sentence._known_spans[unlabeled_identifier]
            return span
        # else make a new relation
        else:
            span = super().__new__(self)
            span.initialized = False
            first.sentence._known_spans[unlabeled_identifier] = span
            return span

    def __init__(self, first: Span, second: Span) -> None:
        # __init__ also runs for cached instances returned from __new__;
        # the `initialized` flag prevents re-initialization in that case.
        if not self.initialized:
            super().__init__(sentence=first.sentence)
            self.first: Span = first
            self.second: Span = second
            self.initialized: bool = True

    def __repr__(self) -> str:
        return str(self)

    @property
    def tag(self):
        return self.labels[0].value

    @property
    def text(self):
        return f"{self.first.text} -> {self.second.text}"

    @staticmethod
    def _make_unlabeled_identifier(first, second):
        # identifier of the form 'Relation[a:b][c:d]: "<first> -> <second>"'
        text = f"{first.text} -> {second.text}"
        return (
            f"Relation"
            f"[{first.tokens[0].idx - 1}:{first.tokens[-1].idx}]"
            f"[{second.tokens[0].idx - 1}:{second.tokens[-1].idx}]"
            f': "{text}"'
        )

    @property
    def unlabeled_identifier(self) -> str:
        return self._make_unlabeled_identifier(self.first, self.second)

    @property
    def start_position(self) -> int:
        return min(self.first.start_position, self.second.start_position)

    @property
    def end_position(self) -> int:
        return max(self.first.end_position, self.second.end_position)

    @property
    def embedding(self):
        # NOTE(review): returns None, unlike Span/Token which return get_embedding()
        # — confirm whether this is intentional before relying on it.
        pass
class Sentence(DataPoint):
    """A Sentence is a list of tokens and is used to represent a sentence or text fragment."""

    def __init__(
        self,
        text: Union[str, List[str], List[Token]],
        use_tokenizer: Union[bool, Tokenizer] = True,
        language_code: Optional[str] = None,
        start_position: int = 0,
    ) -> None:
        """Class to hold all metadata related to a text.

        Metadata can be tokens, predictions, language code, ...
        :param text: original string (sentence), or a list of string tokens (words)
        :param use_tokenizer: a custom tokenizer (default is :class:`SpaceTokenizer`)
            more advanced options are :class:`SegTokTokenizer` to use segtok or :class:`SpacyTokenizer`
            to use Spacy library if available). Check the implementations of abstract class Tokenizer or
            implement your own subclass (if you need it). If instead of providing a Tokenizer, this parameter
            is just set to True (deprecated), :class:`SegtokTokenizer` will be used.
        :param language_code: Language of the sentence
        :param start_position: Start char offset of the sentence in the superordinate document
        """
        super().__init__()
        self.tokens: List[Token] = []
        # private field for all known spans
        self._known_spans: Dict[str, _PartOfSentence] = {}
        self.language_code: Optional[str] = language_code
        self._start_position = start_position
        # the tokenizer used for this sentence
        if isinstance(use_tokenizer, Tokenizer):
            tokenizer = use_tokenizer
        elif type(use_tokenizer) == bool:
            tokenizer = SegtokTokenizer() if use_tokenizer else SpaceTokenizer()
        else:
            raise AssertionError("Unexpected type of parameter 'use_tokenizer'. Parameter should be bool or Tokenizer")
        # cached result of to_tokenized_string()
        self.tokenized: Optional[str] = None
        # some sentences represent a document boundary (but most do not)
        self.is_document_boundary: bool = False
        # internal variables to denote position inside dataset
        self._previous_sentence: Optional[Sentence] = None
        self._has_context: bool = False
        self._next_sentence: Optional[Sentence] = None
        self._position_in_dataset: Optional[typing.Tuple[Dataset, int]] = None
        # if text is passed, instantiate sentence with tokens (words)
        if isinstance(text, str):
            text = Sentence._handle_problem_characters(text)
            words = tokenizer.tokenize(text)
        elif text and isinstance(text[0], Token):
            # a list of Token objects was passed: adopt them directly and stop here
            for t in text:
                self._add_token(t)
            self.tokens[-1].whitespace_after = 0
            return
        else:
            words = cast(List[str], text)
            text = " ".join(words)
        # determine token positions and whitespace_after flag
        current_offset: int = 0
        previous_token: Optional[Token] = None
        for word in words:
            word_start_position: int = text.index(word, current_offset)
            delta_offset: int = word_start_position - current_offset
            token: Token = Token(text=word, start_position=word_start_position)
            self._add_token(token)
            if previous_token is not None:
                previous_token.whitespace_after = delta_offset
            current_offset = token.end_position
            previous_token = token
        # the last token has no whitespace after
        if len(self) > 0:
            self.tokens[-1].whitespace_after = 0
        # log a warning if the dataset is empty
        if text == "":
            log.warning("Warning: An empty Sentence was created! Are there empty strings in your dataset?")

    @property
    def unlabeled_identifier(self):
        return f'Sentence[{len(self)}]: "{self.text}"'

    def get_relations(self, type: str) -> List[Relation]:
        """All Relation data points carrying a label of the given type."""
        relations: List[Relation] = []
        for label in self.get_labels(type):
            if isinstance(label.data_point, Relation):
                relations.append(label.data_point)
        return relations

    def get_spans(self, type: str) -> List[Span]:
        """All Span data points carrying a label of the given type, sorted by position."""
        spans: List[Span] = []
        for potential_span in self._known_spans.values():
            if isinstance(potential_span, Span) and potential_span.has_label(type):
                spans.append(potential_span)
        return sorted(spans)

    def get_token(self, token_id: int) -> Optional[Token]:
        """Return the token with the given 1-based index, or None if not found."""
        for token in self.tokens:
            if token.idx == token_id:
                return token
        return None

    def _add_token(self, token: Union[Token, str]):
        # Attach a token: assign its 1-based index and start position, then mirror
        # any labels it already carries into this sentence's annotation layers.
        if isinstance(token, Token):
            assert token.sentence is None
        if type(token) is str:
            token = Token(token)
        token = cast(Token, token)
        # data with zero-width characters cannot be handled
        if token.text == "":
            return
        # set token idx and sentence
        token.sentence = self
        token._internal_index = len(self.tokens) + 1
        if token.start_position == 0 and token._internal_index > 1:
            token.start_position = len(self.to_original_text()) + self[-1].whitespace_after
        # append token to sentence
        self.tokens.append(token)
        # register token annotations on sentence
        for typename in token.annotation_layers:
            for label in token.get_labels(typename):
                if typename not in token.sentence.annotation_layers:
                    token.sentence.annotation_layers[typename] = [Label(token, label.value, label.score)]
                else:
                    token.sentence.annotation_layers[typename].append(Label(token, label.value, label.score))

    @property
    def embedding(self):
        return self.get_embedding()

    def to(self, device: str, pin_memory: bool = False):
        # move sentence embeddings to device
        super().to(device=device, pin_memory=pin_memory)
        # also move token embeddings to device
        for token in self:
            token.to(device, pin_memory)

    def clear_embeddings(self, embedding_names: Optional[List[str]] = None):
        super().clear_embeddings(embedding_names)
        # clear token embeddings
        for token in self:
            token.clear_embeddings(embedding_names)

    def left_context(self, context_length: int, respect_document_boundaries: bool = True) -> List[Token]:
        """Up to `context_length` tokens preceding this sentence in the document."""
        sentence = self
        left_context: List[Token] = []
        while len(left_context) < context_length:
            sentence = sentence.previous_sentence()
            if sentence is None:
                break
            if respect_document_boundaries and sentence.is_document_boundary:
                break
            left_context = sentence.tokens + left_context
        return left_context[-context_length:]

    def right_context(self, context_length: int, respect_document_boundaries: bool = True) -> List[Token]:
        """Up to `context_length` tokens following this sentence in the document."""
        sentence = self
        right_context: List[Token] = []
        while len(right_context) < context_length:
            sentence = sentence.next_sentence()
            if sentence is None:
                break
            if respect_document_boundaries and sentence.is_document_boundary:
                break
            right_context += sentence.tokens
        return right_context[:context_length]

    def __str__(self) -> str:
        return self.to_tagged_string()

    def to_tagged_string(self, main_label=None) -> str:
        """Sentence text plus a printable listing of the labels of its parts."""
        already_printed = [self]
        output = super().__str__()
        label_append = []
        for label in self.get_labels(main_label):
            if label.data_point in already_printed:
                continue
            label_append.append(
                f'"{label.data_point.text}"{label.data_point._printout_labels(main_label=main_label, add_score=False)}'
            )
            already_printed.append(label.data_point)
        if len(label_append) > 0:
            output += f"{flair._arrow}[" + ", ".join(label_append) + "]"
        return output

    @property
    def text(self):
        return self.to_original_text()

    def to_tokenized_string(self) -> str:
        """Token texts joined by single spaces (cached after the first call)."""
        if self.tokenized is None:
            self.tokenized = " ".join([t.text for t in self.tokens])
        return self.tokenized

    def to_plain_string(self):
        """Reconstruct the text using each token's recorded trailing whitespace."""
        plain = ""
        for token in self.tokens:
            plain += token.text
            if token.whitespace_after > 0:
                plain += token.whitespace_after * " "
        return plain.rstrip()

    def infer_space_after(self):
        """Heuristics in case you wish to infer whitespace_after values for tokenized text.

        This is useful for some old NLP tasks (such as CoNLL-03 and CoNLL-2000) that provide only tokenized data with
        no info of original whitespacing.
        :return:
        """
        last_token = None
        quote_count: int = 0
        # infer whitespace after field
        for token in self.tokens:
            if token.text == '"':
                # opening quotes attach to the following token, closing quotes to the preceding one
                quote_count += 1
                if quote_count % 2 != 0:
                    token.whitespace_after = 0
                elif last_token is not None:
                    last_token.whitespace_after = 0
            if last_token is not None:
                if token.text in [".", ":", ",", ";", ")", "n't", "!", "?"]:
                    last_token.whitespace_after = 0
                if token.text.startswith("'"):
                    last_token.whitespace_after = 0
            if token.text in ["("]:
                token.whitespace_after = 0
            last_token = token
        return self

    def to_original_text(self) -> str:
        """Reconstruct the original text, including the first token's leading offset."""
        # if sentence has no tokens, return empty string
        if len(self) == 0:
            return ""
        # otherwise, return concatenation of tokens with the correct offsets
        return (self[0].start_position - self.start_position) * " " + "".join(
            [t.text + t.whitespace_after * " " for t in self.tokens]
        ).strip()

    def to_dict(self, tag_type: Optional[str] = None):
        """Serialize text plus labels (of one layer, or of all layers) to a plain dict."""
        labels = []
        if tag_type:
            labels = [label.to_dict() for label in self.get_labels(tag_type)]
            return {"text": self.to_original_text(), tag_type: labels}
        if self.labels:
            labels = [label.to_dict() for label in self.labels]
        return {"text": self.to_original_text(), "all labels": labels}

    def get_span(self, start: int, stop: int):
        """Return the Span covering tokens [start:stop]."""
        span_slice = slice(start, stop)
        return self[span_slice]

    @typing.overload
    def __getitem__(self, idx: int) -> Token:
        ...

    @typing.overload
    def __getitem__(self, s: slice) -> Span:
        ...

    def __getitem__(self, subscript):
        # slicing returns a Span, integer indexing returns a Token
        if isinstance(subscript, slice):
            return Span(self.tokens[subscript])
        else:
            return self.tokens[subscript]

    def __iter__(self):
        return iter(self.tokens)

    def __len__(self) -> int:
        return len(self.tokens)

    def __repr__(self) -> str:
        return self.__str__()

    @property
    def start_position(self) -> int:
        return self._start_position

    @start_position.setter
    def start_position(self, value: int) -> None:
        self._start_position = value

    @property
    def end_position(self) -> int:
        # The sentence's start position is not propagated to its tokens.
        # Therefore, we need to add the sentence's start position to its last token's end position, including whitespaces.
        return self.start_position + self[-1].end_position + self[-1].whitespace_after

    def get_language_code(self) -> str:
        """Detect (and cache) the language of this sentence; falls back to "en" on failure."""
        if self.language_code is None:
            import langdetect

            try:
                self.language_code = langdetect.detect(self.to_plain_string())
            except Exception:
                self.language_code = "en"
        return self.language_code

    @staticmethod
    def _handle_problem_characters(text: str) -> str:
        # normalize characters that would break tokenization or offset computation
        text = Sentence.__remove_zero_width_characters(text)
        text = Sentence.__restore_windows_1252_characters(text)
        return text

    @staticmethod
    def __remove_zero_width_characters(text: str) -> str:
        # strip zero-width joiners/spaces, variation selectors and BOMs
        text = text.replace("\u200c", "")
        text = text.replace("\u200b", "")
        text = text.replace("\ufe0f", "")
        text = text.replace("\ufeff", "")
        return text

    @staticmethod
    def __restore_windows_1252_characters(text: str) -> str:
        # Code points U+0080-U+0099 typically stem from mis-decoded windows-1252
        # bytes; re-decode them (or drop them if no such character exists).
        def to_windows_1252(match):
            try:
                return bytes([ord(match.group(0))]).decode("windows-1252")
            except UnicodeDecodeError:
                # No character at the corresponding code point: remove it
                return ""

        return re.sub(r"[\u0080-\u0099]", to_windows_1252, text)

    def next_sentence(self):
        """Get the next sentence in the document.

        This only works if context is set through dataloader or elsewhere
        :return: next Sentence in document if set, otherwise None
        """
        if self._next_sentence is not None:
            return self._next_sentence
        if self._position_in_dataset is not None:
            dataset = self._position_in_dataset[0]
            index = self._position_in_dataset[1] + 1
            if index < len(dataset):
                return dataset[index]
        return None

    def previous_sentence(self):
        """Get the previous sentence in the document.

        works only if context is set through dataloader or elsewhere
        :return: previous Sentence in document if set, otherwise None
        """
        if self._previous_sentence is not None:
            return self._previous_sentence
        if self._position_in_dataset is not None:
            dataset = self._position_in_dataset[0]
            index = self._position_in_dataset[1] - 1
            if index >= 0:
                return dataset[index]
        return None

    def is_context_set(self) -> bool:
        """Determines if this sentence has a context of sentences before or after set.

        Return True or False depending on whether context is set (for instance in dataloader or elsewhere)
        :return: True if context is set, else False
        """
        return (
            self._has_context
            or self._previous_sentence is not None
            or self._next_sentence is not None
            or self._position_in_dataset is not None
        )

    def copy_context_from_sentence(self, sentence: "Sentence") -> None:
        """Adopt another sentence's document context (previous/next/dataset position)."""
        self._previous_sentence = sentence._previous_sentence
        self._next_sentence = sentence._next_sentence
        self._position_in_dataset = sentence._position_in_dataset

    @classmethod
    def set_context_for_sentences(cls, sentences: List["Sentence"]) -> None:
        """Chain the given sentences as each other's previous/next context, in order."""
        previous_sentence = None
        for sentence in sentences:
            if sentence.is_context_set():
                continue
            sentence._previous_sentence = previous_sentence
            sentence._next_sentence = None
            sentence._has_context = True
            if previous_sentence is not None:
                previous_sentence._next_sentence = sentence
            previous_sentence = sentence

    def get_labels(self, label_type: Optional[str] = None):
        """Like DataPoint.get_labels, but returns labels sorted by data point position."""
        # if no label if specified, return all labels
        if label_type is None:
            return sorted(self.labels)
        # if the label type exists in the Sentence, return it
        if label_type in self.annotation_layers:
            return sorted(self.annotation_layers[label_type])
        # return empty list if none of the above
        return []

    def remove_labels(self, typename: str):
        # labels also need to be deleted at all tokens
        for token in self:
            token.remove_labels(typename)
        # labels also need to be deleted at all known spans
        for span in self._known_spans.values():
            span.remove_labels(typename)
        # remove spans without labels
        self._known_spans = {k: v for k, v in self._known_spans.items() if len(v.labels) > 0}
        # delete labels at object itself
        super().remove_labels(typename)
class DataPair(DataPoint, typing.Generic[DT, DT2]):
    """A data point composed of two other data points (e.g. a sentence pair)."""

    def __init__(self, first: DT, second: DT2) -> None:
        super().__init__()
        self.first = first
        self.second = second

    def _parts(self):
        # the two constituent data points, in order
        return (self.first, self.second)

    def to(self, device: str, pin_memory: bool = False):
        """Move the embeddings of both constituents to the given device."""
        for part in self._parts():
            part.to(device, pin_memory)

    def clear_embeddings(self, embedding_names: Optional[List[str]] = None):
        """Delete stored embeddings on both constituents."""
        for part in self._parts():
            part.clear_embeddings(embedding_names)

    @property
    def embedding(self):
        """Concatenation of the two constituent embeddings."""
        return torch.cat([self.first.embedding, self.second.embedding])

    def __len__(self) -> int:
        return sum(len(part) for part in self._parts())

    @property
    def unlabeled_identifier(self):
        return f"DataPair: '{self.first.unlabeled_identifier}' + '{self.second.unlabeled_identifier}'"

    @property
    def start_position(self) -> int:
        """Start offset of the first constituent."""
        return self.first.start_position

    @property
    def end_position(self) -> int:
        """End offset of the first constituent."""
        return self.first.end_position

    @property
    def text(self):
        """Both constituent texts joined by ' || '."""
        return self.first.text + " || " + self.second.text
# Convenience alias: a DataPair of two Sentences (e.g. for text-pair tasks).
TextPair = DataPair[Sentence, Sentence]
class Image(DataPoint):
    """A data point holding image data and/or a URL pointing to the image."""

    def __init__(self, data=None, imageURL=None) -> None:
        super().__init__()
        # raw image content (may be None; presumably a tensor — TODO confirm)
        self.data = data
        self._embeddings: Dict = {}
        # remote location of the image (may be None)
        self.imageURL = imageURL

    @property
    def embedding(self):
        return self.get_embedding()

    def __str__(self) -> str:
        # Compare against None explicitly: truth-testing a multi-element torch
        # tensor raises "Boolean value of Tensor ... is ambiguous".
        image_repr = self.data.size() if self.data is not None else ""
        image_url = self.imageURL if self.imageURL else ""
        return f"Image: {image_repr} {image_url}"

    @property
    def start_position(self) -> int:
        raise NotImplementedError

    @property
    def end_position(self) -> int:
        raise NotImplementedError

    @property
    def text(self) -> str:
        raise NotImplementedError

    @property
    def unlabeled_identifier(self) -> str:
        raise NotImplementedError
class Corpus(typing.Generic[T_co]):
def __init__(
    self,
    train: Optional[Dataset[T_co]] = None,
    dev: Optional[Dataset[T_co]] = None,
    test: Optional[Dataset[T_co]] = None,
    name: str = "corpus",
    sample_missing_splits: Union[bool, str] = True,
) -> None:
    """Create a corpus from (optional) train/dev/test splits.

    If test or dev is missing and `sample_missing_splits` allows it, 10% of the
    training data is split off (and removed from train) to fill the gap.
    `sample_missing_splits` may also be "only_dev" or "only_test" to restrict
    which missing split gets sampled.

    :raises RuntimeError: if no split at all is provided.
    """
    # set name
    self.name: str = name
    # abort if no data is provided
    if not train and not dev and not test:
        raise RuntimeError("No data provided when initializing corpus object.")
    # sample test data from train if none is provided
    if test is None and sample_missing_splits and train and sample_missing_splits != "only_dev":
        train_length = _len_dataset(train)
        test_size: int = round(train_length / 10)
        test, train = randomly_split_into_two_datasets(train, test_size)
    # sample dev data from train if none is provided
    if dev is None and sample_missing_splits and train and sample_missing_splits != "only_test":
        train_length = _len_dataset(train)
        dev_size: int = round(train_length / 10)
        dev, train = randomly_split_into_two_datasets(train, dev_size)
    # set train dev and test data
    self._train: Optional[Dataset[T_co]] = train
    self._test: Optional[Dataset[T_co]] = test
    self._dev: Optional[Dataset[T_co]] = dev
@property
def train(self) -> Optional[Dataset[T_co]]:
    """The training split (may be None)."""
    return self._train
@property
def dev(self) -> Optional[Dataset[T_co]]:
    """The development split (may be None)."""
    return self._dev
@property
def test(self) -> Optional[Dataset[T_co]]:
    """The test split (may be None)."""
    return self._test
def downsample(
    self,
    percentage: float = 0.1,
    downsample_train=True,
    downsample_dev=True,
    downsample_test=True,
):
    """Randomly reduce the selected splits to the given proportion, in place.

    Returns self so calls can be chained.
    """
    selections = (
        (downsample_train, "_train"),
        (downsample_dev, "_dev"),
        (downsample_test, "_test"),
    )
    for selected, attr in selections:
        split = getattr(self, attr)
        if selected and split is not None:
            setattr(self, attr, self._downsample_to_proportion(split, percentage))
    return self
def filter_empty_sentences(self):
    """Drop zero-length sentences from all splits, in place."""
    log.info("Filtering empty sentences")
    for attr in ("_train", "_test", "_dev"):
        split = getattr(self, attr)
        if split is not None:
            setattr(self, attr, Corpus._filter_empty_sentences(split))
    log.info(self)
def filter_long_sentences(self, max_charlength: int):
    """Drop sentences longer than `max_charlength` characters from all splits, in place."""
    log.info("Filtering long sentences")
    for attr in ("_train", "_test", "_dev"):
        split = getattr(self, attr)
        if split is not None:
            setattr(self, attr, Corpus._filter_long_sentences(split, max_charlength))
    log.info(self)
@staticmethod
def _filter_long_sentences(dataset, max_charlength: int) -> Dataset:
    """Return a Subset of `dataset` without sentences longer than `max_charlength` characters.

    (Fixed: local variable names were copy-pasted from _filter_empty_sentences
    and called these "empty" sentences; the list of dropped indices was dead code.)
    """
    # collect indices of sentences short enough to keep
    keep_indices = []
    for index, sentence in Tqdm.tqdm(enumerate(_iter_dataset(dataset))):
        if len(sentence.to_plain_string()) <= max_charlength:
            keep_indices.append(index)
    # create subset of the sentences to keep
    return Subset(dataset, keep_indices)
@staticmethod
def _filter_empty_sentences(dataset) -> Dataset:
    """Return a Subset of `dataset` containing only non-empty sentences."""
    # collect indices of sentences that actually contain tokens
    non_empty_sentence_indices = [
        index for index, sentence in enumerate(_iter_dataset(dataset)) if len(sentence) > 0
    ]
    # create subset of non-empty sentence indices
    return Subset(dataset, non_empty_sentence_indices)
def make_vocab_dictionary(self, max_tokens=-1, min_freq=1) -> Dictionary:
    """Creates a dictionary of all tokens contained in the corpus.

    By defining `max_tokens` you can set the maximum number of tokens that should be contained in the dictionary.
    If there are more than `max_tokens` tokens in the corpus, the most frequent tokens are added first.
    If `min_freq` is set the a value greater than 1 only tokens occurring more than `min_freq` times are considered
    to be added to the dictionary.
    :param max_tokens: the maximum number of tokens that should be added to the dictionary (-1 = take all tokens)
    :param min_freq: a token needs to occur at least `min_freq` times to be added to the dictionary (-1 = there is no limitation)
    :return: dictionary of tokens
    """
    vocab_dictionary: Dictionary = Dictionary()
    for token in self._get_most_common_tokens(max_tokens, min_freq):
        vocab_dictionary.add_item(token)
    return vocab_dictionary
def _get_most_common_tokens(self, max_tokens, min_freq) -> List[str]:
    """Token texts ordered by frequency, truncated by `max_tokens` and `min_freq` (-1 disables either)."""
    frequencies = Counter(self._get_all_tokens())
    selected: List[str] = []
    for token, freq in frequencies.most_common():
        below_min_freq = min_freq != -1 and freq < min_freq
        at_capacity = max_tokens != -1 and len(selected) == max_tokens
        if below_min_freq or at_capacity:
            break
        selected.append(token)
    return selected
def _get_all_tokens(self) -> List[str]:
    """The text of every token in the training split."""
    assert self.train
    return [token.text for sentence in _iter_dataset(self.train) for token in sentence.tokens]
@staticmethod
def _downsample_to_proportion(dataset: Dataset, proportion: float):
    """Randomly sample `proportion` of `dataset` and return the sampled part."""
    sampled_size: int = round(_len_dataset(dataset) * proportion)
    sampled, _ = randomly_split_into_two_datasets(dataset, sampled_size)
    return sampled
def obtain_statistics(self, label_type: Optional[str] = None, pretty_print: bool = True) -> Union[dict, str]:
    """Print statistics about the class distribution and sentence sizes.

    only labels of sentences are taken into account
    """
    json_data = {
        split_name: self._obtain_statistics_for(split, split_name, label_type)
        for split_name, split in (("TRAIN", self.train), ("TEST", self.test), ("DEV", self.dev))
    }
    if not pretty_print:
        return json_data
    import json

    return json.dumps(json_data, indent=4)
@staticmethod
def _obtain_statistics_for(sentences, name, tag_type) -> dict:
    """Statistics dict for one split; empty dict if the split has no sentences."""
    if len(sentences) == 0:
        return {}
    tokens_per_sentence = Corpus._get_tokens_per_sentence(sentences)
    return {
        "dataset": name,
        "total_number_of_documents": len(sentences),
        "number_of_documents_per_class": dict(Corpus._count_sentence_labels(sentences)),
        "number_of_tokens_per_tag": dict(Corpus._count_token_labels(sentences, tag_type)),
        "number_of_tokens": {
            "total": sum(tokens_per_sentence),
            "min": min(tokens_per_sentence),
            "max": max(tokens_per_sentence),
            "avg": sum(tokens_per_sentence) / len(sentences),
        },
    }
@staticmethod
def _get_tokens_per_sentence(sentences):
    """Number of tokens in each sentence, in order."""
    return [len(sentence.tokens) for sentence in sentences]
@staticmethod
def _count_sentence_labels(sentences):
    """Count sentence-level label values across all sentences."""
    # defaultdict(int) is the idiomatic (and picklable) spelling of defaultdict(lambda: 0)
    label_count: typing.DefaultDict[str, int] = defaultdict(int)
    for sent in sentences:
        for label in sent.labels:
            label_count[label.value] += 1
    return label_count
@staticmethod
def _count_token_labels(sentences, label_type):
    """Count token-level values of `label_type` across all sentences."""
    # defaultdict(int) is the idiomatic (and picklable) spelling of defaultdict(lambda: 0)
    label_count: typing.DefaultDict[str, int] = defaultdict(int)
    for sent in sentences:
        for token in sent.tokens:
            if label_type in token.annotation_layers:
                label = token.get_label(label_type)
                label_count[label.value] += 1
    return label_count
def __str__(self) -> str:
    """Summarize the corpus as sentence counts per split (0 for missing splits)."""
    train_size = _len_dataset(self.train) if self.train else 0
    dev_size = _len_dataset(self.dev) if self.dev else 0
    test_size = _len_dataset(self.test) if self.test else 0
    return "Corpus: %d train + %d dev + %d test sentences" % (train_size, dev_size, test_size)
def make_label_dictionary(
    self, label_type: str, min_count: int = -1, add_unk: bool = False, add_dev_test: bool = False
) -> Dictionary:
    """Creates a dictionary of all labels assigned to the sentences in the corpus.

    :param label_type: the annotation layer whose label values are collected
    :param min_count: label values seen fewer times than this are mapped to <unk>
        (setting it forces add_unk); -1 keeps every observed value
    :param add_unk: whether the dictionary contains an <unk> entry
    :param add_dev_test: if True, dev and test splits are counted as well (default: train only)
    :return: dictionary of labels
    """
    if min_count > 0 and not add_unk:
        add_unk = True
        log.info("Adding <unk>-token to dictionary since min_count is set.")

    label_dictionary: Dictionary = Dictionary(add_unk=add_unk)
    label_dictionary.span_labels = False

    assert self.train
    datasets = [self.train]
    if add_dev_test and self.dev is not None:
        datasets.append(self.dev)
    if add_dev_test and self.test is not None:
        datasets.append(self.test)
    data: ConcatDataset = ConcatDataset(datasets)

    log.info("Computing label dictionary. Progress:")

    sentence_label_type_counter: typing.Counter[str] = Counter()
    label_value_counter: typing.Counter[str] = Counter()
    all_sentence_labels: List[str] = []

    # first, determine the datapoint type by going through dataset until first label is found
    datapoint_type = None
    for sentence in Tqdm.tqdm(_iter_dataset(data)):
        labels = sentence.get_labels(label_type)
        for label in labels:
            datapoint_type = type(label.data_point)
        if datapoint_type:
            break

    # span-level labels get special treatment downstream
    if datapoint_type == Span:
        label_dictionary.span_labels = True

    for sentence in Tqdm.tqdm(_iter_dataset(data)):
        # count all label types per sentence (used for the error message below)
        sentence_label_type_counter.update(sentence.annotation_layers.keys())

        # go through all labels of label_type and count values
        labels = sentence.get_labels(label_type)
        label_value_counter.update(label.value for label in labels if label.value not in all_sentence_labels)

        # special handling for Token-level annotations. Add all untagged as 'O' label
        if datapoint_type == Token and len(sentence) > len(labels):
            label_value_counter["O"] += len(sentence) - len(labels)

        # more than one label of this type on a single sentence means multi-label
        if not label_dictionary.multi_label and len(labels) > 1:
            label_dictionary.multi_label = True

    # if an unk threshold is set, UNK all label values below this threshold
    total_count = 0
    unked_count = 0
    for label, count in label_value_counter.most_common():
        if count >= min_count:
            label_dictionary.add_item(label)
            total_count += count
        else:
            unked_count += count

    # nothing usable found: report which label types the corpus does contain
    if len(label_dictionary.idx2item) == 0 or (
        len(label_dictionary.idx2item) == 1 and "<unk>" in label_dictionary.get_items()
    ):
        log.error(f"ERROR: You specified label_type='{label_type}' which is not in this dataset!")
        contained_labels = ", ".join(
            [f"'{label[0]}' (in {label[1]} sentences)" for label in sentence_label_type_counter.most_common()]
        )
        log.error(f"ERROR: The corpus contains the following label types: {contained_labels}")
        raise Exception

    log.info(
        f"Dictionary created for label '{label_type}' with {len(label_dictionary)} "
        f"values: {', '.join([label[0] + f' (seen {label[1]} times)' for label in label_value_counter.most_common(20)])}"
    )
    if unked_count > 0:
        log.info(f" - at UNK threshold {min_count}, {unked_count} instances are UNK'ed and {total_count} remain")

    return label_dictionary
def add_label_noise(
    self,
    label_type: str,
    labels: List[str],
    noise_share: float = 0.2,
    split: str = "train",
    noise_transition_matrix: Optional[Dict[str, List[float]]] = None,
):
    """Generates label noise in the chosen dataset split (in place).

    The clean label of each data point is preserved under the additional
    label type ``label_type + "_clean"``.

    :param label_type: the type of labels for which the noise should be simulated.
    :param labels: an array with unique labels of said type (retrievable from label dictionary).
    :param noise_share: the desired share of noise in the split (uniform noise only).
    :param split: in which dataset split the noise is to be simulated.
    :param noise_transition_matrix: provides pre-defined probabilities for label flipping based on the
        initial label value (relevant for class-dependent label noise simulation).
    """
    import numpy as np

    # select the split to corrupt
    if split == "train":
        assert self.train
        datasets = [self.train]
    elif split == "dev":
        assert self.dev
        datasets = [self.dev]
    elif split == "test":
        assert self.test
        datasets = [self.test]
    else:
        raise ValueError("split must be either train, dev or test.")
    data: ConcatDataset = ConcatDataset(datasets)

    corrupted_count = 0
    total_label_count = 0

    if noise_transition_matrix:
        # class-dependent noise: flip probabilities depend on the original label value
        ntm_labels = noise_transition_matrix.keys()
        if set(ntm_labels) != set(labels):
            raise AssertionError(
                "Label values in the noise transition matrix have to coincide with label values in the dataset"
            )
        log.info("Generating noisy labels. Progress:")
        for data_point in Tqdm.tqdm(_iter_dataset(data)):
            for label in data_point.get_labels(label_type):
                total_label_count += 1
                orig_label = label.value
                # sample randomly from a label distribution according to the probabilities defined by the noise transition matrix
                new_label = np.random.default_rng().choice(
                    a=list(ntm_labels),
                    p=noise_transition_matrix[orig_label],
                )
                # replace the old label with the new one
                label.data_point.set_label(label_type, new_label)
                # keep track of the old (clean) label using another label type category
                label.data_point.add_label(label_type + "_clean", orig_label)
                # keep track of how many labels in total are flipped
                if new_label != orig_label:
                    corrupted_count += 1
    else:
        # uniform noise: keep the label with probability 1 - noise_share, otherwise
        # flip to one of the remaining labels with equal probability
        if noise_share < 0 or noise_share > 1:
            raise ValueError("noise_share must be between 0 and 1.")
        orig_label_p = 1 - noise_share
        other_label_p = noise_share / (len(labels) - 1)
        log.info("Generating noisy labels. Progress:")
        for data_point in Tqdm.tqdm(_iter_dataset(data)):
            for label in data_point.get_labels(label_type):
                total_label_count += 1
                orig_label = label.value
                prob_dist = [other_label_p] * len(labels)
                prob_dist[labels.index(orig_label)] = orig_label_p
                # sample randomly from a label distribution according to the probabilities defined by the desired noise share
                new_label = np.random.default_rng().choice(a=labels, p=prob_dist)
                # replace the old label with the new one
                label.data_point.set_label(label_type, new_label)
                # keep track of the old (clean) label using another label type category
                label.data_point.add_label(label_type + "_clean", orig_label)
                # keep track of how many labels in total are flipped
                if new_label != orig_label:
                    corrupted_count += 1

    log.info(
        f"Total labels corrupted: {corrupted_count}. Resulting noise share: {round((corrupted_count / total_label_count) * 100, 2)}%."
    )
def get_label_distribution(self):
    """Count sentence-level label values over the training split."""
    distribution = defaultdict(int)
    for sentence in self.train:
        for sentence_label in sentence.labels:
            distribution[sentence_label.value] += 1
    return distribution
def get_all_sentences(self) -> ConcatDataset:
    """Concatenate train, dev and test (skipping unset splits) into one dataset."""
    available_splits = [split for split in (self.train, self.dev, self.test) if split]
    return ConcatDataset(available_splits)
@deprecated(version="0.8", reason="Use 'make_label_dictionary' instead.")
def make_tag_dictionary(self, tag_type: str) -> Dictionary:
    """Build a tag Dictionary over all splits, with "O" first and the special
    <START>/<STOP> markers appended last."""
    tag_dictionary: Dictionary = Dictionary(add_unk=False)
    tag_dictionary.add_item("O")
    for sentence in _iter_dataset(self.get_all_sentences()):
        for token in sentence.tokens:
            tag_dictionary.add_item(token.get_label(tag_type).value)
    for special_tag in ("<START>", "<STOP>"):
        tag_dictionary.add_item(special_tag)
    return tag_dictionary
class MultiCorpus(Corpus):
    """Bundles several corpora into one Corpus, tagging each sub-corpus with a task id.

    Splits of the sub-corpora are concatenated per split; items drawn from the
    combined datasets carry a "multitask_id" label identifying their origin.
    """

    def __init__(
        self,
        corpora: List[Corpus],
        task_ids: Optional[List[str]] = None,
        name: str = "multicorpus",
        **corpusargs,
    ) -> None:
        self.corpora: List[Corpus] = corpora

        # default task ids: Task_0, Task_1, ...
        ids = task_ids if task_ids else [f"Task_{i}" for i in range(len(corpora))]

        train_parts = [corpus.train for corpus in self.corpora if corpus.train]
        dev_parts = [corpus.dev for corpus in self.corpora if corpus.dev]
        test_parts = [corpus.test for corpus in self.corpora if corpus.test]

        def _concat(parts):
            # empty split lists become None so the base class treats them as unset
            return ConcatFlairDataset(parts, ids) if len(parts) > 0 else None

        super().__init__(
            _concat(train_parts),
            _concat(dev_parts),
            _concat(test_parts),
            name=name,
            **corpusargs,
        )

    def __str__(self) -> str:
        counts = [
            f"{len(split) if split else 0} {label}"
            for split, label in ((self.train, "train"), (self.dev, "dev"), (self.test, "test"))
        ]
        header = "MultiCorpus: " + " + ".join(counts) + " sentences\n - "
        body = "\n - ".join(f"{type(corpus).__name__} {corpus!s} - {corpus.name}" for corpus in self.corpora)
        return header + body
class FlairDataset(Dataset):
    """Base class for flair datasets: a torch Dataset that additionally reports
    whether its data points are held in memory."""

    @abstractmethod
    def is_in_memory(self) -> bool:
        # subclasses return True if all data points are kept in memory,
        # False if they are loaded lazily
        pass
class ConcatFlairDataset(Dataset):
    r"""Dataset as a concatenation of multiple datasets.

    Each item fetched via __getitem__ additionally receives a "multitask_id"
    label naming the dataset it came from.

    Args:
    ----
    datasets (sequence): List of datasets to be concatenated
    ids (sequence): one task id per dataset
    """

    datasets: List[Dataset]
    cumulative_sizes: List[int]

    @staticmethod
    def cumsum(sequence):
        """Return the running totals of the dataset lengths."""
        totals, running = [], 0
        for dataset in sequence:
            running += len(dataset)
            totals.append(running)
        return totals

    def __init__(self, datasets: Iterable[Dataset], ids: Iterable[str]) -> None:
        super().__init__()
        self.datasets = list(datasets)
        self.ids = list(ids)
        assert len(self.datasets) > 0, "datasets should not be an empty iterable"
        for dataset in self.datasets:
            # iterable-style datasets have no len()/indexing, so they cannot be concatenated
            assert not isinstance(dataset, IterableDataset), "ConcatSentenceDataset does not support IterableDataset"
        self.cumulative_sizes = self.cumsum(self.datasets)

    def __len__(self) -> int:
        return self.cumulative_sizes[-1]

    def __getitem__(self, idx):
        if idx < 0:
            if -idx > len(self):
                raise ValueError("absolute value of index should not exceed dataset length")
            idx += len(self)
        # locate the dataset containing idx, then the offset within it
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        sample_idx = idx - (self.cumulative_sizes[dataset_idx - 1] if dataset_idx else 0)
        sentence = self.datasets[dataset_idx][sample_idx]
        sentence.set_label("multitask_id", self.ids[dataset_idx])
        return sentence

    @property
    def cummulative_sizes(self):
        # kept for backward compatibility (note the historical misspelling)
        return self.cumulative_sizes
def iob2(tags):
    """Validate IOB tags and convert IOB1 spans to IOB2 in place.

    Returns False if any tag is malformed; True otherwise (tags possibly rewritten).
    """
    for index, tag in enumerate(tags):
        if tag.value == "O":
            continue
        parts = tag.value.split("-")
        if len(parts) != 2 or parts[0] not in ["I", "B"]:
            return False
        if parts[0] == "B":
            continue
        # here the tag is I-*: check whether it legally continues a span
        previous_value = tags[index - 1].value if index > 0 else "O"
        if index == 0 or previous_value == "O" or previous_value[1:] != tag.value[1:]:
            # IOB1 span start (no matching preceding tag): rewrite I- to B-
            tags[index].value = "B" + tag.value[1:]
    return True
def randomly_split_into_two_datasets(dataset, length_of_first):
    """Randomly partition `dataset` into two Subsets, the first of the given length.

    Indices within each subset are sorted so original dataset order is kept.
    """
    import random

    shuffled = list(range(len(dataset)))
    random.shuffle(shuffled)

    first_indices = sorted(shuffled[:length_of_first])
    second_indices = sorted(shuffled[length_of_first:])

    return Subset(dataset, first_indices), Subset(dataset, second_indices)
def get_spans_from_bio(bioes_tags: List[str], bioes_scores=None) -> List[typing.Tuple[List[int], float, str]]:
    """Decode a BIO/BIOES tag sequence into spans.

    :param bioes_tags: one tag string per token (e.g. "B-PER", "I-PER", "O", "S-LOC");
        note the list is mutated (a closing "O" is appended)
    :param bioes_scores: optional per-token confidences; defaults to 1.0 each
    :return: list of (token indices, averaged score, majority tag value) triples
    """
    # add a dummy "O" to close final prediction
    bioes_tags.append("O")
    # return complex list
    found_spans = []
    # internal variables
    current_tag_weights: Dict[str, float] = {}
    previous_tag = "O-"
    current_span: List[int] = []
    current_span_scores: List[float] = []
    for idx, bioes_tag in enumerate(bioes_tags):
        # non-set tags are OUT tags
        if bioes_tag == "" or bioes_tag == "O" or bioes_tag == "_":
            bioes_tag = "O-"

        # anything that is not OUT is IN
        in_span = bioes_tag != "O-"

        # does this prediction start a new span?
        starts_new_span = False

        # BUGFIX: the last clause previously read `previous_tag[2:] == "S-"`,
        # comparing the class part (e.g. "PER") against the prefix "S-", which
        # can never match — so an E-/I- tag of a different class following an
        # S- tag was wrongly merged into the same span. The prefix check
        # `previous_tag[:2]` is what the comment below describes.
        if bioes_tag[:2] in {"B-", "S-"} or (
            in_span and previous_tag[2:] != bioes_tag[2:] and (bioes_tag[:2] == "I-" or previous_tag[:2] == "S-")
        ):
            # B- and S- always start new spans
            # if the predicted class changes, I- starts a new span
            # if the predicted class changes and S- was previous tag, start a new span
            starts_new_span = True

        # if an existing span is ended (either by reaching O or starting a new span)
        if (starts_new_span or not in_span) and len(current_span) > 0:
            # determine score and value
            span_score = sum(current_span_scores) / len(current_span_scores)
            span_value = max(current_tag_weights.keys(), key=current_tag_weights.__getitem__)

            # append to result list
            found_spans.append((current_span, span_score, span_value))

            # reset for-loop variables for new span
            current_span = []
            current_span_scores = []
            current_tag_weights = {}

        if in_span:
            current_span.append(idx)
            current_span_scores.append(bioes_scores[idx] if bioes_scores else 1.0)
            # span-opening tags get a slightly higher vote for the span's value
            weight = 1.1 if starts_new_span else 1.0
            current_tag_weights[bioes_tag[2:]] = current_tag_weights.setdefault(bioes_tag[2:], 0.0) + weight

        # remember previous tag
        previous_tag = bioes_tag

    return found_spans
| 65,248 | 34.694201 | 138 | py |
flair | flair-master/flair/training_utils.py | import logging
import random
import sys
from collections import defaultdict
from enum import Enum
from functools import reduce
from math import inf
from pathlib import Path
from typing import Dict, List, Optional, Union
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import mean_absolute_error, mean_squared_error
from torch.optim import Optimizer
from torch.utils.data import Dataset
import flair
from flair.data import DT, Dictionary, Sentence, _iter_dataset
log = logging.getLogger("flair")
class Result:
    """Container for evaluation results: a main score, a detailed report string,
    an optional per-class classification report, and a scores dict that must
    contain at least a "loss" entry.
    """

    def __init__(
        self,
        main_score: float,
        detailed_results: str,
        classification_report: Optional[dict] = None,
        scores: Optional[dict] = None,
    ) -> None:
        # BUGFIX: the defaults were mutable dicts ({}), shared across all calls and
        # stored on self — mutating one Result's report could leak into others.
        if classification_report is None:
            classification_report = {}
        if scores is None:
            scores = {}
        assert "loss" in scores, "No loss provided."

        self.main_score: float = main_score
        self.scores = scores
        self.detailed_results: str = detailed_results
        self.classification_report = classification_report

    @property
    def loss(self):
        # the loss is kept inside the scores dict
        return self.scores["loss"]

    def __str__(self) -> str:
        return f"{self.detailed_results!s}\nLoss: {self.loss}'"
class MetricRegression:
    """Accumulates gold (`true`) and predicted (`pred`) values and computes
    standard regression metrics over them."""

    def __init__(self, name) -> None:
        self.name = name
        self.true: List[float] = []
        self.pred: List[float] = []

    def mean_squared_error(self):
        return mean_squared_error(self.true, self.pred)

    def mean_absolute_error(self):
        return mean_absolute_error(self.true, self.pred)

    def pearsonr(self):
        # scipy returns (statistic, p-value); only the statistic is needed
        return pearsonr(self.true, self.pred)[0]

    def spearmanr(self):
        return spearmanr(self.true, self.pred)[0]

    def micro_avg_f_score(self):
        # dummy alias so trainer.train() can treat regression like classification
        return self.mean_squared_error()

    def to_tsv(self):
        metrics = (
            self.mean_squared_error(),
            self.mean_absolute_error(),
            self.pearsonr(),
            self.spearmanr(),
        )
        return "{}\t{}\t{}\t{}".format(*metrics)

    @staticmethod
    def tsv_header(prefix=None):
        if prefix:
            return "{0}_MEAN_SQUARED_ERROR\t{0}_MEAN_ABSOLUTE_ERROR\t{0}_PEARSON\t{0}_SPEARMAN".format(prefix)
        return "MEAN_SQUARED_ERROR\tMEAN_ABSOLUTE_ERROR\tPEARSON\tSPEARMAN"

    @staticmethod
    def to_empty_tsv():
        return "\t_\t_\t_\t_"

    def __str__(self) -> str:
        return "mean squared error: {:.4f} - mean absolute error: {:.4f} - pearson: {:.4f} - spearman: {:.4f}".format(
            self.mean_squared_error(),
            self.mean_absolute_error(),
            self.pearsonr(),
            self.spearmanr(),
        )
class EvaluationMetric(Enum):
    """Metrics a training run can optimize and report for model selection."""

    MICRO_ACCURACY = "micro-average accuracy"
    MICRO_F1_SCORE = "micro-average f1-score"
    MACRO_ACCURACY = "macro-average accuracy"
    MACRO_F1_SCORE = "macro-average f1-score"
    MEAN_SQUARED_ERROR = "mean squared error"
class WeightExtractor:
    """Samples a fixed number of scalar weights per parameter tensor and appends
    their values to a TSV file ("weights.txt") each time extract_weights is called."""

    def __init__(self, directory: Union[str, Path], number_of_weights: int = 10) -> None:
        # accept str or Path for the output directory
        if type(directory) is str:
            directory = Path(directory)
        self.weights_file = init_output_file(directory, "weights.txt")
        # maps parameter name -> watched-weight index -> index path into the tensor
        self.weights_dict: Dict[str, Dict[int, List[float]]] = defaultdict(lambda: defaultdict(list))
        self.number_of_weights = number_of_weights

    def extract_weights(self, state_dict, iteration):
        """Append the current values of the watched weights to the weights file.

        :param state_dict: a model state dict (name -> tensor)
        :param iteration: training iteration recorded alongside each value
        """
        for key in state_dict:
            vec = state_dict[key]
            # print(vec)
            try:
                # watch at most number_of_weights entries (fewer for small tensors)
                weights_to_watch = min(self.number_of_weights, reduce(lambda x, y: x * y, list(vec.size())))
            except Exception:
                # e.g. zero-dimensional tensors cannot be reduced over — skip them
                continue

            # lazily pick which entries of this tensor to watch
            if key not in self.weights_dict:
                self._init_weights_index(key, state_dict, weights_to_watch)

            for i in range(weights_to_watch):
                vec = state_dict[key]
                # follow the stored index path down to a scalar entry
                for index in self.weights_dict[key][i]:
                    vec = vec[index]

                value = vec.item()

                with open(self.weights_file, "a") as f:
                    f.write(f"{iteration}\t{key}\t{i}\t{float(value)}\n")

    def _init_weights_index(self, key, state_dict, weights_to_watch):
        """Pick `weights_to_watch` distinct random index paths into tensor `key`."""
        indices = {}
        i = 0
        while len(indices) < weights_to_watch:
            vec = state_dict[key]
            cur_indices = []

            # draw one random index per tensor dimension
            for _x in range(len(vec.size())):
                index = random.randint(0, len(vec) - 1)
                vec = vec[index]
                cur_indices.append(index)

            # only keep previously unseen index paths (ensures distinct entries)
            if cur_indices not in list(indices.values()):
                indices[i] = cur_indices
                i += 1

        self.weights_dict[key] = indices
class AnnealOnPlateau:
    """A learning-rate scheduler for annealing on plateau.

    This class is a modification of
    torch.optim.lr_scheduler.ReduceLROnPlateau that enables
    setting an "auxiliary metric" to break ties.

    Reduce learning rate when a metric has stopped improving.
    Models often benefit from reducing the learning rate by a factor
    of 2-10 once learning stagnates. This scheduler reads a metrics
    quantity and if no improvement is seen for a 'patience' number
    of epochs, the learning rate is reduced.

    Args:
    ----
    optimizer (Optimizer): Wrapped optimizer.
    mode (str): One of `min`, `max`. In `min` mode, lr will
        be reduced when the quantity monitored has stopped
        decreasing; in `max` mode it will be reduced when the
        quantity monitored has stopped increasing. Default: 'min'.
    factor (float): Factor by which the learning rate will be
        reduced. new_lr = lr * factor. Default: 0.1.
    patience (int): Number of epochs with no improvement after
        which learning rate will be reduced. For example, if
        `patience = 2`, then we will ignore the first 2 epochs
        with no improvement, and will only decrease the LR after the
        3rd epoch if the loss still hasn't improved then.
        Default: 10.
    verbose (bool): If ``True``, prints a message to stdout for
        each update. Default: ``False``.
    cooldown (int): Number of epochs to wait before resuming
        normal operation after lr has been reduced. Default: 0.
    min_lr (float or list): A scalar or a list of scalars. A
        lower bound on the learning rate of all param groups
        or each group respectively. Default: 0.
    eps (float): Minimal decay applied to lr. If the difference
        between new and old lr is smaller than eps, the update is
        ignored. Default: 1e-8.

    Example:
    -------
    >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    >>> scheduler = ReduceLROnPlateau(optimizer, 'min')
    >>> for epoch in range(10):
    >>>     train(...)
    >>>     val_loss = validate(...)
    >>>     # Note that step should be called after validate()
    >>>     scheduler.step(val_loss)
    """

    def __init__(
        self,
        optimizer,
        mode="min",
        aux_mode="min",
        factor=0.1,
        patience=10,
        initial_extra_patience=0,
        verbose=False,
        cooldown=0,
        min_lr=0,
        eps=1e-8,
    ) -> None:
        if factor >= 1.0:
            raise ValueError("Factor should be < 1.0.")
        self.factor = factor

        # Attach optimizer
        if not isinstance(optimizer, Optimizer):
            raise TypeError(f"{type(optimizer).__name__} is not an Optimizer")
        self.optimizer = optimizer

        # one minimum learning rate per param group
        if isinstance(min_lr, (list, tuple)):
            if len(min_lr) != len(optimizer.param_groups):
                raise ValueError(f"expected {len(optimizer.param_groups)} min_lrs, got {len(min_lr)}")
            self.min_lrs = list(min_lr)
        else:
            self.min_lrs = [min_lr] * len(optimizer.param_groups)

        self.default_patience = patience
        # extra patience applies only until the first LR reduction
        self.effective_patience = patience + initial_extra_patience
        self.verbose = verbose
        self.cooldown = cooldown
        self.cooldown_counter = 0
        self.mode = mode
        self.aux_mode = aux_mode
        self.best = None
        self.best_aux = None
        self.num_bad_epochs = None
        self.mode_worse = None  # the worse value for the chosen mode
        self.eps = eps
        self.last_epoch = 0
        self._init_is_better(mode=mode)
        self._reset()

    def _reset(self):
        """Resets num_bad_epochs counter and cooldown counter."""
        self.best = self.mode_worse
        self.cooldown_counter = 0
        self.num_bad_epochs = 0

    def step(self, metric, auxiliary_metric=None) -> bool:
        """Record one epoch's metric value; returns True if the LR was reduced."""
        # convert `metrics` to float, in case it's a zero-dim Tensor
        current = float(metric)
        epoch = self.last_epoch + 1
        self.last_epoch = epoch

        is_better = False

        assert self.best is not None
        if self.mode == "min" and current < self.best:
            is_better = True
        if self.mode == "max" and current > self.best:
            is_better = True

        # tie on the main metric: fall back to the auxiliary metric
        if current == self.best and auxiliary_metric:
            current_aux = float(auxiliary_metric)
            # NOTE(review): self.best_aux may still be None here if no auxiliary
            # metric was supplied on earlier epochs — the comparisons below would
            # then raise; confirm callers pass auxiliary_metric consistently
            if self.aux_mode == "min" and current_aux < self.best_aux:
                is_better = True
            if self.aux_mode == "max" and current_aux > self.best_aux:
                is_better = True

        if is_better:
            self.best = current
            if auxiliary_metric:
                self.best_aux = auxiliary_metric
            self.num_bad_epochs = 0
        else:
            self.num_bad_epochs += 1

        if self.in_cooldown:
            self.cooldown_counter -= 1
            self.num_bad_epochs = 0  # ignore any bad epochs in cooldown

        reduce_learning_rate = self.num_bad_epochs > self.effective_patience
        if reduce_learning_rate:
            self._reduce_lr(epoch)
            self.cooldown_counter = self.cooldown
            self.num_bad_epochs = 0
            # after the first anneal, fall back to the default patience
            self.effective_patience = self.default_patience

        self._last_lr = [group["lr"] for group in self.optimizer.param_groups]

        return reduce_learning_rate

    def _reduce_lr(self, epoch):
        # multiply each group's LR by `factor`, bounded below by min_lr;
        # changes smaller than eps are skipped
        for i, param_group in enumerate(self.optimizer.param_groups):
            old_lr = float(param_group["lr"])
            new_lr = max(old_lr * self.factor, self.min_lrs[i])
            if old_lr - new_lr > self.eps:
                param_group["lr"] = new_lr
                if self.verbose:
                    # NOTE(review): this interpolates `epoch` after the word "group";
                    # the group index `i` was probably intended — kept unchanged
                    log.info(f" - reducing learning rate of group {epoch} to {new_lr}")

    @property
    def in_cooldown(self):
        return self.cooldown_counter > 0

    def _init_is_better(self, mode):
        if mode not in {"min", "max"}:
            raise ValueError("mode " + mode + " is unknown!")

        if mode == "min":
            self.mode_worse = inf
        else:  # mode == 'max':
            self.mode_worse = -inf

        self.mode = mode

    def state_dict(self):
        # everything except the (unserializable) optimizer reference
        return {key: value for key, value in self.__dict__.items() if key != "optimizer"}

    def load_state_dict(self, state_dict):
        self.__dict__.update(state_dict)
        self._init_is_better(mode=self.mode)
def init_output_file(base_path: Union[str, Path], file_name: str) -> Path:
    """Creates a local file which can be appended to.

    :param base_path: the path to the directory (created if missing)
    :param file_name: the file name
    :return: the created file
    """
    directory = Path(base_path)
    directory.mkdir(parents=True, exist_ok=True)

    output_file = directory / file_name
    output_file.touch(exist_ok=True)
    return output_file
def convert_labels_to_one_hot(label_list: List[List[str]], label_dict: Dictionary) -> List[List[int]]:
    """Convert list of labels to a one hot list.

    :param label_list: list of labels
    :param label_dict: label dictionary
    :return: converted label list
    """
    vocabulary = label_dict.get_items()
    encoded = []
    for labels in label_list:
        present = set(labels)
        encoded.append([1 if item in present else 0 for item in vocabulary])
    return encoded
def log_line(log):
    """Log a horizontal separator line of 100 dashes."""
    separator = "-" * 100
    if sys.version_info >= (3, 8):
        # point the record at the caller of log_line, not at this helper
        log.info(separator, stacklevel=3)
    else:
        log.info(separator)
def add_file_handler(log, output_file):
    """Attach a file handler that writes INFO-level records to `output_file`.

    Returns the handler so the caller can detach it later.
    """
    init_output_file(output_file.parents[0], output_file.name)

    file_handler = logging.FileHandler(output_file, mode="w", encoding="utf-8")
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(logging.Formatter("%(asctime)-15s %(message)s"))

    log.addHandler(file_handler)
    return file_handler
def store_embeddings(
    data_points: Union[List[DT], Dataset], storage_mode: str, dynamic_embeddings: Optional[List[str]] = None
):
    """Delete and/or move data-point embeddings after a batch, per `storage_mode`.

    "none" deletes all embeddings; otherwise only the dynamic (gradient-carrying)
    ones are deleted. "cpu" additionally moves the remaining embeddings to CPU.
    """
    if isinstance(data_points, Dataset):
        data_points = list(_iter_dataset(data_points))

    # 'none' storage: drop every embedding
    if storage_mode == "none":
        dynamic_embeddings = None
    elif dynamic_embeddings is None:
        # detect gradient-carrying embeddings automatically when not given
        dynamic_embeddings = identify_dynamic_embeddings(data_points)

    # dynamic embeddings are always deleted (they get recomputed anyway)
    for point in data_points:
        point.clear_embeddings(dynamic_embeddings)

    # optionally move remaining embeddings off the training device
    if storage_mode == "cpu":
        pin = str(flair.device) != "cpu"
        for point in data_points:
            point.to("cpu", pin_memory=pin)
def identify_dynamic_embeddings(data_points: List[DT]):
    """Return the names of embeddings that carry gradients ("dynamic"), or None
    if no embeddings were found at all.

    Names are collected from the first token (for Sentences) and from the data
    point itself; an empty list means embeddings exist but none are dynamic.
    """
    dynamic_embeddings = []
    all_embeddings = []
    for data_point in data_points:
        if isinstance(data_point, Sentence):
            first_token = data_point[0]
            for name, vector in first_token._embeddings.items():
                if vector.requires_grad:
                    dynamic_embeddings.append(name)
                all_embeddings.append(name)

        for name, vector in data_point._embeddings.items():
            if vector.requires_grad:
                dynamic_embeddings.append(name)
            all_embeddings.append(name)

        # stop at the first data point that shows any dynamic embeddings
        # NOTE(review): indentation reconstructed (source arrived flattened);
        # the list returned here is not deduplicated — confirm callers tolerate
        # repeated names (clear_embeddings appears idempotent per name)
        if dynamic_embeddings:
            return dynamic_embeddings

    # no embeddings at all -> None; embeddings present but none dynamic -> []
    if not all_embeddings:
        return None
    return list(set(dynamic_embeddings))
| 14,157 | 32.709524 | 118 | py |
flair | flair-master/flair/file_utils.py | """Utilities for working with the local dataset cache. Copied from AllenNLP."""
import base64
import functools
import io
import logging
import mmap
import os
import re
import shutil
import tempfile
import typing
import warnings
import zipfile
from pathlib import Path
from typing import Optional, Sequence, Tuple, Union, cast
from urllib.parse import urlparse
import boto3
import requests
import torch
from botocore import UNSIGNED
from botocore.config import Config
from tqdm import tqdm as _tqdm
import flair
logger = logging.getLogger("flair")
url_proxies: Optional[typing.Dict[str, str]] = None
def set_proxies(proxies: typing.Dict[str, str]) -> None:
    """Allows for data downloaded from urls to be forwarded to a proxy.

    see https://requests.readthedocs.io/en/latest/user/advanced/#proxies

    :param proxies: A dictionary of proxies according to the requests documentation.
    :return: None
    """
    # module-level setting read by get_from_cache for all subsequent downloads
    global url_proxies
    url_proxies = proxies
def load_big_file(f: str):
    """Memory-map a file and return the mmap object.

    Workaround for pickling errors with files over 2GB on certain Mac and
    Windows distributions; the mapping remains valid after the file handle
    is closed.

    :param f: path of the file to map
    :return: a read-only mmap over the file contents
    """
    with open(f, "rb") as handle:
        # mmap seems to be much more memory efficient than reading the bytes
        mapped = mmap.mmap(handle.fileno(), 0, access=mmap.ACCESS_READ)
    return mapped
def url_to_filename(url: str, etag: Optional[str] = None) -> str:
    """Converts an url into a filename in a reversible way.

    The url is base64-encoded; if `etag` is given it is appended after a
    period, with its quotes stripped (Windows doesn't like quotes in filenames).
    """
    encoded = base64.b64encode(url.encode("utf-8")).decode("utf-8")
    if not etag:
        return encoded
    return "{}.{}".format(encoded, etag.replace('"', ""))
def filename_to_url(filename: str) -> Tuple[str, Optional[str]]:
    """Recovers the url from the encoded filename.

    Returns it and the ETag (which may be ``None``).
    """
    etag: Optional[str]
    if "." in filename:
        # everything after the first period is the etag
        encoded, etag = filename.split(".", 1)
    else:
        encoded, etag = filename, None

    url = base64.b64decode(encoded.encode("utf-8")).decode("utf-8")
    return url, etag
def cached_path(url_or_filename: str, cache_dir: Union[str, Path]) -> Path:
    """Download the given path and return the local path from the cache.

    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    cache_dir = Path(cache_dir)
    dataset_cache = flair.cache_root / cache_dir if flair.cache_root not in cache_dir.parents else cache_dir

    parsed = urlparse(url_or_filename)
    scheme = parsed.scheme
    if scheme in ("http", "https"):
        # URL, so get it from the cache (downloading if necessary)
        return get_from_cache(url_or_filename, dataset_cache)
    if scheme == "s3":
        return download_s3_to_path(parsed.netloc, dataset_cache)
    if len(scheme) >= 2:
        # Something unknown
        raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")

    # very short "scheme" (e.g. a Windows drive letter): treat as a local path
    local_path = Path(url_or_filename)
    if local_path.exists():
        return local_path
    raise FileNotFoundError(f"file {url_or_filename} not found")
def download_s3_to_path(bucket_name: str, cache_path: Path) -> Path:
    """Mirror a public S3 bucket into the cache directory (anonymous access).

    If the target directory already exists, the download is skipped entirely.
    """
    destination = cache_path / bucket_name
    if destination.exists():
        return destination

    # unsigned config allows access to public buckets without credentials
    s3 = boto3.resource("s3", config=Config(signature_version=UNSIGNED))
    for obj in s3.Bucket(bucket_name).objects.iterator():
        # skip directory placeholder keys
        if obj.key[-1] == "/":
            continue
        target = destination / obj.key
        target.parent.mkdir(exist_ok=True, parents=True)
        s3.Bucket(bucket_name).download_file(obj.key, str(target))
    return destination
def unzip_file(file: Union[str, Path], unzip_to: Union[str, Path]):
    """Extract every member of a zip archive into the given directory."""
    from zipfile import ZipFile

    with ZipFile(Path(file), "r") as archive:
        archive.extractall(Path(unzip_to))
def unpack_file(file: Path, unpack_to: Path, mode: Optional[str] = None, keep: bool = True):
    """Unpacks an archive file to the given location.

    :param file: archive file to unpack
    :param unpack_to: destination where to store the output
    :param mode: type of the archive (zip, tar, gz, targz, rar); inferred from
        the file suffix when None
    :param keep: whether to keep the archive after extraction (False deletes it)
    """
    name = str(file)
    if mode == "zip" or (mode is None and name.endswith("zip")):
        from zipfile import ZipFile

        with ZipFile(file, "r") as zipObj:
            # Extract all the contents of zip file in current directory
            zipObj.extractall(unpack_to)

    # BUGFIX: the original condition read
    #   mode == "targz" or (mode is None and ...endswith("tar.gz") or ...endswith("tgz"))
    # so due to operator precedence the "tgz" suffix check applied even when an
    # explicit mode was given (e.g. mode="tar" with a *.tgz file wrongly took
    # the gzip branch). The suffix checks now apply only when mode is None.
    elif mode == "targz" or (mode is None and (name.endswith("tar.gz") or name.endswith("tgz"))):
        import tarfile

        with tarfile.open(file, "r:gz") as tarObj:
            tarObj.extractall(unpack_to)

    elif mode == "tar" or (mode is None and name.endswith("tar")):
        import tarfile

        with tarfile.open(file, "r") as tarObj:
            tarObj.extractall(unpack_to)

    elif mode == "gz" or (mode is None and name.endswith("gz")):
        import gzip

        with gzip.open(name, "rb") as f_in, open(str(unpack_to), "wb") as f_out:
            shutil.copyfileobj(f_in, f_out)

    elif mode == "rar" or (mode is None and name.endswith("rar")):
        import patoolib

        patoolib.extract_archive(name, outdir=unpack_to, interactive=False)

    elif mode is None:
        raise AssertionError(f"Can't infer archive type from {file}")
    else:
        raise AssertionError(f"Unsupported mode {mode}")

    if not keep:
        os.remove(name)
# TODO(joelgrus): do we want to do checksums or anything like that?
def get_from_cache(url: str, cache_dir: Path) -> Path:
    """Given a URL, look for the corresponding file in the local cache or download it.

    :param url: url of the file to fetch; the final path component is the cache key
    :param cache_dir: directory in which downloads are cached (created if missing)
    :return: the path to the cached file.
    :raises OSError: if the initial HEAD request does not return status 200
    """
    cache_dir.mkdir(parents=True, exist_ok=True)

    # cache key is simply the final path component of the url
    filename = re.sub(r".+/", "", url)
    # get cache path to put the file
    cache_path = cache_dir / filename
    if cache_path.exists():
        return cache_path

    # make HEAD request to check ETag
    response = requests.head(url, headers={"User-Agent": "Flair"}, allow_redirects=True)
    if response.status_code != 200:
        raise OSError(f"HEAD request failed for url {url} with status code {response.status_code}.")

    # add ETag to filename if it exists
    # etag = response.headers.get("ETag")
    # NOTE(review): the ETag is currently unused (dead code above), so the HEAD
    # request only serves as an existence/reachability check

    if not cache_path.exists():  # always true here (early-returned above); kept as-is
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        fd, temp_filename = tempfile.mkstemp()
        logger.info("%s not found in cache, downloading to %s", url, temp_filename)

        # GET file object; url_proxies is the module-level proxy setting
        req = requests.get(url, stream=True, headers={"User-Agent": "Flair"}, proxies=url_proxies)
        content_length = req.headers.get("Content-Length")
        total = int(content_length) if content_length is not None else None
        progress = Tqdm.tqdm(unit="B", total=total, unit_scale=True, unit_divisor=1024)
        with open(temp_filename, "wb") as temp_file:
            for chunk in req.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    progress.update(len(chunk))
                    temp_file.write(chunk)

        progress.close()

        logger.info("copying %s to cache at %s", temp_filename, cache_path)
        shutil.copyfile(temp_filename, str(cache_path))
        logger.info("removing temp file %s", temp_filename)
        # close the mkstemp descriptor (the file was written via a separate handle)
        os.close(fd)
        os.remove(temp_filename)

    return cache_path
def open_inside_zip(
    archive_path: str,
    cache_dir: Union[str, Path],
    member_path: Optional[str] = None,
    encoding: str = "utf8",
) -> typing.Iterable:
    """Open a single member of a (possibly remote) zip archive as a text stream.

    :param archive_path: path or url of the zip archive
    :param cache_dir: cache directory used when the archive must be downloaded
    :param member_path: member to open; if None, the archive must contain exactly one file
    :param encoding: text encoding of the returned stream
    :return: a text stream over the member's contents
    """
    cached_archive_path = cached_path(archive_path, cache_dir=Path(cache_dir))
    # BUGFIX: previously the ZipFile was opened in a `with` block that closed it
    # before the returned stream was ever read — reading a member handle after
    # its ZipFile is closed raises ValueError. The archive must stay open while
    # the caller consumes the stream (it is released when the wrapper is GC'ed).
    archive = zipfile.ZipFile(cached_archive_path, "r")
    if member_path is None:
        members_list = archive.namelist()
        member_path = get_the_only_file_in_the_archive(members_list, archive_path)
    member_file = archive.open(cast(str, member_path), "r")
    return io.TextIOWrapper(member_file, encoding=encoding)
def extract_single_zip_file(
    archive_path: str,
    cache_dir: Union[str, Path],
    member_path: Optional[str] = None,
) -> Path:
    """Extract one member of a zip archive into the dataset cache and return its path.

    If no member is named, the archive must contain exactly one file. Nothing is
    extracted when the target path already exists.
    """
    cache_dir = Path(cache_dir)
    local_archive = cached_path(archive_path, cache_dir=cache_dir)
    # anchor the extraction target under flair.cache_root unless cache_dir already lives there
    if flair.cache_root in cache_dir.parents:
        dataset_cache = cache_dir
    else:
        dataset_cache = flair.cache_root / cache_dir
    if member_path is not None:
        candidate = dataset_cache / member_path
        if candidate.exists():
            return candidate
    with zipfile.ZipFile(local_archive, "r") as zip_handle:
        if member_path is None:
            member_path = get_the_only_file_in_the_archive(zip_handle.namelist(), archive_path)
        target = dataset_cache / member_path
        if not target.exists():
            zip_handle.extract(member_path, dataset_cache)
    return target
def get_the_only_file_in_the_archive(members_list: Sequence[str], archive_path: str) -> str:
    """Return the sole member of an archive, raising ``ValueError`` if it has several."""
    if len(members_list) > 1:
        example_uri = format_embeddings_file_uri("path_or_url_to_archive", "path_inside_archive")
        raise ValueError(
            f"The archive {archive_path} contains multiple files, so you must select "
            f"one of the files inside providing a uri of the type: {example_uri}"
        )
    return members_list[0]
def format_embeddings_file_uri(main_file_path_or_url: str, path_inside_archive: Optional[str] = None) -> str:
    """Combine an archive location and an inner path into the ``(archive)#member`` uri form.

    With no (or an empty) inner path, the archive location is returned unchanged.
    """
    if not path_inside_archive:
        return main_file_path_or_url
    return f"({main_file_path_or_url})#{path_inside_archive}"
class Tqdm:
    """Thin wrapper around tqdm that lets the update rate be tuned globally."""

    # Mirrors the argument default used by tqdm itself.
    default_mininterval: float = 0.1

    @staticmethod
    def set_default_mininterval(value: float) -> None:
        Tqdm.default_mininterval = value

    @staticmethod
    def set_slower_interval(use_slower_interval: bool) -> None:
        """Slows down the tqdm update interval.

        If ``use_slower_interval`` is ``True``, we will dramatically slow down ``tqdm's`` default
        output rate. ``tqdm's`` default output rate is great for interactively watching progress,
        but it is not great for log files. You might want to set this if you are primarily going
        to be looking at output through log files, not the terminal.
        """
        Tqdm.default_mininterval = 10.0 if use_slower_interval else 0.1

    @staticmethod
    def tqdm(*args, **kwargs):
        # caller-supplied mininterval wins over the global default
        merged_kwargs = dict(kwargs)
        merged_kwargs.setdefault("mininterval", Tqdm.default_mininterval)
        return _tqdm(*args, **merged_kwargs)
def instance_lru_cache(*cache_args, **cache_kwargs):
    """Per-instance variant of ``functools.lru_cache`` for methods.

    On the first call, a freshly lru-cached bound method is created and stored on the
    instance under the method's name, shadowing this wrapper. Each instance therefore
    gets its own independent cache, and instances are not kept alive by a shared cache.
    """

    def decorator(func):
        @functools.wraps(func)
        def install_and_call(self, *args, **kwargs):
            cached_method = functools.lru_cache(*cache_args, **cache_kwargs)(func).__get__(self, self.__class__)
            setattr(self, func.__name__, cached_method)
            return cached_method(*args, **kwargs)

        return install_and_call

    return decorator
def load_torch_state(model_file: str) -> typing.Dict[str, typing.Any]:
    """Load a torch-serialized state dict from ``model_file`` onto the CPU, silencing load-time warnings."""
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        # load_big_file is a workaround by https://github.com/highway11git
        # to load models on some Mac/Windows setups
        # see https://github.com/zalandoresearch/flair/issues/351
        f = load_big_file(model_file)
        return torch.load(f, map_location="cpu")
| 12,566 | 34.600567 | 109 | py |
flair | flair-master/flair/__init__.py | import logging.config
import os
from pathlib import Path
import torch
from transformers import set_seed as hf_set_seed
# global variable: cache_root
from .file_utils import set_proxies
# where Flair caches downloaded models/datasets; overridable via FLAIR_CACHE_ROOT
cache_root = Path(os.getenv("FLAIR_CACHE_ROOT", Path(Path.home(), ".flair")))
device: torch.device
"""Flair is using a single device for everything. You can set this device by overwriting this variable."""
# global variable: device
if torch.cuda.is_available():
    # FLAIR_DEVICE may name a specific GPU index; otherwise the first GPU is used
    device_id = os.environ.get("FLAIR_DEVICE")
    # No need for correctness checks, torch is doing it
    device = torch.device(f"cuda:{device_id}") if device_id else torch.device("cuda:0")
else:
    device = torch.device("cpu")
# global variable: version
__version__ = "0.12.2"
# global variable: arrow symbol (used when printing label transitions)
_arrow = " → "
from . import ( # noqa: E402 import after setting device
data,
models,
nn,
trainers,
visual,
)
# configure the package logger: INFO-level, timestamped lines to stdout; loggers
# already configured by the host application are left untouched
# (disable_existing_loggers=False) and messages do not propagate to the root logger
logging.config.dictConfig(
    {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {"standard": {"format": "%(asctime)-15s %(message)s"}},
        "handlers": {
            "console": {
                "level": "INFO",
                "class": "logging.StreamHandler",
                "formatter": "standard",
                "stream": "ext://sys.stdout",
            }
        },
        "loggers": {"flair": {"handlers": ["console"], "level": "INFO", "propagate": False}},
    }
)

logger = logging.getLogger("flair")
def set_seed(seed: int):
    """Set the global random seed by delegating to the transformers library's ``set_seed``."""
    hf_set_seed(seed)
# explicit public API of the `flair` package
# NOTE(review): "datasets" is exported here but does not appear in the
# `from . import (...)` above — presumably resolved elsewhere; confirm.
__all__ = [
    "cache_root",
    "device",
    "__version__",
    "logger",
    "set_seed",
    "data",
    "models",
    "nn",
    "trainers",
    "visual",
    "datasets",
    "set_proxies",
]
| 1,705 | 20.871795 | 106 | py |
flair | flair-master/flair/samplers.py | import logging
import random
from collections import defaultdict
from typing import Dict
import torch
from torch.utils.data.sampler import Sampler
log = logging.getLogger("flair")
class FlairSampler(Sampler):
    """Base class for Flair samplers: the dataset is attached lazily via :meth:`set_dataset`."""

    def set_dataset(self, data_source):
        """Initialize the data source for the FlairSampler.

        :param data_source: dataset to sample from.
        """
        self.data_source = data_source
        self.num_samples = len(data_source)

    def __len__(self) -> int:
        return self.num_samples
class ImbalancedClassificationDatasetSampler(FlairSampler):
    """Use this to upsample rare classes and downsample common classes in your unbalanced classification dataset."""

    def __init__(self) -> None:
        super().__init__(None)

    def set_dataset(self, data_source):
        """Initialize the dataset used for sampling.

        :param data_source:
        """
        self.data_source = data_source
        self.num_samples = len(data_source)
        self.indices = list(range(self.num_samples))
        # count how often each class label occurs in the dataset
        label_count: Dict[str, int] = defaultdict(int)
        for sentence in data_source:
            for label in sentence.labels:
                label_count[label.value] += 1
        # weight each sample inversely to the frequency of its first label
        offset = 0
        self.weights = torch.DoubleTensor(
            [1.0 / (offset + label_count[data_source[idx].labels[0].value]) for idx in self.indices]
        )

    def __iter__(self):
        chosen = torch.multinomial(self.weights, self.num_samples, replacement=True)
        return (self.indices[i] for i in chosen)
class ChunkSampler(FlairSampler):
    """Splits data into blocks and randomizes them before sampling.

    This causes some order of the data to be preserved, while still shuffling the data.
    """

    def __init__(self, block_size=5, plus_window=5) -> None:
        super().__init__(None)
        self.block_size = block_size
        self.plus_window = plus_window
        self.data_source = None

    def __iter__(self):
        indices = list(range(len(self.data_source)))
        # the effective block size is jittered per epoch within [block_size, block_size + plus_window]
        blocksize = self.block_size + random.randint(0, self.plus_window)
        log.info(f"Chunk sampling with blocksize = {blocksize} ({self.block_size} + {self.plus_window})")
        # cut into consecutive blocks, shuffle the blocks, then flatten again
        blocks = [indices[start : start + blocksize] for start in range(0, len(indices), blocksize)]
        random.shuffle(blocks)
        flattened = [index for block in blocks for index in block]
        return iter(flattened)
class ExpandingChunkSampler(FlairSampler):
    """Splits data into blocks and randomizes them before sampling. Block size grows with each epoch.

    This causes some order of the data to be preserved, while still shuffling the data.
    """

    def __init__(self, step=3) -> None:
        """Initialize the ExpandingChunkSampler.

        :param step: every *step* epochs the block size increments by one.
        """
        super().__init__(None)
        self.block_size = 1
        self.epoch_count = 0
        self.step = step

    def __iter__(self):
        self.epoch_count += 1
        indices = list(range(len(self.data_source)))
        log.info(f"Chunk sampling with blocksize = {self.block_size}")
        # cut into consecutive blocks, shuffle the blocks, then flatten again
        size = self.block_size
        blocks = [indices[start : start + size] for start in range(0, len(indices), size)]
        random.shuffle(blocks)
        flattened = [index for block in blocks for index in block]
        # grow the block size every `step` epochs
        if self.epoch_count % self.step == 0:
            self.block_size += 1
        return iter(flattened)
| 3,688 | 30 | 116 | py |
flair | flair-master/flair/nn/model.py | import inspect
import itertools
import logging
import typing
from abc import ABC, abstractmethod
from collections import Counter
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import torch.nn
from torch.nn.modules.loss import _Loss
from torch.utils.data.dataset import Dataset
from tqdm import tqdm
import flair
from flair.data import DT, DT2, Corpus, Dictionary, Sentence, _iter_dataset
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.embeddings import Embeddings
from flair.embeddings.base import load_embeddings
from flair.file_utils import Tqdm, load_torch_state
from flair.training_utils import Result, store_embeddings
log = logging.getLogger("flair")
class Model(torch.nn.Module, typing.Generic[DT], ABC):
    """Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.

    Every new type of model must implement these methods.
    """

    # optional metadata (Flair/PyTorch versions, training parameters) attached during training
    model_card: Optional[Dict[str, Any]] = None

    @property
    @abstractmethod
    def label_type(self):
        """Each model predicts labels of a certain type."""
        raise NotImplementedError

    @abstractmethod
    def forward_loss(self, data_points: List[DT]) -> Tuple[torch.Tensor, int]:
        """Performs a forward pass and returns a loss tensor for backpropagation.

        Implement this to enable training.
        """
        raise NotImplementedError

    @abstractmethod
    def evaluate(
        self,
        data_points: Union[List[DT], Dataset],
        gold_label_type: str,
        out_path: Optional[Union[str, Path]] = None,
        embedding_storage_mode: str = "none",
        mini_batch_size: int = 32,
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        exclude_labels: List[str] = [],
        gold_label_dictionary: Optional[Dictionary] = None,
        return_loss: bool = True,
        **kwargs,
    ) -> Result:
        """Evaluates the model. Returns a Result object containing evaluation results and a loss value.

        Implement this to enable evaluation.

        :param data_loader: DataLoader that iterates over dataset to be evaluated
        :param out_path: Optional output path to store predictions
        :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU  # noqa: E501
        :return: Returns a Tuple consisting of a Result object and a loss float value
        """
        raise NotImplementedError

    def _get_state_dict(self):
        """Returns the state dictionary for this model."""
        state_dict = {"state_dict": self.state_dict()}

        # Always include the name of the Model class for which the state dict holds
        state_dict["__cls__"] = self.__class__.__name__

        return state_dict

    @classmethod
    def _init_model_with_state_dict(cls, state, **kwargs):
        """Initialize the model from a state dictionary."""
        if "embeddings" in kwargs:
            embeddings = kwargs.pop("embeddings")
            # embeddings may have been serialized as a plain parameter dict; rebuild them
            if isinstance(embeddings, dict):
                embeddings = load_embeddings(embeddings)
            kwargs["embeddings"] = embeddings

        model = cls(**kwargs)

        model.load_state_dict(state["state_dict"])
        return model

    @staticmethod
    def _fetch_model(model_name) -> str:
        # subclasses override this to resolve shorthand names to model files; default is a no-op
        return model_name

    def save(self, model_file: Union[str, Path], checkpoint: bool = False):
        """Saves the current model to the provided file.

        :param model_file: the model file
        """
        # NOTE(review): the ``checkpoint`` parameter is currently unused in this method
        model_state = self._get_state_dict()

        # write out a "model card" if one is set
        if self.model_card is not None:
            model_state["model_card"] = self.model_card

        # save model
        torch.save(model_state, str(model_file), pickle_protocol=4)

    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "Model":
        """Loads the model from the given file.

        :param model_path: the model file or the already loaded state dict
        :return: the loaded text classifier model
        """
        # if this class is abstract, go through all inheriting classes and try to fetch and load the model
        if inspect.isabstract(cls):
            # get all non-abstract subclasses
            subclasses = get_non_abstract_subclasses(cls)

            # try to fetch the model for each subclass. if fetching is possible, load model and return it
            for model_cls in subclasses:
                try:
                    new_model_path = model_cls._fetch_model(model_path)
                    if new_model_path != model_path:
                        return model_cls.load(new_model_path)
                except Exception:
                    # skip any invalid loadings, e.g. not found on huggingface hub
                    continue

            # if the model cannot be fetched, load as a file
            state = model_path if isinstance(model_path, dict) else load_torch_state(str(model_path))

            # try to get model class from state
            cls_name = state.pop("__cls__", None)
            if cls_name:
                for model_cls in subclasses:
                    if cls_name == model_cls.__name__:
                        return model_cls.load(state)

            # older (flair 11.3 and below) models do not contain cls information. In this case, try all subclasses
            for model_cls in subclasses:
                # if str(model_cls) == "<class 'flair.models.pairwise_classification_model.TextPairClassifier'>": continue
                try:
                    model = model_cls.load(state)
                    return model
                except Exception as e:
                    # NOTE(review): prints instead of logging; kept as-is
                    print(e)
                    # skip any invalid loadings, e.g. not found on huggingface hub
                    continue

            raise ValueError(f"Could not find any model with name '{model_path}'")

        else:
            # if this class is not abstract, fetch the model and load it
            if not isinstance(model_path, dict):
                model_file = cls._fetch_model(str(model_path))
                state = load_torch_state(model_file)
            else:
                state = model_path

            # the class marker is metadata only, not a constructor argument
            if "__cls__" in state:
                state.pop("__cls__")

            model = cls._init_model_with_state_dict(state)

            if "model_card" in state:
                model.model_card = state["model_card"]

            model.eval()
            model.to(flair.device)

            return model

    def print_model_card(self):
        """Log the stored model card (training metadata), if this model has one."""
        if hasattr(self, "model_card"):
            param_out = "\n------------------------------------\n"
            param_out += "--------- Flair Model Card ---------\n"
            param_out += "------------------------------------\n"
            param_out += "- this Flair model was trained with:\n"
            param_out += f"-- Flair version {self.model_card['flair_version']}\n"
            param_out += f"-- PyTorch version {self.model_card['pytorch_version']}\n"
            if "transformers_version" in self.model_card:
                param_out += f"-- Transformers version {self.model_card['transformers_version']}\n"
            param_out += "------------------------------------\n"

            param_out += "------- Training Parameters: -------\n"
            param_out += "------------------------------------\n"
            training_params = "\n".join(
                f'-- {param} = {self.model_card["training_parameters"][param]}'
                for param in self.model_card["training_parameters"]
            )
            param_out += training_params + "\n"
            param_out += "------------------------------------\n"

            log.info(param_out)
        else:
            log.info(
                "This model has no model card (likely because it is not yet "
                "trained or was trained with Flair version < 0.9.1)"
            )
class ReduceTransformerVocabMixin(ABC):
    """Mixin for models that can enumerate the tokens they actually use on a corpus.

    NOTE(review): judging by the name, this is presumably consumed to shrink a
    transformer embedding's vocabulary to the used tokens — confirm at the call sites.
    """

    @abstractmethod
    def get_used_tokens(self, corpus: Corpus) -> typing.Iterable[List[str]]:
        pass
class Classifier(Model[DT], typing.Generic[DT], ReduceTransformerVocabMixin, ABC):
    """Abstract base class for all Flair models that do classification.

    The classifier inherits from flair.nn.Model and adds unified functionality for both, single- and multi-label
    classification and evaluation. Therefore, it is ensured to have a fair comparison between multiple classifiers.
    """

    def evaluate(
        self,
        data_points: Union[List[DT], Dataset],
        gold_label_type: str,
        out_path: Optional[Union[str, Path]] = None,
        embedding_storage_mode: str = "none",
        mini_batch_size: int = 32,
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        exclude_labels: List[str] = [],  # NOTE(review): mutable default argument — only read here, but fragile
        gold_label_dictionary: Optional[Dictionary] = None,
        return_loss: bool = True,
        **kwargs,
    ) -> Result:
        """Predict on ``data_points``, compare against gold labels of ``gold_label_type``, and compute metrics."""
        import numpy as np
        import sklearn

        # make sure <unk> is contained in gold_label_dictionary, if given
        if gold_label_dictionary and not gold_label_dictionary.add_unk:
            raise AssertionError("gold_label_dictionary must have add_unk set to true in initialization.")

        # read Dataset into data loader, if list of sentences passed, make Dataset first
        if not isinstance(data_points, Dataset):
            data_points = FlairDatapointDataset(data_points)

        with torch.no_grad():
            # loss calculation
            eval_loss = torch.zeros(1, device=flair.device)
            average_over = 0

            # variables for printing
            lines: List[str] = []

            # variables for computing scores
            all_spans: Set[str] = set()
            all_true_values = {}
            all_predicted_values = {}

            loader = DataLoader(data_points, batch_size=mini_batch_size)

            sentence_id = 0
            for batch in Tqdm.tqdm(loader):
                # remove any previously predicted labels
                for datapoint in batch:
                    datapoint.remove_labels("predicted")

                # predict for batch
                loss_and_count = self.predict(
                    batch,
                    embedding_storage_mode=embedding_storage_mode,
                    mini_batch_size=mini_batch_size,
                    label_name="predicted",
                    return_loss=return_loss,
                )

                if return_loss:
                    # predict() may return (loss, count) or a bare loss value
                    if isinstance(loss_and_count, tuple):
                        average_over += loss_and_count[1]
                        eval_loss += loss_and_count[0]
                    else:
                        eval_loss += loss_and_count

                # get the gold labels
                for datapoint in batch:
                    for gold_label in datapoint.get_labels(gold_label_type):
                        # key identifying this span uniquely across the whole evaluation run
                        representation = str(sentence_id) + ": " + gold_label.unlabeled_identifier

                        value = gold_label.value

                        # map gold labels unknown to the dictionary to <unk>
                        if gold_label_dictionary and gold_label_dictionary.get_idx_for_item(value) == 0:
                            value = "<unk>"

                        if representation not in all_true_values:
                            all_true_values[representation] = [value]
                        else:
                            all_true_values[representation].append(value)

                        if representation not in all_spans:
                            all_spans.add(representation)

                    for predicted_span in datapoint.get_labels("predicted"):
                        representation = str(sentence_id) + ": " + predicted_span.unlabeled_identifier

                        # add to all_predicted_values
                        if representation not in all_predicted_values:
                            all_predicted_values[representation] = [predicted_span.value]
                        else:
                            all_predicted_values[representation].append(predicted_span.value)

                        if representation not in all_spans:
                            all_spans.add(representation)

                    sentence_id += 1

                store_embeddings(batch, embedding_storage_mode)

                # make printout lines
                if out_path:
                    lines.extend(self._print_predictions(batch, gold_label_type))

            # convert true and predicted values to two span-aligned lists
            true_values_span_aligned = []
            predicted_values_span_aligned = []
            for span in all_spans:
                # spans with no gold label count as the "outside" class "O"
                list_of_gold_values_for_span = all_true_values[span] if span in all_true_values else ["O"]
                # delete exluded labels if exclude_labels is given
                for excluded_label in exclude_labels:
                    if excluded_label in list_of_gold_values_for_span:
                        list_of_gold_values_for_span.remove(excluded_label)
                # if after excluding labels, no label is left, ignore the datapoint
                if not list_of_gold_values_for_span:
                    continue
                true_values_span_aligned.append(list_of_gold_values_for_span)
                predicted_values_span_aligned.append(
                    all_predicted_values[span] if span in all_predicted_values else ["O"]
                )

            # write all_predicted_values to out_file if set
            if out_path:
                with open(Path(out_path), "w", encoding="utf-8") as outfile:
                    outfile.write("".join(lines))

            # make the evaluation dictionary
            evaluation_label_dictionary = Dictionary(add_unk=False)
            evaluation_label_dictionary.add_item("O")
            for true_values in all_true_values.values():
                for label in true_values:
                    evaluation_label_dictionary.add_item(label)
            for predicted_values in all_predicted_values.values():
                for label in predicted_values:
                    evaluation_label_dictionary.add_item(label)

            # check if this is a multi-label problem
            multi_label = False
            for true_instance, predicted_instance in zip(true_values_span_aligned, predicted_values_span_aligned):
                if len(true_instance) > 1 or len(predicted_instance) > 1:
                    multi_label = True
                    break

            log.debug(f"Evaluating as a multi-label problem: {multi_label}")

            # compute numbers by formatting true and predicted such that Scikit-Learn can use them
            y_true = []
            y_pred = []
            if multi_label:
                # multi-label problems require a multi-hot vector for each true and predicted label
                for true_instance in true_values_span_aligned:
                    y_true_instance = np.zeros(len(evaluation_label_dictionary), dtype=int)
                    for true_value in true_instance:
                        y_true_instance[evaluation_label_dictionary.get_idx_for_item(true_value)] = 1
                    y_true.append(y_true_instance.tolist())

                for predicted_values in predicted_values_span_aligned:
                    y_pred_instance = np.zeros(len(evaluation_label_dictionary), dtype=int)
                    for predicted_value in predicted_values:
                        y_pred_instance[evaluation_label_dictionary.get_idx_for_item(predicted_value)] = 1
                    y_pred.append(y_pred_instance.tolist())
            else:
                # single-label problems can do with a single index for each true and predicted label
                y_true = [
                    evaluation_label_dictionary.get_idx_for_item(true_instance[0])
                    for true_instance in true_values_span_aligned
                ]
                y_pred = [
                    evaluation_label_dictionary.get_idx_for_item(predicted_instance[0])
                    for predicted_instance in predicted_values_span_aligned
                ]

            # now, calculate evaluation numbers
            target_names = []
            labels = []

            counter = Counter(itertools.chain.from_iterable(all_true_values.values()))
            counter.update(list(itertools.chain.from_iterable(all_predicted_values.values())))

            for label_name, _count in counter.most_common():
                if label_name == "O":
                    continue
                target_names.append(label_name)
                labels.append(evaluation_label_dictionary.get_idx_for_item(label_name))

            # there is at least one gold label or one prediction (default)
            if len(all_true_values) + len(all_predicted_values) > 1:
                classification_report = sklearn.metrics.classification_report(
                    y_true,
                    y_pred,
                    digits=4,
                    target_names=target_names,
                    zero_division=0,
                    labels=labels,
                )

                classification_report_dict = sklearn.metrics.classification_report(
                    y_true,
                    y_pred,
                    target_names=target_names,
                    zero_division=0,
                    output_dict=True,
                    labels=labels,
                )

                accuracy_score = round(sklearn.metrics.accuracy_score(y_true, y_pred), 4)
                macro_f_score = round(classification_report_dict["macro avg"]["f1-score"], 4)

                # if there is only one label, then "micro avg" = "macro avg"
                if len(target_names) == 1:
                    classification_report_dict["micro avg"] = classification_report_dict["macro avg"]

                if "micro avg" in classification_report_dict:
                    # micro average is only computed if zero-label exists (for instance "O")
                    micro_f_score = round(classification_report_dict["micro avg"]["f1-score"], 4)
                else:
                    # if no zero-label exists (such as in POS tagging) micro average is equal to accuracy
                    micro_f_score = round(classification_report_dict["accuracy"], 4)

                # same for the main score
                if "micro avg" not in classification_report_dict and main_evaluation_metric[0] == "micro avg":
                    main_score = classification_report_dict["accuracy"]
                else:
                    main_score = classification_report_dict[main_evaluation_metric[0]][main_evaluation_metric[1]]

            else:
                # issue error and default all evaluation numbers to 0.
                log.error(
                    "ACHTUNG! No gold labels and no all_predicted_values found! "
                    "Could be an error in your corpus or how you "
                    "initialize the trainer!"
                )
                accuracy_score = micro_f_score = macro_f_score = main_score = 0.0
                classification_report = ""
                classification_report_dict = {}

            detailed_result = (
                "\nResults:"
                f"\n- F-score (micro) {micro_f_score}"
                f"\n- F-score (macro) {macro_f_score}"
                f"\n- Accuracy {accuracy_score}"
                "\n\nBy class:\n" + classification_report
            )

            scores: Dict[Union[Tuple[str, ...], str], Any] = {}

            # NOTE(review): if classification_report_dict is empty (error branch above),
            # the lookups below raise KeyError — the degenerate case is not fully handled
            for avg_type in ("micro avg", "macro avg"):
                for metric_type in ("f1-score", "precision", "recall"):
                    if avg_type == "micro avg" and avg_type not in classification_report_dict:
                        value = classification_report_dict["accuracy"]
                    else:
                        value = classification_report_dict[avg_type][metric_type]
                    scores[(avg_type, metric_type)] = value

            scores["accuracy"] = accuracy_score

            if average_over > 0:
                eval_loss /= average_over
            scores["loss"] = eval_loss.item()

            result = Result(
                main_score=main_score,
                detailed_results=detailed_result,
                classification_report=classification_report_dict,
                scores=scores,
            )

        return result

    @abstractmethod
    def predict(
        self,
        sentences: Union[List[DT], DT],
        mini_batch_size: int = 32,
        return_probabilities_for_all_classes: bool = False,
        verbose: bool = False,
        label_name: Optional[str] = None,
        return_loss=False,
        embedding_storage_mode="none",
    ):
        """Predicts the class labels for the given sentences.

        The labels are directly added to the sentences.

        :param sentences: list of sentences
        :param mini_batch_size: mini batch size to use
        :param return_probabilities_for_all_classes : return probabilities for all classes instead of only best predicted  # noqa: E501
        :param verbose: set to True to display a progress bar
        :param return_loss: set to True to return loss
        :param label_name: set this to change the name of the label type that is predicted  # noqa: E501
        :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively. 'gpu' to store embeddings in GPU memory.  # noqa: E501
        """
        raise NotImplementedError

    def _print_predictions(self, batch, gold_label_type):
        """Build one printable line per datapoint comparing its gold and predicted labels."""
        lines = []
        for datapoint in batch:
            # check if there is a label mismatch
            g = [label.labeled_identifier for label in datapoint.get_labels(gold_label_type)]
            p = [label.labeled_identifier for label in datapoint.get_labels("predicted")]
            g.sort()
            p.sort()
            correct_string = " -> MISMATCH!\n" if g != p else ""
            # print info
            eval_line = (
                f"{datapoint.text}\n"
                f" - Gold: {', '.join(label.value if label.data_point == datapoint else label.labeled_identifier for label in datapoint.get_labels(gold_label_type))}\n"
                f" - Pred: {', '.join(label.value if label.data_point == datapoint else label.labeled_identifier for label in datapoint.get_labels('predicted'))}\n{correct_string}\n"
            )
            lines.append(eval_line)
        return lines

    def get_used_tokens(self, corpus: Corpus) -> typing.Iterable[List[str]]:
        # yield the token texts of every sentence in the corpus, one sentence at a time
        for sentence in _iter_dataset(corpus.get_all_sentences()):
            yield [t.text for t in sentence]

    @classmethod
    def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "Classifier":
        from typing import cast

        return cast("Classifier", super().load(model_path=model_path))
class DefaultClassifier(Classifier[DT], typing.Generic[DT, DT2], ABC):
"""Default base class for all Flair models that do classification.
It inherits from flair.nn.Classifier and thus from flair.nn.Model. All features shared by all classifiers are
implemented here, including the loss calculation, prediction heads for both single- and multi- label classification
and the `predict()` method. Example implementations of this class are the TextClassifier, RelationExtractor,
TextPairClassifier and TokenClassifier.
"""
def __init__(
self,
embeddings: Embeddings,
label_dictionary: Dictionary,
final_embedding_size: int,
dropout: float = 0.0,
locked_dropout: float = 0.0,
word_dropout: float = 0.0,
multi_label: bool = False,
multi_label_threshold: float = 0.5,
loss_weights: Optional[Dict[str, float]] = None,
decoder: Optional[torch.nn.Module] = None,
inverse_model: bool = False,
train_on_gold_pairs_only: bool = False,
should_embed_sentence: bool = True,
) -> None:
super().__init__()
# set the embeddings
self.embeddings = embeddings
# initialize the label dictionary
self.label_dictionary: Dictionary = label_dictionary
# initialize the decoder
if decoder is not None:
self.decoder = decoder
self._custom_decoder = True
else:
self.decoder = torch.nn.Linear(final_embedding_size, len(self.label_dictionary))
torch.nn.init.xavier_uniform_(self.decoder.weight)
self._custom_decoder = False
# set up multi-label logic
self.multi_label = multi_label
self.multi_label_threshold = multi_label_threshold
self.final_embedding_size = final_embedding_size
self.inverse_model = inverse_model
# init dropouts
self.dropout: torch.nn.Dropout = torch.nn.Dropout(dropout)
self.locked_dropout = flair.nn.LockedDropout(locked_dropout)
self.word_dropout = flair.nn.WordDropout(word_dropout)
self.should_embed_sentence = should_embed_sentence
# loss weights and loss function
self.weight_dict = loss_weights
# Initialize the weight tensor
if loss_weights is not None:
n_classes = len(self.label_dictionary)
weight_list = [1.0 for i in range(n_classes)]
for i, tag in enumerate(self.label_dictionary.get_items()):
if tag in loss_weights:
weight_list[i] = loss_weights[tag]
self.loss_weights: Optional[torch.Tensor] = torch.FloatTensor(weight_list).to(flair.device)
else:
self.loss_weights = None
# set up gradient reversal if so specified
if inverse_model:
from pytorch_revgrad import RevGrad
self.gradient_reversal = RevGrad()
if self.multi_label:
self.loss_function: _Loss = torch.nn.BCEWithLogitsLoss(weight=self.loss_weights, reduction="sum")
else:
self.loss_function = torch.nn.CrossEntropyLoss(weight=self.loss_weights, reduction="sum")
self.train_on_gold_pairs_only = train_on_gold_pairs_only
def _filter_data_point(self, data_point: DT) -> bool:
"""Specify if a data point should be kept.
That way you can remove for example empty texts. Per default all datapoints that have length zero
will be removed.
Return true if the data point should be kept and false if it should be removed.
"""
return len(data_point) > 0
@abstractmethod
def _get_embedding_for_data_point(self, prediction_data_point: DT2) -> torch.Tensor:
raise NotImplementedError
@abstractmethod
def _get_data_points_from_sentence(self, sentence: DT) -> List[DT2]:
"""Returns the data_points to which labels are added.
The results should be of any type that inherits from DataPoint (Sentence, Span, Token, ... objects).
"""
raise NotImplementedError
def _get_data_points_for_batch(self, sentences: List[DT]) -> List[DT2]:
"""Returns the data_points to which labels are added.
The results should be of any type that inherits from DataPoint (Sentence, Span, Token, ... objects).
"""
return [data_point for sentence in sentences for data_point in self._get_data_points_from_sentence(sentence)]
def _get_label_of_datapoint(self, data_point: DT2) -> List[str]:
"""Extracts the labels from the data points.
Each data point might return a list of strings, representing multiple labels.
"""
if self.multi_label:
return [label.value for label in data_point.get_labels(self.label_type)]
else:
return [data_point.get_label(self.label_type).value]
@property
def multi_label_threshold(self):
return self._multi_label_threshold
@multi_label_threshold.setter
def multi_label_threshold(self, x): # setter method
if type(x) is dict:
if "default" in x:
self._multi_label_threshold = x
else:
raise Exception('multi_label_threshold dict should have a "default" key')
else:
self._multi_label_threshold = {"default": x}
def _prepare_label_tensor(self, prediction_data_points: List[DT2]) -> torch.Tensor:
labels = [self._get_label_of_datapoint(dp) for dp in prediction_data_points]
if self.multi_label:
return torch.tensor(
[
[1 if label in all_labels_for_point else 0 for label in self.label_dictionary.get_items()]
for all_labels_for_point in labels
],
dtype=torch.float,
device=flair.device,
)
else:
return torch.tensor(
[
self.label_dictionary.get_idx_for_item(label[0])
if len(label) > 0
else self.label_dictionary.get_idx_for_item("O")
for label in labels
],
dtype=torch.long,
device=flair.device,
)
def _encode_data_points(self, sentences: List[DT], data_points: List[DT2]):
# embed sentences
if self.should_embed_sentence:
self.embeddings.embed(sentences)
# get a tensor of data points
data_point_tensor = torch.stack([self._get_embedding_for_data_point(data_point) for data_point in data_points])
# do dropout
data_point_tensor = data_point_tensor.unsqueeze(1)
data_point_tensor = self.dropout(data_point_tensor)
data_point_tensor = self.locked_dropout(data_point_tensor)
data_point_tensor = self.word_dropout(data_point_tensor)
data_point_tensor = data_point_tensor.squeeze(1)
return data_point_tensor
def _mask_scores(self, scores, data_points):
return scores
def forward_loss(self, sentences: List[DT]) -> Tuple[torch.Tensor, int]:
# make a forward pass to produce embedded data points and labels
sentences = [sentence for sentence in sentences if self._filter_data_point(sentence)]
# get the data points for which to predict labels
data_points = self._get_data_points_for_batch(sentences)
if len(data_points) == 0:
return torch.tensor(0.0, requires_grad=True, device=flair.device), 1
# get their gold labels as a tensor
label_tensor = self._prepare_label_tensor(data_points)
if label_tensor.size(0) == 0:
return torch.tensor(0.0, requires_grad=True, device=flair.device), 1
# pass data points through network to get encoded data point tensor
data_point_tensor = self._encode_data_points(sentences, data_points)
# decode
scores = self.decoder(data_point_tensor)
# an optional masking step (no masking in most cases)
scores = self._mask_scores(scores, data_points)
# calculate the loss
return self._calculate_loss(scores, label_tensor)
def _calculate_loss(self, scores: torch.Tensor, labels: torch.Tensor) -> Tuple[torch.Tensor, int]:
return self.loss_function(scores, labels), labels.size(0)
def _sort_data(self, data_points: List[DT]) -> List[DT]:
if len(data_points) == 0:
return []
if not isinstance(data_points[0], Sentence):
return data_points
# filter empty sentences
sentences = [sentence for sentence in typing.cast(List[Sentence], data_points) if len(sentence) > 0]
# reverse sort all sequences by their length
reordered_sentences = sorted(sentences, key=len, reverse=True)
return typing.cast(List[DT], reordered_sentences)
def predict(
    self,
    sentences: Union[List[DT], DT],
    mini_batch_size: int = 32,
    return_probabilities_for_all_classes: bool = False,
    verbose: bool = False,
    label_name: Optional[str] = None,
    return_loss=False,
    embedding_storage_mode="none",
):
    """Predicts the class labels for the given sentences. The labels are directly added to the sentences.

    :param sentences: list of sentences
    :param mini_batch_size: mini batch size to use
    :param return_probabilities_for_all_classes : return probabilities for all classes instead of only best predicted  # noqa: E501
    :param verbose: set to True to display a progress bar
    :param return_loss: set to True to return loss
    :param label_name: set this to change the name of the label type that is predicted
    :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.  # noqa: E501
        'gpu' to store embeddings in GPU memory.
    """
    if label_name is None:
        label_name = self.label_type if self.label_type is not None else "label"

    with torch.no_grad():
        if not sentences:
            return sentences
        if not isinstance(sentences, list):
            sentences = [sentences]

        if isinstance(sentences[0], Sentence):
            Sentence.set_context_for_sentences(typing.cast(List[Sentence], sentences))

        reordered_sentences = self._sort_data(sentences)
        if len(reordered_sentences) == 0:
            return sentences

        if len(reordered_sentences) > mini_batch_size:
            batches: Union[DataLoader, List[List[DT]]] = DataLoader(
                dataset=FlairDatapointDataset(reordered_sentences),
                batch_size=mini_batch_size,
            )
            # progress bar for verbosity
            if verbose:
                progress_bar = tqdm(batches)
                progress_bar.set_description("Batch inference")
                batches = progress_bar
        else:
            batches = [reordered_sentences]

        overall_loss = torch.zeros(1, device=flair.device)
        label_count = 0
        # BUGFIX: initialize before the loop. Previously this name was only bound
        # inside the loop body, so return_loss=True with every batch filtered out
        # raised a NameError at the final check. It is now also accumulated across
        # batches, so the warning below no longer reflects only the last batch.
        has_unknown_label = False
        for batch in batches:
            # filter data points in batch
            batch = [dp for dp in batch if self._filter_data_point(dp)]

            # stop if all sentences are empty
            if not batch:
                continue

            data_points = self._get_data_points_for_batch(batch)
            if not data_points:
                continue

            # pass data points through network and decode
            data_point_tensor = self._encode_data_points(batch, data_points)
            scores = self.decoder(data_point_tensor)
            scores = self._mask_scores(scores, data_points)

            # if anything could possibly be predicted
            if len(data_points) > 0:
                # remove previously predicted labels of this type
                for sentence in data_points:
                    sentence.remove_labels(label_name)

                if return_loss:
                    # filter data points that have labels outside of dictionary
                    filtered_indices = []
                    batch_has_unknown_label = False
                    for idx, dp in enumerate(data_points):
                        if all(
                            label in self.label_dictionary.get_items() for label in self._get_label_of_datapoint(dp)
                        ):
                            filtered_indices.append(idx)
                        else:
                            batch_has_unknown_label = True
                    has_unknown_label = has_unknown_label or batch_has_unknown_label

                    # BUGFIX: compute the loss on a *separate* filtered tensor instead of
                    # overwriting `scores` — the prediction loops below index scores by
                    # position in the unfiltered `data_points`, which broke (row mismatch)
                    # whenever rows were filtered out here.
                    if batch_has_unknown_label:
                        loss_scores = torch.index_select(
                            scores, 0, torch.tensor(filtered_indices, device=flair.device)
                        )
                    else:
                        loss_scores = scores
                    gold_labels = self._prepare_label_tensor([data_points[index] for index in filtered_indices])
                    overall_loss += self._calculate_loss(loss_scores, gold_labels)[0]
                    label_count += len(filtered_indices)

                if self.multi_label:
                    sigmoided = torch.sigmoid(scores)  # size: (n_sentences, n_classes)
                    n_labels = sigmoided.size(1)
                    for s_idx, data_point in enumerate(data_points):
                        for l_idx in range(n_labels):
                            label_value = self.label_dictionary.get_item_for_index(l_idx)
                            if label_value == "O":
                                continue
                            label_threshold = self._get_label_threshold(label_value)
                            label_score = sigmoided[s_idx, l_idx].item()
                            if label_score > label_threshold or return_probabilities_for_all_classes:
                                data_point.add_label(typename=label_name, value=label_value, score=label_score)
                else:
                    softmax = torch.nn.functional.softmax(scores, dim=-1)
                    if return_probabilities_for_all_classes:
                        n_labels = softmax.size(1)
                        for s_idx, data_point in enumerate(data_points):
                            for l_idx in range(n_labels):
                                label_value = self.label_dictionary.get_item_for_index(l_idx)
                                if label_value == "O":
                                    continue
                                label_score = softmax[s_idx, l_idx].item()
                                data_point.add_label(typename=label_name, value=label_value, score=label_score)
                    else:
                        conf, indices = torch.max(softmax, dim=-1)
                        for data_point, c, i in zip(data_points, conf, indices):
                            label_value = self.label_dictionary.get_item_for_index(i.item())
                            if label_value == "O":
                                continue
                            data_point.add_label(typename=label_name, value=label_value, score=c.item())

            store_embeddings(batch, storage_mode=embedding_storage_mode)
            self._post_process_batch_after_prediction(batch, label_name)

        if return_loss:
            if has_unknown_label:
                log.info(
                    "During evaluation, encountered labels that are not in the label_dictionary:"
                    "Evaluation loss is computed without them."
                )
            return overall_loss, label_count
        return None
def _post_process_batch_after_prediction(self, batch, label_name):
pass
def _get_label_threshold(self, label_value):
label_threshold = self.multi_label_threshold["default"]
if label_value in self.multi_label_threshold:
label_threshold = self.multi_label_threshold[label_value]
return label_threshold
def __str__(self) -> str:
    """Append loss-weight information to the base model's string representation."""
    base = super(flair.nn.Model, self).__str__().rstrip(")")
    extras = f" (weights): {self.weight_dict}\n" f" (weight_tensor) {self.loss_weights}\n)"
    return base + extras
@classmethod
def _init_model_with_state_dict(cls, state, **kwargs):
    """Re-create a model from a serialized state, forwarding DefaultClassifier-specific arguments."""
    # copy DefaultClassifier constructor arguments from the state unless the caller overrides them
    classifier_args = (
        "decoder",
        "dropout",
        "word_dropout",
        "locked_dropout",
        "multi_label",
        "multi_label_threshold",
        "loss_weights",
        "train_on_gold_pairs_only",
        "inverse_model",
    )
    for arg in classifier_args:
        if arg in state and arg not in kwargs:
            kwargs[arg] = state[arg]
    return super(Classifier, cls)._init_model_with_state_dict(state, **kwargs)
def _get_state_dict(self):
    """Serialize the model, adding all DefaultClassifier-specific fields to the base state."""
    state = super()._get_state_dict()
    state.update(
        dropout=self.dropout.p,
        word_dropout=self.word_dropout.dropout_rate,
        locked_dropout=self.locked_dropout.dropout_rate,
        multi_label=self.multi_label,
        multi_label_threshold=self.multi_label_threshold,
        loss_weights=self.loss_weights,
        train_on_gold_pairs_only=self.train_on_gold_pairs_only,
        inverse_model=self.inverse_model,
    )
    # only persist the decoder itself when the user supplied a custom one
    if self._custom_decoder:
        state["decoder"] = self.decoder
    return state
@classmethod
def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "DefaultClassifier":
    """Load a DefaultClassifier from a file path or a pre-loaded state dict."""
    loaded = super().load(model_path=model_path)
    return typing.cast("DefaultClassifier", loaded)
def get_non_abstract_subclasses(cls):
    """Recursively collect all concrete (non-abstract) subclasses of *cls*, depth-first."""
    found = []
    for sub in cls.__subclasses__():
        # descend first, so deeper subclasses precede their parents in the result
        found.extend(get_non_abstract_subclasses(sub))
        if not inspect.isabstract(sub):
            found.append(sub)
    return found
| 41,127 | 41.443756 | 267 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.