repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
bnp | bnp-master/regression/models/attention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class MultiHeadAttn(nn.Module):
def __init__(self, dim_q, dim_k, dim_v, dim_out, num_heads=8):
super().__init__()
self.num_heads = num_heads
self.dim_out = dim_out
self.fc_q = nn.Linear(dim_q, dim_out, bias=False)
self.fc_k = nn.Linear(dim_k, dim_out, bias=False)
self.fc_v = nn.Linear(dim_v, dim_out, bias=False)
self.fc_out = nn.Linear(dim_out, dim_out)
self.ln1 = nn.LayerNorm(dim_out)
self.ln2 = nn.LayerNorm(dim_out)
def scatter(self, x):
return torch.cat(x.chunk(self.num_heads, -1), -3)
def gather(self, x):
return torch.cat(x.chunk(self.num_heads, -3), -1)
def attend(self, q, k, v, mask=None):
q_, k_, v_ = [self.scatter(x) for x in [q, k, v]]
A_logits = q_ @ k_.transpose(-2, -1) / math.sqrt(self.dim_out)
if mask is not None:
mask = mask.bool().to(q.device)
mask = torch.stack([mask]*q.shape[-2], -2)
mask = torch.cat([mask]*self.num_heads, -3)
A = torch.softmax(A_logits.masked_fill(mask, -float('inf')), -1)
A = A.masked_fill(torch.isnan(A), 0.0)
else:
A = torch.softmax(A_logits, -1)
return self.gather(A @ v_)
def forward(self, q, k, v, mask=None):
q, k, v = self.fc_q(q), self.fc_k(k), self.fc_v(v)
out = self.ln1(q + self.attend(q, k, v, mask=mask))
out = self.ln2(out + F.relu(self.fc_out(out)))
return out
class SelfAttn(MultiHeadAttn):
def __init__(self, dim_in, dim_out, num_heads=8):
super().__init__(dim_in, dim_in, dim_in, dim_out, num_heads)
def forward(self, x, mask=None):
return super().forward(x, x, x, mask=mask)
| 1,805 | 35.857143 | 76 | py |
bnp | bnp-master/regression/utils/misc.py | import os
from importlib.machinery import SourceFileLoader
import math
import torch
def gen_load_func(parser, func):
def load(args, cmdline):
sub_args, cmdline = parser.parse_known_args(cmdline)
for k, v in sub_args.__dict__.items():
args.__dict__[k] = v
return func(**sub_args.__dict__), cmdline
return load
def load_module(filename):
module_name = os.path.splitext(os.path.basename(filename))[0]
return SourceFileLoader(module_name, filename).load_module()
def logmeanexp(x, dim=0):
return x.logsumexp(dim) - math.log(x.shape[dim])
def stack(x, num_samples=None, dim=0):
return x if num_samples is None \
else torch.stack([x]*num_samples, dim=dim)
| 726 | 29.291667 | 65 | py |
bnp | bnp-master/regression/utils/log.py | import torch
import time
import logging
from collections import OrderedDict
def get_logger(filename, mode='a'):
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger()
logger.addHandler(logging.FileHandler(filename, mode=mode))
return logger
class RunningAverage(object):
def __init__(self, *keys):
self.sum = OrderedDict()
self.cnt = OrderedDict()
self.clock = time.time()
for key in keys:
self.sum[key] = 0
self.cnt[key] = 0
def update(self, key, val):
if isinstance(val, torch.Tensor):
val = val.item()
if self.sum.get(key, None) is None:
self.sum[key] = val
self.cnt[key] = 1
else:
self.sum[key] = self.sum[key] + val
self.cnt[key] += 1
def reset(self):
for key in self.sum.keys():
self.sum[key] = 0
self.cnt[key] = 0
self.clock = time.time()
def clear(self):
self.sum = OrderedDict()
self.cnt = OrderedDict()
self.clock = time.time()
def keys(self):
return self.sum.keys()
def get(self, key):
assert(self.sum.get(key, None) is not None)
return self.sum[key] / self.cnt[key]
def info(self, show_et=True):
line = ''
for key in self.sum.keys():
val = self.sum[key] / self.cnt[key]
if type(val) == float:
line += f'{key} {val:.4f} '
else:
line += f'{key} {val} '.format(key, val)
if show_et:
line += f'({time.time()-self.clock:.3f} secs)'
return line
| 1,679 | 27 | 65 | py |
bnp | bnp-master/regression/utils/sampling.py | import torch
def gather(items, idxs):
K = idxs.shape[0]
idxs = idxs.to(items[0].device)
gathered = []
for item in items:
gathered.append(torch.gather(
torch.stack([item]*K), -2,
torch.stack([idxs]*item.shape[-1], -1)).squeeze(0))
return gathered[0] if len(gathered) == 1 else gathered
def sample_subset(*items, r_N=None, num_samples=None):
r_N = r_N or torch.rand(1).item()
K = num_samples or 1
N = items[0].shape[-2]
Ns = min(max(1, int(r_N * N)), N-1)
batch_shape = items[0].shape[:-2]
idxs = torch.rand((K,)+batch_shape+(N,)).argsort(-1)
return gather(items, idxs[...,:Ns]), gather(items, idxs[...,Ns:])
def sample_with_replacement(*items, num_samples=None, r_N=1.0, N_s=None):
K = num_samples or 1
N = items[0].shape[-2]
N_s = N_s or max(1, int(r_N * N))
batch_shape = items[0].shape[:-2]
idxs = torch.randint(N, size=(K,)+batch_shape+(N_s,))
return gather(items, idxs)
def sample_mask(B, N, num_samples=None, min_num=3, prob=0.5):
min_num = min(min_num, N)
K = num_samples or 1
fixed = torch.ones(K, B, min_num)
if N - min_num > 0:
rand = torch.bernoulli(prob*torch.ones(K, B, N-min_num))
mask = torch.cat([fixed, rand], -1)
return mask.squeeze(0)
else:
return fixed.squeeze(0)
| 1,343 | 32.6 | 73 | py |
bnp | bnp-master/regression/data/gp.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import MultivariateNormal, StudentT
from attrdict import AttrDict
import math
__all__ = ['GPSampler', 'RBFKernel', 'PeriodicKernel', 'Matern52Kernel']
class GPSampler(object):
def __init__(self, kernel, t_noise=None):
self.kernel = kernel
self.t_noise = t_noise
def sample(self,
batch_size=16,
num_ctx=None,
max_num_points=50,
x_range=(-2, 2),
device='cpu'):
batch = AttrDict()
num_ctx = num_ctx or torch.randint(low=3, high=max_num_points-3, size=[1]).item()
num_tar = torch.randint(low=3, high=max_num_points-num_ctx, size=[1]).item()
num_points = num_ctx + num_tar
batch.x = x_range[0] + (x_range[1] - x_range[0]) \
* torch.rand([batch_size, num_points, 1], device=device)
batch.xc = batch.x[:,:num_ctx]
batch.xt = batch.x[:,num_ctx:]
# batch_size * num_points * num_points
cov = self.kernel(batch.x)
mean = torch.zeros(batch_size, num_points, device=device)
batch.y = MultivariateNormal(mean, cov).rsample().unsqueeze(-1)
batch.yc = batch.y[:,:num_ctx]
batch.yt = batch.y[:,num_ctx:]
if self.t_noise is not None:
if self.t_noise == -1:
t_noise = 0.15 * torch.rand(batch.y.shape).to(device)
else:
t_noise = self.t_noise
batch.y += t_noise * StudentT(2.1).rsample(batch.y.shape).to(device)
return batch
class RBFKernel(object):
def __init__(self, sigma_eps=2e-2, max_length=0.6, max_scale=1.0):
self.sigma_eps = sigma_eps
self.max_length = max_length
self.max_scale = max_scale
# x: batch_size * num_points * dim
def __call__(self, x):
length = 0.1 + (self.max_length-0.1) \
* torch.rand([x.shape[0], 1, 1, 1], device=x.device)
scale = 0.1 + (self.max_scale-0.1) \
* torch.rand([x.shape[0], 1, 1], device=x.device)
# batch_size * num_points * num_points * dim
dist = (x.unsqueeze(-2) - x.unsqueeze(-3))/length
# batch_size * num_points * num_points
cov = scale.pow(2) * torch.exp(-0.5 * dist.pow(2).sum(-1)) \
+ self.sigma_eps**2 * torch.eye(x.shape[-2]).to(x.device)
return cov
class Matern52Kernel(object):
def __init__(self, sigma_eps=2e-2, max_length=0.6, max_scale=1.0):
self.sigma_eps = sigma_eps
self.max_length = max_length
self.max_scale = max_scale
# x: batch_size * num_points * dim
def __call__(self, x):
length = 0.1 + (self.max_length-0.1) \
* torch.rand([x.shape[0], 1, 1, 1], device=x.device)
scale = 0.1 + (self.max_scale-0.1) \
* torch.rand([x.shape[0], 1, 1], device=x.device)
# batch_size * num_points * num_points
dist = torch.norm((x.unsqueeze(-2) - x.unsqueeze(-3))/length, dim=-1)
cov = scale.pow(2)*(1 + math.sqrt(5.0)*dist + 5.0*dist.pow(2)/3.0) \
* torch.exp(-math.sqrt(5.0) * dist) \
+ self.sigma_eps**2 * torch.eye(x.shape[-2]).to(x.device)
return cov
class PeriodicKernel(object):
def __init__(self, sigma_eps=2e-2, max_length=0.6, max_scale=1.0):
#self.p = p
self.sigma_eps = sigma_eps
self.max_length = max_length
self.max_scale = max_scale
# x: batch_size * num_points * dim
def __call__(self, x):
p = 0.1 + 0.4*torch.rand([x.shape[0], 1, 1], device=x.device)
length = 0.1 + (self.max_length-0.1) \
* torch.rand([x.shape[0], 1, 1], device=x.device)
scale = 0.1 + (self.max_scale-0.1) \
* torch.rand([x.shape[0], 1, 1], device=x.device)
dist = x.unsqueeze(-2) - x.unsqueeze(-3)
cov = scale.pow(2) * torch.exp(\
- 2*(torch.sin(math.pi*dist.abs().sum(-1)/p)/length).pow(2)) \
+ self.sigma_eps**2 * torch.eye(x.shape[-2]).to(x.device)
return cov
| 4,122 | 35.8125 | 89 | py |
bnp | bnp-master/regression/data/emnist.py | import argparse
import torch
import torchvision.datasets as tvds
from utils.paths import datasets_path
from utils.misc import gen_load_func
class EMNIST(tvds.EMNIST):
def __init__(self, train=True, class_range=[0, 47], device='cpu', download=True):
super().__init__(datasets_path, train=train, split='balanced', download=download)
self.data = self.data.unsqueeze(1).float().div(255).transpose(-1, -2).to(device)
self.targets = self.targets.to(device)
idxs = []
for c in range(class_range[0], class_range[1]):
idxs.append(torch.where(self.targets==c)[0])
idxs = torch.cat(idxs)
self.data = self.data[idxs]
self.targets = self.targets[idxs]
def __getitem__(self, idx):
return self.data[idx], self.targets[idx]
| 807 | 30.076923 | 89 | py |
bnp | bnp-master/regression/data/image.py | import torch
from attrdict import AttrDict
from torch.utils.data import DataLoader
from torch.distributions import StudentT, Normal
def img_to_task(img, num_ctx=None,
max_num_points=None, target_all=False, t_noise=None, device=None):
B, C, H, W = img.shape
num_pixels = H*W
img = img.view(B, C, -1)
if t_noise is not None:
if t_noise == -1:
t_noise = 0.09 * torch.rand(img.shape)
img += t_noise * StudentT(2.1).rsample(img.shape)
device = img.device if device is None else device
batch = AttrDict()
max_num_points = max_num_points or num_pixels
num_ctx = num_ctx or \
torch.randint(low=3, high=max_num_points-3, size=[1]).item()
num_tar = max_num_points - num_ctx if target_all else \
torch.randint(low=3, high=max_num_points-num_ctx, size=[1]).item()
num_points = num_ctx + num_tar
idxs = torch.rand(B, num_pixels).argsort(-1)[...,:num_points].to(img.device)
x1, x2 = idxs//W, idxs%W
batch.x = torch.stack([
2*x1.float()/(H-1) - 1,
2*x2.float()/(W-1) - 1], -1).to(device)
batch.y = (torch.gather(img, -1, idxs.unsqueeze(-2).repeat(1, C, 1))\
.transpose(-2, -1) - 0.5).to(device)
batch.xc = batch.x[:,:num_ctx]
batch.xt = batch.x[:,num_ctx:]
batch.yc = batch.y[:,:num_ctx]
batch.yt = batch.y[:,num_ctx:]
return batch
def coord_to_img(x, y, shape):
x = x.cpu()
y = y.cpu()
B = x.shape[0]
C, H, W = shape
I = torch.zeros(B, 3, H, W)
I[:,0,:,:] = 0.61
I[:,1,:,:] = 0.55
I[:,2,:,:] = 0.71
x1, x2 = x[...,0], x[...,1]
x1 = ((x1+1)*(H-1)/2).round().long()
x2 = ((x2+1)*(W-1)/2).round().long()
for b in range(B):
for c in range(3):
I[b,c,x1[b],x2[b]] = y[b,:,min(c,C-1)]
return I
def task_to_img(xc, yc, xt, yt, shape):
xc = xc.cpu()
yc = yc.cpu()
xt = xt.cpu()
yt = yt.cpu()
B = xc.shape[0]
C, H, W = shape
xc1, xc2 = xc[...,0], xc[...,1]
xc1 = ((xc1+1)*(H-1)/2).round().long()
xc2 = ((xc2+1)*(W-1)/2).round().long()
xt1, xt2 = xt[...,0], xt[...,1]
xt1 = ((xt1+1)*(H-1)/2).round().long()
xt2 = ((xt2+1)*(W-1)/2).round().long()
task_img = torch.zeros(B, 3, H, W).to(xc.device)
task_img[:,2,:,:] = 1.0
task_img[:,1,:,:] = 0.4
for b in range(B):
for c in range(3):
task_img[b,c,xc1[b],xc2[b]] = yc[b,:,min(c,C-1)] + 0.5
task_img = task_img.clamp(0, 1)
completed_img = task_img.clone()
for b in range(B):
for c in range(3):
completed_img[b,c,xt1[b],xt2[b]] = yt[b,:,min(c,C-1)] + 0.5
completed_img = completed_img.clamp(0, 1)
return task_img, completed_img
| 2,725 | 28 | 80 | py |
bnp | bnp-master/regression/data/lotka_volterra.py | import torch
import numpy as np
import numpy.random as npr
import numba as nb
from tqdm import tqdm
from attrdict import AttrDict
#import pandas as pd
import wget
import os.path as osp
from utils.paths import datasets_path
@nb.njit(nb.i4(nb.f8[:]))
def catrnd(prob):
cprob = prob.cumsum()
u = npr.rand()
for i in range(len(cprob)):
if u < cprob[i]:
return i
return i
@nb.njit(nb.types.Tuple((nb.f8[:,:,:], nb.f8[:,:,:], nb.i4)) \
(nb.i4, nb.i4, nb.i4, \
nb.f8, nb.f8, nb.f8, nb.f8, nb.f8, nb.f8))
def _simulate_task(batch_size, num_steps, max_num_points,
X0, Y0, theta0, theta1, theta2, theta3):
time = np.zeros((batch_size, num_steps, 1))
pop = np.zeros((batch_size, num_steps, 2))
length = num_steps*np.ones((batch_size))
for b in range(batch_size):
pop[b,0,0] = max(int(X0 + npr.randn()), 1)
pop[b,0,1] = max(int(Y0 + npr.randn()), 1)
for i in range(1, num_steps):
X, Y = pop[b,i-1,0], pop[b,i-1,1]
rates = np.array([
theta0*X*Y,
theta1*X,
theta2*Y,
theta3*X*Y])
total_rate = rates.sum()
time[b,i,0] = time[b,i-1,0] + npr.exponential(scale=1./total_rate)
pop[b,i,0] = pop[b,i-1,0]
pop[b,i,1] = pop[b,i-1,1]
a = catrnd(rates/total_rate)
if a == 0:
pop[b,i,0] += 1
elif a == 1:
pop[b,i,0] -= 1
elif a == 2:
pop[b,i,1] += 1
else:
pop[b,i,1] -= 1
if pop[b,i,0] == 0 or pop[b,i,1] == 0:
length[b] = i+1
break
num_ctx = npr.randint(15, max_num_points-15)
num_tar = npr.randint(15, max_num_points-num_ctx)
num_points = num_ctx + num_tar
min_length = length.min()
while num_points > min_length:
num_ctx = npr.randint(15, max_num_points-15)
num_tar = npr.randint(15, max_num_points-num_ctx)
num_points = num_ctx + num_tar
x = np.zeros((batch_size, num_points, 1))
y = np.zeros((batch_size, num_points, 2))
for b in range(batch_size):
idxs = np.arange(int(length[b]))
npr.shuffle(idxs)
for j in range(num_points):
x[b,j,0] = time[b,idxs[j],0]
y[b,j,0] = pop[b,idxs[j],0]
y[b,j,1] = pop[b,idxs[j],1]
return x, y, num_ctx
class LotkaVolterraSimulator(object):
def __init__(self,
X0=50,
Y0=100,
theta0=0.01,
theta1=0.5,
theta2=1.0,
theta3=0.01):
self.X0 = X0
self.Y0 = Y0
self.theta0 = theta0
self.theta1 = theta1
self.theta2 = theta2
self.theta3 = theta3
def simulate_tasks(self,
num_batches,
batch_size,
num_steps=20000,
max_num_points=100):
batches = []
for _ in tqdm(range(num_batches)):
batch = AttrDict()
x, y, num_ctx = _simulate_task(
batch_size, num_steps, max_num_points,
self.X0, self.Y0, self.theta0, self.theta1, self.theta2, self.theta3)
batch.x = torch.Tensor(x)
batch.y = torch.Tensor(y)
batch.xc = batch.x[:,:num_ctx]
batch.xt = batch.x[:,num_ctx:]
batch.yc = batch.y[:,:num_ctx]
batch.yt = batch.y[:,num_ctx:]
batches.append(batch)
return batches
def load_hare_lynx(num_batches, batch_size):
filename = osp.join(datasets_path, 'lotka_volterra', 'LynxHare.txt')
if not osp.isfile(filename):
wget.download('http://people.whitman.edu/~hundledr/courses/M250F03/LynxHare.txt',
out=osp.join(datsets_path, 'lotka_volterra'))
tb = np.loadtxt(filename)
times = torch.Tensor(tb[:,0]).unsqueeze(-1)
pops = torch.stack([torch.Tensor(tb[:,2]), torch.Tensor(tb[:,1])], -1)
#tb = pd.read_csv(osp.join(datasets_path, 'lotka_volterra', 'hare-lynx.csv'))
#times = torch.Tensor(np.array(tb['time'])).unsqueeze(-1)
#pops = torch.stack([torch.Tensor(np.array(tb['lynx'])),
# torch.Tensor(np.array(tb['hare']))], -1)
batches = []
N = pops.shape[-2]
for _ in range(num_batches):
batch = AttrDict()
num_ctx = torch.randint(low=15, high=N-15, size=[1]).item()
num_tar = N - num_ctx
idxs = torch.rand(batch_size, N).argsort(-1)
batch.x = torch.gather(
torch.stack([times]*batch_size),
-2, idxs.unsqueeze(-1))
batch.y = torch.gather(torch.stack([pops]*batch_size),
-2, torch.stack([idxs]*2, -1))
batch.xc = batch.x[:,:num_ctx]
batch.xt = batch.x[:,num_ctx:]
batch.yc = batch.y[:,:num_ctx]
batch.yt = batch.y[:,num_ctx:]
batches.append(batch)
return batches
if __name__ == '__main__':
import argparse
import os
from utils.paths import datasets_path
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('--num_batches', type=int, default=10000)
parser.add_argument('--batch_size', type=int, default=50)
parser.add_argument('--filename', type=str, default='batch')
parser.add_argument('--X0', type=float, default=50)
parser.add_argument('--Y0', type=float, default=100)
parser.add_argument('--theta0', type=float, default=0.01)
parser.add_argument('--theta1', type=float, default=0.5)
parser.add_argument('--theta2', type=float, default=1.0)
parser.add_argument('--theta3', type=float, default=0.01)
parser.add_argument('--num_steps', type=int, default=20000)
args = parser.parse_args()
sim = LotkaVolterraSimulator(X0=args.X0, Y0=args.Y0,
theta0=args.theta0, theta1=args.theta1,
theta2=args.theta2, theta3=args.theta3)
batches = sim.simulate_tasks(args.num_batches, args.batch_size,
num_steps=args.num_steps)
root = os.path.join(datasets_path, 'lotka_volterra')
if not os.path.isdir(root):
os.makedirs(root)
torch.save(batches, os.path.join(root, f'{args.filename}.tar'))
fig, axes = plt.subplots(1, 4, figsize=(16,4))
for i, ax in enumerate(axes.flatten()):
ax.scatter(batches[0].x[i,:,0], batches[0].y[i,:,0])
ax.scatter(batches[0].x[i,:,0], batches[0].y[i,:,1])
plt.show()
| 6,466 | 31.497487 | 89 | py |
bnp | bnp-master/regression/data/celeba.py | import torch
import os.path as osp
import argparse
from utils.paths import datasets_path
from utils.misc import gen_load_func
class CelebA(object):
def __init__(self, train=True):
self.data, self.targets = torch.load(
osp.join(datasets_path, 'celeba',
'train.pt' if train else 'eval.pt'))
self.data = self.data.float() / 255.0
if train:
self.data, self.targets = self.data, self.targets
else:
self.data, self.targets = self.data, self.targets
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index], self.targets[index]
if __name__ == '__main__':
# preprocess
# before proceeding, download img_celeba.7z from
# https://drive.google.com/drive/folders/0B7EVK8r0v71pTUZsaXdaSnZBZzg
# ,download list_eval_partitions.txt from
# https://drive.google.com/drive/folders/0B7EVK8r0v71pdjI3dmwtNm5jRkE
# and download identity_CelebA.txt from
# https://drive.google.com/drive/folders/0B7EVK8r0v71pOC0wOVZlQnFfaGs
# and place them in ${datasets_path}/celeba folder.
import os
import os.path as osp
from PIL import Image
from tqdm import tqdm
import numpy as np
import torch
# load train/val/test split
splitdict = {}
with open(osp.join(datasets_path, 'celeba', 'list_eval_partition.txt'), 'r') as f:
for line in f:
fn, split = line.split()
splitdict[fn] = int(split)
# load identities
iddict = {}
with open(osp.join(datasets_path, 'celeba', 'identity_CelebA.txt'), 'r') as f:
for line in f:
fn, label = line.split()
iddict[fn] = int(label)
train_imgs = []
train_labels = []
eval_imgs = []
eval_labels = []
path = osp.join(datasets_path, 'celeba', 'img_align_celeba')
imgfilenames = os.listdir(path)
for fn in tqdm(imgfilenames):
img = Image.open(osp.join(path, fn)).resize((32, 32))
if splitdict[fn] == 2:
eval_imgs.append(torch.LongTensor(np.array(img).transpose(2, 0, 1)))
eval_labels.append(iddict[fn])
else:
train_imgs.append(torch.LongTensor(np.array(img).transpose(2, 0, 1)))
train_labels.append(iddict[fn])
print(f'{len(train_imgs)} train, {len(eval_imgs)} eval')
train_imgs = torch.stack(train_imgs)
train_labels = torch.LongTensor(train_labels)
torch.save([train_imgs, train_labels], osp.join(datasets_path, 'celeba', 'train.pt'))
eval_imgs = torch.stack(eval_imgs)
eval_labels = torch.LongTensor(eval_labels)
torch.save([eval_imgs, eval_labels], osp.join(datasets_path, 'celeba', 'eval.pt'))
| 2,735 | 31.963855 | 89 | py |
landing | landing-main/setup.py | #! /usr/bin/env python
from setuptools import setup
setup(name='landing',
install_requires=['torch', 'geoopt', 'scipy'],
packages=['landing'],
version='0.0'
) | 185 | 17.6 | 52 | py |
landing | landing-main/examples/plot_procrustes.py | """
A simple example of the landing algorithm on Procrustes problem
===============================================================
Given n pairs of matrices in an array A and B, we want to solve
in parallel the procrustes problems min_X ||XA - B|| where X is
orthogonal. We compare Riemannian gradient descent with the
landing method.
Here, the bottleneck of the methods is not computing the gradients,
but rather moving on the manifold. Therefore, the landing algorithm
greatly accelerates convergence
"""
from time import time
import matplotlib.pyplot as plt
import torch
import geoopt
from geoopt.optim import RiemannianSGD
from landing import LandingSGD
torch.manual_seed(1)
# generate random matrices
n = 200
p = 100
A = torch.randn(n, p, p)
B = torch.randn(n, p, p)
init_weights = torch.randn(n, p, p)
# Compute closed-form solution from svd, used for monitoring.
u, _, v = torch.svd(B.matmul(A.transpose(-1, -2)))
w_star = u.matmul(v.transpose(-1, -2))
loss_star = ((torch.matmul(w_star, A) - B) ** 2).sum() / n
loss_star = loss_star.item()
method_names = ["Landing", "Retraction"]
methods = [LandingSGD, RiemannianSGD]
learning_rate = 0.3
f, axes = plt.subplots(2, 1)
for method_name, method, n_epochs in zip(method_names, methods, [500, 500]):
iterates = []
loss_list = []
time_list = []
param = geoopt.ManifoldParameter(
init_weights.clone(), manifold=geoopt.Stiefel(canonical=False)
)
optimizer = method((param,), lr=learning_rate)
t0 = time()
for _ in range(n_epochs):
optimizer.zero_grad()
res = torch.matmul(param, A) - B
loss = (res ** 2).sum() / n
loss.backward()
time_list.append(time() - t0)
loss_list.append(loss.item() - loss_star)
iterates.append(param.data.clone())
optimizer.step()
distance_list = []
for matrix in iterates:
d = (
torch.norm(matrix.matmul(matrix.transpose(-1, -2)) - torch.eye(p))
/ n
)
distance_list.append(d.item())
axes[0].semilogy(time_list, distance_list, label=method_name)
axes[1].semilogy(time_list, loss_list, label=method_name)
axes[0].set_xlabel("time (s.)")
axes[1].set_xlabel("time (s.)")
axes[0].set_ylabel("Orthogonality error")
axes[1].set_ylabel("f - f^*")
plt.legend()
plt.show()
| 2,324 | 26.678571 | 78 | py |
landing | landing-main/examples/plot_nn_distillation.py | """
The landing algorithm to train a toy neural network on a distilation task
=========================================================================
"""
from time import time
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
import geoopt
from geoopt.optim import RiemannianSGD
from landing import LandingSGD
torch.manual_seed(1)
p = 100
n_layers = 10
class Network(nn.Module):
def __init__(self, n_layers=10):
super().__init__()
self.weights = geoopt.ManifoldParameter(
torch.randn(n_layers, p, p),
manifold=geoopt.Stiefel(canonical=False),
)
self.biases = torch.nn.Parameter(torch.randn(n_layers, p))
self.n_layers = n_layers
def forward(self, x):
for i in range(self.n_layers):
x = torch.tanh(x.mm(self.weights[i].t()) + self.biases[i])
return x
teacher = Network(n_layers)
# deactivate the parameters in teacher
for param in teacher.parameters():
param.requires_grad = False
init_weights = torch.randn(n_layers, p, p)
init_biases = torch.randn(n_layers, p)
method_names = ["Landing", "Retraction"]
methods = [LandingSGD, RiemannianSGD]
n_epochs = 10
learning_rate = 0.1
momentum = 0.9
batch_size = 10
batch_per_epoch = 10
test_size = 1000
plt.figure()
for method_name, method in zip(method_names, methods):
student_network = Network()
with torch.no_grad():
student_network.weights.data = init_weights.clone()
student_network.weights.proj_
student_network.biases.data = init_biases.clone()
optimizer_ortho = method(
(student_network.weights,), lr=learning_rate, momentum=momentum
)
optimizer_bias = optim.SGD(
(student_network.biases,), lr=learning_rate, momentum=momentum
)
test_losses = []
time_epochs = []
for epoch in range(n_epochs):
# train
t0 = time()
for batch in range(batch_per_epoch):
optimizer_ortho.zero_grad()
optimizer_bias.zero_grad()
x = torch.randn(batch_size, p)
target = teacher(x)
pred = student_network(x)
loss = torch.mean((target - pred) ** 2)
loss.backward()
optimizer_ortho.step()
optimizer_bias.step()
time_epochs.append(time() - t0)
# test
x_test = torch.randn(test_size, p)
target = teacher(x_test)
pred = student_network(x_test)
test_mse = torch.mean((target - pred) ** 2).item()
test_losses.append(test_mse)
print(
"Method %s, time for an epoch : %.1e sec, test MSE: %.2e"
% (method_name, time_epochs[-1], test_mse)
)
plt.semilogy(
torch.cumsum(torch.tensor(time_epochs), dim=0),
test_losses,
label=method_name,
)
plt.legend()
plt.xlabel("time")
plt.ylabel("test error")
plt.show()
| 2,899 | 26.102804 | 73 | py |
landing | landing-main/tests/test_landing.py | import pytest
import torch
import geoopt
from landing import LandingSGD
torch.manual_seed(1)
@pytest.mark.parametrize("momentum", [0, 0.5])
@pytest.mark.parametrize("shape", [(3, 3), (4, 3, 3), (5, 4, 3, 3)])
@pytest.mark.parametrize("safe_step", [0.3, False])
def test_forward(shape, momentum, safe_step):
param = geoopt.ManifoldParameter(
torch.randn(*shape), manifold=geoopt.Stiefel()
)
optimizer = LandingSGD(
(param,), lr=0.1, momentum=momentum, safe_step=safe_step
)
optimizer.zero_grad()
loss = (param**2).sum()
loss.backward()
optimizer.step()
@pytest.mark.parametrize("safe_step", [0.3, 0.1, 1e-2])
@pytest.mark.parametrize("lbda", [0.1, 1, 10])
@pytest.mark.parametrize("n_features", [2, 10])
def test_safe(safe_step, lbda, n_features, n_reps=10, n_iters=100, tol=1e-6):
p = n_features
shape = (p, p)
for _ in range(n_reps):
param = geoopt.ManifoldParameter(
torch.randn(*shape), manifold=geoopt.Stiefel()
)
# param.requires_grad = False
# param.proj_()
param.requires_grad = True
target = torch.randn(*shape)
# take large lr so that the safe step always triggers
optimizer = LandingSGD(
(param,), lr=1e5, safe_step=safe_step, lambda_regul=lbda
)
for n_iter in range(n_iters):
# print(param)
optimizer.zero_grad()
loss = (param * target).sum()
loss.backward()
optimizer.step()
# print(param)
orth_error = torch.norm(param.t().mm(param) - torch.eye(p))
assert orth_error < safe_step + tol
def test_convergence():
p = 3
param = geoopt.ManifoldParameter(
torch.eye(p) + 0.1 * torch.randn(p, p), manifold=geoopt.Stiefel()
)
optimizer = LandingSGD((param,), lr=0.1)
n_epochs = 100
# Trace maximization: should end up in identity
for _ in range(n_epochs):
optimizer.zero_grad()
loss = -torch.trace(param)
loss.backward()
optimizer.step()
assert loss.item() + p < 1e-5
orth_error = torch.norm(param.mm(param.t()) - torch.eye(p))
assert orth_error < 1e-5
| 2,207 | 29.246575 | 77 | py |
landing | landing-main/landing/optimizer.py | import torch
import torch.optim.optimizer
import geoopt
from geoopt.tensor import ManifoldParameter, ManifoldTensor
from geoopt.optim.mixin import OptimMixin
__all__ = ["LandingSGD"]
def _check_orthogonal(param):
if not hasattr(param, "manifold"):
raise TypeError("Parameter should be a geoopt parameter")
if not isinstance(
param.manifold, geoopt.manifolds.stiefel.CanonicalStiefel
) and not isinstance(
param.manifold, geoopt.manifolds.stiefel.EuclideanStiefel
):
raise TypeError("Parameters should be on the Stiefel manifold")
*_, p, q = param.shape
if p != q:
raise ValueError(
"The last two dimensions of the parameters should be the same. "
"Only square matrices are supported so far"
)
def _safe_step_size(d, g, lambda_regul, eps_d, reg=1e-7):
"""Compute the safe step size
Parameters
----------
d : float
The distance to the manifold
g : float
The norm of the landing update
lambda_regul : float
The regularisation parameter
eps_d : float
The tolerance: the maximal allowed distance to the manifold
Return
------
sol : float
The maximal step-size one can take
"""
beta = lambda_regul * d * (1 - d)
alpha = g**2
tmp1 = torch.maximum(alpha * (eps_d - d), torch.zeros(1))
sol = (beta + torch.sqrt(beta**2 + tmp1)) / (alpha + reg)
return torch.minimum(sol, 1.0 / (2.0 * lambda_regul) * torch.ones(1))
def _landing_direction(point, grad, lambda_regul, learning_rate, safe_step):
*_, p = point.shape
distance = torch.matmul(point, point.transpose(-1, -2)) - torch.eye(
p, device=point.device
)
landing_field = torch.matmul(grad + lambda_regul * distance, point)
if safe_step:
d = torch.norm(distance, dim=(-1, -2))
g = torch.norm(landing_field, dim=(-1, -2))
max_step = _safe_step_size(d, g, lambda_regul, safe_step)
# One step per orthogonal matrix
step_size_shape = list(point.shape)
step_size_shape[-1] = 1
step_size_shape[-2] = 1
step_size = torch.clip(max_step, max=learning_rate).view(
*step_size_shape
)
else:
step_size = learning_rate
return point - step_size * landing_field
class LandingSGD(OptimMixin, torch.optim.Optimizer):
r"""
Landing algorithm on the orthogonal manifold with the same API as
:class:`torch.optim.SGD`.
Parameters
----------
params : iterable
iterable of parameters to optimize or dicts defining
parameter groups. Must contain square orthogonal matrices.
lr : float
learning rate
momentum : float (optional)
momentum factor (default: 0)
weight_decay : float (optional)
weight decay (L2 penalty) (default: 0)
dampening : float (optional)
dampening for momentum (default: 0)
nesterov : bool (optional)
enables Nesterov momentum (default: False)
lambda_regul : float (optional)
the hyperparameter lambda that controls the tradeoff between
optimization in f and landing speed (default: 1.)
check_type : bool (optional)
whether to check that the parameters are all orthogonal matrices
Other Parameters
----------------
stabilize : int
Stabilize parameters if they are off-manifold due to numerical
reasons every ``stabilize`` steps (default: ``None`` -- no stabilize)
"""
def __init__(
self,
params,
lr,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
stabilize=None,
lambda_regul=1.0,
safe_step=0.5,
check_type=True,
):
if lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError(
"Invalid weight_decay value: {}".format(weight_decay)
)
if lambda_regul < 0.0:
raise ValueError(
"Invalid lambda_regul value: {}".format(lambda_regul)
)
defaults = dict(
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
lambda_regul=lambda_regul,
safe_step=safe_step,
check_type=check_type,
)
for param in params:
with torch.no_grad():
param.proj_()
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError(
"Nesterov momentum requires a momentum and zero dampening"
)
super().__init__(params, defaults, stabilize=stabilize)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
with torch.no_grad():
for group in self.param_groups:
if "step" not in group:
group["step"] = 0
weight_decay = group["weight_decay"]
momentum = group["momentum"]
dampening = group["dampening"]
nesterov = group["nesterov"]
learning_rate = group["lr"]
lambda_regul = group["lambda_regul"]
safe_step = group["safe_step"]
check_type = group["check_type"]
group["step"] += 1
for point in group["params"]:
if check_type:
_check_orthogonal(point)
grad = point.grad
if grad is None:
continue
if grad.is_sparse:
raise RuntimeError(
"LandingSGD does not support sparse gradients"
)
state = self.state[point]
# State initialization
if len(state) == 0:
if momentum > 0:
state["momentum_buffer"] = grad.clone()
grad.add_(point, alpha=weight_decay)
grad = torch.matmul(
grad, point.transpose(-1, -2)
) # relative gradient
grad = grad - grad.transpose(-1, -2)
grad /= 2.0
if momentum > 0:
momentum_buffer = state["momentum_buffer"]
momentum_buffer.mul_(momentum).add_(
grad, alpha=1 - dampening
)
if nesterov:
grad = grad.add_(momentum_buffer, alpha=momentum)
else:
grad = momentum_buffer
# landing method
new_point = _landing_direction(
point, grad, lambda_regul, learning_rate, safe_step
)
# use copy only for user facing point
point.copy_(new_point)
else:
new_point = _landing_direction(
point, grad, lambda_regul, learning_rate, safe_step
)
point.copy_(new_point)
if (
group["stabilize"] is not None
and group["step"] % group["stabilize"] == 0
):
self.stabilize_group(group)
return loss
@torch.no_grad()
def stabilize_group(self, group):
for p in group["params"]:
if not isinstance(p, (ManifoldParameter, ManifoldTensor)):
continue
manifold = p.manifold
momentum = group["momentum"]
p.copy_(manifold.projx(p))
if momentum > 0:
param_state = self.state[p]
if not param_state: # due to None grads
continue
if "momentum_buffer" in param_state:
buf = param_state["momentum_buffer"]
buf.copy_(manifold.proju(p, buf))
| 8,343 | 34.65812 | 79 | py |
Montreal-Forced-Aligner | Montreal-Forced-Aligner-main/montreal_forced_aligner/diarization/multiprocessing.py | """Multiprocessing functionality for speaker diarization"""
from __future__ import annotations
import logging
import multiprocessing as mp
import os
import queue
import subprocess
import sys
import time
import typing
from pathlib import Path
import dataclassy
import hdbscan
import kneed
import librosa
import numpy as np
import sqlalchemy
from scipy.spatial import distance
from sklearn import cluster, manifold, metrics, neighbors, preprocessing
from sqlalchemy.orm import Session, joinedload
from montreal_forced_aligner.abc import KaldiFunction
from montreal_forced_aligner.config import GLOBAL_CONFIG, IVECTOR_DIMENSION, XVECTOR_DIMENSION
from montreal_forced_aligner.corpus.features import (
PldaModel,
classify_plda,
compute_classification_stats,
pairwise_plda_distance_matrix,
)
from montreal_forced_aligner.data import (
ClusterType,
DistanceMetric,
ManifoldAlgorithm,
MfaArguments,
)
from montreal_forced_aligner.db import File, Job, SoundFile, Speaker, Utterance
from montreal_forced_aligner.utils import Stopped, read_feats, thirdparty_binary
try:
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
torch_logger = logging.getLogger("speechbrain.utils.torch_audio_backend")
torch_logger.setLevel(logging.ERROR)
torch_logger = logging.getLogger("speechbrain.utils.train_logger")
torch_logger.setLevel(logging.ERROR)
import torch
from speechbrain.pretrained import EncoderClassifier, SpeakerRecognition
FOUND_SPEECHBRAIN = True
except (ImportError, OSError):
FOUND_SPEECHBRAIN = False
EncoderClassifier = None
SpeakerRecognition = None
__all__ = [
"PldaClassificationArguments",
"PldaClassificationFunction",
"ComputeEerArguments",
"ComputeEerFunction",
"SpeechbrainArguments",
"SpeechbrainClassificationFunction",
"SpeechbrainEmbeddingFunction",
"cluster_matrix",
"visualize_clusters",
]
logger = logging.getLogger("mfa")
# noinspection PyUnresolvedReferences
@dataclassy.dataclass(slots=True)
class PldaClassificationArguments(MfaArguments):
    """Arguments for :class:`~montreal_forced_aligner.diarization.multiprocessing.PldaClassificationFunction`"""
    plda: PldaModel  # trained PLDA model used for scoring utterances against speakers
    train_ivector_path: Path  # path to the speaker-level ivector archive
    num_utts_path: Path  # path to the per-speaker utterance-count file
    use_xvector: bool  # True when speechbrain x-vectors are used instead of Kaldi ivectors
# noinspection PyUnresolvedReferences
@dataclassy.dataclass(slots=True)
class ComputeEerArguments(MfaArguments):
    """Arguments for :class:`~montreal_forced_aligner.diarization.multiprocessing.ComputeEerFunction`"""
    plda: PldaModel  # trained PLDA model (used when metric is plda)
    metric: DistanceMetric  # distance metric to score utterance pairs with
    use_xvector: bool  # True when speechbrain x-vectors are used instead of Kaldi ivectors
    limit_within_speaker: int  # max sampled utterances/comparisons within a speaker
    limit_per_speaker: int  # max sampled utterances/comparisons per other speaker
# noinspection PyUnresolvedReferences
@dataclassy.dataclass(slots=True)
class SpeechbrainArguments(MfaArguments):
    """Arguments for :class:`~montreal_forced_aligner.diarization.multiprocessing.SpeechbrainClassificationFunction`"""
    cuda: bool  # run the speechbrain model on GPU
    cluster: bool  # embeddings are intended for clustering (normalized) rather than classification
def visualize_clusters(
    ivectors: np.ndarray,
    manifold_algorithm: ManifoldAlgorithm,
    metric_type: DistanceMetric,
    n_neighbors: int = 10,
    plda: typing.Optional[PldaModel] = None,
    quick=False,
):
    """
    Reduce ivectors to 2D points for cluster visualization.
    Parameters
    ----------
    ivectors: numpy.ndarray
        Ivectors to embed in two dimensions
    manifold_algorithm: :class:`~montreal_forced_aligner.data.ManifoldAlgorithm`
        Manifold learning algorithm (MDS, t-SNE, spectral embedding, or isomap)
    metric_type: :class:`~montreal_forced_aligner.data.DistanceMetric`
        Distance metric to use
    n_neighbors: int
        Neighborhood size for neighbor-based algorithms (also used as t-SNE perplexity)
    plda: :class:`~montreal_forced_aligner.corpus.features.PldaModel`, optional
        PLDA model; required when ``metric_type`` is plda
    quick: bool
        Trade accuracy for speed (fewer iterations, coarser t-SNE angle)
    Returns
    -------
    numpy.ndarray
        2D coordinates, one row per input ivector
    """
    logger.debug(f"Generating 2D representation of ivectors with {manifold_algorithm.name}...")
    begin = time.time()
    to_fit = ivectors
    metric = metric_type.name
    tsne_angle = 0.5
    tsne_iterations = 1000
    mds_iterations = 300
    if quick:
        tsne_angle = 0.8
        tsne_iterations = 500
        mds_iterations = 150
    if metric_type is DistanceMetric.plda:
        # PLDA has no sklearn metric; precompute the full pairwise distance matrix
        logger.info("Generating precomputed distance matrix...")
        to_fit = metrics.pairwise_distances(
            ivectors, ivectors, metric=plda.distance, n_jobs=GLOBAL_CONFIG.current_profile.num_jobs
        )
        np.fill_diagonal(to_fit, 0)
        metric = "precomputed"
    if manifold_algorithm is ManifoldAlgorithm.mds:
        if metric_type is DistanceMetric.cosine:
            # L2-normalizing makes euclidean distance monotonic with cosine distance
            to_fit = preprocessing.normalize(ivectors, norm="l2")
            metric = "euclidean"
        points = manifold.MDS(
            dissimilarity=metric,
            random_state=0,
            n_jobs=GLOBAL_CONFIG.current_profile.num_jobs,
            max_iter=mds_iterations,
            metric=False,
            normalized_stress=True,
        ).fit_transform(to_fit)
    elif manifold_algorithm is ManifoldAlgorithm.tsne:
        points = manifold.TSNE(
            metric=metric,
            random_state=0,
            perplexity=n_neighbors,
            init="pca" if metric != "precomputed" else "random",
            n_jobs=GLOBAL_CONFIG.current_profile.num_jobs,
            angle=tsne_angle,
            n_iter=tsne_iterations,
        ).fit_transform(to_fit)
    elif manifold_algorithm is ManifoldAlgorithm.spectral:
        points = manifold.SpectralEmbedding(
            affinity="nearest_neighbors",
            random_state=0,
            n_neighbors=n_neighbors,
            n_jobs=GLOBAL_CONFIG.current_profile.num_jobs,
        ).fit_transform(to_fit)
    elif manifold_algorithm is ManifoldAlgorithm.isomap:
        points = manifold.Isomap(
            metric=metric, n_neighbors=n_neighbors, n_jobs=GLOBAL_CONFIG.current_profile.num_jobs
        ).fit_transform(to_fit)
    else:
        raise NotImplementedError
    logger.debug(f"Generating 2D representation took {time.time() - begin:.3f} seconds")
    return points
def calculate_distance_threshold(
    metric: typing.Union[str, callable],
    to_fit: np.ndarray,
    min_samples: int = 5,
    working_directory: str = None,
    score_metric_params=None,
    no_visuals: bool = False,
) -> float:
    """
    Estimate an absolute distance threshold from the k-nearest-neighbor distance curve.
    The distance from each point to its ``min_samples``-th nearest neighbor is sorted
    and the knee of the resulting curve (located with :mod:`kneed`) is used as the
    threshold, the standard heuristic for choosing a DBSCAN-style epsilon.
    Parameters
    ----------
    metric: str or callable
        Metric passed to :class:`sklearn.neighbors.NearestNeighbors` (may be
        ``"precomputed"`` when ``to_fit`` is a distance matrix)
    to_fit: numpy.ndarray
        Ivectors, or a precomputed distance matrix
    min_samples: int
        Neighborhood size used for the k-distance curve
    working_directory: str, optional
        Directory to save the debug k-distance plot into
    score_metric_params: dict, optional
        Extra parameters forwarded to the metric
    no_visuals: bool
        Skip plotting even when debug mode is enabled
    Returns
    -------
    float
        Absolute distance threshold
    """
    logger.debug(f"Calculating distance threshold from {min_samples} nearest neighbors...")
    nbrs = neighbors.NearestNeighbors(
        n_neighbors=min_samples,
        metric=metric,
        metric_params=score_metric_params,
        n_jobs=GLOBAL_CONFIG.current_profile.num_jobs,
    ).fit(to_fit)
    distances, indices = nbrs.kneighbors(to_fit)
    # Keep only the distance to the min_samples-th neighbor, then sort ascending
    distances = distances[:, min_samples - 1]
    distances = np.sort(distances, axis=0)
    kneedle = kneed.KneeLocator(np.arange(distances.shape[0]), distances, curve="concave", S=5)
    index = kneedle.elbow
    # NOTE(review): KneeLocator.elbow can be None when no knee is detected, which would
    # make the indexing below raise — confirm callers always produce a knee.
    threshold = distances[index]
    min_distance = np.min(distances)
    max_distance = np.max(distances)
    logger.debug(
        f"Distance threshold was set to {threshold} (range = {min_distance:.4f} - {max_distance:.4f})"
    )
    if GLOBAL_CONFIG.current_profile.debug and not no_visuals:
        import seaborn as sns
        from matplotlib import pyplot as plt
        sns.set()
        plt.plot(distances)
        plt.xlabel("Index")
        plt.ylabel("Distance to NN")
        plt.axvline(index, c="k")
        plt.text(
            index, max_distance, "threshold", horizontalalignment="right", verticalalignment="top"
        )
        if working_directory is not None:
            plot_path = os.path.join(working_directory, "nearest_neighbor_distances.png")
            close_string = f"Closing k-distance plot, it has been saved to {plot_path}."
            plt.savefig(plot_path, transparent=True)
        else:
            close_string = "Closing k-distance plot."
        if GLOBAL_CONFIG.current_profile.verbose:
            plt.show(block=False)
            plt.pause(10)
        logger.debug(close_string)
        plt.close()
    return float(threshold)
def cluster_matrix(
    ivectors: np.ndarray,
    cluster_type: ClusterType,
    metric: DistanceMetric = DistanceMetric.euclidean,
    strict=True,
    no_visuals=False,
    working_directory=None,
    **kwargs,
) -> np.ndarray:
    """
    Wrapper function for sklearn's clustering methods
    Parameters
    ----------
    ivectors: numpy.ndarray
        Ivectors to cluster
    cluster_type: :class:`~montreal_forced_aligner.data.ClusterType`
        Clustering algorithm
    metric: :class:`~montreal_forced_aligner.data.DistanceMetric`
        Distance metric to use in clustering
    strict: bool
        Flag for whether to raise exceptions when only one cluster is found
    no_visuals: bool
        Skip generating debug plots when estimating distance thresholds
    working_directory: str, optional
        Directory used for saving debug plots
    kwargs
        Extra keyword arguments passed to the sklearn cluster classes; the keys
        ``distance_threshold``, ``plda`` and ``min_cluster_size`` are consumed here
    Returns
    -------
    numpy.ndarray
        Cluster labels for each utterance
    """
    from montreal_forced_aligner.config import GLOBAL_CONFIG
    logger.debug(f"Running {cluster_type}...")
    # MiniBatchKMeans on Windows is restricted to one BLAS thread to avoid a known hang
    if sys.platform == "win32" and cluster_type is ClusterType.kmeans:
        os.environ["OMP_NUM_THREADS"] = "1"
        os.environ["OPENBLAS_NUM_THREADS"] = "1"
        os.environ["MKL_NUM_THREADS"] = "1"
    else:
        os.environ["OMP_NUM_THREADS"] = f"{GLOBAL_CONFIG.current_profile.num_jobs}"
        os.environ["OPENBLAS_NUM_THREADS"] = f"{GLOBAL_CONFIG.current_profile.num_jobs}"
        os.environ["MKL_NUM_THREADS"] = f"{GLOBAL_CONFIG.current_profile.num_jobs}"
    distance_threshold = kwargs.pop("distance_threshold", None)
    plda: PldaModel = kwargs.pop("plda", None)
    min_cluster_size = kwargs.pop("min_cluster_size", 15)
    score_metric = metric.value
    if score_metric == "plda":
        score_metric = plda
    to_fit = ivectors
    score_metric_params = None
    # NOTE(review): score_metric was just rebound to the PldaModel object above, so this
    # string comparison only matches if PldaModel defines equality with "plda" —
    # verify this precomputed-PLDA branch is actually reachable.
    if score_metric == "plda" and cluster_type is not ClusterType.affinity:
        logger.debug("Generating precomputed distance matrix...")
        begin = time.time()
        to_fit = to_fit.astype("float64")
        psi = plda.psi.astype("float64")
        to_fit = pairwise_plda_distance_matrix(to_fit, psi)
        logger.debug(f"Precomputed distance matrix took {time.time() - begin:.3f} seconds")
        score_metric = "precomputed"
    if cluster_type is ClusterType.affinity:
        affinity = metric
        if metric is DistanceMetric.cosine:
            # L2-normalizing makes euclidean distance monotonic with cosine distance
            to_fit = preprocessing.normalize(to_fit, norm="l2")
            score_metric = "euclidean"
            affinity = "euclidean"
        elif metric is DistanceMetric.plda:
            logger.debug("Generating precomputed distance matrix...")
            to_fit = metrics.pairwise_distances(
                to_fit,
                to_fit,
                metric=plda.log_likelihood,
                n_jobs=GLOBAL_CONFIG.current_profile.num_jobs,
            )
            score_metric = "precomputed"
            affinity = "precomputed"
        c_labels = cluster.AffinityPropagation(
            affinity=affinity,
            copy=False,
            random_state=GLOBAL_CONFIG.current_profile.seed,
            verbose=GLOBAL_CONFIG.current_profile.verbose,
            **kwargs,
        ).fit_predict(to_fit)
    elif cluster_type is ClusterType.agglomerative:
        if metric is DistanceMetric.cosine:
            to_fit = preprocessing.normalize(to_fit, norm="l2")
            score_metric = "euclidean"
        if not kwargs["n_clusters"]:
            # Without a target cluster count, derive a merge threshold from the data
            if distance_threshold is not None:
                eps = distance_threshold
            else:
                eps = calculate_distance_threshold(
                    score_metric,
                    to_fit,
                    min_cluster_size,
                    working_directory,
                    score_metric_params=score_metric_params,
                    no_visuals=no_visuals,
                )
            kwargs["distance_threshold"] = eps
        c_labels = cluster.AgglomerativeClustering(metric=score_metric, **kwargs).fit_predict(
            to_fit
        )
    elif cluster_type is ClusterType.spectral:
        affinity = "nearest_neighbors"
        if metric is DistanceMetric.cosine:
            to_fit = preprocessing.normalize(to_fit, norm="l2")
            score_metric = "euclidean"
        elif metric is DistanceMetric.plda:
            logger.info("Generating precomputed distance matrix...")
            affinity = "precomputed_nearest_neighbors"
            to_fit = metrics.pairwise_distances(
                to_fit, to_fit, metric=score_metric, n_jobs=GLOBAL_CONFIG.current_profile.num_jobs
            )
            np.fill_diagonal(to_fit, 0)
            score_metric = "precomputed"
        c_labels = cluster.SpectralClustering(
            affinity=affinity,
            n_jobs=GLOBAL_CONFIG.current_profile.num_jobs,
            random_state=GLOBAL_CONFIG.current_profile.seed,
            verbose=GLOBAL_CONFIG.current_profile.verbose,
            **kwargs,
        ).fit_predict(to_fit)
    elif cluster_type is ClusterType.dbscan:
        if distance_threshold is not None:
            eps = distance_threshold
        else:
            eps = calculate_distance_threshold(
                score_metric,
                to_fit,
                min_cluster_size,
                working_directory,
                score_metric_params=score_metric_params,
                no_visuals=no_visuals,
            )
        c_labels = cluster.DBSCAN(
            min_samples=min_cluster_size,
            metric=score_metric,
            eps=eps,
            n_jobs=GLOBAL_CONFIG.current_profile.num_jobs,
            **kwargs,
        ).fit_predict(to_fit)
    elif cluster_type is ClusterType.meanshift:
        if score_metric == "cosine":
            to_fit = preprocessing.normalize(to_fit, norm="l2")
            score_metric = "euclidean"
        c_labels = cluster.MeanShift(
            n_jobs=GLOBAL_CONFIG.current_profile.num_jobs, **kwargs
        ).fit_predict(to_fit)
    elif cluster_type is ClusterType.hdbscan:
        if score_metric == "cosine":
            to_fit = preprocessing.normalize(to_fit, norm="l2")
            score_metric = "euclidean"
        min_samples = max(5, int(min_cluster_size / 4))
        if distance_threshold is not None:
            eps = distance_threshold
        else:
            eps = calculate_distance_threshold(
                score_metric,
                to_fit,
                min_cluster_size,
                working_directory,
                score_metric_params=score_metric_params,
                no_visuals=no_visuals,
            )
        # The faster boruvka_balltree algorithm cannot be used with precomputed matrices
        if score_metric == "precomputed" or metric is DistanceMetric.plda:
            algorithm = "best"
        else:
            algorithm = "boruvka_balltree"
        c_labels = hdbscan.HDBSCAN(
            min_samples=min_samples,
            min_cluster_size=min_cluster_size,
            cluster_selection_epsilon=eps,
            metric=score_metric,
            algorithm=algorithm,
            core_dist_n_jobs=GLOBAL_CONFIG.current_profile.num_jobs,
            **kwargs,
        ).fit_predict(to_fit)
    elif cluster_type is ClusterType.optics:
        if distance_threshold is not None:
            eps = distance_threshold
        else:
            eps = calculate_distance_threshold(
                score_metric,
                to_fit,
                min_cluster_size,
                working_directory,
                score_metric_params=score_metric_params,
                no_visuals=no_visuals,
            )
        c_labels = cluster.OPTICS(
            min_samples=min_cluster_size,
            max_eps=eps,
            metric=score_metric,
            n_jobs=GLOBAL_CONFIG.current_profile.num_jobs,
            **kwargs,
        ).fit_predict(to_fit)
    elif cluster_type is ClusterType.kmeans:
        if score_metric == "cosine":
            to_fit = preprocessing.normalize(to_fit, norm="l2")
            score_metric = "euclidean"
        c_labels = cluster.MiniBatchKMeans(
            verbose=GLOBAL_CONFIG.current_profile.verbose, n_init="auto", **kwargs
        ).fit_predict(to_fit)
    else:
        raise NotImplementedError(f"The cluster type '{cluster_type}' is not supported.")
    num_clusters = np.unique(c_labels).shape[0]
    logger.debug(f"Found {num_clusters} clusters")
    try:
        # NOTE(review): as above, score_metric can no longer equal the string "plda"
        # unless PldaModel compares equal to it — confirm intended behavior.
        if score_metric == "plda":
            score_metric = plda.distance
        elif score_metric == "precomputed":
            if cluster_type is ClusterType.affinity:
                # Affinity propagation used similarities; convert back to distances
                to_fit = np.max(to_fit) - to_fit
                np.fill_diagonal(to_fit, 0)
        score = metrics.silhouette_score(to_fit, c_labels, metric=score_metric)
        logger.debug(f"Silhouette score (-1-1): {score}")
    except ValueError:
        if num_clusters == 1:
            logger.warning(
                "Only found one cluster, please adjust cluster parameters to generate more clusters."
            )
            if strict:
                raise
    # Restore the profile's normal BLAS thread settings
    os.environ["OMP_NUM_THREADS"] = f"{GLOBAL_CONFIG.current_profile.blas_num_threads}"
    os.environ["OPENBLAS_NUM_THREADS"] = f"{GLOBAL_CONFIG.current_profile.blas_num_threads}"
    os.environ["MKL_NUM_THREADS"] = f"{GLOBAL_CONFIG.current_profile.blas_num_threads}"
    return c_labels
class PldaClassificationFunction(KaldiFunction):
    """
    Multiprocessing function to classify utterances by PLDA scoring against speaker ivectors
    Parameters
    ----------
    args: :class:`~montreal_forced_aligner.diarization.multiprocessing.PldaClassificationArguments`
        Arguments for the function
    """
    def __init__(self, args: PldaClassificationArguments):
        super().__init__(args)
        self.plda = args.plda
        self.train_ivector_path = args.train_ivector_path
        self.num_utts_path = args.num_utts_path
        self.use_xvector = args.use_xvector
    def _run(self) -> typing.Generator[typing.Tuple[int, int, float], None, None]:
        """Run the function, yielding (utterance id, speaker id, PLDA score) tuples"""
        utterance_counts = {}
        with open(self.num_utts_path) as f:
            for line in f:
                speaker, utt_count = line.strip().split()
                utt_count = int(utt_count)
                utterance_counts[int(speaker)] = utt_count
        # Mean-normalize the speaker ivectors and stream them back as text
        input_proc = subprocess.Popen(
            [
                thirdparty_binary("ivector-subtract-global-mean"),
                f"ark:{self.train_ivector_path}",
                "ark,t:-",
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            env=os.environ,
        )
        speaker_ids = []
        speaker_counts = []
        if self.use_xvector:
            dim = XVECTOR_DIMENSION
        else:
            dim = IVECTOR_DIMENSION
        speaker_ivectors = np.empty((len(utterance_counts), dim))
        # NOTE(review): if any archive speaker is missing from utterance_counts, trailing
        # rows of speaker_ivectors stay uninitialized while speaker_counts stays shorter —
        # verify the archive and count file always cover the same speakers.
        for speaker_id, ivector in read_feats(input_proc, raw_id=True):
            speaker_id = int(speaker_id)
            if speaker_id not in utterance_counts:
                continue
            speaker_ivectors[len(speaker_ids), :] = ivector
            speaker_ids.append(speaker_id)
            speaker_counts.append(utterance_counts[speaker_id])
        speaker_counts = np.array(speaker_counts)
        speaker_ivectors = speaker_ivectors.astype("float64")
        self.plda.psi = self.plda.psi.astype("float64")
        speaker_ivectors = self.plda.process_ivectors(speaker_ivectors, counts=speaker_counts)
        classification_args = compute_classification_stats(
            speaker_ivectors, self.plda.psi, counts=speaker_counts
        )
        # Drain any remaining stdout so the subprocess can exit cleanly
        lines = []
        for line in input_proc.stdout:
            lines.append(line)
        input_proc.wait()
        with Session(self.db_engine()) as session:
            job: Job = (
                session.query(Job)
                .options(joinedload(Job.corpus, innerjoin=True))
                .filter(Job.id == self.job_name)
                .first()
            )
            utterances = (
                session.query(Utterance.id, Utterance.plda_vector)
                .filter(Utterance.plda_vector != None)  # noqa
                .filter(Utterance.job_id == job.id)
                .order_by(Utterance.kaldi_id)
            )
            for u_id, u_ivector in utterances:
                ind, score = classify_plda(u_ivector.astype("float64"), *classification_args)
                speaker = speaker_ids[ind]
                yield u_id, speaker, score
class ComputeEerFunction(KaldiFunction):
    """
    Multiprocessing function to sample within-speaker (match) and cross-speaker (mismatch)
    utterance-pair scores for computing an equal error rate
    Parameters
    ----------
    args: :class:`~montreal_forced_aligner.diarization.multiprocessing.ComputeEerArguments`
        Arguments for the function
    """
    def __init__(self, args: ComputeEerArguments):
        super().__init__(args)
        self.plda = args.plda
        self.metric = args.metric
        self.use_xvector = args.use_xvector
        self.limit_within_speaker = args.limit_within_speaker
        self.limit_per_speaker = args.limit_per_speaker
    # noinspection PyTypeChecker
    def _run(
        self,
    ) -> typing.Generator[typing.Tuple[typing.List[float], typing.List[float]], None, None]:
        """Run the function, yielding (match scores, mismatch scores) per speaker"""
        if self.use_xvector:
            columns = [Utterance.id, Utterance.speaker_id, Utterance.xvector]
            filter = Utterance.xvector != None  # noqa
        else:
            columns = [Utterance.id, Utterance.speaker_id, Utterance.plda_vector]
            filter = Utterance.plda_vector != None  # noqa
        with Session(self.db_engine()) as session:
            speakers = (
                session.query(Speaker.id)
                .join(Speaker.utterances)
                .filter(Utterance.job_id == self.job_name)
                .order_by(Speaker.id)
                .distinct(Speaker.id)
            )
            for (s_id,) in speakers:
                match_scores = []
                mismatch_scores = []
                # Sample utterance pairs within the same speaker (match trials)
                random_within_speaker = (
                    session.query(*columns)
                    .filter(filter, Utterance.speaker_id == s_id)
                    .order_by(sqlalchemy.func.random())
                    .limit(self.limit_within_speaker)
                )
                for u_id, s_id, u_ivector in random_within_speaker:
                    comp_query = (
                        session.query(columns[2])
                        .filter(filter, Utterance.speaker_id == s_id, Utterance.id != u_id)
                        .order_by(sqlalchemy.func.random())
                        .limit(self.limit_within_speaker)
                    )
                    for (u2_ivector,) in comp_query:
                        if self.metric is DistanceMetric.plda:
                            score = self.plda.distance(u_ivector, u2_ivector)
                        elif self.metric is DistanceMetric.cosine:
                            score = distance.cosine(u_ivector, u2_ivector)
                        else:
                            score = distance.euclidean(u_ivector, u2_ivector)
                        match_scores.append(score)
                # Sample pairs against every other speaker (mismatch trials);
                # note this query is not restricted to the current job
                other_speakers = session.query(Speaker.id).filter(Speaker.id != s_id)
                for (o_s_id,) in other_speakers:
                    random_out_speaker = (
                        session.query(columns[2])
                        .filter(filter, Utterance.speaker_id == s_id)
                        .order_by(sqlalchemy.func.random())
                        .limit(self.limit_per_speaker)
                    )
                    for (u_ivector,) in random_out_speaker:
                        comp_query = (
                            session.query(columns[2])
                            .filter(filter, Utterance.speaker_id == o_s_id)
                            .order_by(sqlalchemy.func.random())
                            .limit(self.limit_per_speaker)
                        )
                        for (u2_ivector,) in comp_query:
                            if self.metric is DistanceMetric.plda:
                                score = self.plda.distance(u_ivector, u2_ivector)
                            elif self.metric is DistanceMetric.cosine:
                                score = distance.cosine(u_ivector, u2_ivector)
                            else:
                                score = distance.euclidean(u_ivector, u2_ivector)
                            mismatch_scores.append(score)
                yield match_scores, mismatch_scores
class SpeechbrainClassificationFunction(KaldiFunction):
    """
    Multiprocessing function to classify speakers based on a speechbrain model
    Parameters
    ----------
    args: :class:`~montreal_forced_aligner.diarization.multiprocessing.SpeechbrainArguments`
        Arguments for the function
    """
    def __init__(self, args: SpeechbrainArguments):
        super().__init__(args)
        self.cuda = args.cuda
        self.cluster = args.cluster
    def _run(self) -> typing.Generator[typing.Tuple[int, str, float], None, None]:
        """Run the function, yielding (utterance id, speaker label, confidence) tuples"""
        run_opts = None
        if self.cuda:
            run_opts = {"device": "cuda"}
        # NOTE(review): the EncoderClassifier is saved under the "SpeakerRecognition"
        # directory here (elsewhere it uses "EncoderClassifier") — confirm intended.
        model = EncoderClassifier.from_hparams(
            source="speechbrain/spkrec-ecapa-voxceleb",
            savedir=os.path.join(
                GLOBAL_CONFIG.current_profile.temporary_directory,
                "models",
                "SpeakerRecognition",
            ),
            run_opts=run_opts,
        )
        device = torch.device("cuda" if self.cuda else "cpu")
        with Session(self.db_engine()) as session:
            job: Job = (
                session.query(Job)
                .options(joinedload(Job.corpus, innerjoin=True))
                .filter(Job.id == self.job_name)
                .first()
            )
            utterances = session.query(Utterance.id, Utterance.xvector).filter(
                Utterance.xvector != None, Utterance.job_id == job.id  # noqa
            )
            for u_id, ivector in utterances:
                # Classifier expects a (batch, time, features) tensor
                ivector = torch.tensor(ivector, device=device).unsqueeze(0).unsqueeze(0)
                out_prob = model.mods.classifier(ivector).squeeze(1)
                score, index = torch.max(out_prob, dim=-1)
                text_lab = model.hparams.label_encoder.decode_torch(index)
                new_speaker = text_lab[0]
                # Explicitly drop tensors before emptying the CUDA cache
                del out_prob
                del index
                yield u_id, new_speaker, float(score.cpu().numpy())
                del text_lab
                del new_speaker
                del score
                if self.cuda:
                    torch.cuda.empty_cache()
        del model
        if self.cuda:
            torch.cuda.empty_cache()
class SpeechbrainEmbeddingFunction(KaldiFunction):
    """
    Multiprocessing function to generate xvector embeddings from a speechbrain model
    Parameters
    ----------
    args: :class:`~montreal_forced_aligner.diarization.multiprocessing.SpeechbrainArguments`
        Arguments for the function
    """
    def __init__(self, args: SpeechbrainArguments):
        super().__init__(args)
        self.cuda = args.cuda
        self.cluster = args.cluster
    def _run(self) -> typing.Generator[typing.Tuple[int, np.ndarray], None, None]:
        """Run the function, yielding (utterance id, embedding vector) tuples"""
        run_opts = None
        if self.cuda:
            run_opts = {"device": "cuda"}
        # SpeakerRecognition is used when clustering so embeddings are normalized
        if self.cluster:
            model_class = SpeakerRecognition
        else:
            model_class = EncoderClassifier
        model = model_class.from_hparams(
            source="speechbrain/spkrec-ecapa-voxceleb",
            savedir=os.path.join(
                GLOBAL_CONFIG.current_profile.temporary_directory,
                "models",
                "SpeakerRecognition",
            ),
            run_opts=run_opts,
        )
        # Waveforms are loaded in a background process to overlap I/O with inference
        return_q = mp.Queue(2)
        finished_adding = Stopped()
        stopped = Stopped()
        loader = UtteranceFileLoader(
            self.job_name, self.db_string, return_q, stopped, finished_adding
        )
        loader.start()
        exception = None
        device = torch.device("cuda" if self.cuda else "cpu")
        while True:
            try:
                result = return_q.get(timeout=1)
            except queue.Empty:
                if finished_adding.stop_check():
                    break
                continue
            if isinstance(result, Exception):
                # Record the loader's error (first one wins) so it can be re-raised
                # after cleanup; previously it was discarded and a bare Exception raised.
                if exception is None:
                    exception = result
                stopped.stop()
                continue
            if stopped.stop_check():
                continue
            u_id, y = result
            emb = (
                model.encode_batch(
                    torch.tensor(y[np.newaxis, :], device=device), normalize=self.cluster
                )
                .cpu()
                .numpy()
                .squeeze(axis=1)
            )
            yield u_id, emb[0]
            del emb
            if self.cuda:
                torch.cuda.empty_cache()
        loader.join()
        if exception is not None:
            # Propagate the original loader error instead of a bare Exception
            raise exception
class UtteranceFileLoader(mp.Process):
    """
    Helper process for loading utterance waveforms in parallel with embedding extraction
    Parameters
    ----------
    job_name: int
        Job identifier
    db_string: str
        Connection string for database
    return_q: multiprocessing.Queue
        Queue to put waveforms
    stopped: :class:`~montreal_forced_aligner.utils.Stopped`
        Check for whether the process to exit gracefully
    finished_adding: :class:`~montreal_forced_aligner.utils.Stopped`
        Check for whether the worker has processed all utterances
    """
    def __init__(
        self,
        job_name: int,
        db_string: str,
        return_q: mp.Queue,
        stopped: Stopped,
        finished_adding: Stopped,
    ):
        super().__init__()
        self.job_name = job_name
        self.db_string = db_string
        self.return_q = return_q
        self.stopped = stopped
        self.finished_adding = finished_adding
    def run(self) -> None:
        """
        Run the waveform loading job: query this job's utterances, load each audio
        segment at 16 kHz, and put ``(utterance_id, waveform)`` tuples on the queue.
        Any exception is put on the queue for the consumer to handle; the
        ``finished_adding`` flag is always set on exit.
        """
        # Each process needs its own engine; NullPool avoids sharing pooled connections
        db_engine = sqlalchemy.create_engine(
            self.db_string,
            poolclass=sqlalchemy.NullPool,
            pool_reset_on_return=None,
            isolation_level="AUTOCOMMIT",
            logging_name=f"{type(self).__name__}_engine",
        ).execution_options(logging_token=f"{type(self).__name__}_engine")
        with Session(db_engine) as session:
            try:
                utterances = (
                    session.query(
                        Utterance.id,
                        Utterance.begin,
                        Utterance.duration,
                        SoundFile.sound_file_path,
                    )
                    .join(Utterance.file)
                    .join(File.sound_file)
                    .filter(Utterance.job_id == self.job_name)
                )
                for u_id, begin, duration, sound_file_path in utterances:
                    if self.stopped.stop_check():
                        break
                    # 16 kHz matches the speechbrain ECAPA model's expected sample rate
                    y, _ = librosa.load(
                        sound_file_path,
                        sr=16000,
                        mono=False,
                        offset=begin,
                        duration=duration,
                    )
                    self.return_q.put((u_id, y))
            except Exception as e:
                self.return_q.put(e)
            finally:
                self.finished_adding.stop()
| 31,665 | 35.650463 | 119 | py |
Montreal-Forced-Aligner | Montreal-Forced-Aligner-main/montreal_forced_aligner/diarization/speaker_diarizer.py | """
Speaker classification
======================
"""
from __future__ import annotations
import collections
import csv
import logging
import os
import pickle
import random
import shutil
import subprocess
import sys
import time
import typing
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Optional
import numpy as np
import sqlalchemy
import yaml
from sklearn import decomposition, metrics
from sqlalchemy.orm import joinedload, selectinload
from tqdm.rich import tqdm
from montreal_forced_aligner.abc import FileExporterMixin, TopLevelMfaWorker
from montreal_forced_aligner.alignment.multiprocessing import construct_output_path
from montreal_forced_aligner.config import (
GLOBAL_CONFIG,
IVECTOR_DIMENSION,
MEMORY,
PLDA_DIMENSION,
XVECTOR_DIMENSION,
)
from montreal_forced_aligner.corpus.features import (
ExportIvectorsArguments,
ExportIvectorsFunction,
PldaModel,
)
from montreal_forced_aligner.corpus.ivector_corpus import IvectorCorpusMixin
from montreal_forced_aligner.data import (
ClusterType,
DistanceMetric,
ManifoldAlgorithm,
WorkflowType,
)
from montreal_forced_aligner.db import (
Corpus,
File,
SoundFile,
Speaker,
SpeakerOrdering,
TextFile,
Utterance,
bulk_update,
)
from montreal_forced_aligner.diarization.multiprocessing import (
ComputeEerArguments,
ComputeEerFunction,
PldaClassificationArguments,
PldaClassificationFunction,
SpeechbrainArguments,
SpeechbrainClassificationFunction,
SpeechbrainEmbeddingFunction,
cluster_matrix,
visualize_clusters,
)
from montreal_forced_aligner.exceptions import KaldiProcessingError
from montreal_forced_aligner.helper import load_configuration, mfa_open
from montreal_forced_aligner.models import IvectorExtractorModel
from montreal_forced_aligner.textgrid import export_textgrid
from montreal_forced_aligner.utils import log_kaldi_errors, run_kaldi_function, thirdparty_binary
try:
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
torch_logger = logging.getLogger("speechbrain.utils.torch_audio_backend")
torch_logger.setLevel(logging.ERROR)
torch_logger = logging.getLogger("speechbrain.utils.train_logger")
torch_logger.setLevel(logging.ERROR)
import torch
from speechbrain.pretrained import EncoderClassifier, SpeakerRecognition
from speechbrain.utils.metric_stats import EER
FOUND_SPEECHBRAIN = True
except (ImportError, OSError):
FOUND_SPEECHBRAIN = False
EncoderClassifier = None
if TYPE_CHECKING:
from montreal_forced_aligner.abc import MetaDict
__all__ = ["SpeakerDiarizer"]
logger = logging.getLogger("mfa")
class SpeakerDiarizer(IvectorCorpusMixin, TopLevelMfaWorker, FileExporterMixin):
"""
Class for performing speaker classification, not currently very functional, but
is planned to be expanded in the future
Parameters
----------
ivector_extractor_path : str
Path to ivector extractor model, or "speechbrain"
expected_num_speakers: int, optional
Number of speakers in the corpus, if known
cluster: bool
Flag for whether speakers should be clustered instead of classified
evaluation_mode: bool
Flag for evaluating against existing speaker labels
cuda: bool
Flag for using CUDA for speechbrain models
metric: str or :class:`~montreal_forced_aligner.data.DistanceMetric`
One of "cosine", "plda", or "euclidean"
cluster_type: str or :class:`~montreal_forced_aligner.data.ClusterType`
Clustering algorithm
relative_distance_threshold: float
Threshold to use clustering based on distance
"""
    def __init__(
        self,
        ivector_extractor_path: typing.Union[str, Path] = "speechbrain",
        expected_num_speakers: int = 0,
        cluster: bool = True,
        evaluation_mode: bool = False,
        cuda: bool = False,
        use_pca: bool = True,
        metric: typing.Union[str, DistanceMetric] = "cosine",
        cluster_type: typing.Union[str, ClusterType] = "hdbscan",
        manifold_algorithm: typing.Union[str, ManifoldAlgorithm] = "tsne",
        distance_threshold: float = None,
        score_threshold: float = None,
        min_cluster_size: int = 60,
        max_iterations: int = 10,
        linkage: str = "average",
        **kwargs,
    ):
        """Initialize the diarizer; see the class docstring for parameter details."""
        self.use_xvector = False
        self.ivector_extractor = None
        self.ivector_extractor_path = ivector_extractor_path
        if ivector_extractor_path == "speechbrain":
            # speechbrain is an optional dependency; bail out early if it's missing
            if not FOUND_SPEECHBRAIN:
                logger.error(
                    "Could not import speechbrain, please ensure it is installed via `pip install speechbrain`"
                )
                sys.exit(1)
            self.use_xvector = True
        else:
            # Kaldi ivector extractor; its stored parameters feed the corpus config
            self.ivector_extractor = IvectorExtractorModel(ivector_extractor_path)
            kwargs.update(self.ivector_extractor.parameters)
        super().__init__(**kwargs)
        self.expected_num_speakers = expected_num_speakers
        self.cluster = cluster
        self.metric = DistanceMetric[metric]
        self.cuda = cuda
        self.cluster_type = ClusterType[cluster_type]
        self.manifold_algorithm = ManifoldAlgorithm[manifold_algorithm]
        self.distance_threshold = distance_threshold
        self.score_threshold = score_threshold
        if self.distance_threshold is None:
            # Default cosine-distance threshold tuned for speechbrain x-vectors
            if self.use_xvector:
                self.distance_threshold = 0.25
        self.evaluation_mode = evaluation_mode
        self.min_cluster_size = min_cluster_size
        self.linkage = linkage
        self.use_pca = use_pca
        self.max_iterations = max_iterations
        self.current_labels = []
        self.classification_score = None
        # Score thresholds used during iterative speaker refinement
        self.initial_plda_score_threshold = 0
        self.plda_score_threshold = 10
        self.initial_sb_score_threshold = 0.75
        # Ground-truth mappings populated in setup() when evaluation_mode is on
        self.ground_truth_utt2spk = {}
        self.ground_truth_speakers = {}
        self.single_clusters = set()
        self._unknown_speaker_break_up_count = 0
@classmethod
def parse_parameters(
cls,
config_path: Optional[Path] = None,
args: Optional[Dict[str, Any]] = None,
unknown_args: Optional[List[str]] = None,
) -> MetaDict:
"""
Parse parameters for speaker classification from a config path or command-line arguments
Parameters
----------
config_path: :class:`~pathlib.Path`
Config path
args: dict[str, Any]
Parsed arguments
unknown_args: list[str]
Optional list of arguments that were not parsed
Returns
-------
dict[str, Any]
Configuration parameters
"""
global_params = {}
if config_path and os.path.exists(config_path):
data = load_configuration(config_path)
for k, v in data.items():
if k == "features":
if "type" in v:
v["feature_type"] = v["type"]
del v["type"]
global_params.update(v)
else:
if v is None and k in cls.nullable_fields:
v = []
global_params[k] = v
global_params.update(cls.parse_args(args, unknown_args))
return global_params
    # noinspection PyTypeChecker
    def setup(self) -> None:
        """
        Sets up the corpus and speaker classifier
        Raises
        ------
        :class:`~montreal_forced_aligner.exceptions.KaldiProcessingError`
            If there were any errors in running Kaldi binaries
        """
        if self.initialized:
            return
        super().setup()
        self.create_new_current_workflow(WorkflowType.speaker_diarization)
        wf = self.current_workflow
        if wf.done:
            logger.info("Diarization already done, skipping initialization.")
            return
        log_dir = self.working_directory.joinpath("log")
        os.makedirs(log_dir, exist_ok=True)
        try:
            if self.ivector_extractor is None:  # Download models if needed
                _ = EncoderClassifier.from_hparams(
                    source="speechbrain/spkrec-ecapa-voxceleb",
                    savedir=os.path.join(
                        GLOBAL_CONFIG.current_profile.temporary_directory,
                        "models",
                        "EncoderClassifier",
                    ),
                )
                _ = SpeakerRecognition.from_hparams(
                    source="speechbrain/spkrec-ecapa-voxceleb",
                    savedir=os.path.join(
                        GLOBAL_CONFIG.current_profile.temporary_directory,
                        "models",
                        "SpeakerRecognition",
                    ),
                )
                # speechbrain path: embeddings come straight from the model
                self.initialize_database()
                self._load_corpus()
                self.initialize_jobs()
                self.load_embeddings()
                if self.cluster:
                    self.compute_speaker_embeddings()
            else:
                # Kaldi ivector path: extract ivectors with the bundled extractor
                if not self.has_ivectors():
                    if self.ivector_extractor.meta["version"] < "2.1":
                        logger.warning(
                            "The ivector extractor was trained in an earlier version of MFA. "
                            "There may be incompatibilities in feature generation that cause errors. "
                            "Please download the latest version of the model via `mfa model download`, "
                            "use a different ivector extractor, or use version 2.0.6 of MFA."
                        )
                    self.ivector_extractor.export_model(self.working_directory)
                    self.load_corpus()
                    self.extract_ivectors()
                    self.compute_speaker_ivectors()
            if self.evaluation_mode:
                # Snapshot current speaker labels as ground truth before re-labeling
                self.ground_truth_utt2spk = {}
                with self.session() as session:
                    query = session.query(Utterance.id, Utterance.speaker_id, Speaker.name).join(
                        Utterance.speaker
                    )
                    for u_id, s_id, name in query:
                        self.ground_truth_utt2spk[u_id] = s_id
                        self.ground_truth_speakers[s_id] = name
        except Exception as e:
            if isinstance(e, KaldiProcessingError):
                log_kaldi_errors(e.error_logs)
                e.update_log_file()
            raise
        self.initialized = True
def plda_classification_arguments(self) -> List[PldaClassificationArguments]:
"""
Generate Job arguments for :class:`~montreal_forced_aligner.diarization.multiprocessing.PldaClassificationFunction`
Returns
-------
list[:class:`~montreal_forced_aligner.diarization.multiprocessing.PldaClassificationArguments`]
Arguments for processing
"""
return [
PldaClassificationArguments(
j.id,
getattr(self, "db_string", ""),
self.working_log_directory.joinpath(f"plda_classification.{j.id}.log"),
self.plda,
self.speaker_ivector_path,
self.num_utts_path,
self.use_xvector,
)
for j in self.jobs
]
    def classify_speakers(self):
        """Classify speakers based on ivector or speechbrain model.

        Scores every utterance against known speakers (SpeechBrain xvectors when
        ``use_xvector`` is set, otherwise PLDA over ivectors), writes per-utterance
        results to ``speaker_classification_results.csv``, and updates speaker
        assignments in the database. Utterances scoring below ``score_threshold``
        are assigned to the catch-all ``MFA_UNKNOWN`` speaker.
        """
        self.setup()
        logger.info("Classifying utterances...")
        with self.session() as session, tqdm(
            total=self.num_utterances, disable=GLOBAL_CONFIG.quiet
        ) as pbar, mfa_open(
            self.working_directory.joinpath("speaker_classification_results.csv"), "w"
        ) as f:
            writer = csv.DictWriter(f, ["utt_id", "file", "begin", "end", "speaker", "score"])
            writer.writeheader()
            # Cache file names and time bounds up front so CSV rows need no per-row queries
            file_names = {
                k: v for k, v in session.query(Utterance.id, File.name).join(Utterance.file)
            }
            utterance_times = {
                k: (b, e)
                for k, b, e in session.query(Utterance.id, Utterance.begin, Utterance.end)
            }
            utterance_mapping = []
            next_speaker_id = self.get_next_primary_key(Speaker)
            speaker_mapping = {}
            existing_speakers = {
                name: s_id for s_id, name in session.query(Speaker.id, Speaker.name)
            }
            # Running mean of classification scores across all utterances
            self.classification_score = 0
            # Ensure the MFA_UNKNOWN speaker exists before classifying
            if session.query(Speaker).filter(Speaker.name == "MFA_UNKNOWN").first() is None:
                session.add(Speaker(id=next_speaker_id, name="MFA_UNKNOWN"))
                session.commit()
                next_speaker_id += 1
            unknown_speaker_id = (
                session.query(Speaker).filter(Speaker.name == "MFA_UNKNOWN").first().id
            )
            if self.use_xvector:
                arguments = [
                    SpeechbrainArguments(j.id, self.db_string, None, self.cuda, self.cluster)
                    for j in self.jobs
                ]
                func = SpeechbrainClassificationFunction
            else:
                plda_transform_path = self.working_directory.joinpath("plda.pkl")
                # NOTE(review): this rebinds ``f`` (the CSV handle from the outer with);
                # the DictWriter keeps its own reference so writing still works — confirm intended
                with open(plda_transform_path, "rb") as f:
                    self.plda: PldaModel = pickle.load(f)
                arguments = self.plda_classification_arguments()
                func = PldaClassificationFunction
            for utt_id, classified_speaker, score in run_kaldi_function(
                func, arguments, pbar.update
            ):
                classified_speaker = str(classified_speaker)
                self.classification_score += score / self.num_utterances
                if self.score_threshold is not None and score < self.score_threshold:
                    # Low-confidence result: route to the unknown speaker
                    speaker_id = unknown_speaker_id
                elif classified_speaker in existing_speakers:
                    speaker_id = existing_speakers[classified_speaker]
                else:
                    # First sighting of this label: stage a new Speaker row for bulk insert
                    if classified_speaker not in speaker_mapping:
                        speaker_mapping[classified_speaker] = {
                            "id": next_speaker_id,
                            "name": classified_speaker,
                        }
                        next_speaker_id += 1
                    speaker_id = speaker_mapping[classified_speaker]["id"]
                utterance_mapping.append({"id": utt_id, "speaker_id": speaker_id})
                line = {
                    "utt_id": utt_id,
                    "file": file_names[utt_id],
                    "begin": utterance_times[utt_id][0],
                    "end": utterance_times[utt_id][1],
                    "speaker": classified_speaker,
                    "score": score,
                }
                writer.writerow(line)
            if self.stopped.stop_check():
                logger.debug("Stopping clustering early.")
                return
            if speaker_mapping:
                session.bulk_insert_mappings(Speaker, list(speaker_mapping.values()))
                session.flush()
                session.commit()
            bulk_update(session, Utterance, utterance_mapping)
            session.commit()
        if not self.evaluation_mode:
            self.clean_up_unknown_speaker()
        self.fix_speaker_ordering()
        if not self.evaluation_mode:
            self.cleanup_empty_speakers()
            self.refresh_speaker_vectors()
        if self.evaluation_mode:
            self.evaluate_classification()
def map_speakers_to_ground_truth(self):
with self.session() as session:
utterances = session.query(Utterance.id, Utterance.speaker_id)
labels = []
utterance_ids = []
for utt_id, s_id in utterances:
utterance_ids.append(utt_id)
labels.append(s_id)
ground_truth = np.array([self.ground_truth_utt2spk[x] for x in utterance_ids])
cluster_labels = np.unique(labels)
ground_truth_labels = np.unique(ground_truth)
cm = np.zeros((cluster_labels.shape[0], ground_truth_labels.shape[0]), dtype="int16")
for y_pred, y in zip(labels, ground_truth):
if y_pred < 0:
continue
cm[np.where(cluster_labels == y_pred), np.where(ground_truth_labels == y)] += 1
cm_argmax = cm.argmax(axis=1)
label_to_ground_truth_mapping = {}
for i in range(cluster_labels.shape[0]):
label_to_ground_truth_mapping[int(cluster_labels[i])] = int(
ground_truth_labels[cm_argmax[i]]
)
return label_to_ground_truth_mapping
    def evaluate_clustering(self) -> None:
        """Compute clustering metric scores and output clustering evaluation results.

        Writes per-utterance predicted vs ground-truth speakers to
        ``diarization_evaluation_results.csv`` and logs standard external
        clustering metrics (adjusted Rand, (A)NMI, homogeneity, completeness,
        V-measure, Fowlkes-Mallows) from scikit-learn.
        """
        label_to_ground_truth_mapping = self.map_speakers_to_ground_truth()
        with self.session() as session, mfa_open(
            self.working_directory.joinpath("diarization_evaluation_results.csv"), "w"
        ) as f:
            writer = csv.DictWriter(
                f,
                fieldnames=[
                    "file",
                    "begin",
                    "end",
                    "text",
                    "predicted_speaker",
                    "ground_truth_speaker",
                ],
            )
            writer.writeheader()
            predicted_utt2spk = {}
            query = session.query(
                Utterance.id,
                File.name,
                Utterance.begin,
                Utterance.end,
                Utterance.text,
                Utterance.speaker_id,
            ).join(Utterance.file)
            for u_id, file_name, begin, end, text, s_id in query:
                # Translate the predicted cluster id to its best-overlap ground-truth speaker
                s_id = label_to_ground_truth_mapping[s_id]
                predicted_utt2spk[u_id] = s_id
                writer.writerow(
                    {
                        "file": file_name,
                        "begin": begin,
                        "end": end,
                        "text": text,
                        "predicted_speaker": self.ground_truth_speakers[s_id],
                        "ground_truth_speaker": self.ground_truth_speakers[
                            self.ground_truth_utt2spk[u_id]
                        ],
                    }
                )
            ground_truth_labels = np.array([v for v in self.ground_truth_utt2spk.values()])
            predicted_labels = np.array(
                [predicted_utt2spk[k] for k in self.ground_truth_utt2spk.keys()]
            )
            rand_score = metrics.adjusted_rand_score(ground_truth_labels, predicted_labels)
            ami_score = metrics.adjusted_mutual_info_score(ground_truth_labels, predicted_labels)
            nmi_score = metrics.normalized_mutual_info_score(ground_truth_labels, predicted_labels)
            homogeneity_score = metrics.homogeneity_score(ground_truth_labels, predicted_labels)
            completeness_score = metrics.completeness_score(ground_truth_labels, predicted_labels)
            v_measure_score = metrics.v_measure_score(ground_truth_labels, predicted_labels)
            fm_score = metrics.fowlkes_mallows_score(ground_truth_labels, predicted_labels)
            logger.info(f"Adjusted Rand index score (0-1, higher is better): {rand_score:.4f}")
            logger.info(f"Normalized Mutual Information score (perfect=1.0): {nmi_score:.4f}")
            logger.info(f"Adjusted Mutual Information score (perfect=1.0): {ami_score:.4f}")
            logger.info(f"Homogeneity score (0-1, higher is better): {homogeneity_score:.4f}")
            logger.info(f"Completeness score (0-1, higher is better): {completeness_score:.4f}")
            logger.info(f"V measure score (0-1, higher is better): {v_measure_score:.4f}")
            logger.info(f"Fowlkes-Mallows score (0-1, higher is better): {fm_score:.4f}")
    def evaluate_classification(self) -> None:
        """Evaluate and output classification accuracy.

        Mirrors :meth:`evaluate_clustering` for the CSV export, but scores the
        result as a classification task (weighted precision/recall/F1).
        Utterances with no prediction are counted as label ``-1``.
        """
        label_to_ground_truth_mapping = self.map_speakers_to_ground_truth()
        with self.session() as session, mfa_open(
            self.working_directory.joinpath("diarization_evaluation_results.csv"), "w"
        ) as f:
            writer = csv.DictWriter(
                f,
                fieldnames=[
                    "file",
                    "begin",
                    "end",
                    "text",
                    "predicted_speaker",
                    "ground_truth_speaker",
                ],
            )
            writer.writeheader()
            predicted_utt2spk = {}
            query = session.query(
                Utterance.id,
                File.name,
                Utterance.begin,
                Utterance.end,
                Utterance.text,
                Utterance.speaker_id,
            ).join(Utterance.file)
            for u_id, file_name, begin, end, text, s_id in query:
                # Translate the predicted cluster id to its best-overlap ground-truth speaker
                s_id = label_to_ground_truth_mapping[s_id]
                predicted_utt2spk[u_id] = s_id
                writer.writerow(
                    {
                        "file": file_name,
                        "begin": begin,
                        "end": end,
                        "text": text,
                        "predicted_speaker": self.ground_truth_speakers[s_id],
                        "ground_truth_speaker": self.ground_truth_speakers[
                            self.ground_truth_utt2spk[u_id]
                        ],
                    }
                )
            ground_truth_labels = np.array([v for v in self.ground_truth_utt2spk.values()])
            # -1 marks utterances that never received a prediction
            predicted_labels = np.array(
                [
                    predicted_utt2spk[k] if k in predicted_utt2spk else -1
                    for k in self.ground_truth_utt2spk.keys()
                ]
            )
            precision_score = metrics.precision_score(
                ground_truth_labels, predicted_labels, average="weighted"
            )
            recall_score = metrics.recall_score(
                ground_truth_labels, predicted_labels, average="weighted"
            )
            f1_score = metrics.f1_score(ground_truth_labels, predicted_labels, average="weighted")
            logger.info(f"Precision (0-1): {precision_score:.4f}")
            logger.info(f"Recall (0-1): {recall_score:.4f}")
            logger.info(f"F1 (0-1): {f1_score:.4f}")
    @property
    def num_utts_path(self) -> "Path":
        """Path to archive containing the number of utterances per training speaker.

        NOTE(review): annotation corrected from ``str`` — ``joinpath`` returns a
        :class:`pathlib.Path`.
        """
        return self.working_directory.joinpath("num_utts.ark")
    @property
    def speaker_ivector_path(self) -> "Path":
        """Path to archive containing training speaker ivectors.

        NOTE(review): annotation corrected from ``str`` — ``joinpath`` returns a
        :class:`pathlib.Path`.
        """
        return self.working_directory.joinpath("speaker_ivectors.ark")
    def visualize_clusters(self, ivectors, cluster_labels=None):
        """Plot a 2-D manifold projection of the given vectors, optionally colored by cluster.

        Saves the figure to ``cluster_plot.png`` in the working directory; in
        verbose mode the plot is also shown for 10 seconds.
        """
        import seaborn as sns
        from matplotlib import pyplot as plt
        sns.set()
        metric = self.metric
        # PLDA distance has no direct manifold support; fall back to cosine for layout
        if metric is DistanceMetric.plda:
            metric = DistanceMetric.cosine
        points = visualize_clusters(ivectors, self.manifold_algorithm, metric, 10, self.plda)
        fig = plt.figure(1)
        ax = fig.add_subplot(111)
        if cluster_labels is not None:
            unique_labels = np.unique(cluster_labels)
            num_unique_labels = unique_labels.shape[0]
            # NOTE(review): noise detection checks for label 0 here while the branch
            # below treats -1 as noise — confirm which convention the labels use
            has_noise = 0 in set(unique_labels)
            if has_noise:
                num_unique_labels -= 1
            cm = sns.color_palette("tab20", num_unique_labels)
            for cluster in unique_labels:
                if cluster == -1:
                    # Noise points: black, semi-transparent
                    color = "k"
                    name = "Noise"
                    alpha = 0.75
                else:
                    name = cluster
                    if not isinstance(name, str):
                        name = f"Cluster {name}"
                        cluster_id = cluster
                    else:
                        # String labels (e.g. speaker names): index by position
                        cluster_id = np.where(unique_labels == cluster)[0][0]
                    if has_noise:
                        color = cm[cluster_id - 1]
                    else:
                        color = cm[cluster_id]
                    alpha = 1.0
                idx = np.where(cluster_labels == cluster)
                ax.scatter(points[idx, 0], points[idx, 1], color=color, label=name, alpha=alpha)
        else:
            ax.scatter(points[:, 0], points[:, 1])
        handles, labels = ax.get_legend_handles_labels()
        fig.subplots_adjust(bottom=0.3, wspace=0.33)
        plt.axis("off")
        # Legend placed below the axes so it never covers points
        lgd = ax.legend(
            handles,
            labels,
            loc="upper center",
            bbox_to_anchor=(0.5, -0.1),
            fancybox=True,
            shadow=True,
            ncol=5,
        )
        plot_path = self.working_directory.joinpath("cluster_plot.png")
        plt.savefig(plot_path, bbox_extra_artists=(lgd,), bbox_inches="tight", transparent=True)
        if GLOBAL_CONFIG.current_profile.verbose:
            plt.show(block=False)
            plt.pause(10)
            logger.debug(f"Closing cluster plot, it has been saved to {plot_path}.")
        plt.close()
    def export_xvectors(self):
        """Export per-utterance embeddings to Kaldi archives and record the ark paths.

        Runs :class:`ExportIvectorsFunction` across jobs (with ``use_xvector``
        selecting which vector column is exported), stores each utterance's
        archive path in the database, then writes the combined ivector files.
        """
        logger.info("Exporting SpeechBrain embeddings...")
        os.makedirs(self.split_directory, exist_ok=True)
        with tqdm(total=self.num_utterances, disable=GLOBAL_CONFIG.quiet) as pbar:
            arguments = [
                ExportIvectorsArguments(
                    j.id,
                    self.db_string,
                    j.construct_path(self.working_log_directory, "export_ivectors", "log"),
                    self.use_xvector,
                )
                for j in self.jobs
            ]
            utterance_mapping = []
            for utt_id, ark_path in run_kaldi_function(
                ExportIvectorsFunction, arguments, pbar.update
            ):
                utterance_mapping.append({"id": utt_id, "ivector_ark": ark_path})
            with self.session() as session:
                bulk_update(session, Utterance, utterance_mapping)
                session.commit()
        self._write_ivectors()
def fix_speaker_ordering(self):
with self.session() as session:
query = (
session.query(Speaker.id, File.id)
.join(Utterance.speaker)
.join(Utterance.file)
.distinct()
)
speaker_ordering_mapping = []
for s_id, f_id in query:
speaker_ordering_mapping.append({"speaker_id": s_id, "file_id": f_id, "index": 10})
session.execute(sqlalchemy.delete(SpeakerOrdering))
session.flush()
session.execute(
sqlalchemy.dialects.postgresql.insert(SpeakerOrdering)
.values(speaker_ordering_mapping)
.on_conflict_do_nothing()
)
session.commit()
    def initialize_mfa_clustering(self):
        """Generate an initial speaker labeling for the iterative MFA clustering loop.

        Classifies every utterance (SpeechBrain xvectors or PLDA), sending
        low-scoring utterances to ``MFA_UNKNOWN``, then breaks up oversized
        clusters and removes empty speakers. Utterance indexes are dropped and
        recreated around the bulk update for speed.
        """
        self._unknown_speaker_break_up_count = 0
        with self.session() as session:
            next_speaker_id = self.get_next_primary_key(Speaker)
            speaker_mapping = {}
            existing_speakers = {
                name: s_id for s_id, name in session.query(Speaker.id, Speaker.name)
            }
            utterance_mapping = []
            self.classification_score = 0
            unk_count = 0
            if self.use_xvector:
                arguments = [
                    SpeechbrainArguments(j.id, self.db_string, None, self.cuda, self.cluster)
                    for j in self.jobs
                ]
                func = SpeechbrainClassificationFunction
                score_threshold = self.initial_sb_score_threshold
                self.export_xvectors()
            else:
                plda_transform_path = self.working_directory.joinpath("plda.pkl")
                with open(plda_transform_path, "rb") as f:
                    self.plda: PldaModel = pickle.load(f)
                arguments = self.plda_classification_arguments()
                func = PldaClassificationFunction
                score_threshold = self.initial_plda_score_threshold
            logger.info("Generating initial speaker labels...")
            # Current assignments, so unchanged utterances can be skipped below
            utt2spk = {k: v for k, v in session.query(Utterance.id, Utterance.speaker_id)}
            with tqdm(total=self.num_utterances, disable=GLOBAL_CONFIG.quiet) as pbar:
                for utt_id, classified_speaker, score in run_kaldi_function(
                    func, arguments, pbar.update
                ):
                    classified_speaker = str(classified_speaker)
                    self.classification_score += score / self.num_utterances
                    if score < score_threshold:
                        # Below the initial confidence threshold: park in MFA_UNKNOWN
                        unk_count += 1
                        utterance_mapping.append(
                            {"id": utt_id, "speaker_id": existing_speakers["MFA_UNKNOWN"]}
                        )
                        continue
                    if classified_speaker in existing_speakers:
                        speaker_id = existing_speakers[classified_speaker]
                    else:
                        if classified_speaker not in speaker_mapping:
                            speaker_mapping[classified_speaker] = {
                                "id": next_speaker_id,
                                "name": classified_speaker,
                            }
                            next_speaker_id += 1
                        speaker_id = speaker_mapping[classified_speaker]["id"]
                    if speaker_id == utt2spk[utt_id]:
                        # No change; avoid a needless row update
                        continue
                    utterance_mapping.append({"id": utt_id, "speaker_id": speaker_id})
            if speaker_mapping:
                session.bulk_insert_mappings(Speaker, list(speaker_mapping.values()))
                session.flush()
                session.commit()
            # Drop indexes before the bulk update, recreate after, for performance
            session.execute(sqlalchemy.text("DROP INDEX IF EXISTS ix_utterance_speaker_id"))
            session.execute(sqlalchemy.text("DROP INDEX IF EXISTS utterance_position_index"))
            session.commit()
            bulk_update(session, Utterance, utterance_mapping)
            session.execute(
                sqlalchemy.text(
                    "CREATE INDEX IF NOT EXISTS ix_utterance_speaker_id on utterance(speaker_id)"
                )
            )
            session.execute(
                sqlalchemy.text(
                    'CREATE INDEX IF NOT EXISTS utterance_position_index on utterance(file_id, speaker_id, begin, "end", channel)'
                )
            )
            session.commit()
        self.breakup_large_clusters()
        self.cleanup_empty_speakers()
    def export_speaker_ivectors(self):
        """Export per-speaker mean vectors to a Kaldi archive.

        Pipes each speaker's vector (xvector or ivector, per ``use_xvector``)
        through ``copy-vector`` into ``speaker_ivectors.ark`` and writes the
        per-speaker utterance counts to ``num_utts.ark``. The ``MFA_UNKNOWN``
        speaker is excluded.
        """
        logger.info("Exporting current speaker ivectors...")
        with self.session() as session, tqdm(
            total=self.num_speakers, disable=GLOBAL_CONFIG.quiet
        ) as pbar, mfa_open(self.num_utts_path, "w") as f:
            if self.use_xvector:
                ivector_column = Speaker.xvector
            else:
                ivector_column = Speaker.ivector
            speakers = (
                session.query(Speaker.id, ivector_column, sqlalchemy.func.count(Utterance.id))
                .join(Speaker.utterances)
                .filter(Speaker.name != "MFA_UNKNOWN")
                .group_by(Speaker.id)
                .order_by(Speaker.id)
            )
            # copy-vector reads text-format vectors on stdin and writes a binary archive
            input_proc = subprocess.Popen(
                [
                    thirdparty_binary("copy-vector"),
                    "--binary=true",
                    "ark,t:-",
                    f"ark:{self.speaker_ivector_path}",
                ],
                stdin=subprocess.PIPE,
                stderr=subprocess.DEVNULL,
                env=os.environ,
            )
            for s_id, ivector, utterance_count in speakers:
                if ivector is None:
                    # Speaker has no vector yet; skip
                    continue
                ivector = " ".join([format(x, ".12g") for x in ivector])
                in_line = f"{s_id} [ {ivector} ]\n".encode("utf8")
                input_proc.stdin.write(in_line)
                input_proc.stdin.flush()
                pbar.update(1)
                f.write(f"{s_id} {utterance_count}\n")
            input_proc.stdin.close()
            input_proc.wait()
    def classify_iteration(self, iteration=None) -> None:
        """Run one PLDA classification pass of the iterative MFA clustering loop.

        Parameters
        ----------
        iteration: int, optional
            Current iteration index; when given, the score threshold and minimum
            cluster size are interpolated linearly across ``max_iterations``.
        """
        logger.info("Classifying utterances...")
        low_count = None
        if iteration is not None and self.min_cluster_size:
            # Ramp the minimum cluster size from 0 up to min_cluster_size
            low_count = np.linspace(0, self.min_cluster_size, self.max_iterations)[iteration]
            logger.debug(f"Minimum size: {low_count}")
        score_threshold = self.plda_score_threshold
        if iteration is not None:
            # Ramp the acceptance threshold from the initial to the final value
            score_threshold = np.linspace(
                self.initial_plda_score_threshold,
                self.plda_score_threshold,
                self.max_iterations,
            )[iteration]
        logger.debug(f"Score threshold: {score_threshold}")
        with self.session() as session, tqdm(
            total=self.num_utterances, disable=GLOBAL_CONFIG.quiet
        ) as pbar:
            unknown_speaker_id = (
                session.query(Speaker.id).filter(Speaker.name == "MFA_UNKNOWN").first()[0]
            )
            utterance_mapping = []
            self.classification_score = 0
            plda_transform_path = self.working_directory.joinpath("plda.pkl")
            with open(plda_transform_path, "rb") as f:
                self.plda: PldaModel = pickle.load(f)
            arguments = self.plda_classification_arguments()
            func = PldaClassificationFunction
            # Current assignments so unchanged utterances can be skipped
            utt2spk = {k: v for k, v in session.query(Utterance.id, Utterance.speaker_id)}
            for utt_id, classified_speaker, score in run_kaldi_function(
                func, arguments, pbar.update
            ):
                self.classification_score += score / self.num_utterances
                if score < score_threshold:
                    speaker_id = unknown_speaker_id
                else:
                    # PLDA classification returns the speaker's database id directly
                    speaker_id = classified_speaker
                if speaker_id == utt2spk[utt_id]:
                    continue
                utterance_mapping.append({"id": utt_id, "speaker_id": speaker_id})
            logger.debug(f"Updating {len(utterance_mapping)} utterances with new speakers")
            session.commit()
            # Drop indexes before the bulk update, recreate after, for performance
            session.execute(sqlalchemy.text("DROP INDEX IF EXISTS ix_utterance_speaker_id"))
            session.execute(sqlalchemy.text("DROP INDEX IF EXISTS utterance_position_index"))
            session.commit()
            bulk_update(session, Utterance, utterance_mapping)
            session.execute(
                sqlalchemy.text(
                    "CREATE INDEX IF NOT EXISTS ix_utterance_speaker_id on utterance(speaker_id)"
                )
            )
            session.execute(
                sqlalchemy.text(
                    'CREATE INDEX IF NOT EXISTS utterance_position_index on utterance(file_id, speaker_id, begin, "end", channel)'
                )
            )
            session.commit()
        if iteration is not None and iteration < self.max_iterations - 2:
            # Skip break-up on the last iterations so the labeling can settle
            self.breakup_large_clusters()
        self.cleanup_empty_speakers(low_count)
    def breakup_large_clusters(self):
        """Split oversized speaker clusters (and MFA_UNKNOWN) via OPTICS sub-clustering.

        Speakers with more than 500 utterances (unless previously marked as a
        single cluster) are re-clustered on their PLDA vectors; sub-clusters
        become new speakers and noise points go to ``MFA_UNKNOWN``.
        """
        with self.session() as session:
            unknown_speaker_id = (
                session.query(Speaker.id).filter(Speaker.name == "MFA_UNKNOWN").first()[0]
            )
            sq = (
                session.query(Speaker.id, sqlalchemy.func.count().label("utterance_count"))
                .join(Speaker.utterances)
                .filter(Speaker.id != unknown_speaker_id)
                .group_by(Speaker.id)
            )
            # MFA_UNKNOWN is always re-clustered, regardless of its size
            above_threshold_speakers = [unknown_speaker_id]
            threshold = 500
            for s_id, utterance_count in sq:
                if threshold and utterance_count > threshold and s_id not in self.single_clusters:
                    above_threshold_speakers.append(s_id)
            logger.info("Breaking up large speakers...")
            logger.debug(f"Unknown speaker is {unknown_speaker_id}")
            next_speaker_id = self.get_next_primary_key(Speaker)
            with tqdm(total=len(above_threshold_speakers), disable=GLOBAL_CONFIG.quiet) as pbar:
                utterance_mapping = []
                new_speakers = {}
                for s_id in above_threshold_speakers:
                    logger.debug(f"Breaking up {s_id}")
                    query = session.query(Utterance.id, Utterance.plda_vector).filter(
                        Utterance.plda_vector != None, Utterance.speaker_id == s_id  # noqa
                    )
                    pbar.update(1)
                    ivectors = np.empty((query.count(), PLDA_DIMENSION))
                    logger.debug(f"Had {ivectors.shape[0]} utterances.")
                    if ivectors.shape[0] == 0:
                        continue
                    utterance_ids = []
                    for i, (u_id, ivector) in enumerate(query):
                        if self.stopped.stop_check():
                            break
                        utterance_ids.append(u_id)
                        ivectors[i, :] = ivector
                    if ivectors.shape[0] < self.min_cluster_size:
                        continue
                    labels = cluster_matrix(
                        ivectors,
                        ClusterType.optics,
                        metric=DistanceMetric.cosine,
                        strict=False,
                        no_visuals=True,
                        working_directory=self.working_directory,
                        distance_threshold=0.25,
                    )
                    unique, counts = np.unique(labels, return_counts=True)
                    num_clusters = unique.shape[0]
                    counts = dict(zip(unique, counts))
                    logger.debug(f"{num_clusters} clusters found: {counts}")
                    if num_clusters == 1:
                        # Everything collapsed into one label (all noise or one group):
                        # fold the speaker into MFA_UNKNOWN
                        if s_id != unknown_speaker_id:
                            logger.debug(f"Deleting {s_id} due to no clusters found")
                            session.execute(
                                sqlalchemy.update(Utterance)
                                .filter(Utterance.speaker_id == s_id)
                                .values({Utterance.speaker_id: unknown_speaker_id})
                            )
                            session.flush()
                        continue
                    if num_clusters == 2:
                        # NOTE(review): two unique labels is treated as "one real cluster"
                        # — presumably noise (-1) plus a single cluster; confirm
                        if s_id != unknown_speaker_id:
                            logger.debug(
                                f"Only found one cluster for {s_id} will skip in the future"
                            )
                            self.single_clusters.add(s_id)
                        continue
                    for i, utt_id in enumerate(utterance_ids):
                        label = labels[i]
                        if label == -1:
                            # Noise points go to the unknown speaker
                            speaker_id = unknown_speaker_id
                        else:
                            if s_id in self.single_clusters:
                                continue
                            if label not in new_speakers:
                                if s_id == unknown_speaker_id:
                                    # Keep unknown-derived labels globally unique
                                    label = self._unknown_speaker_break_up_count
                                    self._unknown_speaker_break_up_count += 1
                                new_speakers[label] = {
                                    "id": next_speaker_id,
                                    "name": f"{s_id}_{label}",
                                }
                                next_speaker_id += 1
                            speaker_id = new_speakers[label]["id"]
                        utterance_mapping.append({"id": utt_id, "speaker_id": speaker_id})
                if new_speakers:
                    session.bulk_insert_mappings(Speaker, list(new_speakers.values()))
                    session.commit()
                if utterance_mapping:
                    bulk_update(session, Utterance, utterance_mapping)
                    session.commit()
                logger.debug(f"Broke speakers into {len(new_speakers)} new speakers.")
def cleanup_empty_speakers(self, threshold=None):
with self.session() as session:
session.execute(sqlalchemy.delete(SpeakerOrdering))
session.flush()
unknown_speaker_id = (
session.query(Speaker.id).filter(Speaker.name == "MFA_UNKNOWN").first()[0]
)
non_empty_speakers = [unknown_speaker_id]
sq = (
session.query(Speaker.id, sqlalchemy.func.count().label("utterance_count"))
.join(Speaker.utterances)
.filter(Speaker.id != unknown_speaker_id)
.group_by(Speaker.id)
)
below_threshold_speakers = []
for s_id, utterance_count in sq:
if threshold and utterance_count < threshold:
below_threshold_speakers.append(s_id)
continue
non_empty_speakers.append(s_id)
session.execute(
sqlalchemy.update(Utterance)
.where(Utterance.speaker_id.in_(below_threshold_speakers))
.values(speaker_id=unknown_speaker_id)
)
session.execute(sqlalchemy.delete(Speaker).where(~Speaker.id.in_(non_empty_speakers)))
session.commit()
self._num_speakers = session.query(Speaker).count()
conn = self.db_engine.connect()
try:
conn.execution_options(isolation_level="AUTOCOMMIT")
conn.execute(
sqlalchemy.text(f"ANALYZE {Speaker.__tablename__}, {Utterance.__tablename__}")
)
finally:
conn.close()
    def cluster_utterances_mfa(self) -> None:
        """
        Cluster utterances with a ivector or speechbrain model.

        Runs the iterative MFA clustering loop: an initial classification pass,
        then up to ``max_iterations`` rounds of PLDA refresh + re-classification,
        logging score/speaker-count progress each round.
        """
        self.cluster = False
        self.setup()
        with self.session() as session:
            # Make sure the catch-all speaker exists before classification
            if session.query(Speaker).filter(Speaker.name == "MFA_UNKNOWN").first() is None:
                session.add(Speaker(id=self.get_next_primary_key(Speaker), name="MFA_UNKNOWN"))
                session.commit()
        self.initialize_mfa_clustering()
        with self.session() as session:
            uncategorized_count = (
                session.query(Utterance)
                .join(Utterance.speaker)
                .filter(Speaker.name == "MFA_UNKNOWN")
                .count()
            )
        if self.use_xvector:
            logger.info(f"Initial average cosine score {self.classification_score:.4f}")
        else:
            logger.info(f"Initial average PLDA score {self.classification_score:.4f}")
        logger.info(f"Number of speakers: {self.num_speakers}")
        logger.info(f"Unclassified utterances: {uncategorized_count}")
        self._unknown_speaker_break_up_count = 0
        for i in range(self.max_iterations):
            logger.info(f"Iteration {i}:")
            current_score = self.classification_score
            # Refresh PLDA statistics and per-speaker vectors before re-classifying
            self._write_ivectors()
            self.compute_plda()
            self.refresh_plda_vectors()
            self.refresh_speaker_vectors()
            self.export_speaker_ivectors()
            self.classify_iteration(i)
            improvement = self.classification_score - current_score
            with self.session() as session:
                uncategorized_count = (
                    session.query(Utterance)
                    .join(Utterance.speaker)
                    .filter(Speaker.name == "MFA_UNKNOWN")
                    .count()
                )
            logger.info(f"Average PLDA score {self.classification_score:.4f}")
            logger.info(f"Improvement: {improvement:.4f}")
            logger.info(f"Number of speakers: {self.num_speakers}")
            logger.info(f"Unclassified utterances: {uncategorized_count}")
        logger.debug(f"Found {self.num_speakers} clusters")
        if GLOBAL_CONFIG.current_profile.debug and self.num_utterances < 100000:
            self.visualize_current_clusters()
def visualize_current_clusters(self):
with self.session() as session:
query = (
session.query(Speaker.name, Utterance.plda_vector)
.join(Utterance.speaker)
.filter(Utterance.plda_vector is not None)
)
dim = PLDA_DIMENSION
num_utterances = query.count()
if num_utterances == 0:
if self.use_xvector:
column = Utterance.xvector
dim = XVECTOR_DIMENSION
else:
column = Utterance.ivector
dim = IVECTOR_DIMENSION
query = (
session.query(Speaker.name, column)
.join(Utterance.speaker)
.filter(column is not None)
)
num_utterances = query.count()
if num_utterances == 0:
logger.warning("No ivectors/xvectors to visualize")
return
ivectors = np.empty((query.count(), dim))
labels = []
for s_name, ivector in query:
ivectors[len(labels), :] = ivector
labels.append(s_name)
self.visualize_clusters(ivectors, labels)
    def cluster_utterances(self) -> None:
        """
        Cluster utterances with a ivector or speechbrain model.

        Dispatches to the iterative MFA algorithm when ``cluster_type`` is
        ``mfa``; otherwise collects vectors (PLDA/xvector/ivector), runs the
        selected clustering algorithm via :func:`cluster_matrix`, and writes
        the resulting speaker assignments back to the database. Thread-count
        environment variables are temporarily raised for the clustering step.
        """
        if self.cluster_type is ClusterType.mfa:
            self.cluster_utterances_mfa()
            self.fix_speaker_ordering()
            if not self.evaluation_mode:
                self.cleanup_empty_speakers()
            self.refresh_speaker_vectors()
            if self.evaluation_mode:
                self.evaluate_clustering()
            return
        self.setup()
        # Allow BLAS/OpenMP to use all job threads while clustering
        os.environ["OMP_NUM_THREADS"] = f"{GLOBAL_CONFIG.current_profile.num_jobs}"
        os.environ["OPENBLAS_NUM_THREADS"] = f"{GLOBAL_CONFIG.current_profile.num_jobs}"
        os.environ["MKL_NUM_THREADS"] = f"{GLOBAL_CONFIG.current_profile.num_jobs}"
        if self.metric is DistanceMetric.plda:
            plda_transform_path = self.working_directory.joinpath("plda.pkl")
            with open(plda_transform_path, "rb") as f:
                self.plda: PldaModel = pickle.load(f)
        if self.evaluation_mode and GLOBAL_CONFIG.current_profile.debug:
            self.calculate_eer()
        logger.info("Clustering utterances (this may take a while, please be patient)...")
        with self.session() as session:
            # Select the vector column matching the configuration
            if self.use_pca:
                query = session.query(Utterance.id, Utterance.plda_vector).filter(
                    Utterance.plda_vector != None  # noqa
                )
                ivectors = np.empty((query.count(), PLDA_DIMENSION))
            elif self.use_xvector:
                query = session.query(Utterance.id, Utterance.xvector).filter(
                    Utterance.xvector != None  # noqa
                )
                ivectors = np.empty((query.count(), XVECTOR_DIMENSION))
            else:
                query = session.query(Utterance.id, Utterance.ivector).filter(
                    Utterance.ivector != None  # noqa
                )
                ivectors = np.empty((query.count(), IVECTOR_DIMENSION))
            utterance_ids = []
            for i, (u_id, ivector) in enumerate(query):
                if self.stopped.stop_check():
                    break
                utterance_ids.append(u_id)
                ivectors[i, :] = ivector
            num_utterances = ivectors.shape[0]
            kwargs = {}
            if self.stopped.stop_check():
                logger.debug("Stopping clustering early.")
                return
            # Algorithm-specific keyword arguments for cluster_matrix
            kwargs["min_cluster_size"] = self.min_cluster_size
            kwargs["distance_threshold"] = self.distance_threshold
            if self.cluster_type is ClusterType.agglomerative:
                kwargs["memory"] = MEMORY
                kwargs["linkage"] = self.linkage
                kwargs["n_clusters"] = self.expected_num_speakers
                if not self.expected_num_speakers:
                    kwargs["n_clusters"] = None
            elif self.cluster_type is ClusterType.spectral:
                kwargs["n_clusters"] = self.expected_num_speakers
            elif self.cluster_type is ClusterType.hdbscan:
                kwargs["memory"] = MEMORY
            elif self.cluster_type is ClusterType.optics:
                kwargs["memory"] = MEMORY
            elif self.cluster_type is ClusterType.kmeans:
                kwargs["n_clusters"] = self.expected_num_speakers
            labels = cluster_matrix(
                ivectors,
                self.cluster_type,
                metric=self.metric,
                plda=self.plda,
                working_directory=self.working_directory,
                **kwargs,
            )
            if self.stopped.stop_check():
                logger.debug("Stopping clustering early.")
                return
            if GLOBAL_CONFIG.current_profile.debug:
                self.visualize_clusters(ivectors, labels)
            # Group utterances by their assigned cluster label
            utterance_clusters = collections.defaultdict(list)
            for i in range(num_utterances):
                u_id = utterance_ids[i]
                cluster_id = int(labels[i])
                utterance_clusters[cluster_id].append(u_id)
            utterance_mapping = []
            next_speaker_id = self.get_next_primary_key(Speaker)
            speaker_mapping = []
            unknown_speaker_id = None
            # NOTE(review): this loop variable shadows the utterance_ids list built above,
            # which is no longer needed at this point
            for cluster_id, utterance_ids in sorted(utterance_clusters.items()):
                if cluster_id < 0:
                    # Negative labels are noise; collect them under MFA_UNKNOWN
                    if unknown_speaker_id is None:
                        speaker_name = "MFA_UNKNOWN"
                        speaker_mapping.append({"id": next_speaker_id, "name": speaker_name})
                        speaker_id = next_speaker_id
                        unknown_speaker_id = speaker_id
                        next_speaker_id += 1
                    else:
                        speaker_id = unknown_speaker_id
                else:
                    speaker_name = f"Cluster {cluster_id}"
                    speaker_mapping.append({"id": next_speaker_id, "name": speaker_name})
                    speaker_id = next_speaker_id
                    next_speaker_id += 1
                for u_id in utterance_ids:
                    utterance_mapping.append({"id": u_id, "speaker_id": speaker_id})
            if self.stopped.stop_check():
                logger.debug("Stopping clustering early.")
                return
            if speaker_mapping:
                session.bulk_insert_mappings(Speaker, speaker_mapping)
                session.flush()
                session.commit()
            bulk_update(session, Utterance, utterance_mapping)
            session.flush()
            session.commit()
        if not self.evaluation_mode:
            self.clean_up_unknown_speaker()
        self.fix_speaker_ordering()
        if not self.evaluation_mode:
            self.cleanup_empty_speakers()
        self.refresh_speaker_vectors()
        if self.evaluation_mode:
            self.evaluate_clustering()
        # Restore the configured BLAS thread counts
        os.environ["OMP_NUM_THREADS"] = f"{GLOBAL_CONFIG.current_profile.blas_num_threads}"
        os.environ["OPENBLAS_NUM_THREADS"] = f"{GLOBAL_CONFIG.current_profile.blas_num_threads}"
        os.environ["MKL_NUM_THREADS"] = f"{GLOBAL_CONFIG.current_profile.blas_num_threads}"
    def clean_up_unknown_speaker(self):
        """Reassign utterances left on ``MFA_UNKNOWN`` to per-file fallback speakers.

        Every file containing unknown-speaker utterances gets a new speaker
        named after the file, and the unknown speaker's ordering entries are
        removed.
        """
        with self.session() as session:
            unknown_speaker = session.query(Speaker).filter(Speaker.name == "MFA_UNKNOWN").first()
            next_speaker_id = self.get_next_primary_key(Speaker)
            if unknown_speaker is not None:
                speaker_mapping = {}
                utterance_mapping = []
                # One new speaker per file that has unknown-speaker utterances
                query = (
                    session.query(File.id, File.name)
                    .join(File.utterances)
                    .filter(Utterance.speaker_id == unknown_speaker.id)
                    .distinct()
                )
                for file_id, file_name in query:
                    speaker_mapping[file_id] = {"id": next_speaker_id, "name": file_name}
                    next_speaker_id += 1
                # NOTE(review): a query over Utterance joined via File.utterances —
                # confirm the join target is intended (elsewhere Utterance.file is used)
                query = (
                    session.query(Utterance.id, Utterance.file_id)
                    .join(File.utterances)
                    .filter(Utterance.speaker_id == unknown_speaker.id)
                )
                for utterance_id, file_id in query:
                    utterance_mapping.append(
                        {"id": utterance_id, "speaker_id": speaker_mapping[file_id]["id"]}
                    )
                session.bulk_insert_mappings(Speaker, list(speaker_mapping.values()))
                session.flush()
                session.execute(
                    sqlalchemy.delete(SpeakerOrdering).where(
                        SpeakerOrdering.c.speaker_id == unknown_speaker.id
                    )
                )
                session.commit()
                bulk_update(session, Utterance, utterance_mapping)
                session.commit()
def calculate_eer(self) -> typing.Tuple[float, float]:
"""
Calculate Equal Error Rate (EER) and threshold for the diarization metric using the ground truth data.
Returns
-------
float
EER
float
Threshold of EER
"""
if not FOUND_SPEECHBRAIN:
logger.info("No speechbrain found, skipping EER calculation.")
return 0.0, 0.0
logger.info("Calculating EER using ground truth speakers...")
limit_per_speaker = 5
limit_within_speaker = 30
begin = time.time()
with tqdm(total=self.num_speakers, disable=GLOBAL_CONFIG.quiet) as pbar:
arguments = [
ComputeEerArguments(
j.id,
self.db_string,
None,
self.plda,
self.metric,
self.use_xvector,
limit_within_speaker,
limit_per_speaker,
)
for j in self.jobs
]
match_scores = []
mismatch_scores = []
for matches, mismatches in run_kaldi_function(
ComputeEerFunction, arguments, pbar.update
):
match_scores.extend(matches)
mismatch_scores.extend(mismatches)
random.shuffle(mismatches)
mismatch_scores = mismatch_scores[: len(match_scores)]
match_scores = np.array(match_scores)
mismatch_scores = np.array(mismatch_scores)
device = torch.device("cuda" if self.cuda else "cpu")
eer, thresh = EER(
torch.tensor(mismatch_scores, device=device),
torch.tensor(match_scores, device=device),
)
logger.debug(
f"Matching scores: {np.min(match_scores):.3f}-{np.max(match_scores):.3f} (mean = {match_scores.mean():.3f}, n = {match_scores.shape[0]})"
)
logger.debug(
f"Mismatching scores: {np.min(mismatch_scores):.3f}-{np.max(mismatch_scores):.3f} (mean = {mismatch_scores.mean():.3f}, n = {mismatch_scores.shape[0]})"
)
logger.info(f"EER: {eer*100:.2f}%")
logger.info(f"Threshold: {thresh:.4f}")
logger.debug(f"Calculating EER took {time.time() - begin:.3f} seconds")
return eer, thresh
    def load_embeddings(self) -> None:
        """Load embeddings from a speechbrain model"""
        if self.has_xvectors():
            logger.info("Embeddings already loaded.")
            return
        logger.info("Loading SpeechBrain embeddings...")
        with tqdm(
            total=self.num_utterances, disable=GLOBAL_CONFIG.quiet
        ) as pbar, self.session() as session:
            begin = time.time()
            update_mapping = {}
            arguments = [
                SpeechbrainArguments(j.id, self.db_string, None, self.cuda, self.cluster)
                for j in self.jobs
            ]
            embeddings = []
            utterance_ids = []
            # Extract one xvector per utterance, parallelized across jobs
            for u_id, emb in run_kaldi_function(
                SpeechbrainEmbeddingFunction, arguments, pbar.update
            ):
                utterance_ids.append(u_id)
                embeddings.append(emb)
                update_mapping[u_id] = {"id": u_id, "xvector": emb}
            embeddings = np.array(embeddings)
            if PLDA_DIMENSION != XVECTOR_DIMENSION:
                # Reduce xvectors to the PLDA dimension via PCA; fitting needs at
                # least PLDA_DIMENSION samples, otherwise PLDA vectors are skipped
                if embeddings.shape[0] < PLDA_DIMENSION:
                    logger.debug("Can't run PLDA due to too few features.")
                else:
                    pca = decomposition.PCA(PLDA_DIMENSION)
                    pca.fit(embeddings)
                    logger.debug(
                        f"PCA explained variance: {np.sum(pca.explained_variance_ratio_)*100:.2f}%"
                    )
                    transformed = pca.transform(embeddings)
                    for i, u_id in enumerate(utterance_ids):
                        update_mapping[u_id]["plda_vector"] = transformed[i, :]
            else:
                # Dimensions already agree, so the raw xvector doubles as the PLDA vector
                for v in update_mapping.values():
                    v["plda_vector"] = v["xvector"]
            bulk_update(session, Utterance, list(update_mapping.values()))
            session.flush()
            # Build pgvector ivfflat indexes for fast cosine-distance lookups
            session.execute(
                sqlalchemy.text(
                    "CREATE INDEX IF NOT EXISTS utterance_xvector_index ON utterance USING ivfflat (xvector vector_cosine_ops);"
                )
            )
            session.execute(
                sqlalchemy.text(
                    "CREATE INDEX IF NOT EXISTS utterance_plda_vector_index ON utterance USING ivfflat (plda_vector vector_cosine_ops);"
                )
            )
            # Mark the corpus so has_xvectors() short-circuits next time
            session.query(Corpus).update({Corpus.xvectors_loaded: True})
            session.commit()
            logger.debug(f"Loading embeddings took {time.time() - begin:.3f} seconds")
    def refresh_plda_vectors(self):
        """Recompute PLDA-transformed vectors for all utterances from the current PLDA model on disk."""
        logger.info("Refreshing PLDA vectors...")
        self.plda = PldaModel.load(self.plda_path)
        with self.session() as session, tqdm(
            total=self.num_utterances, disable=GLOBAL_CONFIG.quiet
        ) as pbar:
            # Source vectors depend on whether SpeechBrain xvectors or Kaldi ivectors are in use
            if self.use_xvector:
                ivector_column = Utterance.xvector
            else:
                ivector_column = Utterance.ivector
            update_mapping = []
            utterance_ids = []
            ivectors = []
            utterances = session.query(Utterance.id, ivector_column).filter(
                ivector_column != None  # noqa
            )
            for utt_id, ivector in utterances:
                pbar.update(1)
                utterance_ids.append(utt_id)
                ivectors.append(ivector)
            ivectors = np.array(ivectors)
            # Transform all vectors through the PLDA model in a single batch
            ivectors = self.plda.process_ivectors(ivectors)
            for i, utt_id in enumerate(utterance_ids):
                update_mapping.append({"id": utt_id, "plda_vector": ivectors[i, :]})
            bulk_update(session, Utterance, update_mapping)
            session.commit()
        # Persist the reloaded PLDA model so later runs can reuse the same transform
        plda_transform_path = self.working_directory.joinpath("plda.pkl")
        with open(plda_transform_path, "wb") as f:
            pickle.dump(self.plda, f)
    def refresh_speaker_vectors(self) -> None:
        """Refresh speaker vectors following clustering or classification"""
        logger.info("Refreshing speaker vectors...")
        with self.session() as session, tqdm(
            total=self.num_speakers, disable=GLOBAL_CONFIG.quiet
        ) as pbar:
            # Source vectors depend on whether SpeechBrain xvectors or Kaldi ivectors are in use
            if self.use_xvector:
                ivector_column = Utterance.xvector
            else:
                ivector_column = Utterance.ivector
            update_mapping = {}
            speaker_ids = []
            ivectors = []
            speakers = session.query(Speaker.id)
            for (s_id,) in speakers:
                query = session.query(ivector_column).filter(Utterance.speaker_id == s_id)
                s_ivectors = []
                for (u_ivector,) in query:
                    s_ivectors.append(u_ivector)
                # Speakers without any vectorized utterances are skipped entirely
                if not s_ivectors:
                    continue
                # Speaker vector = mean of that speaker's utterance vectors
                mean_ivector = np.mean(np.array(s_ivectors), axis=0)
                speaker_ids.append(s_id)
                ivectors.append(mean_ivector)
                if self.use_xvector:
                    key = "xvector"
                else:
                    key = "ivector"
                update_mapping[s_id] = {"id": s_id, key: mean_ivector}
                pbar.update(1)
            ivectors = np.array(ivectors)
            if self.plda is not None:
                # Also refresh the PLDA-space speaker vectors when a PLDA model exists
                ivectors = self.plda.process_ivectors(ivectors)
                for i, speaker_id in enumerate(speaker_ids):
                    update_mapping[speaker_id]["plda_vector"] = ivectors[i, :]
            bulk_update(session, Speaker, list(update_mapping.values()))
            session.commit()
    def compute_speaker_embeddings(self) -> None:
        """Generate per-speaker embeddings as the mean over their utterances"""
        # Per-utterance xvectors are a prerequisite; load them on demand
        if not self.has_xvectors():
            self.load_embeddings()
        logger.info("Computing SpeechBrain speaker embeddings...")
        with tqdm(
            total=self.num_speakers, disable=GLOBAL_CONFIG.quiet
        ) as pbar, self.session() as session:
            update_mapping = []
            speakers = session.query(Speaker.id)
            for (s_id,) in speakers:
                u_query = session.query(Utterance.xvector).filter(
                    Utterance.speaker_id == s_id, Utterance.xvector != None  # noqa
                )
                embeddings = np.empty((u_query.count(), XVECTOR_DIMENSION))
                # Speakers with no embedded utterances are skipped
                if embeddings.shape[0] == 0:
                    continue
                for i, (xvector,) in enumerate(u_query):
                    embeddings[i, :] = xvector
                speaker_xvector = np.mean(embeddings, axis=0)
                update_mapping.append({"id": s_id, "xvector": speaker_xvector})
                pbar.update(1)
            bulk_update(session, Speaker, update_mapping)
            session.commit()
    def export_files(self, output_directory: str) -> None:
        """
        Export files with their new speaker labels

        Parameters
        ----------
        output_directory: str
            Output directory to save files
        """
        # Without --overwrite, never clobber an existing directory: redirect the
        # export into the working directory instead
        if not self.overwrite and os.path.exists(output_directory):
            output_directory = self.working_directory.joinpath("speaker_classification")
        os.makedirs(output_directory, exist_ok=True)
        # Copy over any diagnostic artifacts produced during clustering/evaluation
        diagnostic_files = [
            "diarization_evaluation_results.csv",
            "cluster_plot.png",
            "nearest_neighbors.png",
        ]
        for fname in diagnostic_files:
            path = self.working_directory.joinpath(fname)
            if os.path.exists(path):
                shutil.copyfile(
                    path,
                    os.path.join(output_directory, fname),
                )
        # Record the run's configuration alongside the exported files for reproducibility
        with mfa_open(os.path.join(output_directory, "parameters.yaml"), "w") as f:
            yaml.dump(
                {
                    "ivector_extractor_path": str(self.ivector_extractor_path),
                    "expected_num_speakers": self.expected_num_speakers,
                    "cluster": self.cluster,
                    "cuda": self.cuda,
                    "metric": self.metric.name,
                    "cluster_type": self.cluster_type.name,
                    "distance_threshold": self.distance_threshold,
                    "min_cluster_size": self.min_cluster_size,
                    "linkage": self.linkage,
                },
                f,
                Dumper=yaml.Dumper,
            )
        with self.session() as session:
            logger.info("Writing output files...")
            # Eager-load related rows to avoid per-file lazy queries in the loop below
            files = session.query(File).options(
                selectinload(File.utterances),
                selectinload(File.speakers),
                joinedload(File.sound_file, innerjoin=True).load_only(SoundFile.duration),
                joinedload(File.text_file, innerjoin=True).load_only(TextFile.file_type),
            )
            with tqdm(total=self.num_files, disable=GLOBAL_CONFIG.quiet) as pbar:
                for file in files:
                    utterance_count = len(file.utterances)
                    if utterance_count == 0:
                        logger.debug(f"Could not find any utterances for {file.name}")
                        continue
                    # Preserve each file's original transcript format (lab vs TextGrid)
                    output_format = file.text_file.file_type
                    output_path = construct_output_path(
                        file.name,
                        file.relative_path,
                        output_directory,
                        output_format=output_format,
                    )
                    if output_format == "lab":
                        # .lab files hold a single transcript with no tier structure
                        with mfa_open(output_path, "w") as f:
                            f.write(file.utterances[0].text)
                    else:
                        data = file.construct_transcription_tiers(original_text=True)
                        export_textgrid(
                            data,
                            output_path,
                            file.duration,
                            self.export_frame_shift,
                            output_format,
                        )
                    pbar.update(1)
| 66,292 | 42.357096 | 168 | py |
Montreal-Forced-Aligner | Montreal-Forced-Aligner-main/montreal_forced_aligner/vad/multiprocessing.py | """Multiprocessing functionality for VAD"""
from __future__ import annotations
import logging
import os
import re
import subprocess
import typing
from pathlib import Path
from typing import TYPE_CHECKING, List, Union
import librosa
import numpy as np
import pynini
import pywrapfst
import sqlalchemy
from Bio import pairwise2
from montreal_forced_aligner.abc import KaldiFunction
from montreal_forced_aligner.corpus.features import online_feature_proc
from montreal_forced_aligner.data import CtmInterval, MfaArguments, WordType
from montreal_forced_aligner.db import Dictionary, File, SoundFile, Speaker, Utterance, Word
from montreal_forced_aligner.exceptions import KaldiProcessingError
from montreal_forced_aligner.helper import mfa_open
from montreal_forced_aligner.utils import parse_ctm_output, read_feats, thirdparty_binary
try:
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
torch_logger = logging.getLogger("speechbrain.utils.torch_audio_backend")
torch_logger.setLevel(logging.ERROR)
torch_logger = logging.getLogger("speechbrain.utils.train_logger")
torch_logger.setLevel(logging.ERROR)
import torch
from speechbrain.pretrained import VAD
FOUND_SPEECHBRAIN = True
except (ImportError, OSError):
FOUND_SPEECHBRAIN = False
VAD = None
if TYPE_CHECKING:
SpeakerCharacterType = Union[str, int]
from dataclasses import dataclass
from montreal_forced_aligner.abc import MetaDict
else:
from dataclassy import dataclass
@dataclass
class SegmentVadArguments(MfaArguments):
    """Arguments for :class:`~montreal_forced_aligner.segmenter.SegmentVadFunction`"""
    # Path to the job's Kaldi VAD archive (read via copy-vector scp:...)
    vad_path: Path
    # Segmentation options; _run reads frame_shift, close_th, large_chunk_size and len_th
    segmentation_options: MetaDict
def get_initial_segmentation(
    frames: List[Union[int, str]], frame_shift: float
) -> List[CtmInterval]:
    """
    Compute initial segmentation over voice activity

    Parameters
    ----------
    frames: list[Union[int, str]]
        List of frames with VAD output (non-zero means voiced)
    frame_shift: float
        Frame shift of features in seconds

    Returns
    -------
    List[CtmInterval]
        Initial segmentation
    """
    segments = []
    cur_segment = None
    # (Cleanup: removed silent_frames/non_silent_frames counters that were
    # incremented but never read.)
    for i, f in enumerate(frames):
        if int(f) > 0:
            # Voiced frame: open a new speech segment if none is in progress
            if cur_segment is None:
                cur_segment = CtmInterval(begin=i * frame_shift, end=0, label="speech")
        else:
            # Unvoiced frame: close any open segment at the previous frame
            if cur_segment is not None:
                cur_segment.end = (i - 1) * frame_shift
                segments.append(cur_segment)
                cur_segment = None
    if cur_segment is not None:
        # Speech ran to the end of the utterance
        cur_segment.end = len(frames) * frame_shift
        segments.append(cur_segment)
    return segments
def merge_segments(
    segments: List[CtmInterval],
    min_pause_duration: float,
    max_segment_length: float,
    min_segment_length: float,
) -> List[CtmInterval]:
    """
    Merge segments together

    Parameters
    ----------
    segments: SegmentationType
        Initial segments
    min_pause_duration: float
        Minimum amount of silence time to mark an utterance boundary
    max_segment_length: float
        Maximum length of segments before they're broken up
    min_segment_length: float
        Minimum length of segments returned

    Returns
    -------
    List[CtmInterval]
        Merged segments
    """
    # Half the minimum pause is used to pull neighboring boundaries toward each other
    snap_threshold = min_pause_duration / 2
    merged: List[CtmInterval] = []
    for seg in segments:
        prev = merged[-1] if merged else None
        starts_new = (
            prev is None
            or seg.begin > prev.end + min_pause_duration
            or seg.end - prev.begin > max_segment_length
        )
        if not starts_new:
            # Close enough to the previous segment: absorb it
            merged[-1].end = seg.end
            continue
        if seg.end - seg.begin <= min_pause_duration:
            # Too short to stand on its own; drop it
            continue
        if prev is not None and snap_threshold:
            # Snap the facing boundaries toward the middle of the gap
            gap = seg.begin - prev.end
            shift = gap / 2 if gap < snap_threshold else snap_threshold / 2
            prev.end += shift
            seg.begin -= shift
        merged.append(seg)
    # Discard anything still below the minimum output length
    return [seg for seg in merged if seg.end - seg.begin > min_segment_length]
def construct_utterance_segmentation_fst(
    text: str,
    word_symbol_table: pywrapfst.SymbolTable,
    interjection_words: typing.List[str] = None,
):
    """
    Build an FST that accepts the utterance's words in order, optionally allowing
    penalized interjection detours before the first word, after the last word,
    and (via the first interjection block) after the final word back into the
    sentence.  The result is determinized and epsilon-free so it can be compiled
    into a decoding graph.

    Parameters
    ----------
    text: str
        Normalized utterance text; one FST state is created per word.
    word_symbol_table: pywrapfst.SymbolTable
        Symbol table mapping words (and <eps>) to integer labels.
    interjection_words: list[str], optional
        Interjections that may be inserted; multi-word interjections are
        expanded into chains of single-word arcs.

    Returns
    -------
    pynini.Fst
        Determinized, epsilon-free acceptor over the utterance.
    """
    if interjection_words is None:
        interjection_words = []
    words = text.split()
    # NOTE(review): assumes `text` is non-empty — `next_state` below is only
    # bound inside the word loop; confirm callers never pass an empty string.
    fst = pynini.Fst()
    start_state = fst.add_state()
    fst.set_start(start_state)
    # One state per word; state i+1 is reached after consuming word i
    fst.add_states(len(words))
    for i, w in enumerate(words):
        next_state = i + 1
        label = word_symbol_table.find(w)
        # Every non-initial word can also be entered directly from the start,
        # so decoding may begin mid-utterance
        if i != 0:
            fst.add_arc(
                start_state,
                pywrapfst.Arc(label, label, pywrapfst.Weight.one(fst.weight_type()), next_state),
            )
        fst.add_arc(
            i, pywrapfst.Arc(label, label, pywrapfst.Weight.one(fst.weight_type()), next_state)
        )
        # Any word position may end the sub-utterance (weight 1)
        fst.set_final(next_state, pywrapfst.Weight(fst.weight_type(), 1))
    # Interjection detours from the final word state (weight-10 epsilon entry)
    for interjection in interjection_words:
        start_interjection_state = fst.add_state()
        fst.add_arc(
            next_state,
            pywrapfst.Arc(
                word_symbol_table.find("<eps>"),
                word_symbol_table.find("<eps>"),
                pywrapfst.Weight(fst.weight_type(), 10),
                start_interjection_state,
            ),
        )
        if " " in interjection:
            # Multi-word interjection: chain one arc per word
            i_words = interjection.split()
            for j, iw in enumerate(i_words):
                next_interjection_state = fst.add_state()
                if j == 0:
                    prev_state = start_interjection_state
                else:
                    prev_state = next_interjection_state - 1
                label = word_symbol_table.find(iw)
                weight = pywrapfst.Weight.one(fst.weight_type())
                fst.add_arc(
                    prev_state, pywrapfst.Arc(label, label, weight, next_interjection_state)
                )
            final_interjection_state = next_interjection_state
        else:
            final_interjection_state = fst.add_state()
            label = word_symbol_table.find(interjection)
            weight = pywrapfst.Weight.one(fst.weight_type())
            fst.add_arc(
                start_interjection_state,
                pywrapfst.Arc(label, label, weight, final_interjection_state),
            )
        # Path to next word in text
        weight = pywrapfst.Weight.one(fst.weight_type())
        fst.add_arc(
            final_interjection_state,
            pywrapfst.Arc(
                word_symbol_table.find("<eps>"),
                word_symbol_table.find("<eps>"),
                weight,
                next_state,
            ),
        )
    # Interjection detours from the start state (utterance-initial interjections)
    for interjection in interjection_words:
        start_interjection_state = fst.add_state()
        fst.add_arc(
            start_state,
            pywrapfst.Arc(
                word_symbol_table.find("<eps>"),
                word_symbol_table.find("<eps>"),
                pywrapfst.Weight(fst.weight_type(), 10),
                start_interjection_state,
            ),
        )
        if " " in interjection:
            # Multi-word interjection: chain one arc per word
            i_words = interjection.split()
            for j, iw in enumerate(i_words):
                next_interjection_state = fst.add_state()
                if j == 0:
                    prev_state = start_interjection_state
                else:
                    prev_state = next_interjection_state - 1
                label = word_symbol_table.find(iw)
                weight = pywrapfst.Weight.one(fst.weight_type())
                fst.add_arc(
                    prev_state, pywrapfst.Arc(label, label, weight, next_interjection_state)
                )
            final_interjection_state = next_interjection_state
        else:
            final_interjection_state = fst.add_state()
            label = word_symbol_table.find(interjection)
            weight = pywrapfst.Weight.one(fst.weight_type())
            fst.add_arc(
                start_interjection_state,
                pywrapfst.Arc(label, label, weight, final_interjection_state),
            )
        # Epsilon path back to the start of the utterance
        weight = pywrapfst.Weight.one(fst.weight_type())
        fst.add_arc(
            final_interjection_state,
            pywrapfst.Arc(
                word_symbol_table.find("<eps>"),
                word_symbol_table.find("<eps>"),
                weight,
                start_state,
            ),
        )
    # Ending on the last word costs nothing (overrides the weight-1 final above)
    fst.set_final(next_state, pywrapfst.Weight.one(fst.weight_type()))
    # Normalize: determinize, strip epsilons, disambiguate, determinize again
    fst = pynini.determinize(fst)
    fst = pynini.rmepsilon(fst)
    fst = pynini.disambiguate(fst)
    fst = pynini.determinize(fst)
    return fst
def segment_utterance(
    session: sqlalchemy.orm.Session,
    working_directory: Path,
    utterance_id: int,
    vad_model: VAD,
    segmentation_options: MetaDict,
    mfcc_options: MetaDict,
    pitch_options: MetaDict,
    lda_options: MetaDict,
    decode_options: MetaDict,
):
    """
    Split one long utterance into sub-utterances: run SpeechBrain VAD to find
    speech regions, decode each region against an FST built from the utterance's
    own transcript, then realign the decoded words back onto the original text.

    Parameters
    ----------
    session: sqlalchemy.orm.Session
        Open database session
    working_directory: :class:`~pathlib.Path`
        Directory for scp files, graphs and logs
    utterance_id: int
        Utterance to segment
    vad_model: VAD
        Loaded SpeechBrain VAD model
    segmentation_options: dict[str, Any]
        VAD thresholds and post-processing options
    mfcc_options: dict[str, Any]
        MFCC feature options
    pitch_options: dict[str, Any]
        Pitch feature options
    lda_options: dict[str, Any]
        LDA feature options
    decode_options: dict[str, Any]
        Decoding options (scales, beams, boost_silence, ...)

    Returns
    -------
    tuple[int, dict[int, dict]]
        The utterance id and a mapping of sub-utterance index to its
        file/speaker/timing/text information.
    """
    log_path = working_directory.joinpath("log", "utterance_segmentation.log")
    # Pull the utterance together with its speaker, dictionary and sound file in one query
    utterance, speaker, dictionary, sound_file = (
        session.query(Utterance, Speaker, Dictionary, SoundFile)
        .join(Utterance.speaker)
        .join(Speaker.dictionary)
        .join(Utterance.file)
        .join(File.sound_file)
        .filter(Utterance.id == utterance_id)
        .first()
    )
    text = utterance.normalized_text
    if not text:
        text = utterance.text
    oovs = utterance.oovs.split()
    # Replace OOV tokens with the dictionary's OOV word for graph construction
    normalized_text = " ".join([x if x not in oovs else dictionary.oov_word for x in text.split()])
    words = set(normalized_text.split() + [dictionary.bracketed_word])
    interjection_words = (
        session.query(Word.word)
        .filter(Word.dictionary_id == dictionary.id)
        .filter(Word.word_type == WordType.interjection)
        .all()
    )
    words.update(interjection_words)
    query = session.query(Word.word, Word.mapping_id, Word.initial_cost, Word.final_cost).filter(
        Word.dictionary_id == dictionary.id
    )
    # NOTE(review): initial_costs/final_costs are populated but not used below —
    # confirm whether they are dead code or reserved for future weighting.
    initial_costs = {}
    final_costs = {}
    reversed_word_mapping = {}
    for w, m_id, ic, fc in query:
        reversed_word_mapping[m_id] = w
        if w not in words:
            continue
        if ic is not None:
            initial_costs[w] = ic
        if fc is not None:
            final_costs[w] = fc
    # Speech regions (begin/end pairs) from SpeechBrain VAD
    segments = segment_utterance_vad_speech_brain(
        utterance, sound_file, vad_model, segmentation_options
    )
    word_symbol_table = pywrapfst.SymbolTable.read_text(dictionary.words_symbol_path)
    utterance_fst_path = working_directory.joinpath("utterance.fst")
    utterance_fst = construct_utterance_segmentation_fst(
        normalized_text,
        word_symbol_table,
        interjection_words=interjection_words,
    )
    utterance_fst.write(utterance_fst_path)
    # Write the Kaldi scp files describing the audio, segments and speaker transforms
    wav_path = working_directory.joinpath("wav.scp")
    segment_path = working_directory.joinpath("segments.scp")
    utt2spk_path = working_directory.joinpath("utt2spk.scp")
    cmvn_path = working_directory.joinpath("cmvn.scp")
    trans_path = working_directory.joinpath("trans.scp")
    with mfa_open(wav_path, "w") as f:
        f.write(f"{utterance.file_id} {sound_file.sox_string}\n")
    if speaker.cmvn:
        with mfa_open(cmvn_path, "w") as f:
            f.write(f"{utterance.speaker_id} {speaker.cmvn}\n")
    if speaker.fmllr:
        with mfa_open(trans_path, "w") as f:
            f.write(f"{utterance.speaker_id} {speaker.fmllr}\n")
    sub_utterance_information = {}
    with mfa_open(segment_path, "w") as f, mfa_open(utt2spk_path, "w") as utt2spk_f:
        for i in range(segments.shape[0]):
            begin, end = segments[i]
            # Small left padding, clamped to the file start
            begin = max(begin - 0.05, 0)
            f.write(
                f"{utterance.speaker_id}-{i} {utterance.file_id} {begin} {end} {utterance.channel}\n"
            )
            utt2spk_f.write(f"{utterance.speaker_id}-{i} {utterance.speaker_id}\n")
            sub_utterance_information[i] = {
                "file_id": utterance.file_id,
                "begin": float(begin),
                "end": float(end),
                "channel": utterance.channel,
                "speaker_id": utterance.speaker_id,
            }
    model_path = working_directory.joinpath("final.alimdl")
    tree_path = working_directory.joinpath("tree")
    hclg_path = working_directory.joinpath("hclg.fst")
    # Dump a human-readable copy of the utterance FST for debugging
    with open(working_directory.joinpath("utterance.text_fst"), "w", encoding="utf8") as f:
        utterance_fst.set_input_symbols(word_symbol_table)
        utterance_fst.set_output_symbols(word_symbol_table)
        f.write(str(utterance_fst))
    with mfa_open(log_path, "w") as log_file:
        # Compile the utterance FST into a decoding graph (HCLG)
        proc = subprocess.Popen(
            [
                thirdparty_binary("compile-graph"),
                f"--read-disambig-syms={dictionary.disambiguation_symbols_int_path}",
                f"--transition-scale={decode_options['transition_scale']}",
                f"--self-loop-scale={decode_options['self_loop_scale']}",
                tree_path,
                model_path,
                dictionary.lexicon_disambig_fst_path,
                utterance_fst_path,
                f"{hclg_path}",
            ],
            stderr=log_file,
            env=os.environ,
        )
        proc.wait()
        if proc.returncode != 0:
            raise KaldiProcessingError([log_path])
        # Online feature extraction feeding the decoder over a pipe
        feature_proc = online_feature_proc(
            working_directory,
            wav_path,
            segment_path,
            mfcc_options,
            pitch_options,
            lda_options,
            log_file,
        )
        # Optionally boost silence phones in the model before decoding
        if decode_options.get("boost_silence", 1.0) != 1.0:
            mdl_string = f"gmm-boost-silence --boost={decode_options['boost_silence']} {decode_options['optional_silence_csl']} {model_path} - |"
        else:
            mdl_string = model_path
        # Pipeline: features -> lattice generation -> word alignment -> CTM
        latgen_proc = subprocess.Popen(
            [
                thirdparty_binary("gmm-latgen-faster"),
                f"--acoustic-scale={decode_options['acoustic_scale']}",
                f"--beam={decode_options['beam']}",
                f"--max-active={decode_options['max_active']}",
                f"--lattice-beam={decode_options['lattice_beam']}",
                f"--word-symbol-table={dictionary.words_symbol_path}",
                "--allow-partial=true",
                mdl_string,
                hclg_path,
                "ark,s,cs:-",
                "ark:-",
            ],
            stderr=log_file,
            stdin=feature_proc.stdout,
            stdout=subprocess.PIPE,
            env=os.environ,
        )
        lat_align_proc = subprocess.Popen(
            [
                thirdparty_binary("lattice-align-words-lexicon"),
                dictionary.align_lexicon_int_path,
                mdl_string,
                "ark,s,cs:-",
                "ark:-",
            ],
            stderr=log_file,
            stdin=latgen_proc.stdout,
            stdout=subprocess.PIPE,
            env=os.environ,
        )
        ctm_proc = subprocess.Popen(
            [
                thirdparty_binary("lattice-to-ctm-conf"),
                f"--acoustic-scale={decode_options['acoustic_scale']}",
                "ark,s,cs:-",
                "-",
            ],
            stderr=log_file,
            stdin=lat_align_proc.stdout,
            stdout=subprocess.PIPE,
            env=os.environ,
            encoding="utf8",
        )
        split_texts = {}
        for sub_id, intervals in parse_ctm_output(ctm_proc, reversed_word_mapping):
            # Keep only fully-confident words; drop empty sub-utterances entirely
            split_text = " ".join([x.label for x in intervals if x.confidence == 1.0])
            if not split_text:
                del sub_utterance_information[sub_id]
                continue
            split_texts[sub_id] = split_text
        ctm_proc.wait()
        # Redistribute the original transcript's words across the decoded splits
        split_texts = align_text(split_texts, text, oovs, dictionary.oov_word, interjection_words)
        for i, split_text in split_texts.items():
            split_oovs = set(x for x in oovs if x in split_text.split())
            sub_utterance_information[i]["text"] = split_text
            sub_utterance_information[i]["oovs"] = " ".join(split_oovs)
            sub_utterance_information[i]["normalized_text"] = split_text
        # Drop any sub-utterances that never received text
        sub_utterance_information = {
            k: v for k, v in sub_utterance_information.items() if "text" in v
        }
    return utterance_id, sub_utterance_information
def align_text(split_utterance_texts, text, oovs, oov_word, interjection_words):
    """
    Redistribute the words of the original transcript over the decoded
    sub-utterance texts by globally aligning the concatenated decoded words
    against the original word sequence (Needleman-Wunsch via Biopython).

    Parameters
    ----------
    split_utterance_texts: dict
        Mapping of sub-utterance id to its decoded text
    text: str
        Original (full) utterance text
    oovs: list[str]
        OOV word forms present in the original text
    oov_word: str
        Dictionary OOV token (e.g. ``<unk>``)
    interjection_words: list
        Interjections that may have been inserted by decoding

    Returns
    -------
    dict
        Mapping of sub-utterance id to its share of the original text
    """
    text = text.split()
    split_utterance_text = []
    lengths = []
    indices = list(split_utterance_texts.keys())
    # Flatten the decoded texts, remembering each split's length
    for t in split_utterance_texts.values():
        t = t.split()
        lengths.append(len(t))
        split_utterance_text.extend(t)
    def score_func(first_element, second_element):
        # Substitution score: exact matches and OOV-token-vs-OOV-word pairs are
        # free; OOV token against a known word or an inserted interjection is
        # heavily penalized; any other mismatch costs 2.
        if first_element == second_element:
            return 0
        if first_element == oov_word and second_element in oovs:
            return 0
        if first_element == oov_word and second_element not in oovs:
            return -10
        if first_element in interjection_words:
            return -10
        return -2
    alignments = pairwise2.align.globalcs(
        split_utterance_text, text, score_func, -0.5, -0.1, gap_char=["-"], one_alignment_only=True
    )
    results = [[]]
    split_ind = 0
    current_size = 0
    for a in alignments:
        # seqA = decoded words, seqB = original words; walk them in parallel
        for i, sa in enumerate(a.seqA):
            sb = a.seqB[i]
            # NOTE(review): hard-codes "<unk>" instead of the oov_word parameter —
            # confirm these always coincide.
            if sa == "<unk>":
                sa = sb
            if sa != "-":
                # Start a new bucket when we cross into the next decoded split
                if (
                    split_ind < len(lengths) - 1
                    and sa not in split_utterance_texts[indices[split_ind]].split()
                    and split_utterance_texts[indices[split_ind + 1]].split()[0] == sa
                ):
                    results.append([])
                    split_ind += 1
                    current_size = 0
                results[-1].append(sa)
                current_size += 1
                # Also advance when the current bucket reaches its split's length
                if split_ind < len(lengths) - 1 and current_size >= lengths[split_ind]:
                    results.append([])
                    split_ind += 1
                    current_size = 0
            elif sb != "-":
                # Original word with no decoded counterpart stays in the current bucket
                results[-1].append(sb)
    results = {k: " ".join(r) for k, r in zip(split_utterance_texts.keys(), results)}
    return results
def segment_utterance_vad_speech_brain(
    utterance: Utterance, sound_file: SoundFile, vad_model: VAD, segmentation_options: MetaDict
) -> np.ndarray:
    """
    Run SpeechBrain VAD over a single utterance and return speech boundaries.

    Parameters
    ----------
    utterance: Utterance
        Utterance row supplying begin, duration and channel
    sound_file: SoundFile
        Sound file row supplying the audio path
    vad_model: VAD
        Loaded SpeechBrain VAD model
    segmentation_options: dict[str, Any]
        Thresholds and post-processing options

    Returns
    -------
    numpy.ndarray
        Array of (begin, end) times in seconds, padded outward by a third of
        ``close_th`` on each side
    """
    # Load only this utterance's span of audio at the model's 16 kHz rate
    y, _ = librosa.load(
        sound_file.sound_file_path,
        sr=16000,
        mono=False,
        offset=utterance.begin,
        duration=utterance.duration,
    )
    if len(y.shape) > 1:
        y = y[:, utterance.channel]
    prob_chunks = vad_model.get_speech_prob_chunk(
        torch.tensor(y[np.newaxis, :], device=vad_model.device)
    ).cpu()
    prob_th = vad_model.apply_threshold(
        prob_chunks,
        activation_th=segmentation_options["activation_th"],
        deactivation_th=segmentation_options["deactivation_th"],
    ).float()
    # Compute the boundaries of the speech segments
    boundaries = vad_model.get_boundaries(prob_th, output_value="seconds")
    # Shift from utterance-relative to file-relative times
    boundaries += utterance.begin
    # Apply energy-based VAD on the detected speech segments
    # (Bug fix: was `if True or ...`, a debug leftover that forced energy VAD
    # on regardless of the apply_energy_VAD option.)
    if segmentation_options["apply_energy_VAD"]:
        boundaries = vad_model.energy_VAD(
            sound_file.sound_file_path,
            boundaries,
            activation_th=segmentation_options["en_activation_th"],
            deactivation_th=segmentation_options["en_deactivation_th"],
        )
    # Merge short segments
    boundaries = vad_model.merge_close_segments(
        boundaries, close_th=segmentation_options["close_th"]
    )
    # Remove short segments
    boundaries = vad_model.remove_short_segments(boundaries, len_th=segmentation_options["len_th"])
    # Double check speech segments
    if segmentation_options["double_check"]:
        boundaries = vad_model.double_check_speech_segments(
            boundaries, sound_file.sound_file_path, speech_th=segmentation_options["speech_th"]
        )
    # Pad each boundary outward by a third of the close threshold
    boundaries[:, 0] -= round(segmentation_options["close_th"] / 3, 3)
    boundaries[:, 1] += round(segmentation_options["close_th"] / 3, 3)
    return boundaries.numpy()
class SegmentVadFunction(KaldiFunction):
    """
    Multiprocessing function to generate segments from VAD output.

    See Also
    --------
    :meth:`montreal_forced_aligner.segmenter.Segmenter.segment_vad`
        Main function that calls this function in parallel
    :meth:`montreal_forced_aligner.segmenter.Segmenter.segment_vad_arguments`
        Job method for generating arguments for this function
    :kaldi_utils:`segmentation.pl`
        Kaldi utility

    Parameters
    ----------
    args: :class:`~montreal_forced_aligner.segmenter.SegmentVadArguments`
        Arguments for the function
    """
    # Matches Kaldi's summary log line reporting utterance counts (currently
    # unused by _run, which parses the vector output directly)
    progress_pattern = re.compile(
        r"^LOG.*processed (?P<done>\d+) utterances.*(?P<no_feats>\d+) had.*(?P<unvoiced>\d+) were.*"
    )
    def __init__(self, args: SegmentVadArguments):
        super().__init__(args)
        # Per-job vad.scp archive and segmentation thresholds
        self.vad_path = args.vad_path
        self.segmentation_options = args.segmentation_options
    def _run(self) -> typing.Generator[typing.Tuple[int, typing.List[CtmInterval]], None, None]:
        """Run the function"""
        with mfa_open(self.log_path, "w") as log_file:
            # Stream the binary VAD vectors as text via Kaldi's copy-vector
            copy_proc = subprocess.Popen(
                [
                    thirdparty_binary("copy-vector"),
                    "--binary=false",
                    f"scp:{self.vad_path}",
                    "ark,t:-",
                ],
                stdout=subprocess.PIPE,
                stderr=log_file,
                env=os.environ,
            )
            for utt_id, frames in read_feats(copy_proc):
                # Frame-level decisions -> raw speech intervals -> merged segments
                initial_segments = get_initial_segmentation(
                    frames, self.segmentation_options["frame_shift"]
                )
                merged = merge_segments(
                    initial_segments,
                    self.segmentation_options["close_th"],
                    self.segmentation_options["large_chunk_size"],
                    self.segmentation_options["len_th"],
                )
                yield utt_id, merged
| 22,837 | 35.599359 | 145 | py |
Montreal-Forced-Aligner | Montreal-Forced-Aligner-main/docs/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Montreal Forced Aligner documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 15 13:27:38 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from datetime import date
sys.path.insert(0, os.path.abspath("../../"))
import montreal_forced_aligner # noqa
from montreal_forced_aligner.utils import get_mfa_version # noqa
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.4'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
extensions = [
"sphinx_needs",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.intersphinx",
"sphinx.ext.extlinks",
"myst_parser",
"external_links",
# "numpydoc",
"sphinx.ext.napoleon",
"sphinx_design",
"sphinx.ext.viewcode",
"sphinx_click",
# "sphinx_autodoc_typehints",
]
myst_enable_extensions = ["colon_fence"]
locale_dirs = ["locale/"] # path is example but recommended.
gettext_compact = False # optional.
panels_add_bootstrap_css = False
intersphinx_mapping = {
"sqlalchemy": ("https://docs.sqlalchemy.org/en/14/", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"python": ("https://docs.python.org/3", None),
"Bio": ("https://biopython.org/docs/latest/api/", None),
"click": ("https://click.palletsprojects.com/en/8.1.x/", None),
}
rst_prolog = """
.. role:: ipa_inline
:class: ipa-inline ipa-highlight
"""
extlinks = {
"mfa_pr": ("https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner/pull/%s", "PR #%s"),
}
# Named external links used by the local external_links extension:
# each entry maps a key to a (display text, URL) tuple.
xref_links = {
    "mfa_models": ("MFA Models", "https://mfa-models.readthedocs.io/"),
    "anchor": ("Anchor Annotator", "https://anchor-annotator.readthedocs.io/en/latest/"),
    "speechbrain": ("SpeechBrain", "https://speechbrain.github.io/"),
    "scikit-learn": ("scikit-learn", "https://scikit-learn.org/stable/index.html"),
    "click": ("click", "https://click.palletsprojects.com/en/8.1.x/"),
    "pgvector": ("pgvector", "https://github.com/pgvector/pgvector"),
    "pretrained_acoustic_models": (
        "MFA acoustic models",
        "https://mfa-models.readthedocs.io/en/latest/acoustic/index.html",
    ),
    "pretrained_tokenizer_models": (
        "MFA tokenizer models",
        "https://mfa-models.readthedocs.io/en/latest/tokenizer/index.html",
    ),
    "pretrained_dictionaries": (
        "MFA dictionaries",
        "https://mfa-models.readthedocs.io/en/latest/dictionary/index.html",
    ),
    "pretrained_g2p": (
        "MFA G2P models",
        "https://mfa-models.readthedocs.io/en/latest/g2p/index.html",
    ),
    # Bug fix: label previously read "MFA G2P models" (copy-paste from the
    # entry above) despite pointing at the ivector extractor index.
    "pretrained_ivector_extractor": (
        "MFA ivector extractors",
        "https://mfa-models.readthedocs.io/en/latest/ivector/index.html",
    ),
    "pretrained_language_models": (
        "MFA language models",
        "https://mfa-models.readthedocs.io/en/latest/language_model/index.html",
    ),
    "mfa_mailing_list": ("MFA mailing list", "https://groups.google.com/g/mfa-users"),
    "mfa_github": (
        "MFA GitHub Repo",
        "https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner",
    ),
    "mfa_github_issues": (
        "MFA GitHub Issues",
        "https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner/issues",
    ),
    "memcauliffe.com": ("Michael McAuliffe's blog", "https://memcauliffe.com"),
    "@wavable": ("@wavable", "https://twitter.com/wavable"),
    "sonderegger": ("Morgan Sonderegger", "http://people.linguistics.mcgill.ca/~morgan/"),
    "wagner": ("Michael Wagner", "https://prosodylab.org/"),
    "coles": ("Arlie Coles", "https://a-coles.github.io/"),
    "chodroff": ("Eleanor Chodroff", "https://www.eleanorchodroff.com/"),
    "dias": ("Danoja Dias", "https://medium.com/breaktheloop"),
    "rutgers_spanish_portuguese": (
        "Rutgers University Department of Spanish and Portuguese",
        "https://span-port.rutgers.edu/",
    ),
    "stengel-eskin": ("Elias Stengel-Eskin", "https://esteng.github.io/"),
    "socolof": ("Michaela Socolof", "https://mcqll.org/people/socolof.michaela/"),
    "mihuc": ("Sarah Mihuc", "https://www.cs.mcgill.ca/~smihuc/"),
    "wsl": ("Windows Subsystem for Linux", "https://docs.microsoft.com/en-us/windows/wsl/install"),
    "kaldi": ("Kaldi", "http://kaldi-asr.org/"),
    "kaldi_github": ("Kaldi GitHub", "https://github.com/kaldi-asr/kaldi"),
    "htk": ("HTK", "http://htk.eng.cam.ac.uk/"),
    "phonetisaurus": ("Phonetisaurus", "https://github.com/AdolfVonKleist/Phonetisaurus"),
    "opengrm_ngram": ("OpenGrm-NGram", "https://www.openfst.org/twiki/bin/view/GRM/NGramLibrary"),
    "openfst": ("OpenFst", "https://www.openfst.org/twiki/bin/view/FST"),
    "baumwelch": ("Baum-Welch", "https://www.opengrm.org/twiki/bin/view/GRM/BaumWelch"),
    "pynini": ("Pynini", "https://www.openfst.org/twiki/bin/view/GRM/Pynini"),
    "prosodylab_aligner": ("Prosodylab-aligner", "http://prosodylab.org/tools/aligner/"),
    "p2fa": (
        "Penn Phonetics Forced Aligner",
        "https://www.ling.upenn.edu/phonetics/old_website_2015/p2fa/",
    ),
    "fave": ("FAVE-align", "https://github.com/JoFrhwld/FAVE/wiki/FAVE-align"),
    "maus": ("MAUS", "http://www.bas.uni-muenchen.de/Bas/BasMAUS.html"),
    "praat": ("Praat", "http://www.fon.hum.uva.nl/praat/"),
    "easy_align": ("EasyAlign", "http://latlcui.unige.ch/phonetique/easyalign.php"),
    "gentle": ("Gentle", "https://lowerquality.com/gentle/"),
    "chodroff_kaldi": ("Kaldi tutorial", "https://eleanorchodroff.com/tutorial/kaldi/index.html"),
    "chodroff_phonetics": (
        "Corpus Phonetics Tutorial",
        "https://eleanorchodroff.com/tutorial/intro.html",
    ),
    "coqui": ("Coqui", "https://coqui.ai/"),
    "conda_installation": (
        "Conda installation",
        "https://docs.conda.io/projects/conda/en/latest/user-guide/install/index.html",
    ),
    "miniconda": (
        "Miniconda",
        "https://docs.conda.io/en/latest/miniconda.html",
    ),
    "conda_forge": ("Conda Forge", "https://conda-forge.org/"),
    "pydata_sphinx_theme": (
        "Pydata Sphinx Theme",
        "https://pydata-sphinx-theme.readthedocs.io/en/latest/",
    ),
    "mfa_reorg_scripts": (
        "MFA-reorganization-scripts repository",
        "https://github.com/MontrealCorpusTools/MFA-reorganization-scripts",
    ),
    "corpus_creation_scripts": (
        "@mmcauliffe's corpus creation scripts",
        "https://github.com/mmcauliffe/corpus-creation-scripts",
    ),
}
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
# Generate stub pages for autosummary entries automatically
autosummary_generate = True
# Suppress type hints in signatures; types are documented in the parameter lists
autodoc_typehints = "none"
# autodoc_typehints_description_target = 'documented'
# autoclass_content = 'both'
autodoc_docstring_signature = True
# Shorten/normalize type names shown in generated API docs
autodoc_type_aliases = {
    "MultispeakerDictionary": "montreal_forced_aligner.dictionary.MultispeakerDictionary",
    "Trainer": "montreal_forced_aligner.abc.Trainer",
    "Aligner": "montreal_forced_aligner.abc.Aligner",
    "multiprocessing.context.Process": "multiprocessing.Process",
    "mp.Process": "multiprocessing.Process",
    "MetaDict": "dict[str, Any]",
}
# Napoleon (NumPy/Google docstring) settings
napoleon_preprocess_types = False
napoleon_attr_annotations = False
napoleon_use_param = True
napoleon_use_ivar = True
napoleon_type_aliases = {
    "Labels": "list[str]",
}
typehints_fully_qualified = False
# numpydoc_xref_param_type = True
# numpydoc_show_inherited_class_members = False
numpydoc_show_class_members = False
# -----------------------------------------------------------------------------
# Autodoc
# -----------------------------------------------------------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Montreal Forced Aligner"
copyright = f"2018-{date.today().year}, Montreal Corpus Tools"
author = "Montreal Corpus Tools"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version (first two components of the full version string).
version = ".".join(get_mfa_version().split(".", maxsplit=2)[:2])
# The full version, including alpha/beta/rc tags.
release = get_mfa_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5 warns on language = None and falls back to "en";
# consider setting it to "en" explicitly.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "autolink"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# nitpicky = True
nitpick_ignore = [
("py:class", "optional"),
("py:class", "callable"),
("py:class", "WordsType"),
("py:class", "TextIO"),
("py:class", "SegmentationType"),
("py:class", "CtmErrorDict"),
("py:class", "kwargs"),
("py:class", "Labels"),
("py:class", "multiprocessing.Value"),
("py:class", "praatio.utilities.constants.Interval"),
("py:class", "montreal_forced_aligner.abc.MetaDict"),
("py:class", "multiprocessing.context.Process"),
]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pydata_sphinx_theme"
html_logo = "_static/logo.svg"
html_favicon = "_static/favicon.ico"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"external_links": [
{
"url": "https://mfa-models.readthedocs.io/",
"name": "Pretrained MFA models",
},
],
"icon_links": [
{
"name": "GitHub",
"url": "https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner",
"icon": "fab fa-github",
},
{
"name": "PyPI",
"url": "https://pypi.org/project/Montreal-Forced-Aligner/",
"icon": "fas fa-box",
},
{
"name": "PyPI",
"url": "https://anaconda.org/conda-forge/montreal-forced-aligner",
"icon": "fas fa-toolbox",
},
],
"logo": {
"text": "Montreal Forced Aligner",
# "image_dark": "logo-dark.svg",
},
"analytics": {
"google_analytics_id": "G-5NGNLY0CWX",
},
# "show_nav_level": 1,
# "navigation_depth": 4,
# "show_toc_level": 2,
# "collapse_navigation": True,
}
html_context = {
"github_user": "MontrealCorpusTools",
"github_repo": "Montreal-Forced-Aligner",
"github_version": "main",
"doc_path": "docs/source",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'Montreal Forced Aligner v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = [
"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.3/css/fontawesome.min.css",
"css/mfa.css",
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = { '**': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'], }
html_sidebars = {"**": ["search-field.html", "sidebar-nav-bs.html", "sidebar-ethical-ads.html"]}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "MontrealForcedAlignerdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"MontrealForcedAligner.tex",
"Montreal Forced Aligner Documentation",
"Montreal Corpus Tools",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "montrealforcedaligner", "Montreal Forced Aligner Documentation", [author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"MontrealForcedAligner",
"Montreal Forced Aligner Documentation",
author,
"MontrealForcedAligner",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| 19,302 | 31.996581 | 102 | py |
PyTorch-VAE | PyTorch-VAE-master/experiment.py | import os
import math
import torch
from torch import optim
from models import BaseVAE
from models.types_ import *
from utils import data_loader
import pytorch_lightning as pl
from torchvision import transforms
import torchvision.utils as vutils
from torchvision.datasets import CelebA
from torch.utils.data import DataLoader
class VAEXperiment(pl.LightningModule):
def __init__(self,
vae_model: BaseVAE,
params: dict) -> None:
super(VAEXperiment, self).__init__()
self.model = vae_model
self.params = params
self.curr_device = None
self.hold_graph = False
try:
self.hold_graph = self.params['retain_first_backpass']
except:
pass
def forward(self, input: Tensor, **kwargs) -> Tensor:
return self.model(input, **kwargs)
def training_step(self, batch, batch_idx, optimizer_idx = 0):
real_img, labels = batch
self.curr_device = real_img.device
results = self.forward(real_img, labels = labels)
train_loss = self.model.loss_function(*results,
M_N = self.params['kld_weight'], #al_img.shape[0]/ self.num_train_imgs,
optimizer_idx=optimizer_idx,
batch_idx = batch_idx)
self.log_dict({key: val.item() for key, val in train_loss.items()}, sync_dist=True)
return train_loss['loss']
def validation_step(self, batch, batch_idx, optimizer_idx = 0):
real_img, labels = batch
self.curr_device = real_img.device
results = self.forward(real_img, labels = labels)
val_loss = self.model.loss_function(*results,
M_N = 1.0, #real_img.shape[0]/ self.num_val_imgs,
optimizer_idx = optimizer_idx,
batch_idx = batch_idx)
self.log_dict({f"val_{key}": val.item() for key, val in val_loss.items()}, sync_dist=True)
def on_validation_end(self) -> None:
self.sample_images()
def sample_images(self):
# Get sample reconstruction image
test_input, test_label = next(iter(self.trainer.datamodule.test_dataloader()))
test_input = test_input.to(self.curr_device)
test_label = test_label.to(self.curr_device)
# test_input, test_label = batch
recons = self.model.generate(test_input, labels = test_label)
vutils.save_image(recons.data,
os.path.join(self.logger.log_dir ,
"Reconstructions",
f"recons_{self.logger.name}_Epoch_{self.current_epoch}.png"),
normalize=True,
nrow=12)
try:
samples = self.model.sample(144,
self.curr_device,
labels = test_label)
vutils.save_image(samples.cpu().data,
os.path.join(self.logger.log_dir ,
"Samples",
f"{self.logger.name}_Epoch_{self.current_epoch}.png"),
normalize=True,
nrow=12)
except Warning:
pass
def configure_optimizers(self):
optims = []
scheds = []
optimizer = optim.Adam(self.model.parameters(),
lr=self.params['LR'],
weight_decay=self.params['weight_decay'])
optims.append(optimizer)
# Check if more than 1 optimizer is required (Used for adversarial training)
try:
if self.params['LR_2'] is not None:
optimizer2 = optim.Adam(getattr(self.model,self.params['submodel']).parameters(),
lr=self.params['LR_2'])
optims.append(optimizer2)
except:
pass
try:
if self.params['scheduler_gamma'] is not None:
scheduler = optim.lr_scheduler.ExponentialLR(optims[0],
gamma = self.params['scheduler_gamma'])
scheds.append(scheduler)
# Check if another scheduler is required for the second optimizer
try:
if self.params['scheduler_gamma_2'] is not None:
scheduler2 = optim.lr_scheduler.ExponentialLR(optims[1],
gamma = self.params['scheduler_gamma_2'])
scheds.append(scheduler2)
except:
pass
return optims, scheds
except:
return optims
| 4,997 | 38.354331 | 117 | py |
PyTorch-VAE | PyTorch-VAE-master/utils.py | import pytorch_lightning as pl
## Utils to handle newer PyTorch Lightning changes from version 0.6
## ==================================================================================================== ##
def data_loader(fn):
"""
Decorator to handle the deprecation of data_loader from 0.7
:param fn: User defined data loader function
:return: A wrapper for the data_loader function
"""
def func_wrapper(self):
try: # Works for version 0.6.0
return pl.data_loader(fn)(self)
except: # Works for version > 0.6.0
return fn(self)
return func_wrapper
| 622 | 26.086957 | 106 | py |
PyTorch-VAE | PyTorch-VAE-master/dataset.py | import os
import torch
from torch import Tensor
from pathlib import Path
from typing import List, Optional, Sequence, Union, Any, Callable
from torchvision.datasets.folder import default_loader
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.datasets import CelebA
import zipfile
# Add your custom dataset class here
class MyDataset(Dataset):
def __init__(self):
pass
def __len__(self):
pass
def __getitem__(self, idx):
pass
class MyCelebA(CelebA):
"""
A work-around to address issues with pytorch's celebA dataset class.
Download and Extract
URL : https://drive.google.com/file/d/1m8-EBPgi5MRubrm6iQjafK2QMHDBMSfJ/view?usp=sharing
"""
def _check_integrity(self) -> bool:
return True
class OxfordPets(Dataset):
"""
URL = https://www.robots.ox.ac.uk/~vgg/data/pets/
"""
def __init__(self,
data_path: str,
split: str,
transform: Callable,
**kwargs):
self.data_dir = Path(data_path) / "OxfordPets"
self.transforms = transform
imgs = sorted([f for f in self.data_dir.iterdir() if f.suffix == '.jpg'])
self.imgs = imgs[:int(len(imgs) * 0.75)] if split == "train" else imgs[int(len(imgs) * 0.75):]
def __len__(self):
return len(self.imgs)
def __getitem__(self, idx):
img = default_loader(self.imgs[idx])
if self.transforms is not None:
img = self.transforms(img)
return img, 0.0 # dummy datat to prevent breaking
class VAEDataset(LightningDataModule):
"""
PyTorch Lightning data module
Args:
data_dir: root directory of your dataset.
train_batch_size: the batch size to use during training.
val_batch_size: the batch size to use during validation.
patch_size: the size of the crop to take from the original images.
num_workers: the number of parallel workers to create to load data
items (see PyTorch's Dataloader documentation for more details).
pin_memory: whether prepared items should be loaded into pinned memory
or not. This can improve performance on GPUs.
"""
def __init__(
self,
data_path: str,
train_batch_size: int = 8,
val_batch_size: int = 8,
patch_size: Union[int, Sequence[int]] = (256, 256),
num_workers: int = 0,
pin_memory: bool = False,
**kwargs,
):
super().__init__()
self.data_dir = data_path
self.train_batch_size = train_batch_size
self.val_batch_size = val_batch_size
self.patch_size = patch_size
self.num_workers = num_workers
self.pin_memory = pin_memory
def setup(self, stage: Optional[str] = None) -> None:
# ========================= OxfordPets Dataset =========================
# train_transforms = transforms.Compose([transforms.RandomHorizontalFlip(),
# transforms.CenterCrop(self.patch_size),
# # transforms.Resize(self.patch_size),
# transforms.ToTensor(),
# transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
# val_transforms = transforms.Compose([transforms.RandomHorizontalFlip(),
# transforms.CenterCrop(self.patch_size),
# # transforms.Resize(self.patch_size),
# transforms.ToTensor(),
# transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
# self.train_dataset = OxfordPets(
# self.data_dir,
# split='train',
# transform=train_transforms,
# )
# self.val_dataset = OxfordPets(
# self.data_dir,
# split='val',
# transform=val_transforms,
# )
# ========================= CelebA Dataset =========================
train_transforms = transforms.Compose([transforms.RandomHorizontalFlip(),
transforms.CenterCrop(148),
transforms.Resize(self.patch_size),
transforms.ToTensor(),])
val_transforms = transforms.Compose([transforms.RandomHorizontalFlip(),
transforms.CenterCrop(148),
transforms.Resize(self.patch_size),
transforms.ToTensor(),])
self.train_dataset = MyCelebA(
self.data_dir,
split='train',
transform=train_transforms,
download=False,
)
# Replace CelebA with your dataset
self.val_dataset = MyCelebA(
self.data_dir,
split='test',
transform=val_transforms,
download=False,
)
# ===============================================================
def train_dataloader(self) -> DataLoader:
return DataLoader(
self.train_dataset,
batch_size=self.train_batch_size,
num_workers=self.num_workers,
shuffle=True,
pin_memory=self.pin_memory,
)
def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(
self.val_dataset,
batch_size=self.val_batch_size,
num_workers=self.num_workers,
shuffle=False,
pin_memory=self.pin_memory,
)
def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(
self.val_dataset,
batch_size=144,
num_workers=self.num_workers,
shuffle=True,
pin_memory=self.pin_memory,
)
| 6,315 | 33.895028 | 118 | py |
PyTorch-VAE | PyTorch-VAE-master/run.py | import os
import yaml
import argparse
import numpy as np
from pathlib import Path
from models import *
from experiment import VAEXperiment
import torch.backends.cudnn as cudnn
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.seed import seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from dataset import VAEDataset
from pytorch_lightning.plugins import DDPPlugin
parser = argparse.ArgumentParser(description='Generic runner for VAE models')
parser.add_argument('--config', '-c',
dest="filename",
metavar='FILE',
help = 'path to the config file',
default='configs/vae.yaml')
args = parser.parse_args()
with open(args.filename, 'r') as file:
try:
config = yaml.safe_load(file)
except yaml.YAMLError as exc:
print(exc)
tb_logger = TensorBoardLogger(save_dir=config['logging_params']['save_dir'],
name=config['model_params']['name'],)
# For reproducibility
seed_everything(config['exp_params']['manual_seed'], True)
model = vae_models[config['model_params']['name']](**config['model_params'])
experiment = VAEXperiment(model,
config['exp_params'])
data = VAEDataset(**config["data_params"], pin_memory=len(config['trainer_params']['gpus']) != 0)
data.setup()
runner = Trainer(logger=tb_logger,
callbacks=[
LearningRateMonitor(),
ModelCheckpoint(save_top_k=2,
dirpath =os.path.join(tb_logger.log_dir , "checkpoints"),
monitor= "val_loss",
save_last= True),
],
strategy=DDPPlugin(find_unused_parameters=False),
**config['trainer_params'])
Path(f"{tb_logger.log_dir}/Samples").mkdir(exist_ok=True, parents=True)
Path(f"{tb_logger.log_dir}/Reconstructions").mkdir(exist_ok=True, parents=True)
print(f"======= Training {config['model_params']['name']} =======")
runner.fit(experiment, datamodule=data) | 2,216 | 34.758065 | 97 | py |
PyTorch-VAE | PyTorch-VAE-master/models/vq_vae.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
class VectorQuantizer(nn.Module):
"""
Reference:
[1] https://github.com/deepmind/sonnet/blob/v2/sonnet/src/nets/vqvae.py
"""
def __init__(self,
num_embeddings: int,
embedding_dim: int,
beta: float = 0.25):
super(VectorQuantizer, self).__init__()
self.K = num_embeddings
self.D = embedding_dim
self.beta = beta
self.embedding = nn.Embedding(self.K, self.D)
self.embedding.weight.data.uniform_(-1 / self.K, 1 / self.K)
def forward(self, latents: Tensor) -> Tensor:
latents = latents.permute(0, 2, 3, 1).contiguous() # [B x D x H x W] -> [B x H x W x D]
latents_shape = latents.shape
flat_latents = latents.view(-1, self.D) # [BHW x D]
# Compute L2 distance between latents and embedding weights
dist = torch.sum(flat_latents ** 2, dim=1, keepdim=True) + \
torch.sum(self.embedding.weight ** 2, dim=1) - \
2 * torch.matmul(flat_latents, self.embedding.weight.t()) # [BHW x K]
# Get the encoding that has the min distance
encoding_inds = torch.argmin(dist, dim=1).unsqueeze(1) # [BHW, 1]
# Convert to one-hot encodings
device = latents.device
encoding_one_hot = torch.zeros(encoding_inds.size(0), self.K, device=device)
encoding_one_hot.scatter_(1, encoding_inds, 1) # [BHW x K]
# Quantize the latents
quantized_latents = torch.matmul(encoding_one_hot, self.embedding.weight) # [BHW, D]
quantized_latents = quantized_latents.view(latents_shape) # [B x H x W x D]
# Compute the VQ Losses
commitment_loss = F.mse_loss(quantized_latents.detach(), latents)
embedding_loss = F.mse_loss(quantized_latents, latents.detach())
vq_loss = commitment_loss * self.beta + embedding_loss
# Add the residue back to the latents
quantized_latents = latents + (quantized_latents - latents).detach()
return quantized_latents.permute(0, 3, 1, 2).contiguous(), vq_loss # [B x D x H x W]
class ResidualLayer(nn.Module):
def __init__(self,
in_channels: int,
out_channels: int):
super(ResidualLayer, self).__init__()
self.resblock = nn.Sequential(nn.Conv2d(in_channels, out_channels,
kernel_size=3, padding=1, bias=False),
nn.ReLU(True),
nn.Conv2d(out_channels, out_channels,
kernel_size=1, bias=False))
def forward(self, input: Tensor) -> Tensor:
return input + self.resblock(input)
class VQVAE(BaseVAE):
def __init__(self,
in_channels: int,
embedding_dim: int,
num_embeddings: int,
hidden_dims: List = None,
beta: float = 0.25,
img_size: int = 64,
**kwargs) -> None:
super(VQVAE, self).__init__()
self.embedding_dim = embedding_dim
self.num_embeddings = num_embeddings
self.img_size = img_size
self.beta = beta
modules = []
if hidden_dims is None:
hidden_dims = [128, 256]
# Build Encoder
for h_dim in hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(in_channels, out_channels=h_dim,
kernel_size=4, stride=2, padding=1),
nn.LeakyReLU())
)
in_channels = h_dim
modules.append(
nn.Sequential(
nn.Conv2d(in_channels, in_channels,
kernel_size=3, stride=1, padding=1),
nn.LeakyReLU())
)
for _ in range(6):
modules.append(ResidualLayer(in_channels, in_channels))
modules.append(nn.LeakyReLU())
modules.append(
nn.Sequential(
nn.Conv2d(in_channels, embedding_dim,
kernel_size=1, stride=1),
nn.LeakyReLU())
)
self.encoder = nn.Sequential(*modules)
self.vq_layer = VectorQuantizer(num_embeddings,
embedding_dim,
self.beta)
# Build Decoder
modules = []
modules.append(
nn.Sequential(
nn.Conv2d(embedding_dim,
hidden_dims[-1],
kernel_size=3,
stride=1,
padding=1),
nn.LeakyReLU())
)
for _ in range(6):
modules.append(ResidualLayer(hidden_dims[-1], hidden_dims[-1]))
modules.append(nn.LeakyReLU())
hidden_dims.reverse()
for i in range(len(hidden_dims) - 1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(hidden_dims[i],
hidden_dims[i + 1],
kernel_size=4,
stride=2,
padding=1),
nn.LeakyReLU())
)
modules.append(
nn.Sequential(
nn.ConvTranspose2d(hidden_dims[-1],
out_channels=3,
kernel_size=4,
stride=2, padding=1),
nn.Tanh()))
self.decoder = nn.Sequential(*modules)
def encode(self, input: Tensor) -> List[Tensor]:
"""
Encodes the input by passing through the encoder network
and returns the latent codes.
:param input: (Tensor) Input tensor to encoder [N x C x H x W]
:return: (Tensor) List of latent codes
"""
result = self.encoder(input)
return [result]
def decode(self, z: Tensor) -> Tensor:
"""
Maps the given latent codes
onto the image space.
:param z: (Tensor) [B x D x H x W]
:return: (Tensor) [B x C x H x W]
"""
result = self.decoder(z)
return result
def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
encoding = self.encode(input)[0]
quantized_inputs, vq_loss = self.vq_layer(encoding)
return [self.decode(quantized_inputs), input, vq_loss]
def loss_function(self,
*args,
**kwargs) -> dict:
"""
:param args:
:param kwargs:
:return:
"""
recons = args[0]
input = args[1]
vq_loss = args[2]
recons_loss = F.mse_loss(recons, input)
loss = recons_loss + vq_loss
return {'loss': loss,
'Reconstruction_Loss': recons_loss,
'VQ_Loss':vq_loss}
def sample(self,
num_samples: int,
current_device: Union[int, str], **kwargs) -> Tensor:
raise Warning('VQVAE sampler is not implemented.')
def generate(self, x: Tensor, **kwargs) -> Tensor:
"""
Given an input image x, returns the reconstructed image
:param x: (Tensor) [B x C x H x W]
:return: (Tensor) [B x C x H x W]
"""
return self.forward(x)[0] | 7,576 | 32.675556 | 96 | py |
PyTorch-VAE | PyTorch-VAE-master/models/base.py | from .types_ import *
from torch import nn
from abc import abstractmethod
class BaseVAE(nn.Module):
def __init__(self) -> None:
super(BaseVAE, self).__init__()
def encode(self, input: Tensor) -> List[Tensor]:
raise NotImplementedError
def decode(self, input: Tensor) -> Any:
raise NotImplementedError
def sample(self, batch_size:int, current_device: int, **kwargs) -> Tensor:
raise NotImplementedError
def generate(self, x: Tensor, **kwargs) -> Tensor:
raise NotImplementedError
@abstractmethod
def forward(self, *inputs: Tensor) -> Tensor:
pass
@abstractmethod
def loss_function(self, *inputs: Any, **kwargs) -> Tensor:
pass
| 733 | 21.9375 | 78 | py |
PyTorch-VAE | PyTorch-VAE-master/models/twostage_vae.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
class TwoStageVAE(BaseVAE):
def __init__(self,
in_channels: int,
latent_dim: int,
hidden_dims: List = None,
hidden_dims2: List = None,
**kwargs) -> None:
super(TwoStageVAE, self).__init__()
self.latent_dim = latent_dim
modules = []
if hidden_dims is None:
hidden_dims = [32, 64, 128, 256, 512]
if hidden_dims2 is None:
hidden_dims2 = [1024, 1024]
# Build Encoder
for h_dim in hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(in_channels, out_channels=h_dim,
kernel_size= 3, stride= 2, padding = 1),
nn.BatchNorm2d(h_dim),
nn.LeakyReLU())
)
in_channels = h_dim
self.encoder = nn.Sequential(*modules)
self.fc_mu = nn.Linear(hidden_dims[-1]*4, latent_dim)
self.fc_var = nn.Linear(hidden_dims[-1]*4, latent_dim)
# Build Decoder
modules = []
self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
hidden_dims.reverse()
for i in range(len(hidden_dims) - 1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(hidden_dims[i],
hidden_dims[i + 1],
kernel_size=3,
stride = 2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[i + 1]),
nn.LeakyReLU())
)
self.decoder = nn.Sequential(*modules)
self.final_layer = nn.Sequential(
nn.ConvTranspose2d(hidden_dims[-1],
hidden_dims[-1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[-1]),
nn.LeakyReLU(),
nn.Conv2d(hidden_dims[-1], out_channels= 3,
kernel_size= 3, padding= 1),
nn.Tanh())
#---------------------- Second VAE ---------------------------#
encoder2 = []
in_channels = self.latent_dim
for h_dim in hidden_dims2:
encoder2.append(nn.Sequential(
nn.Linear(in_channels, h_dim),
nn.BatchNorm1d(h_dim),
nn.LeakyReLU()))
in_channels = h_dim
self.encoder2 = nn.Sequential(*encoder2)
self.fc_mu2 = nn.Linear(hidden_dims2[-1], self.latent_dim)
self.fc_var2 = nn.Linear(hidden_dims2[-1], self.latent_dim)
decoder2 = []
hidden_dims2.reverse()
in_channels = self.latent_dim
for h_dim in hidden_dims2:
decoder2.append(nn.Sequential(
nn.Linear(in_channels, h_dim),
nn.BatchNorm1d(h_dim),
nn.LeakyReLU()))
in_channels = h_dim
self.decoder2 = nn.Sequential(*decoder2)
def encode(self, input: Tensor) -> List[Tensor]:
"""
Encodes the input by passing through the encoder network
and returns the latent codes.
:param input: (Tensor) Input tensor to encoder [N x C x H x W]
:return: (Tensor) List of latent codes
"""
result = self.encoder(input)
result = torch.flatten(result, start_dim=1)
# Split the result into mu and var components
# of the latent Gaussian distribution
mu = self.fc_mu(result)
log_var = self.fc_var(result)
return [mu, log_var]
def decode(self, z: Tensor) -> Tensor:
"""
Maps the given latent codes
onto the image space.
:param z: (Tensor) [B x D]
:return: (Tensor) [B x C x H x W]
"""
result = self.decoder_input(z)
result = result.view(-1, 512, 2, 2)
result = self.decoder(result)
result = self.final_layer(result)
return result
def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
"""
Reparameterization trick to sample from N(mu, var) from
N(0,1).
:param mu: (Tensor) Mean of the latent Gaussian [B x D]
:param logvar: (Tensor) Standard deviation of the latent Gaussian [B x D]
:return: (Tensor) [B x D]
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
mu, log_var = self.encode(input)
z = self.reparameterize(mu, log_var)
return [self.decode(z), input, mu, log_var]
def loss_function(self,
*args,
**kwargs) -> dict:
"""
Computes the VAE loss function.
KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
:param args:
:param kwargs:
:return:
"""
recons = args[0]
input = args[1]
mu = args[2]
log_var = args[3]
kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset
recons_loss =F.mse_loss(recons, input)
kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim = 1), dim = 0)
loss = recons_loss + kld_weight * kld_loss
return {'loss': loss, 'Reconstruction_Loss':recons_loss, 'KLD':-kld_loss}
def sample(self,
num_samples:int,
current_device: int, **kwargs) -> Tensor:
"""
Samples from the latent space and return the corresponding
image space map.
:param num_samples: (Int) Number of samples
:param current_device: (Int) Device to run the model
:return: (Tensor)
"""
z = torch.randn(num_samples,
self.latent_dim)
z = z.to(current_device)
samples = self.decode(z)
return samples
def generate(self, x: Tensor, **kwargs) -> Tensor:
"""
Given an input image x, returns the reconstructed image
:param x: (Tensor) [B x C x H x W]
:return: (Tensor) [B x C x H x W]
"""
return self.forward(x)[0] | 6,867 | 33.862944 | 104 | py |
PyTorch-VAE | PyTorch-VAE-master/models/gamma_vae.py | import torch
from models import BaseVAE
from torch import nn
from torch.distributions import Gamma
from torch.nn import functional as F
from .types_ import *
import torch.nn.init as init
class GammaVAE(BaseVAE):
    """
    VAE whose latent variables follow Gamma distributions.

    Sampling uses the rejection-free shape-augmentation reparameterization of
    Naesseth et al., https://arxiv.org/pdf/1610.05683.pdf
    """

    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dims: List = None,
                 gamma_shape: float = 8.,
                 prior_shape: float = 2.0,
                 prior_rate: float = 1.,
                 **kwargs) -> None:
        """
        :param in_channels: number of channels of the input images
        :param latent_dim: dimensionality of the latent space
        :param hidden_dims: conv channel widths for the encoder (reversed for the decoder)
        :param gamma_shape: shape augmentation B used by the reparameterization trick
        :param prior_shape: shape (alpha) of the Gamma prior
        :param prior_rate: rate (beta) of the Gamma prior
        """
        super(GammaVAE, self).__init__()

        self.latent_dim = latent_dim
        self.B = gamma_shape
        # NOTE(review): plain tensors, not registered buffers, so model.to(device)
        # will not move them; loss_function/sample move them explicitly.
        self.prior_alpha = torch.tensor([prior_shape])
        self.prior_beta = torch.tensor([prior_rate])

        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]
        else:
            # Fix: work on a copy — `reverse()` below must not mutate the
            # caller's list.
            hidden_dims = list(hidden_dims)

        # Build Encoder
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size=3, stride=2, padding=1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim

        self.encoder = nn.Sequential(*modules)

        # Softmax keeps the Gamma parameters strictly positive. dim=-1 is what
        # the deprecated implicit-dim softmax resolved to for the 2-D [B x D]
        # inputs produced here.
        self.fc_mu = nn.Sequential(nn.Linear(hidden_dims[-1] * 4, latent_dim),
                                   nn.Softmax(dim=-1))
        self.fc_var = nn.Sequential(nn.Linear(hidden_dims[-1] * 4, latent_dim),
                                    nn.Softmax(dim=-1))

        # Build Decoder
        modules = []

        self.decoder_input = nn.Sequential(nn.Linear(latent_dim, hidden_dims[-1] * 4))

        hidden_dims.reverse()

        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )

        self.decoder = nn.Sequential(*modules)

        self.final_layer = nn.Sequential(
            nn.ConvTranspose2d(hidden_dims[-1],
                               hidden_dims[-1],
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1),
            nn.BatchNorm2d(hidden_dims[-1]),
            nn.LeakyReLU(),
            nn.Conv2d(hidden_dims[-1], out_channels=3,
                      kernel_size=3, padding=1),
            nn.Sigmoid())

        self.weight_init()

    def weight_init(self):
        """Recursively apply `init_` to every layer of the model.

        Fix: the previous implementation iterated only the *direct* children
        of each top-level block. nn.Sequential containers match none of the
        isinstance checks in `init_`, so the Conv2d/BatchNorm layers nested
        inside encoder/decoder were silently left at their default
        initialization. `self.modules()` yields every nested submodule.
        """
        for m in self.modules():
            init_(m)

    def encode(self, input: Tensor) -> List[Tensor]:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (List[Tensor]) [alpha, beta] Gamma parameters, each [N x D]
        """
        result = self.encoder(input)
        result = torch.flatten(result, start_dim=1)

        alpha = self.fc_mu(result)
        beta = self.fc_var(result)

        return [alpha, beta]

    def decode(self, z: Tensor) -> Tensor:
        """Map latent codes [B x D] back to image space [B x 3 x H x W]."""
        result = self.decoder_input(z)
        result = result.view(-1, 512, 2, 2)
        result = self.decoder(result)
        result = self.final_layer(result)
        return result

    def reparameterize(self, alpha: Tensor, beta: Tensor) -> Tensor:
        """
        Reparameterize the Gamma distribution by the shape augmentation trick.
        Reference:
        [1] https://arxiv.org/pdf/1610.05683.pdf

        :param alpha: (Tensor) Shape parameter of the latent Gamma
        :param beta: (Tensor) Rate parameter of the latent Gamma
        :return: (Tensor) sampled latent code
        """
        # Sample z_hat ~ Gamma(alpha + B, 1) with gradients detached; the B
        # extra shape augmentations guarantee acceptance.
        alpha_ = alpha.clone().detach()
        z_hat = Gamma(alpha_ + self.B, torch.ones_like(alpha_)).sample()

        # Invert the Marsaglia-Tsang transform to recover the eps ~ N(0,1)
        # that produced z_hat, then re-apply it *with* gradients attached.
        eps = self.inv_h_func(alpha + self.B, z_hat)
        z = self.h_func(alpha + self.B, eps)

        # When beta != 1, rescale (Gamma rate property).
        return z / beta

    def h_func(self, alpha: Tensor, eps: Tensor) -> Tensor:
        """
        Reparameterize a sample eps ~ N(0, 1) so that h(z) ~ Gamma(alpha, 1)
        (Marsaglia-Tsang transform).
        :param alpha: (Tensor) Shape parameter
        :param eps: (Tensor) Random sample to reparameterize
        :return: (Tensor)
        """
        z = (alpha - 1. / 3.) * (1 + eps / torch.sqrt(9. * alpha - 3.)) ** 3
        return z

    def inv_h_func(self, alpha: Tensor, z: Tensor) -> Tensor:
        """
        Inverse reparameterize the given z into eps (inverse of `h_func`).
        :param alpha: (Tensor)
        :param z: (Tensor)
        :return: (Tensor)
        """
        eps = torch.sqrt(9. * alpha - 3.) * ((z / (alpha - 1. / 3.)) ** (1. / 3.) - 1.)
        return eps

    def forward(self, input: Tensor, **kwargs) -> Tensor:
        """Encode, sample a Gamma latent code, decode.

        :return: [reconstruction, input, alpha, beta]
        """
        alpha, beta = self.encode(input)
        z = self.reparameterize(alpha, beta)
        return [self.decode(z), input, alpha, beta]

    def I_function(self, a, b, c, d):
        """Helper term of the Gamma-Gamma KL divergence (see vae_gamma_kl_loss)."""
        return - c * d / a - b * torch.log(a) - torch.lgamma(b) + (b - 1) * (torch.digamma(d) + torch.log(c))

    def vae_gamma_kl_loss(self, a, b, c, d):
        """
        KL(Gamma(a, b) || Gamma(c, d)), summed over the latent dimensions.
        https://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions
        b and d are Gamma shape parameters and
        a and c are scale parameters.
        (All, therefore, must be positive.)
        """
        a = 1 / a
        c = 1 / c
        losses = self.I_function(c, d, c, d) - self.I_function(a, b, c, d)
        return torch.sum(losses, dim=1)

    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        """Reconstruction (per-sample MSE) plus Gamma-Gamma KL against the prior."""
        recons = args[0]
        input = args[1]
        alpha = args[2]
        beta = args[3]

        curr_device = input.device
        kld_weight = kwargs['M_N']  # NOTE(review): read but not applied below — confirm intended

        recons_loss = torch.mean(F.mse_loss(recons, input, reduction='none'), dim=(1, 2, 3))

        # Move the (unregistered) prior parameters to the input's device.
        self.prior_alpha = self.prior_alpha.to(curr_device)
        self.prior_beta = self.prior_beta.to(curr_device)

        kld_loss = self.vae_gamma_kl_loss(alpha, beta, self.prior_alpha, self.prior_beta)

        loss = recons_loss + kld_loss
        loss = torch.mean(loss, dim=0)
        return {'loss': loss}

    def sample(self,
               num_samples: int,
               current_device: int, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        z = Gamma(self.prior_alpha, self.prior_beta).sample((num_samples, self.latent_dim))
        z = z.squeeze().to(current_device)

        samples = self.decode(z)
        return samples

    def generate(self, x: Tensor, **kwargs) -> Tensor:
        """
        Given an input image x, returns the reconstructed image
        :param x: (Tensor) [B x C x H x W]
        :return: (Tensor) [B x C x H x W]
        """
        return self.forward(x)[0]
def init_(m):
    """Initialize a single layer in place.

    Linear/Conv2d weights get an orthogonal init; BatchNorm weights are set
    to 1. Biases (when present) are zeroed in both cases. Any other module
    type is left untouched.
    """
    is_affine = isinstance(m, (nn.Linear, nn.Conv2d))
    is_norm = isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d))
    if is_affine:
        init.orthogonal_(m.weight)
    elif is_norm:
        m.weight.data.fill_(1)
    if (is_affine or is_norm) and m.bias is not None:
        m.bias.data.fill_(0)
| 8,650 | 33.883065 | 117 | py |
PyTorch-VAE | PyTorch-VAE-master/models/swae.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from torch import distributions as dist
from .types_ import *
class SWAE(BaseVAE):
    # Sliced-Wasserstein Autoencoder: a *deterministic* encoder/decoder pair
    # trained with L1+L2 reconstruction plus the sliced Wasserstein distance
    # between the aggregate latent distribution and a standard-normal prior.

    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dims: List = None,
                 reg_weight: int = 100,
                 wasserstein_deg: float= 2.,
                 num_projections: int = 50,
                 projection_dist: str = 'normal',
                 **kwargs) -> None:
        """
        :param in_channels: channels of the input images
        :param latent_dim: dimensionality of the latent space
        :param hidden_dims: conv channel widths (reversed in-place for the decoder)
        :param reg_weight: weight of the SWD regularizer (rescaled per batch in loss_function)
        :param wasserstein_deg: exponent p of the p-Wasserstein distance
        :param num_projections: number of random 1-D projections S
        :param projection_dist: 'normal' or 'cauchy' projection sampling
        """
        super(SWAE, self).__init__()

        self.latent_dim = latent_dim
        self.reg_weight = reg_weight
        self.p = wasserstein_deg
        self.num_projections = num_projections
        self.proj_dist = projection_dist

        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]

        # Build Encoder
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size= 3, stride= 2, padding  = 1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim

        self.encoder = nn.Sequential(*modules)
        # Deterministic latent head (no mu/var split — SWAE has no sampling).
        self.fc_z = nn.Linear(hidden_dims[-1]*4, latent_dim)

        # Build Decoder
        modules = []

        self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)

        hidden_dims.reverse()

        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride = 2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )

        self.decoder = nn.Sequential(*modules)

        self.final_layer = nn.Sequential(
                            nn.ConvTranspose2d(hidden_dims[-1],
                                               hidden_dims[-1],
                                               kernel_size=3,
                                               stride=2,
                                               padding=1,
                                               output_padding=1),
                            nn.BatchNorm2d(hidden_dims[-1]),
                            nn.LeakyReLU(),
                            nn.Conv2d(hidden_dims[-1], out_channels= 3,
                                      kernel_size= 3, padding= 1),
                            nn.Tanh())

    def encode(self, input: Tensor) -> Tensor:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (Tensor) Latent code [N x D]
        """
        result = self.encoder(input)
        result = torch.flatten(result, start_dim=1)

        z = self.fc_z(result)
        return z

    def decode(self, z: Tensor) -> Tensor:
        # Map latent codes [B x D] back to image space [B x 3 x H x W].
        result = self.decoder_input(z)
        result = result.view(-1, 512, 2, 2)
        result = self.decoder(result)
        result = self.final_layer(result)
        return result

    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        # Deterministic autoencode; returns [reconstruction, input, z].
        z = self.encode(input)
        return  [self.decode(z), input, z]

    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        # args = [reconstruction, input, z] as produced by `forward`.
        recons = args[0]
        input = args[1]
        z = args[2]

        batch_size = input.size(0)
        # Unbiased-estimator correction for the pairwise statistic.
        bias_corr = batch_size *  (batch_size - 1)
        reg_weight = self.reg_weight / bias_corr

        recons_loss_l2 = F.mse_loss(recons, input)
        recons_loss_l1 = F.l1_loss(recons, input)

        swd_loss = self.compute_swd(z, self.p, reg_weight)

        loss = recons_loss_l2 + recons_loss_l1 + swd_loss
        return {'loss': loss, 'Reconstruction_Loss':(recons_loss_l2 + recons_loss_l1), 'SWD': swd_loss}

    def get_random_projections(self, latent_dim: int, num_samples: int) -> Tensor:
        """
        Returns random samples from latent distribution's (Gaussian)
        unit sphere for projecting the encoded samples and the
        distribution samples.

        :param latent_dim: (Int) Dimensionality of the latent space (D)
        :param num_samples: (Int) Number of samples required (S)
        :return: Random projections from the latent unit sphere
        """
        if self.proj_dist == 'normal':
            rand_samples = torch.randn(num_samples, latent_dim)
        elif self.proj_dist == 'cauchy':
            rand_samples = dist.Cauchy(torch.tensor([0.0]),
                                       torch.tensor([1.0])).sample((num_samples, latent_dim)).squeeze()
        else:
            raise ValueError('Unknown projection distribution.')

        # Normalize each sample onto the unit sphere.
        rand_proj = rand_samples / rand_samples.norm(dim=1).view(-1,1)
        return rand_proj # [S x D]

    def compute_swd(self,
                    z: Tensor,
                    p: float,
                    reg_weight: float) -> Tensor:
        """
        Computes the Sliced Wasserstein Distance (SWD) - which consists of
        randomly projecting the encoded and prior vectors and computing
        their Wasserstein distance along those projections.

        :param z: Latent samples # [N  x D]
        :param p: Value for the p^th Wasserstein distance
        :param reg_weight:
        :return:
        """
        prior_z = torch.randn_like(z) # [N x D]
        device = z.device

        proj_matrix = self.get_random_projections(self.latent_dim,
                                                  num_samples=self.num_projections).transpose(0,1).to(device)

        latent_projections = z.matmul(proj_matrix) # [N x S]
        prior_projections = prior_z.matmul(proj_matrix) # [N x S]

        # The Wasserstein distance is computed by sorting the two projections
        # across the batches and computing their element-wise l2 distance
        w_dist = torch.sort(latent_projections.t(), dim=1)[0] - \
                 torch.sort(prior_projections.t(), dim=1)[0]
        w_dist = w_dist.pow(p)
        return reg_weight * w_dist.mean()

    def sample(self,
               num_samples:int,
               current_device: int, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        z = torch.randn(num_samples,
                        self.latent_dim)

        z = z.to(current_device)

        samples = self.decode(z)
        return samples

    def generate(self, x: Tensor, **kwargs) -> Tensor:
        """
        Given an input image x, returns the reconstructed image
        :param x: (Tensor) [B x C x H x W]
        :return: (Tensor) [B x C x H x W]
        """
        return self.forward(x)[0]
| 7,340 | 34.463768 | 109 | py |
PyTorch-VAE | PyTorch-VAE-master/models/cat_vae.py | import torch
import numpy as np
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
class CategoricalVAE(BaseVAE):
    # Gumbel-Softmax (categorical) VAE: each of `latent_dim` latent factors is
    # a relaxed one-hot sample over `categorical_dim` classes.

    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 categorical_dim: int = 40, # Num classes
                 hidden_dims: List = None,
                 temperature: float = 0.5,
                 anneal_rate: float = 3e-5,
                 anneal_interval: int = 100, # every 100 batches
                 alpha: float = 30.,
                 **kwargs) -> None:
        super(CategoricalVAE, self).__init__()

        self.latent_dim = latent_dim
        self.categorical_dim = categorical_dim
        self.temp = temperature          # current Gumbel-Softmax temperature
        self.min_temp = temperature      # floor for the annealing schedule
        self.anneal_rate = anneal_rate
        self.anneal_interval = anneal_interval
        self.alpha = alpha               # weight of the reconstruction term

        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]

        # Build Encoder
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size= 3, stride= 2, padding  = 1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim

        self.encoder = nn.Sequential(*modules)
        # One logit vector of size Q per latent factor: D * Q outputs.
        self.fc_z = nn.Linear(hidden_dims[-1]*4,
                               self.latent_dim * self.categorical_dim)

        # Build Decoder
        modules = []

        self.decoder_input = nn.Linear(self.latent_dim * self.categorical_dim
                                       , hidden_dims[-1] * 4)

        hidden_dims.reverse()

        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride = 2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )

        self.decoder = nn.Sequential(*modules)

        self.final_layer = nn.Sequential(
                            nn.ConvTranspose2d(hidden_dims[-1],
                                               hidden_dims[-1],
                                               kernel_size=3,
                                               stride=2,
                                               padding=1,
                                               output_padding=1),
                            nn.BatchNorm2d(hidden_dims[-1]),
                            nn.LeakyReLU(),
                            nn.Conv2d(hidden_dims[-1], out_channels= 3,
                                      kernel_size= 3, padding= 1),
                            nn.Tanh())
        # Uniform categorical; built here but `sample` currently draws with
        # numpy instead (see the commented line there).
        self.sampling_dist = torch.distributions.OneHotCategorical(1. / categorical_dim * torch.ones((self.categorical_dim, 1)))

    def encode(self, input: Tensor) -> List[Tensor]:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [B x C x H x W]
        :return: (Tensor) Latent code [B x D x Q]
        """
        result = self.encoder(input)
        result = torch.flatten(result, start_dim=1)

        z = self.fc_z(result)
        z = z.view(-1, self.latent_dim, self.categorical_dim)
        return [z]

    def decode(self, z: Tensor) -> Tensor:
        """
        Maps the given latent codes
        onto the image space.
        :param z: (Tensor) [B x D x Q]
        :return: (Tensor) [B x C x H x W]
        """
        result = self.decoder_input(z)
        result = result.view(-1, 512, 2, 2)
        result = self.decoder(result)
        result = self.final_layer(result)
        return result

    def reparameterize(self, z: Tensor, eps:float = 1e-7) -> Tensor:
        """
        Gumbel-softmax trick to sample from Categorical Distribution
        :param z: (Tensor) Latent Codes [B x D x Q]
        :return: (Tensor) [B x D]
        """
        # Sample from Gumbel(0, 1) via inverse-CDF of two uniforms.
        u = torch.rand_like(z)
        g = - torch.log(- torch.log(u + eps) + eps)

        # Gumbel-Softmax sample (relaxed one-hot, sharper as temp -> 0)
        s = F.softmax((z + g) / self.temp, dim=-1)
        s = s.view(-1, self.latent_dim * self.categorical_dim)
        return s


    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        # Returns [reconstruction, input, logits q].
        q = self.encode(input)[0]
        z = self.reparameterize(q)
        return  [self.decode(z), input, q]

    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        """
        Computes the VAE loss function.
        KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
        :param args:
        :param kwargs:
        :return:
        """
        recons = args[0]
        input = args[1]
        q = args[2]

        q_p = F.softmax(q, dim=-1) # Convert the categorical codes into probabilities

        kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset
        batch_idx = kwargs['batch_idx']

        # Anneal the temperature at regular intervals
        # (note: batch_idx restarts every epoch, so the schedule resets too)
        if batch_idx % self.anneal_interval == 0 and self.training:
            self.temp = np.maximum(self.temp * np.exp(- self.anneal_rate * batch_idx),
                                   self.min_temp)

        recons_loss =F.mse_loss(recons, input, reduction='mean')

        # KL divergence between gumbel-softmax distribution
        eps = 1e-7

        # Entropy of the logits
        h1 = q_p * torch.log(q_p + eps)

        # Cross entropy with the categorical distribution
        h2 = q_p * np.log(1. / self.categorical_dim + eps)
        kld_loss = torch.mean(torch.sum(h1 - h2, dim =(1,2)), dim=0)

        # kld_weight = 1.2
        loss = self.alpha * recons_loss + kld_weight * kld_loss
        return {'loss': loss, 'Reconstruction_Loss':recons_loss, 'KLD':-kld_loss}

    def sample(self,
               num_samples:int,
               current_device: int, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        # [S x D x Q]
        # Draw one-hot class assignments uniformly for every latent factor.
        M = num_samples * self.latent_dim
        np_y = np.zeros((M, self.categorical_dim), dtype=np.float32)
        np_y[range(M), np.random.choice(self.categorical_dim, M)] = 1
        np_y = np.reshape(np_y, [M // self.latent_dim, self.latent_dim, self.categorical_dim])
        z = torch.from_numpy(np_y)

        # z = self.sampling_dist.sample((num_samples * self.latent_dim, ))
        z = z.view(num_samples, self.latent_dim * self.categorical_dim).to(current_device)
        samples = self.decode(z)
        return samples
def generate(self, x: Tensor, **kwargs) -> Tensor:
"""
Given an input image x, returns the reconstructed image
:param x: (Tensor) [B x C x H x W]
:return: (Tensor) [B x C x H x W]
"""
return self.forward(x)[0] | 7,531 | 35.038278 | 128 | py |
PyTorch-VAE | PyTorch-VAE-master/models/dip_vae.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
class DIPVAE(BaseVAE):
    """
    Disentangled Inferred Prior VAE (DIP-VAE-II): a vanilla VAE whose loss
    adds a penalty pushing the covariance of the inferred latent code
    towards the identity matrix.
    """

    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dims: List = None,
                 lambda_diag: float = 10.,
                 lambda_offdiag: float = 5.,
                 **kwargs) -> None:
        """
        :param in_channels: channels of the input images
        :param latent_dim: dimensionality of the latent space
        :param hidden_dims: conv channel widths (reversed for the decoder)
        :param lambda_diag: weight on the (diagonal - 1)^2 penalty
        :param lambda_offdiag: weight on the off-diagonal^2 penalty
        """
        super(DIPVAE, self).__init__()

        self.latent_dim = latent_dim
        self.lambda_diag = lambda_diag
        self.lambda_offdiag = lambda_offdiag

        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]

        # Build Encoder
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size=3, stride=2, padding=1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim

        self.encoder = nn.Sequential(*modules)
        self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)
        self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)

        # Build Decoder
        modules = []

        self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)

        hidden_dims.reverse()

        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )

        self.decoder = nn.Sequential(*modules)

        self.final_layer = nn.Sequential(
            nn.ConvTranspose2d(hidden_dims[-1],
                               hidden_dims[-1],
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1),
            nn.BatchNorm2d(hidden_dims[-1]),
            nn.LeakyReLU(),
            nn.Conv2d(hidden_dims[-1], out_channels=3,
                      kernel_size=3, padding=1),
            nn.Tanh())

    def encode(self, input: Tensor) -> List[Tensor]:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (List[Tensor]) [mu, log_var], each [N x D]
        """
        result = self.encoder(input)
        result = torch.flatten(result, start_dim=1)

        mu = self.fc_mu(result)
        log_var = self.fc_var(result)

        return [mu, log_var]

    def decode(self, z: Tensor) -> Tensor:
        """
        Maps the given latent codes
        onto the image space.
        :param z: (Tensor) [B x D]
        :return: (Tensor) [B x C x H x W]
        """
        result = self.decoder_input(z)
        result = result.view(-1, 512, 2, 2)
        result = self.decoder(result)
        result = self.final_layer(result)
        return result

    def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
        """
        Reparameterization trick to sample from N(mu, var) from
        N(0,1).
        :param mu: (Tensor) Mean of the latent Gaussian [B x D]
        :param logvar: (Tensor) Log-variance of the latent Gaussian [B x D]
        :return: (Tensor) [B x D]
        """
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps * std + mu

    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        """Encode, sample, decode; returns [reconstruction, input, mu, log_var]."""
        mu, log_var = self.encode(input)
        z = self.reparameterize(mu, log_var)
        return [self.decode(z), input, mu, log_var]

    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        """
        VAE loss (sum-reduced reconstruction + KL) plus the DIP regularizer.

        :param args: [reconstruction, input, mu, log_var]
        :param kwargs: must contain 'M_N', the minibatch KL weight
        :return: dict with 'loss', 'Reconstruction_Loss', 'KLD', 'DIP_Loss'
        """
        recons = args[0]
        input = args[1]
        mu = args[2]
        log_var = args[3]

        kld_weight = kwargs['M_N']  # Account for the minibatch samples from the dataset
        recons_loss = F.mse_loss(recons, input, reduction='sum')

        kld_loss = torch.sum(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)

        # DIP regularizer: penalize Cov_q(z) for straying from the identity.
        centered_mu = mu - mu.mean(dim=1, keepdim=True)  # [B x D]
        cov_mu = centered_mu.t().matmul(centered_mu).squeeze()  # [D X D]

        # DIP-VAE-II: Cov_q(z) = Cov(mu) + E[diag(sigma^2)].
        # Fix: the previous expression took torch.diagonal of the [B x D]
        # log-variance matrix (a length-min(B, D) vector, then averaged to a
        # scalar) and exponentiated 2*log_var (i.e. sigma^4). diag_embed
        # builds the [B x D x D] diagonal covariance matrices, and
        # exp(log_var) is the variance (reparameterize uses
        # std = exp(0.5 * log_var)).
        cov_z = cov_mu + torch.diag_embed(log_var.exp()).mean(dim=0)  # [D x D]
        # For DIP-VAE-I use: cov_z = cov_mu

        cov_diag = torch.diag(cov_z)  # [D]
        cov_offdiag = cov_z - torch.diag(cov_diag)  # [D x D]
        dip_loss = self.lambda_offdiag * torch.sum(cov_offdiag ** 2) + \
                   self.lambda_diag * torch.sum((cov_diag - 1) ** 2)

        loss = recons_loss + kld_weight * kld_loss + dip_loss
        return {'loss': loss,
                'Reconstruction_Loss': recons_loss,
                'KLD': -kld_loss,
                'DIP_Loss': dip_loss}

    def sample(self,
               num_samples: int,
               current_device: int, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        z = torch.randn(num_samples,
                        self.latent_dim)

        z = z.to(current_device)

        samples = self.decode(z)
        return samples

    def generate(self, x: Tensor, **kwargs) -> Tensor:
        """
        Given an input image x, returns the reconstructed image
        :param x: (Tensor) [B x C x H x W]
        :return: (Tensor) [B x C x H x W]
        """
        return self.forward(x)[0]
PyTorch-VAE | PyTorch-VAE-master/models/wae_mmd.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
class WAE_MMD(BaseVAE):
    # Wasserstein Autoencoder with an MMD penalty: deterministic
    # encoder/decoder trained with reconstruction loss plus the Maximum Mean
    # Discrepancy between the aggregate latent code and a Gaussian prior.

    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dims: List = None,
                 reg_weight: int = 100,
                 kernel_type: str = 'imq',
                 latent_var: float = 2.,
                 **kwargs) -> None:
        """
        :param in_channels: channels of the input images
        :param latent_dim: dimensionality of the latent space
        :param hidden_dims: conv channel widths (reversed in-place for the decoder)
        :param reg_weight: weight of the MMD term (rescaled per batch in loss_function)
        :param kernel_type: 'rbf' or 'imq' (inverse multi-quadratic)
        :param latent_var: prior variance used inside the kernel bandwidth
        """
        super(WAE_MMD, self).__init__()

        self.latent_dim = latent_dim
        self.reg_weight = reg_weight
        self.kernel_type = kernel_type
        self.z_var = latent_var

        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]

        # Build Encoder
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size= 3, stride= 2, padding  = 1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim

        self.encoder = nn.Sequential(*modules)
        # Deterministic latent head (no mu/var split).
        self.fc_z = nn.Linear(hidden_dims[-1]*4, latent_dim)

        # Build Decoder
        modules = []

        self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)

        hidden_dims.reverse()

        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride = 2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )

        self.decoder = nn.Sequential(*modules)

        self.final_layer = nn.Sequential(
                            nn.ConvTranspose2d(hidden_dims[-1],
                                               hidden_dims[-1],
                                               kernel_size=3,
                                               stride=2,
                                               padding=1,
                                               output_padding=1),
                            nn.BatchNorm2d(hidden_dims[-1]),
                            nn.LeakyReLU(),
                            nn.Conv2d(hidden_dims[-1], out_channels= 3,
                                      kernel_size= 3, padding= 1),
                            nn.Tanh())

    def encode(self, input: Tensor) -> Tensor:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (Tensor) Latent code [N x D]
        """
        result = self.encoder(input)
        result = torch.flatten(result, start_dim=1)

        z = self.fc_z(result)
        return z

    def decode(self, z: Tensor) -> Tensor:
        # Map latent codes [B x D] back to image space [B x 3 x H x W].
        result = self.decoder_input(z)
        result = result.view(-1, 512, 2, 2)
        result = self.decoder(result)
        result = self.final_layer(result)
        return result

    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        # Deterministic autoencode; returns [reconstruction, input, z].
        z = self.encode(input)
        return  [self.decode(z), input, z]

    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        # args = [reconstruction, input, z] as produced by `forward`.
        recons = args[0]
        input = args[1]
        z = args[2]

        batch_size = input.size(0)
        # Unbiased-estimator correction for the pairwise kernel sums.
        bias_corr = batch_size *  (batch_size - 1)
        reg_weight = self.reg_weight / bias_corr

        recons_loss =F.mse_loss(recons, input)

        mmd_loss = self.compute_mmd(z, reg_weight)

        loss = recons_loss + mmd_loss
        return {'loss': loss, 'Reconstruction_Loss':recons_loss, 'MMD': mmd_loss}

    def compute_kernel(self,
                       x1: Tensor,
                       x2: Tensor) -> Tensor:
        # Convert the tensors into row and column vectors
        D = x1.size(1)
        N = x1.size(0)

        x1 = x1.unsqueeze(-2) # Make it into a column tensor
        x2 = x2.unsqueeze(-3) # Make it into a row tensor

        """
        Usually the below lines are not required, especially in our case,
        but this is useful when x1 and x2 have different sizes
        along the 0th dimension.
        """
        x1 = x1.expand(N, N, D)
        x2 = x2.expand(N, N, D)

        if self.kernel_type == 'rbf':
            result = self.compute_rbf(x1, x2)
        elif self.kernel_type == 'imq':
            result = self.compute_inv_mult_quad(x1, x2)
        else:
            raise ValueError('Undefined kernel type.')

        return result


    def compute_rbf(self,
                    x1: Tensor,
                    x2: Tensor,
                    eps: float = 1e-7) -> Tensor:
        """
        Computes the RBF Kernel between x1 and x2.
        :param x1: (Tensor)
        :param x2: (Tensor)
        :param eps: (Float)
        :return: (Tensor) full [N x N] kernel matrix
        """
        z_dim = x2.size(-1)
        sigma = 2. * z_dim * self.z_var

        result = torch.exp(-((x1 - x2).pow(2).mean(-1) / sigma))
        return result

    def compute_inv_mult_quad(self,
                               x1: Tensor,
                               x2: Tensor,
                               eps: float = 1e-7) -> Tensor:
        """
        Computes the Inverse Multi-Quadratics Kernel between x1 and x2,
        given by

                k(x_1, x_2) = \sum \frac{C}{C + \|x_1 - x_2 \|^2}
        :param x1: (Tensor)
        :param x2: (Tensor)
        :param eps: (Float)
        :return: (Tensor) scalar sum of off-diagonal kernel values

        NOTE(review): unlike compute_rbf (which returns the full matrix,
        diagonal included), this returns a diagonal-excluded *sum*, so the
        subsequent .mean() in compute_mmd scales the two kernels
        differently — confirm intended.
        """
        z_dim = x2.size(-1)
        C = 2 * z_dim * self.z_var
        kernel = C / (eps + C + (x1 - x2).pow(2).sum(dim = -1))

        # Exclude diagonal elements
        result = kernel.sum() - kernel.diag().sum()

        return result

    def compute_mmd(self, z: Tensor, reg_weight: float) -> Tensor:
        # Sample from prior (Gaussian) distribution
        prior_z = torch.randn_like(z)

        prior_z__kernel = self.compute_kernel(prior_z, prior_z)
        z__kernel = self.compute_kernel(z, z)
        priorz_z__kernel = self.compute_kernel(prior_z, z)

        # Standard (biased) MMD^2 estimate: E[k(p,p)] + E[k(q,q)] - 2 E[k(p,q)].
        mmd = reg_weight * prior_z__kernel.mean() + \
              reg_weight * z__kernel.mean() - \
              2 * reg_weight * priorz_z__kernel.mean()
        return mmd

    def sample(self,
               num_samples:int,
               current_device: int, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        z = torch.randn(num_samples,
                        self.latent_dim)

        z = z.to(current_device)

        samples = self.decode(z)
        return samples
def generate(self, x: Tensor, **kwargs) -> Tensor:
"""
Given an input image x, returns the reconstructed image
:param x: (Tensor) [B x C x H x W]
:return: (Tensor) [B x C x H x W]
"""
return self.forward(x)[0] | 7,427 | 31.155844 | 81 | py |
PyTorch-VAE | PyTorch-VAE-master/models/mssim_vae.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
from math import exp
class MSSIMVAE(BaseVAE):
    """
    Vanilla VAE whose reconstruction term is the differentiable multi-scale
    SSIM loss (see the MSSIM module) instead of pixel-wise MSE.
    """

    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dims: List = None,
                 window_size: int = 11,
                 size_average: bool = True,
                 **kwargs) -> None:
        """
        :param in_channels: channels of the input images
        :param latent_dim: dimensionality of the latent space
        :param hidden_dims: conv channel widths (reversed for the decoder)
        :param window_size: Gaussian window size for the MS-SSIM loss
        :param size_average: average the SSIM map to a scalar
        """
        super(MSSIMVAE, self).__init__()

        self.latent_dim = latent_dim
        self.in_channels = in_channels

        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]

        # Build Encoder
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size=3, stride=2, padding=1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim

        self.encoder = nn.Sequential(*modules)
        self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)
        self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)

        # Build Decoder
        modules = []

        self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)

        hidden_dims.reverse()

        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )

        self.decoder = nn.Sequential(*modules)

        self.final_layer = nn.Sequential(
            nn.ConvTranspose2d(hidden_dims[-1],
                               hidden_dims[-1],
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1),
            nn.BatchNorm2d(hidden_dims[-1]),
            nn.LeakyReLU(),
            nn.Conv2d(hidden_dims[-1], out_channels=3,
                      kernel_size=3, padding=1),
            nn.Tanh())

        self.mssim_loss = MSSIM(self.in_channels,
                                window_size,
                                size_average)

    def encode(self, input: Tensor) -> List[Tensor]:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (List[Tensor]) [mu, log_var], each [N x D]
        """
        result = self.encoder(input)
        result = torch.flatten(result, start_dim=1)

        mu = self.fc_mu(result)
        log_var = self.fc_var(result)

        return [mu, log_var]

    def decode(self, z: Tensor) -> Tensor:
        """
        Maps the given latent codes
        onto the image space.
        :param z: (Tensor) [B x D]
        :return: (Tensor) [B x C x H x W]
        """
        result = self.decoder_input(z)
        result = result.view(-1, 512, 2, 2)
        result = self.decoder(result)
        result = self.final_layer(result)
        return result

    def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
        """
        Reparameterization trick to sample from N(mu, var) from
        N(0,1).
        :param mu: (Tensor) Mean of the latent Gaussian [B x D]
        :param logvar: (Tensor) Log-variance of the latent Gaussian [B x D]
        :return: (Tensor) [B x D]
        """
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps * std + mu

    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        """Encode, sample, decode; returns [reconstruction, input, mu, log_var]."""
        mu, log_var = self.encode(input)
        z = self.reparameterize(mu, log_var)
        return [self.decode(z), input, mu, log_var]

    def loss_function(self,
                      *args: Any,
                      **kwargs) -> dict:
        """
        Computes the VAE loss function with an MS-SSIM reconstruction term.
        KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
        :param args: [reconstruction, input, mu, log_var]
        :param kwargs: must contain 'M_N', the minibatch KL weight
        :return: dict with 'loss', 'Reconstruction_Loss' and 'KLD'
        """
        recons = args[0]
        input = args[1]
        mu = args[2]
        log_var = args[3]

        kld_weight = kwargs['M_N']  # Account for the minibatch samples from the dataset
        recons_loss = self.mssim_loss(recons, input)

        kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)

        loss = recons_loss + kld_weight * kld_loss
        return {'loss': loss, 'Reconstruction_Loss': recons_loss, 'KLD': -kld_loss}

    def sample(self,
               num_samples: int,
               current_device: int, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        z = torch.randn(num_samples,
                        self.latent_dim)

        # Fix: use .to() like every other model in this file; .cuda() raises
        # on CPU devices and ignores non-CUDA device arguments.
        z = z.to(current_device)

        samples = self.decode(z)
        return samples

    def generate(self, x: Tensor, **kwargs) -> Tensor:
        """
        Given an input image x, returns the reconstructed image
        :param x: (Tensor) [B x C x H x W]
        :return: (Tensor) [B x C x H x W]
        """
        return self.forward(x)[0]
class MSSIM(nn.Module):

    def __init__(self,
                 in_channels: int = 3,
                 window_size: int = 11,
                 size_average: bool = True) -> None:
        """
        Computes the differentiable MS-SSIM loss
        Reference:
        [1] https://github.com/jorge-pessoa/pytorch-msssim/blob/dev/pytorch_msssim/__init__.py
            (MIT License)

        :param in_channels: (Int) number of image channels
        :param window_size: (Int) size of the Gaussian window
        :param size_average: (Bool) average the SSIM map to a scalar
        """
        super(MSSIM, self).__init__()
        self.in_channels = in_channels
        self.window_size = window_size
        self.size_average = size_average

    def gaussian_window(self, window_size: int, sigma: float) -> Tensor:
        """Return a 1-D Gaussian kernel of length `window_size`, normalized to sum to 1.

        Fix: the exponent must be negative — exp(-(x - c)^2 / (2 sigma^2)).
        Without the minus sign the window is inverted (largest weights at the
        edges of the window rather than its center), corrupting every SSIM
        statistic computed with it.
        """
        kernel = torch.tensor([exp(-(x - window_size // 2) ** 2 / (2 * sigma ** 2))
                               for x in range(window_size)])
        return kernel / kernel.sum()

    def create_window(self, window_size, in_channels):
        """Build the [C x 1 x W x W] separable Gaussian window used for depthwise conv."""
        _1D_window = self.gaussian_window(window_size, 1.5).unsqueeze(1)
        _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
        window = _2D_window.expand(in_channels, 1, window_size, window_size).contiguous()
        return window

    def ssim(self,
             img1: Tensor,
             img2: Tensor,
             window_size: int,
             in_channel: int,
             size_average: bool) -> Tensor:
        """Single-scale SSIM; returns (ssim, contrast-sensitivity)."""

        device = img1.device
        window = self.create_window(window_size, in_channel).to(device)
        # Depthwise (groups=C) Gaussian filtering gives local means.
        mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=in_channel)
        mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=in_channel)

        mu1_sq = mu1.pow(2)
        mu2_sq = mu2.pow(2)
        mu1_mu2 = mu1 * mu2

        # Local (co)variances via E[x^2] - E[x]^2.
        sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=in_channel) - mu1_sq
        sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=in_channel) - mu2_sq
        sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=in_channel) - mu1_mu2

        img_range = 1.0  # img1.max() - img1.min() # Dynamic range
        C1 = (0.01 * img_range) ** 2
        C2 = (0.03 * img_range) ** 2

        v1 = 2.0 * sigma12 + C2
        v2 = sigma1_sq + sigma2_sq + C2
        cs = torch.mean(v1 / v2)  # contrast sensitivity

        ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)

        if size_average:
            ret = ssim_map.mean()
        else:
            ret = ssim_map.mean(1).mean(1).mean(1)
        return ret, cs

    def forward(self, img1: Tensor, img2: Tensor) -> Tensor:
        """Return 1 - MS-SSIM(img1, img2) over 5 dyadic scales (0 for identical images)."""
        device = img1.device
        weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device)
        levels = weights.size()[0]
        mssim = []
        mcs = []

        for _ in range(levels):
            sim, cs = self.ssim(img1, img2,
                                self.window_size,
                                self.in_channels,
                                self.size_average)
            mssim.append(sim)
            mcs.append(cs)

            # Halve the resolution for the next scale.
            img1 = F.avg_pool2d(img1, (2, 2))
            img2 = F.avg_pool2d(img2, (2, 2))

        mssim = torch.stack(mssim)
        mcs = torch.stack(mcs)

        # # Normalize (to avoid NaNs during training unstable models, not compliant with original definition)
        # if normalize:
        #     mssim = (mssim + 1) / 2
        #     mcs = (mcs + 1) / 2

        pow1 = mcs ** weights
        pow2 = mssim ** weights

        output = torch.prod(pow1[:-1] * pow2[-1])
        return 1 - output
| 9,644 | 33.081272 | 109 | py |
PyTorch-VAE | PyTorch-VAE-master/models/betatc_vae.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
import math
class BetaTCVAE(BaseVAE):
    """
    beta-TC-VAE: decomposes the ELBO's KL term into mutual-information,
    total-correlation, and dimension-wise-KL components, each weighted
    separately (alpha, beta, gamma). The dimension-wise KL weight is
    annealed over ``anneal_steps`` training iterations.
    """

    num_iter = 0  # Global static variable to keep track of iterations
                  # (shared across all instances of this class)

    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dims: List = None,
                 anneal_steps: int = 200,
                 alpha: float = 1.,
                 beta: float = 6.,
                 gamma: float = 1.,
                 **kwargs) -> None:
        """
        :param in_channels: number of input image channels
        :param latent_dim: dimensionality of the latent code
        :param hidden_dims: channel widths of the conv stages (default [32]*4)
        :param anneal_steps: iterations over which the dim-wise KL weight ramps to 1
        :param alpha: weight on the mutual-information term
        :param beta: weight on the total-correlation term
        :param gamma: weight on the dimension-wise KL term
        """
        super(BetaTCVAE, self).__init__()

        self.latent_dim = latent_dim
        self.anneal_steps = anneal_steps

        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma

        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 32, 32, 32]

        # Build Encoder: each 4x4 stride-2 conv halves the spatial size.
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size=4, stride=2, padding=1),
                    nn.LeakyReLU())
            )
            in_channels = h_dim

        self.encoder = nn.Sequential(*modules)

        # *16 == 4x4 spatial map after four stride-2 convs on a 64x64 input
        # — assumes 64x64 inputs; TODO confirm against the data loader.
        self.fc = nn.Linear(hidden_dims[-1] * 16, 256)
        self.fc_mu = nn.Linear(256, latent_dim)
        self.fc_var = nn.Linear(256, latent_dim)

        # Build Decoder
        modules = []

        # 256 * 2 == 512 == 32 channels x 4 x 4, matching decode()'s view().
        self.decoder_input = nn.Linear(latent_dim, 256 * 2)

        hidden_dims.reverse()  # NOTE: mutates the caller's list in place

        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       output_padding=1),
                    nn.LeakyReLU())
            )

        self.decoder = nn.Sequential(*modules)

        self.final_layer = nn.Sequential(
            nn.ConvTranspose2d(hidden_dims[-1],
                               hidden_dims[-1],
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1),
            nn.LeakyReLU(),
            nn.Conv2d(hidden_dims[-1], out_channels=3,
                      kernel_size=3, padding=1),
            nn.Tanh())  # outputs in [-1, 1]

    def encode(self, input: Tensor) -> List[Tensor]:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (Tensor) List of latent codes
        """
        result = self.encoder(input)
        result = torch.flatten(result, start_dim=1)
        result = self.fc(result)

        # Split the result into mu and var components
        # of the latent Gaussian distribution
        mu = self.fc_mu(result)
        log_var = self.fc_var(result)

        return [mu, log_var]

    def decode(self, z: Tensor) -> Tensor:
        """
        Maps the given latent codes
        onto the image space.
        :param z: (Tensor) [B x D]
        :return: (Tensor) [B x C x H x W]
        """
        result = self.decoder_input(z)
        result = result.view(-1, 32, 4, 4)  # 32*4*4 == 512 decoder_input features
        result = self.decoder(result)
        result = self.final_layer(result)
        return result

    def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
        """
        Reparameterization trick to sample from N(mu, var) from
        N(0,1).
        :param mu: (Tensor) Mean of the latent Gaussian [B x D]
        :param logvar: (Tensor) Log-variance of the latent Gaussian [B x D]
        :return: (Tensor) [B x D]
        """
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps * std + mu

    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        # Returns [reconstruction, input, mu, log_var, z]; z is needed by
        # loss_function for the density estimates.
        mu, log_var = self.encode(input)
        z = self.reparameterize(mu, log_var)
        return [self.decode(z), input, mu, log_var, z]

    def log_density_gaussian(self, x: Tensor, mu: Tensor, logvar: Tensor):
        """
        Computes the log pdf of the Gaussian with parameters mu and logvar at x
        :param x: (Tensor) Point at which Gaussian PDF is to be evaluated
        :param mu: (Tensor) Mean of the Gaussian distribution
        :param logvar: (Tensor) Log variance of the Gaussian distribution
        :return:
        """
        norm = - 0.5 * (math.log(2 * math.pi) + logvar)
        log_density = norm - 0.5 * ((x - mu) ** 2 * torch.exp(-logvar))
        return log_density

    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        """
        Computes the beta-TC-VAE loss: reconstruction + alpha*MI + beta*TC
        + (annealed) gamma*dim-wise-KL. Expects kwargs['M_N'] = batch/dataset
        ratio (used only to recover the dataset size here).
        :param args: outputs of forward(): [recons, input, mu, log_var, z]
        :param kwargs:
        :return:
        """
        recons = args[0]
        input = args[1]
        mu = args[2]
        log_var = args[3]
        z = args[4]

        # NOTE: the minibatch weight is deliberately hard-coded to 1 here;
        # kwargs['M_N'] is used only for dataset_size below.
        weight = 1  # kwargs['M_N']  # Account for the minibatch samples from the dataset

        recons_loss = F.mse_loss(recons, input, reduction='sum')

        # log q(z|x) for each sample under its own posterior.
        log_q_zx = self.log_density_gaussian(z, mu, log_var).sum(dim=1)

        # log p(z) under the standard-normal prior (mu = logvar = 0).
        zeros = torch.zeros_like(z)
        log_p_z = self.log_density_gaussian(z, zeros, zeros).sum(dim=1)

        # Pairwise matrix: log q(z_i | x_j) per latent dimension [B x B x D].
        batch_size, latent_dim = z.shape
        mat_log_q_z = self.log_density_gaussian(z.view(batch_size, 1, latent_dim),
                                                mu.view(1, batch_size, latent_dim),
                                                log_var.view(1, batch_size, latent_dim))

        # Minibatch stratified sampling: importance weights correct the
        # aggregate-posterior estimate for subsampling the dataset.
        # Reference
        # [1] https://github.com/YannDubs/disentangling-vae/blob/535bbd2e9aeb5a200663a4f82f1d34e084c4ba8d/disvae/utils/math.py#L54
        dataset_size = (1 / kwargs['M_N']) * batch_size  # dataset size
        strat_weight = (dataset_size - batch_size + 1) / (dataset_size * (batch_size - 1))
        importance_weights = torch.Tensor(batch_size, batch_size).fill_(1 / (batch_size - 1)).to(input.device)
        importance_weights.view(-1)[::batch_size] = 1 / dataset_size
        importance_weights.view(-1)[1::batch_size] = strat_weight
        importance_weights[batch_size - 2, 0] = strat_weight
        log_importance_weights = importance_weights.log()

        mat_log_q_z += log_importance_weights.view(batch_size, batch_size, 1)

        # log q(z) (joint) and sum_d log q(z_d) (product of marginals).
        log_q_z = torch.logsumexp(mat_log_q_z.sum(2), dim=1, keepdim=False)
        log_prod_q_z = torch.logsumexp(mat_log_q_z, dim=1, keepdim=False).sum(1)

        mi_loss = (log_q_zx - log_q_z).mean()
        tc_loss = (log_q_z - log_prod_q_z).mean()
        kld_loss = (log_prod_q_z - log_p_z).mean()

        # kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim = 1), dim = 0)

        # Linearly anneal the dim-wise KL weight from 0 to 1 during training.
        if self.training:
            self.num_iter += 1
            anneal_rate = min(0 + 1 * self.num_iter / self.anneal_steps, 1)
        else:
            anneal_rate = 1.

        loss = recons_loss / batch_size + \
               self.alpha * mi_loss + \
               weight * (self.beta * tc_loss +
                         anneal_rate * self.gamma * kld_loss)

        return {'loss': loss,
                'Reconstruction_Loss': recons_loss,
                'KLD': kld_loss,
                'TC_Loss': tc_loss,
                'MI_Loss': mi_loss}

    def sample(self,
               num_samples: int,
               current_device: int, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        z = torch.randn(num_samples,
                        self.latent_dim)

        z = z.to(current_device)

        samples = self.decode(z)
        return samples

    def generate(self, x: Tensor, **kwargs) -> Tensor:
        """
        Given an input image x, returns the reconstructed image
        :param x: (Tensor) [B x C x H x W]
        :return: (Tensor) [B x C x H x W]
        """
        return self.forward(x)[0]
PyTorch-VAE | PyTorch-VAE-master/models/dfcvae.py | import torch
from models import BaseVAE
from torch import nn
from torchvision.models import vgg19_bn
from torch.nn import functional as F
from .types_ import *
class DFCVAE(BaseVAE):
    """
    Deep-feature-consistent VAE: besides the pixel-space reconstruction loss,
    it matches intermediate features of a frozen, pretrained VGG19-BN network
    between the input and the reconstruction.
    """

    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dims: List = None,
                 alpha: float = 1,
                 beta: float = 0.5,
                 **kwargs) -> None:
        """
        :param in_channels: number of input image channels
        :param latent_dim: dimensionality of the latent code
        :param hidden_dims: channel widths of the conv stages (default [32..512])
        :param alpha: weight on the KL term
        :param beta: weight on the (pixel + feature) reconstruction terms
        """
        super(DFCVAE, self).__init__()

        self.latent_dim = latent_dim
        self.alpha = alpha
        self.beta = beta

        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]

        # Build Encoder: stride-2 convs halve the spatial size at each stage.
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size=3, stride=2, padding=1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim

        self.encoder = nn.Sequential(*modules)
        # *4 == 2x2 spatial map after five stride-2 convs — assumes 64x64
        # inputs; TODO confirm against the data loader.
        self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)
        self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)

        # Build Decoder (mirror of the encoder).
        modules = []

        self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)

        hidden_dims.reverse()  # NOTE: mutates the caller's list in place

        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )

        self.decoder = nn.Sequential(*modules)

        self.final_layer = nn.Sequential(
            nn.ConvTranspose2d(hidden_dims[-1],
                               hidden_dims[-1],
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1),
            nn.BatchNorm2d(hidden_dims[-1]),
            nn.LeakyReLU(),
            nn.Conv2d(hidden_dims[-1], out_channels=3,
                      kernel_size=3, padding=1),
            nn.Tanh())  # outputs in [-1, 1]

        # Pretrained VGG19-BN used purely as a fixed feature extractor.
        self.feature_network = vgg19_bn(pretrained=True)

        # Freeze the pretrained feature network
        for param in self.feature_network.parameters():
            param.requires_grad = False

        self.feature_network.eval()

    def encode(self, input: Tensor) -> List[Tensor]:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (Tensor) List of latent codes
        """
        result = self.encoder(input)
        result = torch.flatten(result, start_dim=1)

        # Split the result into mu and var components
        # of the latent Gaussian distribution
        mu = self.fc_mu(result)
        log_var = self.fc_var(result)

        return [mu, log_var]

    def decode(self, z: Tensor) -> Tensor:
        """
        Maps the given latent codes
        onto the image space.
        :param z: (Tensor) [B x D]
        :return: (Tensor) [B x C x H x W]
        """
        result = self.decoder_input(z)
        result = result.view(-1, 512, 2, 2)  # 512*2*2 == decoder_input features
        result = self.decoder(result)
        result = self.final_layer(result)
        return result

    def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
        """
        Reparameterization trick to sample from N(mu, var) from
        N(0,1).
        :param mu: (Tensor) Mean of the latent Gaussian [B x D]
        :param logvar: (Tensor) Log-variance of the latent Gaussian [B x D]
        :return: (Tensor) [B x D]
        """
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps * std + mu

    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        # Returns [recons, input, recons_features, input_features, mu, log_var];
        # the feature lists feed the deep-feature-consistency loss.
        mu, log_var = self.encode(input)
        z = self.reparameterize(mu, log_var)
        recons = self.decode(z)

        recons_features = self.extract_features(recons)
        input_features = self.extract_features(input)

        return [recons, input, recons_features, input_features, mu, log_var]

    def extract_features(self,
                         input: Tensor,
                         feature_layers: List = None) -> List[Tensor]:
        """
        Extracts the features from the pretrained model
        at the layers indicated by feature_layers.
        :param input: (Tensor) [B x C x H x W]
        :param feature_layers: List of string of IDs (module names inside
            vgg19_bn().features); defaults to ['14', '24', '34', '43']
        :return: List of the extracted features
        """
        if feature_layers is None:
            feature_layers = ['14', '24', '34', '43']
        features = []
        result = input
        # Run through VGG's feature stack, capturing the requested layers.
        for (key, module) in self.feature_network.features._modules.items():
            result = module(result)
            if (key in feature_layers):
                features.append(result)

        return features

    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        """
        Computes the DFC-VAE loss:
        beta * (pixel MSE + sum of feature MSEs) + alpha * M_N * KL.
        KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
        :param args: outputs of forward()
        :param kwargs: must contain 'M_N' (minibatch/dataset weight)
        :return:
        """
        recons = args[0]
        input = args[1]
        recons_features = args[2]
        input_features = args[3]
        mu = args[4]
        log_var = args[5]

        kld_weight = kwargs['M_N']  # Account for the minibatch samples from the dataset
        recons_loss = F.mse_loss(recons, input)

        # Deep-feature-consistency term: MSE between VGG features of the
        # reconstruction and of the input, summed over the selected layers.
        feature_loss = 0.0
        for (r, i) in zip(recons_features, input_features):
            feature_loss += F.mse_loss(r, i)

        kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)

        loss = self.beta * (recons_loss + feature_loss) + self.alpha * kld_weight * kld_loss
        return {'loss': loss, 'Reconstruction_Loss': recons_loss, 'KLD': -kld_loss}

    def sample(self,
               num_samples: int,
               current_device: int, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        z = torch.randn(num_samples,
                        self.latent_dim)

        z = z.to(current_device)

        samples = self.decode(z)
        return samples

    def generate(self, x: Tensor, **kwargs) -> Tensor:
        """
        Given an input image x, returns the reconstructed image
        :param x: (Tensor) [B x C x H x W]
        :return: (Tensor) [B x C x H x W]
        """
        return self.forward(x)[0]
PyTorch-VAE | PyTorch-VAE-master/models/fvae.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
class FactorVAE(BaseVAE):
    """
    FactorVAE: a VAE whose loss adds a total-correlation (TC) penalty
    estimated by an auxiliary discriminator via the density-ratio trick.
    Training alternates between two optimizers (see loss_function's
    'optimizer_idx' handling).
    """

    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dims: List = None,
                 gamma: float = 40.,
                 **kwargs) -> None:
        """
        :param in_channels: number of input image channels
        :param latent_dim: dimensionality of the latent code
        :param hidden_dims: channel widths of the conv stages (default [32..512])
        :param gamma: weight on the discriminator-estimated TC term
        """
        super(FactorVAE, self).__init__()

        self.latent_dim = latent_dim
        self.gamma = gamma

        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]

        # Build Encoder: stride-2 convs halve the spatial size at each stage.
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size=3, stride=2, padding=1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim

        self.encoder = nn.Sequential(*modules)
        # *4 == 2x2 spatial map after five stride-2 convs — assumes 64x64
        # inputs; TODO confirm against the data loader.
        self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)
        self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)

        # Build Decoder (mirror of the encoder).
        modules = []

        self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)

        hidden_dims.reverse()  # NOTE: mutates the caller's list in place

        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )

        self.decoder = nn.Sequential(*modules)

        self.final_layer = nn.Sequential(
            nn.ConvTranspose2d(hidden_dims[-1],
                               hidden_dims[-1],
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1),
            nn.BatchNorm2d(hidden_dims[-1]),
            nn.LeakyReLU(),
            nn.Conv2d(hidden_dims[-1], out_channels=3,
                      kernel_size=3, padding=1),
            nn.Tanh())  # outputs in [-1, 1]

        # Discriminator network for the Total Correlation (TC) loss:
        # 2-way classifier distinguishing q(z) samples from permuted
        # (factorized) samples.
        self.discriminator = nn.Sequential(nn.Linear(self.latent_dim, 1000),
                                           nn.BatchNorm1d(1000),
                                           nn.LeakyReLU(0.2),
                                           nn.Linear(1000, 1000),
                                           nn.BatchNorm1d(1000),
                                           nn.LeakyReLU(0.2),
                                           nn.Linear(1000, 1000),
                                           nn.BatchNorm1d(1000),
                                           nn.LeakyReLU(0.2),
                                           nn.Linear(1000, 2))
        # Discriminator output cached during the VAE step (optimizer_idx == 0)
        # and reused in the discriminator step (optimizer_idx == 1).
        self.D_z_reserve = None

    def encode(self, input: Tensor) -> List[Tensor]:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (Tensor) List of latent codes
        """
        result = self.encoder(input)
        result = torch.flatten(result, start_dim=1)

        # Split the result into mu and var components
        # of the latent Gaussian distribution
        mu = self.fc_mu(result)
        log_var = self.fc_var(result)

        return [mu, log_var]

    def decode(self, z: Tensor) -> Tensor:
        """
        Maps the given latent codes
        onto the image space.
        :param z: (Tensor) [B x D]
        :return: (Tensor) [B x C x H x W]
        """
        result = self.decoder_input(z)
        result = result.view(-1, 512, 2, 2)  # 512*2*2 == decoder_input features
        result = self.decoder(result)
        result = self.final_layer(result)
        return result

    def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
        """
        Reparameterization trick to sample from N(mu, var) from
        N(0,1).
        :param mu: (Tensor) Mean of the latent Gaussian [B x D]
        :param logvar: (Tensor) Log-variance of the latent Gaussian [B x D]
        :return: (Tensor) [B x D]
        """
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps * std + mu

    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        # Returns [recons, input, mu, log_var, z]; z feeds the discriminator.
        mu, log_var = self.encode(input)
        z = self.reparameterize(mu, log_var)
        return [self.decode(z), input, mu, log_var, z]

    def permute_latent(self, z: Tensor) -> Tensor:
        """
        Permutes each of the latent codes in the batch independently per
        dimension, producing samples from the product of marginals.
        :param z: [B x D]
        :return: [B x D]
        """
        B, D = z.size()

        # Returns a shuffled inds for each latent code in the batch
        inds = torch.cat([(D * i) + torch.randperm(D) for i in range(B)])
        return z.view(-1)[inds].view(B, D)

    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        """
        Computes the FactorVAE loss. Behaves differently depending on
        kwargs['optimizer_idx']: 0 updates the VAE (recon + KL + gamma*TC),
        1 updates the discriminator (cross-entropy on real vs permuted z).
        KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
        :param args: outputs of forward()
        :param kwargs: must contain 'M_N' and 'optimizer_idx'
        :return:
        """
        recons = args[0]
        input = args[1]
        mu = args[2]
        log_var = args[3]
        z = args[4]

        kld_weight = kwargs['M_N']  # Account for the minibatch samples from the dataset
        optimizer_idx = kwargs['optimizer_idx']

        # Update the VAE
        if optimizer_idx == 0:
            recons_loss = F.mse_loss(recons, input)
            kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)

            # Density-ratio trick: logit difference approximates log(q(z)/prod q(z_d)).
            # NOTE(review): D_z_reserve is cached here and reused in the
            # discriminator branch below — assumes optimizer 0 always runs
            # before optimizer 1 on the same batch; confirm in the trainer.
            self.D_z_reserve = self.discriminator(z)
            vae_tc_loss = (self.D_z_reserve[:, 0] - self.D_z_reserve[:, 1]).mean()

            loss = recons_loss + kld_weight * kld_loss + self.gamma * vae_tc_loss

            # print(f' recons: {recons_loss}, kld: {kld_loss}, VAE_TC_loss: {vae_tc_loss}')
            return {'loss': loss,
                    'Reconstruction_Loss': recons_loss,
                    'KLD': -kld_loss,
                    'VAE_TC_Loss': vae_tc_loss}

        # Update the Discriminator
        elif optimizer_idx == 1:
            device = input.device
            true_labels = torch.ones(input.size(0), dtype=torch.long,
                                     requires_grad=False).to(device)
            false_labels = torch.zeros(input.size(0), dtype=torch.long,
                                       requires_grad=False).to(device)

            z = z.detach()  # Detach so that VAE is not trained again
            z_perm = self.permute_latent(z)
            D_z_perm = self.discriminator(z_perm)
            D_tc_loss = 0.5 * (F.cross_entropy(self.D_z_reserve, false_labels) +
                               F.cross_entropy(D_z_perm, true_labels))

            # print(f'D_TC: {D_tc_loss}')
            return {'loss': D_tc_loss,
                    'D_TC_Loss': D_tc_loss}

    def sample(self,
               num_samples: int,
               current_device: int, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        z = torch.randn(num_samples,
                        self.latent_dim)

        z = z.to(current_device)

        samples = self.decode(z)
        return samples

    def generate(self, x: Tensor, **kwargs) -> Tensor:
        """
        Given an input image x, returns the reconstructed image
        :param x: (Tensor) [B x C x H x W]
        :return: (Tensor) [B x C x H x W]
        """
        return self.forward(x)[0]
PyTorch-VAE | PyTorch-VAE-master/models/iwae.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
class IWAE(BaseVAE):
    """
    Importance-Weighted Autoencoder: draws ``num_samples`` latent samples per
    datapoint and combines their losses with softmax importance weights.
    """

    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dims: List = None,
                 num_samples: int = 5,
                 **kwargs) -> None:
        """
        :param in_channels: number of input image channels
        :param latent_dim: dimensionality of the latent code
        :param hidden_dims: channel widths of the conv stages (default [32..512])
        :param num_samples: S, importance samples drawn per datapoint
        """
        super(IWAE, self).__init__()

        self.latent_dim = latent_dim
        self.num_samples = num_samples

        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]

        # Build Encoder: stride-2 convs halve the spatial size at each stage.
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size=3, stride=2, padding=1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim

        self.encoder = nn.Sequential(*modules)
        # *4 == 2x2 spatial map after five stride-2 convs — assumes 64x64
        # inputs; TODO confirm against the data loader.
        self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)
        self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)

        # Build Decoder (mirror of the encoder).
        modules = []

        self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)

        hidden_dims.reverse()  # NOTE: mutates the caller's list in place

        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )

        self.decoder = nn.Sequential(*modules)

        self.final_layer = nn.Sequential(
            nn.ConvTranspose2d(hidden_dims[-1],
                               hidden_dims[-1],
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1),
            nn.BatchNorm2d(hidden_dims[-1]),
            nn.LeakyReLU(),
            nn.Conv2d(hidden_dims[-1], out_channels=3,
                      kernel_size=3, padding=1),
            nn.Tanh())  # outputs in [-1, 1]

    def encode(self, input: Tensor) -> List[Tensor]:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (Tensor) List of latent codes
        """
        result = self.encoder(input)
        result = torch.flatten(result, start_dim=1)

        # Split the result into mu and var components
        # of the latent Gaussian distribution
        mu = self.fc_mu(result)
        log_var = self.fc_var(result)

        return [mu, log_var]

    def decode(self, z: Tensor) -> Tensor:
        """
        Maps the given latent codes of S samples
        onto the image space.
        :param z: (Tensor) [B x S x D]
        :return: (Tensor) [B x S x C x H x W]
        """
        B, _, _ = z.size()
        z = z.view(-1, self.latent_dim)  # [BS x D]
        result = self.decoder_input(z)
        result = result.view(-1, 512, 2, 2)  # 512*2*2 == decoder_input features
        result = self.decoder(result)
        result = self.final_layer(result)  # [BS x C x H x W]
        result = result.view([B, -1, result.size(1), result.size(2), result.size(3)])  # [B x S x C x H x W]
        return result

    def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
        """
        Reparameterization trick: z = mu + std * eps with eps ~ N(0, I).
        :param mu: (Tensor) Mean of the latent Gaussian
        :param logvar: (Tensor) Log-variance of the latent Gaussian
        :return: (Tensor) same shape as mu
        """
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps * std + mu

    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        mu, log_var = self.encode(input)
        mu = mu.repeat(self.num_samples, 1, 1).permute(1, 0, 2)  # [B x S x D]
        log_var = log_var.repeat(self.num_samples, 1, 1).permute(1, 0, 2)  # [B x S x D]
        z = self.reparameterize(mu, log_var)  # [B x S x D]
        # Recover the standard-normal noise that generated z.
        # Fix: (z - mu) must be divided by the std dev exp(0.5 * log_var),
        # not by log_var itself (previously `(z - mu) / log_var`, which is
        # wrong and divides by zero when log_var == 0).
        eps = (z - mu) / torch.exp(0.5 * log_var)
        return [self.decode(z), input, mu, log_var, z, eps]

    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        """
        Importance-weighted loss over the S samples.
        KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
        :param args: outputs of forward(): [recons, input, mu, log_var, z, eps]
        :param kwargs: must contain 'M_N' (minibatch/dataset weight)
        :return:
        """
        recons = args[0]
        input = args[1]
        mu = args[2]
        log_var = args[3]
        z = args[4]
        eps = args[5]

        input = input.repeat(self.num_samples, 1, 1, 1, 1).permute(1, 0, 2, 3, 4)  # [B x S x C x H x W]

        kld_weight = kwargs['M_N']  # Account for the minibatch samples from the dataset

        log_p_x_z = ((recons - input) ** 2).flatten(2).mean(-1)  # Reconstruction Loss [B x S]
        kld_loss = -0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=2)  # [B x S]

        # Get importance weights from the per-sample losses.
        log_weight = (log_p_x_z + kld_weight * kld_loss)  # .detach().data

        # Rescale the weights (along the sample dim) to lie in [0, 1] and sum to 1
        weight = F.softmax(log_weight, dim=-1)

        # kld_loss = torch.mean(kld_loss, dim = 0)
        loss = torch.mean(torch.sum(weight * log_weight, dim=-1), dim=0)

        return {'loss': loss, 'Reconstruction_Loss': log_p_x_z.mean(), 'KLD': -kld_loss.mean()}

    def sample(self,
               num_samples: int,
               current_device: int, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        z = torch.randn(num_samples, 1,
                        self.latent_dim)

        z = z.to(current_device)

        samples = self.decode(z).squeeze()
        return samples

    def generate(self, x: Tensor, **kwargs) -> Tensor:
        """
        Given an input image x, returns the reconstructed image.
        Returns only the first reconstructed sample
        :param x: (Tensor) [B x C x H x W]
        :return: (Tensor) [B x C x H x W]
        """
        return self.forward(x)[0][:, 0, :]
| 6,694 | 34.42328 | 106 | py |
PyTorch-VAE | PyTorch-VAE-master/models/vampvae.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
class VampVAE(BaseVAE):
    """
    VAE with a VampPrior: the prior p(z) is a uniform mixture of the
    encoder's posteriors evaluated at ``num_components`` learned
    pseudo-inputs (produced by ``embed_pseudo`` from one-hot codes).
    """

    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dims: List = None,
                 num_components: int = 50,
                 **kwargs) -> None:
        """
        :param in_channels: number of input image channels
        :param latent_dim: dimensionality of the latent code
        :param hidden_dims: channel widths of the conv stages (default [32..512])
        :param num_components: number of pseudo-inputs / mixture components
        """
        super(VampVAE, self).__init__()

        self.latent_dim = latent_dim
        self.num_components = num_components

        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]

        # Build Encoder: stride-2 convs halve the spatial size at each stage.
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size=3, stride=2, padding=1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim

        self.encoder = nn.Sequential(*modules)
        # *4 == 2x2 spatial map after five stride-2 convs — assumes 64x64
        # inputs (see 12288 == 3*64*64 below); TODO confirm.
        self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)
        self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)

        # Build Decoder (mirror of the encoder).
        modules = []

        self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)

        hidden_dims.reverse()  # NOTE: mutates the caller's list in place

        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )

        self.decoder = nn.Sequential(*modules)

        self.final_layer = nn.Sequential(
            nn.ConvTranspose2d(hidden_dims[-1],
                               hidden_dims[-1],
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1),
            nn.BatchNorm2d(hidden_dims[-1]),
            nn.LeakyReLU(),
            nn.Conv2d(hidden_dims[-1], out_channels=3,
                      kernel_size=3, padding=1),
            nn.Tanh())  # outputs in [-1, 1]

        # One-hot identity codes, mapped to image-shaped pseudo-inputs.
        self.pseudo_input = torch.eye(self.num_components, requires_grad=False)
        self.embed_pseudo = nn.Sequential(nn.Linear(self.num_components, 12288),
                                          nn.Hardtanh(0.0, 1.0))  # 3x64x64 = 12288

    def encode(self, input: Tensor) -> List[Tensor]:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (Tensor) List of latent codes
        """
        result = self.encoder(input)
        result = torch.flatten(result, start_dim=1)

        # Split the result into mu and var components
        # of the latent Gaussian distribution
        mu = self.fc_mu(result)
        log_var = self.fc_var(result)

        return [mu, log_var]

    def decode(self, z: Tensor) -> Tensor:
        # Map latent codes [B x D] back to image space [B x C x H x W].
        result = self.decoder_input(z)
        result = result.view(-1, 512, 2, 2)  # 512*2*2 == decoder_input features
        result = self.decoder(result)
        result = self.final_layer(result)
        return result

    def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
        """
        Will a single z be enough ti compute the expectation
        for the loss??
        :param mu: (Tensor) Mean of the latent Gaussian
        :param logvar: (Tensor) Log-variance of the latent Gaussian
        :return:
        """
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps * std + mu

    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        # Returns [recons, input, mu, log_var, z]; z is needed by the
        # mixture-prior term in loss_function.
        mu, log_var = self.encode(input)
        z = self.reparameterize(mu, log_var)
        return [self.decode(z), input, mu, log_var, z]

    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        # VampPrior loss: MSE reconstruction + M_N * (E_q log q - E_q log p),
        # where p(z) is the pseudo-input mixture evaluated via logsumexp.
        recons = args[0]
        input = args[1]
        mu = args[2]
        log_var = args[3]
        z = args[4]

        kld_weight = kwargs['M_N']  # Account for the minibatch samples from the dataset
        recons_loss = F.mse_loss(recons, input)

        # E_q[log q(z|x)] up to additive constants.
        # NOTE(review): operator precedence makes this
        # -0.5 * (log_var + (z-mu)^2) / exp(log_var) — the log_var term is
        # also divided by exp(log_var), unlike the standard Gaussian log-pdf;
        # the E_log_p_z term below uses the same form, so they are mutually
        # consistent. Confirm this is intentional.
        E_log_q_z = torch.mean(torch.sum(-0.5 * (log_var + (z - mu) ** 2) / log_var.exp(),
                                         dim=1),
                               dim=0)

        # Original Prior
        # E_log_p_z = torch.mean(torch.sum(-0.5 * (z ** 2), dim = 1), dim = 0)

        # Vamp Prior: encode the learned pseudo-inputs, then score z under the
        # uniform mixture of the resulting posteriors.
        M, C, H, W = input.size()
        curr_device = input.device
        # NOTE(review): hard requirement on CUDA here (.cuda, not .to);
        # repeated assignment keeps pseudo_input on the device after step 1.
        self.pseudo_input = self.pseudo_input.cuda(curr_device)
        x = self.embed_pseudo(self.pseudo_input)
        x = x.view(-1, C, H, W)
        prior_mu, prior_log_var = self.encode(x)

        # Broadcast z [M x 1 x D] against components [1 x K x D].
        z_expand = z.unsqueeze(1)
        prior_mu = prior_mu.unsqueeze(0)
        prior_log_var = prior_log_var.unsqueeze(0)

        # log(1/K) mixture weight folded in before the logsumexp over components.
        E_log_p_z = torch.sum(-0.5 *
                              (prior_log_var + (z_expand - prior_mu) ** 2) / prior_log_var.exp(),
                              dim=2) - torch.log(torch.tensor(self.num_components).float())
        # dim = 0)
        E_log_p_z = torch.logsumexp(E_log_p_z, dim=1)
        E_log_p_z = torch.mean(E_log_p_z, dim=0)

        # KLD = E_q log q - E_q log p
        kld_loss = -(E_log_p_z - E_log_q_z)
        # print(E_log_p_z, E_log_q_z)

        loss = recons_loss + kld_weight * kld_loss
        return {'loss': loss, 'Reconstruction_Loss': recons_loss, 'KLD': -kld_loss}

    def sample(self,
               num_samples: int,
               current_device: int, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        z = torch.randn(num_samples,
                        self.latent_dim)

        z = z.cuda(current_device)

        samples = self.decode(z)
        return samples

    def generate(self, x: Tensor, **kwargs) -> Tensor:
        """
        Given an input image x, returns the reconstructed image
        :param x: (Tensor) [B x C x H x W]
        :return: (Tensor) [B x C x H x W]
        """
        return self.forward(x)[0]
PyTorch-VAE | PyTorch-VAE-master/models/vanilla_vae.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
class VanillaVAE(BaseVAE):
    """Plain VAE with a convolutional encoder/decoder and a diagonal-Gaussian
    latent, trained with MSE reconstruction + weighted KL."""

    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dims: List = None,
                 **kwargs) -> None:
        super(VanillaVAE, self).__init__()

        self.latent_dim = latent_dim

        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]

        # ----- Encoder: stride-2 conv stages -----
        encoder_blocks = []
        ch = in_channels
        for out_ch in hidden_dims:
            encoder_blocks.append(
                nn.Sequential(
                    nn.Conv2d(ch, out_channels=out_ch,
                              kernel_size=3, stride=2, padding=1),
                    nn.BatchNorm2d(out_ch),
                    nn.LeakyReLU()))
            ch = out_ch
        self.encoder = nn.Sequential(*encoder_blocks)

        # Heads for the posterior parameters (flattened 2x2 feature map).
        self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)
        self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)

        # ----- Decoder: mirror of the encoder -----
        self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)

        hidden_dims.reverse()  # in-place, as before (mutates the caller's list)

        decoder_blocks = [
            nn.Sequential(
                nn.ConvTranspose2d(hidden_dims[i],
                                   hidden_dims[i + 1],
                                   kernel_size=3,
                                   stride=2,
                                   padding=1,
                                   output_padding=1),
                nn.BatchNorm2d(hidden_dims[i + 1]),
                nn.LeakyReLU())
            for i in range(len(hidden_dims) - 1)
        ]
        self.decoder = nn.Sequential(*decoder_blocks)

        self.final_layer = nn.Sequential(
            nn.ConvTranspose2d(hidden_dims[-1],
                               hidden_dims[-1],
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1),
            nn.BatchNorm2d(hidden_dims[-1]),
            nn.LeakyReLU(),
            nn.Conv2d(hidden_dims[-1], out_channels=3,
                      kernel_size=3, padding=1),
            nn.Tanh())

    def encode(self, input: Tensor) -> List[Tensor]:
        """
        Run the encoder and return the parameters [mu, log_var] of q(z|x).
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (Tensor) List of latent codes
        """
        feats = torch.flatten(self.encoder(input), start_dim=1)
        return [self.fc_mu(feats), self.fc_var(feats)]

    def decode(self, z: Tensor) -> Tensor:
        """
        Map latent codes back onto image space.
        :param z: (Tensor) [B x D]
        :return: (Tensor) [B x C x H x W]
        """
        hidden = self.decoder_input(z).view(-1, 512, 2, 2)
        return self.final_layer(self.decoder(hidden))

    def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
        """
        Reparameterization trick: z = mu + sigma * eps, eps ~ N(0, I).
        :param mu: (Tensor) Mean of the latent Gaussian [B x D]
        :param logvar: (Tensor) Log-variance of the latent Gaussian [B x D]
        :return: (Tensor) [B x D]
        """
        sigma = torch.exp(0.5 * logvar)
        return mu + sigma * torch.randn_like(sigma)

    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        mu, log_var = self.encode(input)
        z = self.reparameterize(mu, log_var)
        return [self.decode(z), input, mu, log_var]

    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        """
        VAE loss: MSE reconstruction + M_N-weighted KL to the unit Gaussian.
        KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
        :param args: outputs of forward()
        :param kwargs: must contain 'M_N' (minibatch/dataset weight)
        :return:
        """
        recons, input, mu, log_var = args[0], args[1], args[2], args[3]
        kld_weight = kwargs['M_N']  # Account for the minibatch samples from the dataset

        recons_loss = F.mse_loss(recons, input)
        kld_loss = torch.mean(
            -0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1),
            dim=0)

        loss = recons_loss + kld_weight * kld_loss
        return {'loss': loss,
                'Reconstruction_Loss': recons_loss.detach(),
                'KLD': -kld_loss.detach()}

    def sample(self,
               num_samples: int,
               current_device: int, **kwargs) -> Tensor:
        """
        Draw num_samples latent codes from N(0, I) and decode them.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        latents = torch.randn(num_samples, self.latent_dim).to(current_device)
        return self.decode(latents)

    def generate(self, x: Tensor, **kwargs) -> Tensor:
        """
        Given an input image x, returns the reconstructed image.
        :param x: (Tensor) [B x C x H x W]
        :return: (Tensor) [B x C x H x W]
        """
        return self.forward(x)[0]
PyTorch-VAE | PyTorch-VAE-master/models/logcosh_vae.py | import torch
import torch.nn.functional as F
from models import BaseVAE
from torch import nn
from .types_ import *
class LogCoshVAE(BaseVAE):
def __init__(self,
in_channels: int,
latent_dim: int,
hidden_dims: List = None,
alpha: float = 100.,
beta: float = 10.,
**kwargs) -> None:
super(LogCoshVAE, self).__init__()
self.latent_dim = latent_dim
self.alpha = alpha
self.beta = beta
modules = []
if hidden_dims is None:
hidden_dims = [32, 64, 128, 256, 512]
# Build Encoder
for h_dim in hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(in_channels, out_channels=h_dim,
kernel_size= 3, stride= 2, padding = 1),
nn.BatchNorm2d(h_dim),
nn.LeakyReLU())
)
in_channels = h_dim
self.encoder = nn.Sequential(*modules)
self.fc_mu = nn.Linear(hidden_dims[-1]*4, latent_dim)
self.fc_var = nn.Linear(hidden_dims[-1]*4, latent_dim)
# Build Decoder
modules = []
self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
hidden_dims.reverse()
for i in range(len(hidden_dims) - 1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(hidden_dims[i],
hidden_dims[i + 1],
kernel_size=3,
stride = 2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[i + 1]),
nn.LeakyReLU())
)
self.decoder = nn.Sequential(*modules)
self.final_layer = nn.Sequential(
nn.ConvTranspose2d(hidden_dims[-1],
hidden_dims[-1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[-1]),
nn.LeakyReLU(),
nn.Conv2d(hidden_dims[-1], out_channels= 3,
kernel_size= 3, padding= 1),
nn.Tanh())
def encode(self, input: Tensor) -> List[Tensor]:
"""
Encodes the input by passing through the encoder network
and returns the latent codes.
:param input: (Tensor) Input tensor to encoder [N x C x H x W]
:return: (Tensor) List of latent codes
"""
result = self.encoder(input)
result = torch.flatten(result, start_dim=1)
# Split the result into mu and var components
# of the latent Gaussian distribution
mu = self.fc_mu(result)
log_var = self.fc_var(result)
return [mu, log_var]
def decode(self, z: Tensor) -> Tensor:
"""
Maps the given latent codes
onto the image space.
:param z: (Tensor) [B x D]
:return: (Tensor) [B x C x H x W]
"""
result = self.decoder_input(z)
result = result.view(-1, 512, 2, 2)
result = self.decoder(result)
result = self.final_layer(result)
return result
def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
"""
Reparameterization trick to sample from N(mu, var) from
N(0,1).
:param mu: (Tensor) Mean of the latent Gaussian [B x D]
:param logvar: (Tensor) Standard deviation of the latent Gaussian [B x D]
:return: (Tensor) [B x D]
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
mu, log_var = self.encode(input)
z = self.reparameterize(mu, log_var)
return [self.decode(z), input, mu, log_var]
def loss_function(self,
*args,
**kwargs) -> dict:
"""
Computes the VAE loss function.
KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
:param args:
:param kwargs:
:return:
"""
recons = args[0]
input = args[1]
mu = args[2]
log_var = args[3]
kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset
t = recons - input
# recons_loss = F.mse_loss(recons, input)
# cosh = torch.cosh(self.alpha * t)
# recons_loss = (1./self.alpha * torch.log(cosh)).mean()
recons_loss = self.alpha * t + \
torch.log(1. + torch.exp(- 2 * self.alpha * t)) - \
torch.log(torch.tensor(2.0))
# print(self.alpha* t.max(), self.alpha*t.min())
recons_loss = (1. / self.alpha) * recons_loss.mean()
kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim = 1), dim = 0)
loss = recons_loss + self.beta * kld_weight * kld_loss
return {'loss': loss, 'Reconstruction_Loss':recons_loss, 'KLD':-kld_loss}
def sample(self,
num_samples:int,
current_device: int, **kwargs) -> Tensor:
"""
Samples from the latent space and return the corresponding
image space map.
:param num_samples: (Int) Number of samples
:param current_device: (Int) Device to run the model
:return: (Tensor)
"""
z = torch.randn(num_samples,
self.latent_dim)
z = z.to(current_device)
samples = self.decode(z)
return samples
def generate(self, x: Tensor, **kwargs) -> Tensor:
"""
Given an input image x, returns the reconstructed image
:param x: (Tensor) [B x C x H x W]
:return: (Tensor) [B x C x H x W]
"""
return self.forward(x)[0] | 6,292 | 33.576923 | 104 | py |
PyTorch-VAE | PyTorch-VAE-master/models/hvae.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
class HVAE(BaseVAE):
def __init__(self,
in_channels: int,
latent1_dim: int,
latent2_dim: int,
hidden_dims: List = None,
img_size:int = 64,
pseudo_input_size: int = 128,
**kwargs) -> None:
super(HVAE, self).__init__()
self.latent1_dim = latent1_dim
self.latent2_dim = latent2_dim
self.img_size = img_size
modules = []
if hidden_dims is None:
hidden_dims = [32, 64, 128, 256, 512]
channels = in_channels
# Build z2 Encoder
for h_dim in hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(channels, out_channels=h_dim,
kernel_size= 3, stride= 2, padding = 1),
nn.BatchNorm2d(h_dim),
nn.LeakyReLU())
)
channels = h_dim
self.encoder_z2_layers = nn.Sequential(*modules)
self.fc_z2_mu = nn.Linear(hidden_dims[-1]*4, latent2_dim)
self.fc_z2_var = nn.Linear(hidden_dims[-1]*4, latent2_dim)
# ========================================================================#
# Build z1 Encoder
self.embed_z2_code = nn.Linear(latent2_dim, img_size * img_size)
self.embed_data = nn.Conv2d(in_channels, in_channels, kernel_size=1)
modules = []
channels = in_channels + 1 # One more channel for the latent code
for h_dim in hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(channels, out_channels=h_dim,
kernel_size= 3, stride= 2, padding = 1),
nn.BatchNorm2d(h_dim),
nn.LeakyReLU())
)
channels = h_dim
self.encoder_z1_layers = nn.Sequential(*modules)
self.fc_z1_mu = nn.Linear(hidden_dims[-1]*4, latent1_dim)
self.fc_z1_var = nn.Linear(hidden_dims[-1]*4, latent1_dim)
#========================================================================#
# Build z2 Decoder
self.recons_z1_mu = nn.Linear(latent2_dim, latent1_dim)
self.recons_z1_log_var = nn.Linear(latent2_dim, latent1_dim)
# ========================================================================#
# Build z1 Decoder
self.debed_z1_code = nn.Linear(latent1_dim, 1024)
self.debed_z2_code = nn.Linear(latent2_dim, 1024)
modules = []
hidden_dims.reverse()
for i in range(len(hidden_dims) - 1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(hidden_dims[i],
hidden_dims[i + 1],
kernel_size=3,
stride = 2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[i + 1]),
nn.LeakyReLU())
)
self.decoder = nn.Sequential(*modules)
self.final_layer = nn.Sequential(
nn.ConvTranspose2d(hidden_dims[-1],
hidden_dims[-1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[-1]),
nn.LeakyReLU(),
nn.Conv2d(hidden_dims[-1], out_channels= 3,
kernel_size= 3, padding= 1),
nn.Tanh())
# ========================================================================#
# Pesudo Input for the Vamp-Prior
# self.pseudo_input = torch.eye(pseudo_input_size,
# requires_grad=False).view(1, 1, pseudo_input_size, -1)
#
#
# self.pseudo_layer = nn.Conv2d(1, out_channels=in_channels,
# kernel_size=3, stride=2, padding=1)
def encode_z2(self, input: Tensor) -> List[Tensor]:
"""
Encodes the input by passing through the encoder network
and returns the latent codes.
:param input: (Tensor) Input tensor to encoder [N x C x H x W]
:return: (Tensor) List of latent codes
"""
result = self.encoder_z2_layers(input)
result = torch.flatten(result, start_dim=1)
# Split the result into mu and var components
# of the latent Gaussian distribution
z2_mu = self.fc_z2_mu(result)
z2_log_var = self.fc_z2_var(result)
return [z2_mu, z2_log_var]
    def encode_z1(self, input: Tensor, z2: Tensor) -> List[Tensor]:
        """
        Encodes q(z1 | x, z2): the z2 code is linearly embedded into an
        img_size x img_size map and stacked as one extra channel of the
        (embedded) input before the z1 encoder runs.

        :param input: (Tensor) [B x C x H x W]
        :param z2: (Tensor) [B x latent2_dim]
        :return: List of [z1_mu, z1_log_var]
        """
        x = self.embed_data(input)
        z2 = self.embed_z2_code(z2)
        z2 = z2.view(-1, self.img_size, self.img_size).unsqueeze(1)
        result = torch.cat([x, z2], dim=1)
        result = self.encoder_z1_layers(result)
        result = torch.flatten(result, start_dim=1)
        z1_mu = self.fc_z1_mu(result)
        z1_log_var = self.fc_z1_var(result)
        return [z1_mu, z1_log_var]
    def encode(self, input: Tensor) -> List[Tensor]:
        """
        Hierarchical inference: first z2 ~ q(z2|x), then z1 ~ q(z1|x, z2).

        :param input: (Tensor) [B x C x H x W]
        :return: List of [z1_mu, z1_log_var, z2_mu, z2_log_var, z2]
        """
        z2_mu, z2_log_var = self.encode_z2(input)
        z2 = self.reparameterize(z2_mu, z2_log_var)
        # z1 ~ q(z1|x, z2)
        z1_mu, z1_log_var = self.encode_z1(input, z2)
        return [z1_mu, z1_log_var, z2_mu, z2_log_var, z2]
def decode(self, input: Tensor) -> Tensor:
result = self.decoder(input)
result = self.final_layer(result)
return result
def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
"""
Will a single z be enough ti compute the expectation
for the loss??
:param mu: (Tensor) Mean of the latent Gaussian
:param logvar: (Tensor) Standard deviation of the latent Gaussian
:return:
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        """
        Full two-level inference and reconstruction pass.

        :param input: (Tensor) [B x C x H x W]
        :return: List of [recons, input, z1_mu, z1_log_var,
                 z2_mu, z2_log_var, z1, z2]
        """
        # Encode the input into the latent codes z1 and z2
        # z2 ~q(z2 | x)
        # z1 ~ q(z1|x, z2)
        z1_mu, z1_log_var, z2_mu, z2_log_var, z2 = self.encode(input)
        z1 = self.reparameterize(z1_mu, z1_log_var)
        # Reconstruct the image using both the latent codes
        # x ~ p(x|z1, z2)
        # Each code is linearly "debedded" to 1024 dims; the concatenation is
        # reshaped into the 512 x 2 x 2 feature map the decoder expects.
        debedded_z1 = self.debed_z1_code(z1)
        debedded_z2 = self.debed_z2_code(z2)
        result = torch.cat([debedded_z1, debedded_z2], dim=1)
        result = result.view(-1, 512, 2, 2)
        recons = self.decode(result)
        return [recons,
                input,
                z1_mu, z1_log_var,
                z2_mu, z2_log_var,
                z1, z2]
    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        """
        Computes the hierarchical VAE loss: MSE reconstruction plus a KL
        combination of three terms — the posterior KLs of z1 and z2 against
        N(0, I) and a cross term scoring the sampled z1 under the learned
        conditional prior p(z1|z2).

        :param args: [recons, input, z1_mu, z1_log_var, z2_mu, z2_log_var,
            z1, z2] as returned by forward()
        :param kwargs: must contain 'M_N', the minibatch/dataset KL weight
        :return: dict with 'loss', 'Reconstruction Loss' and 'KLD'
        """
        recons = args[0]
        input = args[1]
        z1_mu = args[2]
        z1_log_var = args[3]
        z2_mu = args[4]
        z2_log_var = args[5]
        z1= args[6]
        z2 = args[7]
        # Reconstruct (decode) z2 into z1
        # z1 ~ p(z1|z2) [This for the loss calculation]
        z1_p_mu = self.recons_z1_mu(z2)
        z1_p_log_var = self.recons_z1_log_var(z2)
        kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset
        recons_loss =F.mse_loss(recons, input)
        # KL(q(z1|x,z2) || N(0, I))
        z1_kld = torch.mean(-0.5 * torch.sum(1 + z1_log_var - z1_mu ** 2 - z1_log_var.exp(), dim = 1),
                            dim = 0)
        # KL(q(z2|x) || N(0, I))
        z2_kld = torch.mean(-0.5 * torch.sum(1 + z2_log_var - z2_mu ** 2 - z2_log_var.exp(), dim = 1),
                            dim = 0)
        # Cross term: scores the sampled z1 under the conditional Gaussian
        # prior whose parameters were predicted from z2 above.
        z1_p_kld = torch.mean(-0.5 * torch.sum(1 + z1_p_log_var - (z1 - z1_p_mu) ** 2 - z1_p_log_var.exp(),
                                               dim = 1),
                              dim = 0)
        # NOTE(review): z2_p_kld is computed but never used in the loss below
        # (only the commented-out print referenced it).
        z2_p_kld = torch.mean(-0.5*(z2**2), dim = 0)
        kld_loss = -(z1_p_kld - z1_kld - z2_kld)
        loss = recons_loss + kld_weight * kld_loss
        # print(z2_p_kld)
        # NOTE(review): the key 'Reconstruction Loss' (with a space) differs from
        # the 'Reconstruction_Loss' key used by sibling models — confirm whether
        # experiment logging depends on it before unifying.
        return {'loss': loss, 'Reconstruction Loss':recons_loss, 'KLD':-kld_loss}
def sample(self, batch_size:int, current_device: int, **kwargs) -> Tensor:
z2 = torch.randn(batch_size,
self.latent2_dim)
z2 = z2.cuda(current_device)
z1_mu = self.recons_z1_mu(z2)
z1_log_var = self.recons_z1_log_var(z2)
z1 = self.reparameterize(z1_mu, z1_log_var)
debedded_z1 = self.debed_z1_code(z1)
debedded_z2 = self.debed_z2_code(z2)
result = torch.cat([debedded_z1, debedded_z2], dim=1)
result = result.view(-1, 512, 2, 2)
samples = self.decode(result)
return samples
def generate(self, x: Tensor, **kwargs) -> Tensor:
"""
Given an input image x, returns the reconstructed image
:param x: (Tensor) [B x C x H x W]
:return: (Tensor) [B x C x H x W]
"""
return self.forward(x)[0]
| 9,396 | 35.142308 | 107 | py |
PyTorch-VAE | PyTorch-VAE-master/models/joint_vae.py | import torch
import numpy as np
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
class JointVAE(BaseVAE):
num_iter = 1
def __init__(self,
in_channels: int,
latent_dim: int,
categorical_dim: int,
latent_min_capacity: float =0.,
latent_max_capacity: float = 25.,
latent_gamma: float = 30.,
latent_num_iter: int = 25000,
categorical_min_capacity: float =0.,
categorical_max_capacity: float = 25.,
categorical_gamma: float = 30.,
categorical_num_iter: int = 25000,
hidden_dims: List = None,
temperature: float = 0.5,
anneal_rate: float = 3e-5,
anneal_interval: int = 100, # every 100 batches
alpha: float = 30.,
**kwargs) -> None:
super(JointVAE, self).__init__()
self.latent_dim = latent_dim
self.categorical_dim = categorical_dim
self.temp = temperature
self.min_temp = temperature
self.anneal_rate = anneal_rate
self.anneal_interval = anneal_interval
self.alpha = alpha
self.cont_min = latent_min_capacity
self.cont_max = latent_max_capacity
self.disc_min = categorical_min_capacity
self.disc_max = categorical_max_capacity
self.cont_gamma = latent_gamma
self.disc_gamma = categorical_gamma
self.cont_iter = latent_num_iter
self.disc_iter = categorical_num_iter
modules = []
if hidden_dims is None:
hidden_dims = [32, 64, 128, 256, 512]
# Build Encoder
for h_dim in hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(in_channels, out_channels=h_dim,
kernel_size= 3, stride= 2, padding = 1),
nn.BatchNorm2d(h_dim),
nn.LeakyReLU())
)
in_channels = h_dim
self.encoder = nn.Sequential(*modules)
self.fc_mu = nn.Linear(hidden_dims[-1]*4, self.latent_dim)
self.fc_var = nn.Linear(hidden_dims[-1]*4, self.latent_dim)
self.fc_z = nn.Linear(hidden_dims[-1]*4, self.categorical_dim)
# Build Decoder
modules = []
self.decoder_input = nn.Linear(self.latent_dim + self.categorical_dim,
hidden_dims[-1] * 4)
hidden_dims.reverse()
for i in range(len(hidden_dims) - 1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(hidden_dims[i],
hidden_dims[i + 1],
kernel_size=3,
stride = 2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[i + 1]),
nn.LeakyReLU())
)
self.decoder = nn.Sequential(*modules)
self.final_layer = nn.Sequential(
nn.ConvTranspose2d(hidden_dims[-1],
hidden_dims[-1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[-1]),
nn.LeakyReLU(),
nn.Conv2d(hidden_dims[-1], out_channels= 3,
kernel_size= 3, padding= 1),
nn.Tanh())
self.sampling_dist = torch.distributions.OneHotCategorical(1. / categorical_dim * torch.ones((self.categorical_dim, 1)))
def encode(self, input: Tensor) -> List[Tensor]:
"""
Encodes the input by passing through the encoder network
and returns the latent codes.
:param input: (Tensor) Input tensor to encoder [B x C x H x W]
:return: (Tensor) Latent code [B x D x Q]
"""
result = self.encoder(input)
result = torch.flatten(result, start_dim=1)
# Split the result into mu and var components
# of the latent Gaussian distribution
mu = self.fc_mu(result)
log_var = self.fc_var(result)
z = self.fc_z(result)
z = z.view(-1, self.categorical_dim)
return [mu, log_var, z]
def decode(self, z: Tensor) -> Tensor:
"""
Maps the given latent codes
onto the image space.
:param z: (Tensor) [B x D x Q]
:return: (Tensor) [B x C x H x W]
"""
result = self.decoder_input(z)
result = result.view(-1, 512, 2, 2)
result = self.decoder(result)
result = self.final_layer(result)
return result
def reparameterize(self,
mu: Tensor,
log_var: Tensor,
q: Tensor,
eps:float = 1e-7) -> Tensor:
"""
Gumbel-softmax trick to sample from Categorical Distribution
:param mu: (Tensor) mean of the latent Gaussian [B x D]
:param log_var: (Tensor) Log variance of the latent Gaussian [B x D]
:param q: (Tensor) Categorical latent Codes [B x Q]
:return: (Tensor) [B x (D + Q)]
"""
std = torch.exp(0.5 * log_var)
e = torch.randn_like(std)
z = e * std + mu
# Sample from Gumbel
u = torch.rand_like(q)
g = - torch.log(- torch.log(u + eps) + eps)
# Gumbel-Softmax sample
s = F.softmax((q + g) / self.temp, dim=-1)
s = s.view(-1, self.categorical_dim)
return torch.cat([z, s], dim=1)
def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
mu, log_var, q = self.encode(input)
z = self.reparameterize(mu, log_var, q)
return [self.decode(z), input, q, mu, log_var]
    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        """
        Computes the JointVAE loss: weighted MSE reconstruction plus
        capacity-constrained KL terms for the continuous (Gaussian) and
        discrete (Gumbel-softmax) latents. The target capacities grow
        linearly with self.num_iter, and the Gumbel temperature is annealed
        every self.anneal_interval batches during training.

        KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
        :param args: [recons, input, q, mu, log_var] as returned by forward()
        :param kwargs: must contain 'M_N' (KL weight) and 'batch_idx'
        :return: dict with 'loss', 'Reconstruction_Loss' and 'Capacity_Loss'
        """
        recons = args[0]
        input = args[1]
        q = args[2]
        mu = args[3]
        log_var = args[4]

        q_p = F.softmax(q, dim=-1) # Convert the categorical codes into probabilities

        kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset
        batch_idx = kwargs['batch_idx']

        # Anneal the temperature at regular intervals
        if batch_idx % self.anneal_interval == 0 and self.training:
            self.temp = np.maximum(self.temp * np.exp(- self.anneal_rate * batch_idx),
                                   self.min_temp)

        recons_loss =F.mse_loss(recons, input, reduction='mean')

        # Adaptively increase the discriminator (discrete) capacity target,
        # capped at log(K), the maximum entropy of a K-way categorical.
        disc_curr = (self.disc_max - self.disc_min) * \
                    self.num_iter/ float(self.disc_iter) + self.disc_min
        disc_curr = min(disc_curr, np.log(self.categorical_dim))

        # KL divergence between gumbel-softmax distribution
        eps = 1e-7

        # Entropy of the logits
        h1 = q_p * torch.log(q_p + eps)

        # Cross entropy with the categorical distribution
        h2 = q_p * np.log(1. / self.categorical_dim + eps)
        kld_disc_loss = torch.mean(torch.sum(h1 - h2, dim =1), dim=0)

        # Compute Continuous loss
        # Adaptively increase the continuous capacity
        cont_curr = (self.cont_max - self.cont_min) * \
                    self.num_iter/ float(self.cont_iter) + self.cont_min
        cont_curr = min(cont_curr, self.cont_max)

        kld_cont_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(),
                                                    dim=1),
                                   dim=0)

        # |C - KL| pulls each KL towards its current capacity target.
        capacity_loss = self.disc_gamma * torch.abs(disc_curr - kld_disc_loss) + \
                        self.cont_gamma * torch.abs(cont_curr - kld_cont_loss)
        # kld_weight = 1.2
        loss = self.alpha * recons_loss + kld_weight * capacity_loss

        # num_iter drives the capacity schedule; only advance it in training mode.
        if self.training:
            self.num_iter += 1
        return {'loss': loss, 'Reconstruction_Loss':recons_loss, 'Capacity_Loss':capacity_loss}
    def sample(self,
               num_samples:int,
               current_device: int, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map. The continuous code is drawn from N(0, I); the
        categorical code is a uniformly random one-hot vector per sample.

        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        # [S x D]
        z = torch.randn(num_samples,
                        self.latent_dim)

        M = num_samples
        # Build one uniformly random one-hot row per sample over the
        # categorical dimension.
        np_y = np.zeros((M, self.categorical_dim), dtype=np.float32)
        np_y[range(M), np.random.choice(self.categorical_dim, M)] = 1
        np_y = np.reshape(np_y, [M , self.categorical_dim])
        q = torch.from_numpy(np_y)

        # z = self.sampling_dist.sample((num_samples * self.latent_dim, ))
        # Concatenate continuous + categorical codes, matching the layout
        # the decoder input layer expects.
        z = torch.cat([z, q], dim = 1).to(current_device)

        samples = self.decode(z)
        return samples
def generate(self, x: Tensor, **kwargs) -> Tensor:
"""
Given an input image x, returns the reconstructed image
:param x: (Tensor) [B x C x H x W]
:return: (Tensor) [B x C x H x W]
"""
return self.forward(x)[0] | 9,837 | 35.708955 | 128 | py |
PyTorch-VAE | PyTorch-VAE-master/models/cvae.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
class ConditionalVAE(BaseVAE):
def __init__(self,
in_channels: int,
num_classes: int,
latent_dim: int,
hidden_dims: List = None,
img_size:int = 64,
**kwargs) -> None:
super(ConditionalVAE, self).__init__()
self.latent_dim = latent_dim
self.img_size = img_size
self.embed_class = nn.Linear(num_classes, img_size * img_size)
self.embed_data = nn.Conv2d(in_channels, in_channels, kernel_size=1)
modules = []
if hidden_dims is None:
hidden_dims = [32, 64, 128, 256, 512]
in_channels += 1 # To account for the extra label channel
# Build Encoder
for h_dim in hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(in_channels, out_channels=h_dim,
kernel_size= 3, stride= 2, padding = 1),
nn.BatchNorm2d(h_dim),
nn.LeakyReLU())
)
in_channels = h_dim
self.encoder = nn.Sequential(*modules)
self.fc_mu = nn.Linear(hidden_dims[-1]*4, latent_dim)
self.fc_var = nn.Linear(hidden_dims[-1]*4, latent_dim)
# Build Decoder
modules = []
self.decoder_input = nn.Linear(latent_dim + num_classes, hidden_dims[-1] * 4)
hidden_dims.reverse()
for i in range(len(hidden_dims) - 1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(hidden_dims[i],
hidden_dims[i + 1],
kernel_size=3,
stride = 2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[i + 1]),
nn.LeakyReLU())
)
self.decoder = nn.Sequential(*modules)
self.final_layer = nn.Sequential(
nn.ConvTranspose2d(hidden_dims[-1],
hidden_dims[-1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[-1]),
nn.LeakyReLU(),
nn.Conv2d(hidden_dims[-1], out_channels= 3,
kernel_size= 3, padding= 1),
nn.Tanh())
def encode(self, input: Tensor) -> List[Tensor]:
"""
Encodes the input by passing through the encoder network
and returns the latent codes.
:param input: (Tensor) Input tensor to encoder [N x C x H x W]
:return: (Tensor) List of latent codes
"""
result = self.encoder(input)
result = torch.flatten(result, start_dim=1)
# Split the result into mu and var components
# of the latent Gaussian distribution
mu = self.fc_mu(result)
log_var = self.fc_var(result)
return [mu, log_var]
def decode(self, z: Tensor) -> Tensor:
result = self.decoder_input(z)
result = result.view(-1, 512, 2, 2)
result = self.decoder(result)
result = self.final_layer(result)
return result
def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
"""
Will a single z be enough ti compute the expectation
for the loss??
:param mu: (Tensor) Mean of the latent Gaussian
:param logvar: (Tensor) Standard deviation of the latent Gaussian
:return:
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
y = kwargs['labels'].float()
embedded_class = self.embed_class(y)
embedded_class = embedded_class.view(-1, self.img_size, self.img_size).unsqueeze(1)
embedded_input = self.embed_data(input)
x = torch.cat([embedded_input, embedded_class], dim = 1)
mu, log_var = self.encode(x)
z = self.reparameterize(mu, log_var)
z = torch.cat([z, y], dim = 1)
return [self.decode(z), input, mu, log_var]
def loss_function(self,
*args,
**kwargs) -> dict:
recons = args[0]
input = args[1]
mu = args[2]
log_var = args[3]
kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset
recons_loss =F.mse_loss(recons, input)
kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim = 1), dim = 0)
loss = recons_loss + kld_weight * kld_loss
return {'loss': loss, 'Reconstruction_Loss':recons_loss, 'KLD':-kld_loss}
def sample(self,
num_samples:int,
current_device: int,
**kwargs) -> Tensor:
"""
Samples from the latent space and return the corresponding
image space map.
:param num_samples: (Int) Number of samples
:param current_device: (Int) Device to run the model
:return: (Tensor)
"""
y = kwargs['labels'].float()
z = torch.randn(num_samples,
self.latent_dim)
z = z.to(current_device)
z = torch.cat([z, y], dim=1)
samples = self.decode(z)
return samples
def generate(self, x: Tensor, **kwargs) -> Tensor:
"""
Given an input image x, returns the reconstructed image
:param x: (Tensor) [B x C x H x W]
:return: (Tensor) [B x C x H x W]
"""
return self.forward(x, **kwargs)[0] | 6,079 | 33.350282 | 104 | py |
PyTorch-VAE | PyTorch-VAE-master/models/info_vae.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
class InfoVAE(BaseVAE):
def __init__(self,
in_channels: int,
latent_dim: int,
hidden_dims: List = None,
alpha: float = -0.5,
beta: float = 5.0,
reg_weight: int = 100,
kernel_type: str = 'imq',
latent_var: float = 2.,
**kwargs) -> None:
super(InfoVAE, self).__init__()
self.latent_dim = latent_dim
self.reg_weight = reg_weight
self.kernel_type = kernel_type
self.z_var = latent_var
assert alpha <= 0, 'alpha must be negative or zero.'
self.alpha = alpha
self.beta = beta
modules = []
if hidden_dims is None:
hidden_dims = [32, 64, 128, 256, 512]
# Build Encoder
for h_dim in hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(in_channels, out_channels=h_dim,
kernel_size= 3, stride= 2, padding = 1),
nn.BatchNorm2d(h_dim),
nn.LeakyReLU())
)
in_channels = h_dim
self.encoder = nn.Sequential(*modules)
self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)
self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)
# Build Decoder
modules = []
self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
hidden_dims.reverse()
for i in range(len(hidden_dims) - 1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(hidden_dims[i],
hidden_dims[i + 1],
kernel_size=3,
stride = 2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[i + 1]),
nn.LeakyReLU())
)
self.decoder = nn.Sequential(*modules)
self.final_layer = nn.Sequential(
nn.ConvTranspose2d(hidden_dims[-1],
hidden_dims[-1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[-1]),
nn.LeakyReLU(),
nn.Conv2d(hidden_dims[-1], out_channels= 3,
kernel_size= 3, padding= 1),
nn.Tanh())
def encode(self, input: Tensor) -> List[Tensor]:
"""
Encodes the input by passing through the encoder network
and returns the latent codes.
:param input: (Tensor) Input tensor to encoder [N x C x H x W]
:return: (Tensor) List of latent codes
"""
result = self.encoder(input)
result = torch.flatten(result, start_dim=1)
# Split the result into mu and var components
# of the latent Gaussian distribution
mu = self.fc_mu(result)
log_var = self.fc_var(result)
return [mu, log_var]
def decode(self, z: Tensor) -> Tensor:
result = self.decoder_input(z)
result = result.view(-1, 512, 2, 2)
result = self.decoder(result)
result = self.final_layer(result)
return result
def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
"""
Reparameterization trick to sample from N(mu, var) from
N(0,1).
:param mu: (Tensor) Mean of the latent Gaussian [B x D]
:param logvar: (Tensor) Standard deviation of the latent Gaussian [B x D]
:return: (Tensor) [B x D]
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
mu, log_var = self.encode(input)
z = self.reparameterize(mu, log_var)
return [self.decode(z), input, z, mu, log_var]
    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        """
        Computes the InfoVAE loss: a beta-weighted MSE reconstruction, a
        (1 - alpha)-weighted Gaussian KL, and an MMD term that matches the
        aggregate posterior to the prior.

        :param args: [recons, input, z, mu, log_var] as returned by forward()
        :param kwargs: must contain 'M_N', the minibatch/dataset KL weight
        :return: dict with 'loss', 'Reconstruction_Loss', 'MMD' and 'KLD'
        """
        recons = args[0]
        input = args[1]
        z = args[2]
        mu = args[3]
        log_var = args[4]

        batch_size = input.size(0)
        # N * (N - 1) pairs: normalizes the MMD kernel sums (diagonal excluded).
        bias_corr = batch_size *  (batch_size - 1)
        kld_weight = kwargs['M_N']  # Account for the minibatch samples from the dataset

        recons_loss =F.mse_loss(recons, input)
        mmd_loss = self.compute_mmd(z)
        kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)

        loss = self.beta * recons_loss + \
               (1. - self.alpha) * kld_weight * kld_loss + \
               (self.alpha + self.reg_weight - 1.)/bias_corr * mmd_loss
        return {'loss': loss, 'Reconstruction_Loss':recons_loss, 'MMD': mmd_loss, 'KLD':-kld_loss}
    def compute_kernel(self,
                       x1: Tensor,
                       x2: Tensor) -> Tensor:
        """
        Evaluates the configured kernel ('rbf' or 'imq') on all N x N pairs
        of rows of x1 and x2.

        :param x1: (Tensor) [N x D]
        :param x2: (Tensor) [N x D]
        :return: (Tensor) pairwise kernel values
        :raises ValueError: if self.kernel_type is neither 'rbf' nor 'imq'
        """
        # Convert the tensors into row and column vectors
        D = x1.size(1)
        N = x1.size(0)

        x1 = x1.unsqueeze(-2) # Make it into a column tensor
        x2 = x2.unsqueeze(-3) # Make it into a row tensor

        """
        Usually the below lines are not required, especially in our case,
        but this is useful when x1 and x2 have different sizes
        along the 0th dimension.
        """
        x1 = x1.expand(N, N, D)
        x2 = x2.expand(N, N, D)

        if self.kernel_type == 'rbf':
            result = self.compute_rbf(x1, x2)
        elif self.kernel_type == 'imq':
            result = self.compute_inv_mult_quad(x1, x2)
        else:
            raise ValueError('Undefined kernel type.')

        return result
def compute_rbf(self,
x1: Tensor,
x2: Tensor,
eps: float = 1e-7) -> Tensor:
"""
Computes the RBF Kernel between x1 and x2.
:param x1: (Tensor)
:param x2: (Tensor)
:param eps: (Float)
:return:
"""
z_dim = x2.size(-1)
sigma = 2. * z_dim * self.z_var
result = torch.exp(-((x1 - x2).pow(2).mean(-1) / sigma))
return result
def compute_inv_mult_quad(self,
x1: Tensor,
x2: Tensor,
eps: float = 1e-7) -> Tensor:
"""
Computes the Inverse Multi-Quadratics Kernel between x1 and x2,
given by
k(x_1, x_2) = \sum \frac{C}{C + \|x_1 - x_2 \|^2}
:param x1: (Tensor)
:param x2: (Tensor)
:param eps: (Float)
:return:
"""
z_dim = x2.size(-1)
C = 2 * z_dim * self.z_var
kernel = C / (eps + C + (x1 - x2).pow(2).sum(dim = -1))
# Exclude diagonal elements
result = kernel.sum() - kernel.diag().sum()
return result
def compute_mmd(self, z: Tensor) -> Tensor:
# Sample from prior (Gaussian) distribution
prior_z = torch.randn_like(z)
prior_z__kernel = self.compute_kernel(prior_z, prior_z)
z__kernel = self.compute_kernel(z, z)
priorz_z__kernel = self.compute_kernel(prior_z, z)
mmd = prior_z__kernel.mean() + \
z__kernel.mean() - \
2 * priorz_z__kernel.mean()
return mmd
def sample(self,
num_samples:int,
current_device: int, **kwargs) -> Tensor:
"""
Samples from the latent space and return the corresponding
image space map.
:param num_samples: (Int) Number of samples
:param current_device: (Int) Device to run the model
:return: (Tensor)
"""
z = torch.randn(num_samples,
self.latent_dim)
z = z.to(current_device)
samples = self.decode(z)
return samples
def generate(self, x: Tensor, **kwargs) -> Tensor:
"""
Given an input image x, returns the reconstructed image
:param x: (Tensor) [B x C x H x W]
:return: (Tensor) [B x C x H x W]
"""
return self.forward(x)[0] | 8,538 | 32.355469 | 100 | py |
PyTorch-VAE | PyTorch-VAE-master/models/miwae.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
from torch.distributions import Normal
class MIWAE(BaseVAE):
def __init__(self,
in_channels: int,
latent_dim: int,
hidden_dims: List = None,
num_samples: int = 5,
num_estimates: int = 5,
**kwargs) -> None:
super(MIWAE, self).__init__()
self.latent_dim = latent_dim
self.num_samples = num_samples # K
self.num_estimates = num_estimates # M
modules = []
if hidden_dims is None:
hidden_dims = [32, 64, 128, 256, 512]
# Build Encoder
for h_dim in hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(in_channels, out_channels=h_dim,
kernel_size= 3, stride= 2, padding = 1),
nn.BatchNorm2d(h_dim),
nn.LeakyReLU())
)
in_channels = h_dim
self.encoder = nn.Sequential(*modules)
self.fc_mu = nn.Linear(hidden_dims[-1]*4, latent_dim)
self.fc_var = nn.Linear(hidden_dims[-1]*4, latent_dim)
# Build Decoder
modules = []
self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
hidden_dims.reverse()
for i in range(len(hidden_dims) - 1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(hidden_dims[i],
hidden_dims[i + 1],
kernel_size=3,
stride = 2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[i + 1]),
nn.LeakyReLU())
)
self.decoder = nn.Sequential(*modules)
self.final_layer = nn.Sequential(
nn.ConvTranspose2d(hidden_dims[-1],
hidden_dims[-1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[-1]),
nn.LeakyReLU(),
nn.Conv2d(hidden_dims[-1], out_channels= 3,
kernel_size= 3, padding= 1),
nn.Tanh())
def encode(self, input: Tensor) -> List[Tensor]:
"""
Encodes the input by passing through the encoder network
and returns the latent codes.
:param input: (Tensor) Input tensor to encoder [N x C x H x W]
:return: (Tensor) List of latent codes
"""
result = self.encoder(input)
result = torch.flatten(result, start_dim=1)
# Split the result into mu and var components
# of the latent Gaussian distribution
mu = self.fc_mu(result)
log_var = self.fc_var(result)
return [mu, log_var]
    def decode(self, z: Tensor) -> Tensor:
        """
        Maps the latent codes of M estimates of S samples each
        onto the image space.
        :param z: (Tensor) [B x M x S x D]
        :return: (Tensor) [B x M x S x C x H x W]
        """
        B, M,S, D = z.size()
        # Collapse the estimate/sample dims so the conv decoder sees a flat batch.
        z = z.contiguous().view(-1, self.latent_dim) #[BMS x D]
        result = self.decoder_input(z)
        # 512 x 2 x 2 feature map expected by the first transposed convolution.
        result = result.view(-1, 512, 2, 2)
        result = self.decoder(result)
        result = self.final_layer(result) #[BMS x C x H x W ]
        # Restore the [B x M x S x ...] layout for the loss computation.
        result = result.view([B, M, S,result.size(-3), result.size(-2), result.size(-1)]) #[B x M x S x C x H x W]
        return result
def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
    """
    Sample z ~ N(mu, sigma^2) via the reparameterization trick.
    :param mu: (Tensor) Mean of the latent Gaussian
    :param logvar: (Tensor) Log-variance of the latent Gaussian
    :return: (Tensor) A differentiable sample of the latent code
    """
    sigma = torch.exp(0.5 * logvar)
    noise = torch.randn_like(sigma)
    return mu + noise * sigma
def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
    """
    Encode the batch, tile the posterior parameters across M estimates and
    S importance samples, sample latents, and decode.
    :param input: (Tensor) [B x C x H x W]
    :return: [reconstructions, input, mu, log_var, z, eps]
    """
    mu, log_var = self.encode(input)
    mu = mu.repeat(self.num_estimates, self.num_samples, 1, 1).permute(2, 0, 1, 3)  # [B x M x S x D]
    log_var = log_var.repeat(self.num_estimates, self.num_samples, 1, 1).permute(2, 0, 1, 3)  # [B x M x S x D]
    z = self.reparameterize(mu, log_var)  # [B x M x S x D]
    # Bug fix: z = eps * std + mu with std = exp(0.5 * log_var), so the
    # standard-normal noise is recovered by dividing by the std, not by
    # log_var itself (the old code computed (z - mu) / log_var).
    eps = (z - mu) / torch.exp(0.5 * log_var)  # Prior samples
    return [self.decode(z), input, mu, log_var, z, eps]
def loss_function(self,
                  *args,
                  **kwargs) -> dict:
    """
    Importance-weighted ELBO over M estimates and S samples per datapoint.
    args = [recons, input, mu, log_var, z, eps] as returned by forward().
    kwargs must contain 'M_N', the minibatch weight for the KL term.
    KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
    :return: dict with 'loss', 'Reconstruction_Loss' and 'KLD'
    """
    recons = args[0]
    input = args[1]
    mu = args[2]
    log_var = args[3]
    # z and eps are unpacked for interface symmetry but are not used below
    z = args[4]
    eps = args[5]

    # Tile the target so it matches the [B x M x S x C x H x W] reconstructions
    input = input.repeat(self.num_estimates,
                         self.num_samples, 1, 1, 1, 1).permute(2, 0, 1, 3, 4, 5) #[B x M x S x C x H x W]

    kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset

    log_p_x_z = ((recons - input) ** 2).flatten(3).mean(-1)  # Reconstruction Loss # [B x M x S]
    kld_loss = -0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=3)  # [B x M x S]
    # Get importance weights (per-sample negative ELBO terms)
    log_weight = (log_p_x_z + kld_weight * kld_loss) #.detach().data

    # Rescale the weights (along the sample dim) to lie in [0, 1] and sum to 1
    weight = F.softmax(log_weight, dim = -1) # [B x M x S]
    # Weighted sum over samples, then mean over estimates, then over the batch
    loss = torch.mean(torch.mean(torch.sum(weight * log_weight, dim=-1), dim = -2), dim = 0)

    return {'loss': loss, 'Reconstruction_Loss':log_p_x_z.mean(), 'KLD':-kld_loss.mean()}
def sample(self,
           num_samples:int,
           current_device: int, **kwargs) -> Tensor:
    """
    Draw latent codes from the prior and decode them to images.
    :param num_samples: (Int) Number of samples
    :param current_device: (Int) Device to run the model
    :return: (Tensor)
    """
    # Singleton estimate/sample axes so decode sees a [B x M x S x D] input
    latents = torch.randn(num_samples, 1, 1, self.latent_dim).to(current_device)
    return self.decode(latents).squeeze()
def generate(self, x: Tensor, **kwargs) -> Tensor:
    """
    Given an input image x, returns the reconstructed image.
    Returns only the first reconstructed sample.
    :param x: (Tensor) [B x C x H x W]
    :return: (Tensor) [B x C x H x W]
    """
    reconstructions = self.forward(x)[0]
    # Keep only the first importance estimate and the first latent sample
    return reconstructions[:, 0, 0, :]
| 6,969 | 35.11399 | 114 | py |
PyTorch-VAE | PyTorch-VAE-master/models/beta_vae.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
class BetaVAE(BaseVAE):
    """
    Beta-VAE supporting both the original objective
    ('H', https://openreview.net/forum?id=Sy2fzU9gl) and the
    capacity-annealed objective ('B', https://arxiv.org/pdf/1804.03599.pdf).
    """

    num_iter = 0  # Class-level default; `self.num_iter += 1` in loss_function
                  # creates a per-instance iteration counter on first use.

    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dims: List = None,
                 beta: int = 4,
                 gamma: float = 1000.,
                 max_capacity: int = 25,
                 Capacity_max_iter: int = 1e5,
                 loss_type: str = 'B',
                 **kwargs) -> None:
        super(BetaVAE, self).__init__()

        self.latent_dim = latent_dim
        self.beta = beta
        self.gamma = gamma
        self.loss_type = loss_type
        self.C_max = torch.Tensor([max_capacity])
        self.C_stop_iter = Capacity_max_iter

        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]
        else:
            # Bug fix: copy the list so the in-place reverse() below never
            # mutates the caller's argument as a side effect.
            hidden_dims = list(hidden_dims)

        # Build Encoder: each stage halves the spatial resolution
        modules = []
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size=3, stride=2, padding=1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim

        self.encoder = nn.Sequential(*modules)
        # The *4 assumes a 2x2 feature map after the encoder (64x64 inputs
        # with the five default stride-2 stages) -- TODO confirm for other
        # input sizes.
        self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)
        self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)

        # Build Decoder (mirror of the encoder)
        modules = []
        self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
        # Bug fix: remember the channel count the decoder starts from instead
        # of hard-coding 512 in decode(); with the default hidden_dims this
        # is still 512, but custom hidden_dims no longer break decode().
        self.decoder_start_channels = hidden_dims[-1]

        hidden_dims.reverse()
        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )
        self.decoder = nn.Sequential(*modules)

        self.final_layer = nn.Sequential(
            nn.ConvTranspose2d(hidden_dims[-1],
                               hidden_dims[-1],
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1),
            nn.BatchNorm2d(hidden_dims[-1]),
            nn.LeakyReLU(),
            nn.Conv2d(hidden_dims[-1], out_channels=3,
                      kernel_size=3, padding=1),
            nn.Tanh())

    def encode(self, input: Tensor) -> List[Tensor]:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (Tensor) List of latent codes [mu, log_var]
        """
        result = self.encoder(input)
        result = torch.flatten(result, start_dim=1)

        # Split the result into mu and var components
        # of the latent Gaussian distribution
        mu = self.fc_mu(result)
        log_var = self.fc_var(result)

        return [mu, log_var]

    def decode(self, z: Tensor) -> Tensor:
        """
        Maps latent codes onto the image space.
        :param z: (Tensor) [B x D]
        :return: (Tensor) [B x C x H x W]
        """
        result = self.decoder_input(z)
        result = result.view(-1, self.decoder_start_channels, 2, 2)
        result = self.decoder(result)
        result = self.final_layer(result)
        return result

    def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
        """
        Reparameterization trick: z = mu + std * eps with eps ~ N(0, I).
        A single sample is drawn to estimate the expectation in the loss.
        :param mu: (Tensor) Mean of the latent Gaussian
        :param logvar: (Tensor) Log-variance of the latent Gaussian
        :return: (Tensor) Sampled latent code
        """
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps * std + mu

    def forward(self, input: Tensor, **kwargs) -> Tensor:
        """Encode, sample, decode; returns [reconstruction, input, mu, log_var]."""
        mu, log_var = self.encode(input)
        z = self.reparameterize(mu, log_var)
        return [self.decode(z), input, mu, log_var]

    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        """
        Beta-VAE loss.
        'H': recons + beta * kld_weight * KL
        'B': recons + gamma * kld_weight * |KL - C|, with the capacity C
             annealed linearly from 0 to C_max over C_stop_iter calls.
        args = [recons, input, mu, log_var]; kwargs must contain 'M_N'.
        :return: dict with 'loss', 'Reconstruction_Loss' and 'KLD'
        """
        self.num_iter += 1
        recons = args[0]
        input = args[1]
        mu = args[2]
        log_var = args[3]
        kld_weight = kwargs['M_N']  # Account for the minibatch samples from the dataset

        recons_loss = F.mse_loss(recons, input)
        kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)

        if self.loss_type == 'H':  # https://openreview.net/forum?id=Sy2fzU9gl
            loss = recons_loss + self.beta * kld_weight * kld_loss
        elif self.loss_type == 'B':  # https://arxiv.org/pdf/1804.03599.pdf
            self.C_max = self.C_max.to(input.device)
            C = torch.clamp(self.C_max / self.C_stop_iter * self.num_iter, 0, self.C_max.data[0])
            loss = recons_loss + self.gamma * kld_weight * (kld_loss - C).abs()
        else:
            raise ValueError('Undefined loss type.')

        return {'loss': loss, 'Reconstruction_Loss': recons_loss, 'KLD': kld_loss}

    def sample(self,
               num_samples: int,
               current_device: int, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        z = torch.randn(num_samples,
                        self.latent_dim)
        z = z.to(current_device)
        samples = self.decode(z)
        return samples

    def generate(self, x: Tensor, **kwargs) -> Tensor:
        """
        Given an input image x, returns the reconstructed image
        :param x: (Tensor) [B x C x H x W]
        :return: (Tensor) [B x C x H x W]
        """
        return self.forward(x)[0]
PyTorch-VAE | PyTorch-VAE-master/models/types_.py | from typing import List, Callable, Union, Any, TypeVar, Tuple
# from torch import tensor as Tensor
# Lightweight alias used in the models' type annotations; a TypeVar stands
# in for torch.Tensor so this module does not need to import torch.
Tensor = TypeVar('torch.tensor')
| 133 | 25.8 | 61 | py |
PyTorch-VAE | PyTorch-VAE-master/models/lvae.py | import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from .types_ import *
from math import floor, pi, log
def conv_out_shape(img_size):
    """Spatial size produced by a kernel-3, stride-2, padding-1 convolution."""
    # floor((size + 2*pad - kernel) / stride) + 1 with pad=1, kernel=3, stride=2
    return floor((img_size - 1) / 2.) + 1
class EncoderBlock(nn.Module):
    """One bottom-up rung of the LVAE inference ladder: a strided conv stage
    plus linear heads producing this level's Gaussian parameters."""

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 latent_dim: int,
                 img_size: int):
        super(EncoderBlock, self).__init__()

        # Downsampling stage (halves the spatial resolution)
        self.encoder = nn.Sequential(
            nn.Conv2d(in_channels,
                      out_channels,
                      kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU())

        feat_size = conv_out_shape(img_size)
        flat_dim = out_channels * feat_size ** 2
        self.encoder_mu = nn.Linear(flat_dim, latent_dim)
        self.encoder_var = nn.Linear(flat_dim, latent_dim)

    def forward(self, input: Tensor) -> Tensor:
        feature_map = self.encoder(input)
        flat = torch.flatten(feature_map, start_dim=1)

        # Gaussian parameters of this rung's approximate posterior
        return [feature_map, self.encoder_mu(flat), self.encoder_var(flat)]
class LadderBlock(nn.Module):
    """One top-down rung of the LVAE generative ladder: maps the latent code
    from the level above to Gaussian parameters for the level below."""

    def __init__(self,
                 in_channels: int,
                 latent_dim: int):
        super(LadderBlock, self).__init__()

        # Shared transform followed by separate mean / log-variance heads
        self.decode = nn.Sequential(nn.Linear(in_channels, latent_dim),
                                    nn.BatchNorm1d(latent_dim))
        self.fc_mu = nn.Linear(latent_dim, latent_dim)
        self.fc_var = nn.Linear(latent_dim, latent_dim)

    def forward(self, z: Tensor) -> Tensor:
        hidden = self.decode(z)
        return [self.fc_mu(hidden), self.fc_var(hidden)]
class LVAE(BaseVAE):
    """
    Ladder VAE: a hierarchy of latent variables where the bottom-up inference
    path (EncoderBlocks) and the top-down generative path (LadderBlocks) are
    combined by precision-weighted merging of their Gaussian estimates at
    every rung.
    """

    def __init__(self,
                 in_channels: int,
                 latent_dims: List,
                 hidden_dims: List,
                 **kwargs) -> None:
        super(LVAE, self).__init__()

        self.latent_dims = latent_dims
        self.hidden_dims = hidden_dims
        self.num_rungs = len(latent_dims)

        assert len(latent_dims) == len(hidden_dims), "Length of the latent" \
                                                     "and hidden dims must be the same"

        # Build Encoder (one EncoderBlock per rung, bottom-up)
        modules = []
        img_size = 64  # NOTE(review): assumes 64x64 inputs -- confirm upstream
        for i, h_dim in enumerate(hidden_dims):
            modules.append(EncoderBlock(in_channels,
                                        h_dim,
                                        latent_dims[i],
                                        img_size))
            # Track the shrinking spatial size for the next rung's flatten
            img_size = conv_out_shape(img_size)
            in_channels = h_dim

        self.encoders = nn.Sequential(*modules)

        # ====================================================================== #
        # Build Decoder
        modules = []

        # Ladder blocks run top-down: deepest latent to shallowest
        for i in range(self.num_rungs - 1, 0, -1):
            modules.append(LadderBlock(latent_dims[i],
                                       latent_dims[i - 1]))

        self.ladders = nn.Sequential(*modules)

        self.decoder_input = nn.Linear(latent_dims[0], hidden_dims[-1] * 4)

        # hidden_dims is reversed in place here and restored at the end
        # of __init__ (second reverse below).
        hidden_dims.reverse()
        modules = []

        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride = 2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )
        self.decoder = nn.Sequential(*modules)

        self.final_layer = nn.Sequential(
            nn.ConvTranspose2d(hidden_dims[-1],
                               hidden_dims[-1],
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1),
            nn.BatchNorm2d(hidden_dims[-1]),
            nn.LeakyReLU(),
            nn.Conv2d(hidden_dims[-1], out_channels= 3,
                      kernel_size= 3, padding= 1),
            nn.Tanh())
        hidden_dims.reverse()

    def encode(self, input: Tensor) -> List[Tensor]:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (Tensor) List of (mu, log_var) pairs, one per rung, bottom-up
        """
        h = input

        # Posterior Parameters
        post_params = []
        for encoder_block in self.encoders:
            h, mu, log_var = encoder_block(h)
            post_params.append((mu, log_var))

        return post_params

    def decode(self, z: Tensor, post_params: List) -> Tuple:
        """
        Maps the given latent codes
        onto the image space.
        :param z: (Tensor) [B x D] top-rung latent sample
        :param post_params: bottom-up posterior parameters from encode();
            NOTE: this list is reversed in place.
        :return: (Tensor) [B x C x H x W] reconstruction and the accumulated KL
        """
        kl_div = 0
        post_params.reverse()
        # Walk the ladder top-down, merging bottom-up and top-down Gaussians
        for i, ladder_block in enumerate(self.ladders):
            mu_e, log_var_e = post_params[i]
            mu_t, log_var_t = ladder_block(z)
            mu, log_var = self.merge_gauss(mu_e, mu_t,
                                           log_var_e, log_var_t)
            z = self.reparameterize(mu, log_var)
            kl_div += self.compute_kl_divergence(z, (mu, log_var), (mu_e, log_var_e))

        result = self.decoder_input(z)
        result = result.view(-1, self.hidden_dims[-1], 2, 2)
        result = self.decoder(result)
        return self.final_layer(result), kl_div

    def merge_gauss(self,
                    mu_1: Tensor,
                    mu_2: Tensor,
                    log_var_1: Tensor,
                    log_var_2: Tensor) -> List:
        """Precision-weighted merge of two diagonal Gaussians
        (1e-7 guards against division by zero)."""
        p_1 = 1. / (log_var_1.exp() + 1e-7)
        p_2 = 1. / (log_var_2.exp() + 1e-7)

        mu = (mu_1 * p_1 + mu_2 * p_2)/(p_1 + p_2)
        log_var = torch.log(1./(p_1 + p_2))
        return [mu, log_var]

    def compute_kl_divergence(self, z: Tensor, q_params: Tuple, p_params: Tuple):
        """Closed-form KL(q || p) between diagonal Gaussians, summed over
        the latent dimension; z is unused in the analytic form."""
        mu_q, log_var_q = q_params
        mu_p, log_var_p = p_params

        #
        # qz = -0.5 * torch.sum(1 + log_var_q + (z - mu_q) ** 2 / (2 * log_var_q.exp() + 1e-8), dim=1)
        # pz = -0.5 * torch.sum(1 + log_var_p + (z - mu_p) ** 2 / (2 * log_var_p.exp() + 1e-8), dim=1)
        kl = (log_var_p - log_var_q) + (log_var_q.exp() + (mu_q - mu_p)**2)/(2 * log_var_p.exp()) - 0.5
        kl = torch.sum(kl, dim = -1)
        return kl

    def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
        """
        Reparameterization trick to sample from N(mu, var) from
        N(0,1).
        :param mu: (Tensor) Mean of the latent Gaussian [B x D]
        :param logvar: (Tensor) Log-variance of the latent Gaussian [B x D]
        :return: (Tensor) [B x D]
        """
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps * std + mu

    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        post_params = self.encode(input)
        # Start from the top-most rung's posterior
        mu, log_var = post_params.pop()
        z = self.reparameterize(mu, log_var)
        recons, kl_div = self.decode(z, post_params)
        # kl_div += -0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim = 1)
        return [recons, input, kl_div]

    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        """
        Computes the VAE loss function.
        KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
        :param args: [recons, input, kl_div] as returned by forward()
        :param kwargs: must contain 'M_N', the KL weight
        :return: dict with 'loss', 'Reconstruction_Loss' and 'KLD'
        """
        recons = args[0]
        input = args[1]
        kl_div = args[2]

        kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset
        recons_loss =F.mse_loss(recons, input)

        kld_loss = torch.mean(kl_div, dim = 0)
        loss = recons_loss + kld_weight * kld_loss
        return {'loss': loss, 'Reconstruction_Loss':recons_loss, 'KLD':-kld_loss }

    def sample(self,
               num_samples:int,
               current_device: int, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        # Sample at the deepest rung, then run the full top-down ladder
        z = torch.randn(num_samples,
                        self.latent_dims[-1])
        z = z.to(current_device)

        for ladder_block in self.ladders:
            mu, log_var = ladder_block(z)
            z = self.reparameterize(mu, log_var)

        result = self.decoder_input(z)
        result = result.view(-1, self.hidden_dims[-1], 2, 2)
        result = self.decoder(result)
        samples = self.final_layer(result)
        return samples

    def generate(self, x: Tensor, **kwargs) -> Tensor:
        """
        Given an input image x, returns the reconstructed image
        :param x: (Tensor) [B x C x H x W]
        :return: (Tensor) [B x C x H x W]
        """
        return self.forward(x)[0]
PyTorch-VAE | PyTorch-VAE-master/tests/test_cat_vae.py | import torch
import unittest
from models import GumbelVAE
from torchsummary import summary
class TestVAE(unittest.TestCase):
    """Smoke tests for the Gumbel-Softmax (categorical) VAE."""

    def setUp(self) -> None:
        self.model = GumbelVAE(3, 10)

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Forward a random batch and report the reconstruction shape."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random batch."""
        batch = torch.randn(128, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005, batch_idx=5)
        print(losses)

    def test_sample(self):
        """Draw prior samples on the GPU and report their shape."""
        self.model.cuda()
        samples = self.model.sample(144, 0)
        print(samples.shape)
if __name__ == '__main__':
unittest.main() | 951 | 24.052632 | 74 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/test_joint_Vae.py | import torch
import unittest
from models import JointVAE
from torchsummary import summary
class TestVAE(unittest.TestCase):
    """Smoke tests for the joint continuous/discrete VAE."""

    def setUp(self) -> None:
        self.model = JointVAE(3, 10, 40, 0.0)

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Forward a random batch and report the reconstruction shape."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random batch."""
        batch = torch.randn(128, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005, batch_idx=5)
        print(losses)

    def test_sample(self):
        """Draw prior samples on the GPU and report their shape."""
        self.model.cuda()
        samples = self.model.sample(144, 0)
        print(samples.shape)
if __name__ == '__main__':
unittest.main() | 958 | 24.236842 | 74 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/test_wae.py | import torch
import unittest
from models import WAE_MMD
from torchsummary import summary
class TestWAE(unittest.TestCase):
    """Smoke tests for the MMD Wasserstein autoencoder."""

    def setUp(self) -> None:
        self.model = WAE_MMD(3, 10, reg_weight=100)

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Forward a random batch and report the reconstruction shape."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random batch."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs)
        print(losses)
if __name__ == '__main__':
unittest.main() | 787 | 24.419355 | 64 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/text_cvae.py | import torch
import unittest
from models import CVAE
class TestCVAE(unittest.TestCase):
    """Smoke tests for the conditional VAE."""

    def setUp(self) -> None:
        self.model = CVAE(3, 40, 10)

    def test_forward(self):
        """Forward a random batch with conditioning vectors."""
        batch = torch.randn(16, 3, 64, 64)
        conditions = torch.randn(16, 40)
        outputs = self.model(batch, conditions)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss with label conditioning."""
        batch = torch.randn(16, 3, 64, 64)
        conditions = torch.randn(16, 40)
        outputs = self.model(batch, labels=conditions)
        losses = self.model.loss_function(*outputs, M_N=0.005)
        print(losses)
if __name__ == '__main__':
unittest.main() | 705 | 24.214286 | 64 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/test_swae.py | import torch
import unittest
from models import SWAE
from torchsummary import summary
class TestSWAE(unittest.TestCase):
    """Smoke tests for the sliced Wasserstein autoencoder."""

    def setUp(self) -> None:
        self.model = SWAE(3, 10, reg_weight=100)

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Forward a random batch and report the reconstruction shape."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random batch."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs)
        print(losses)
if __name__ == '__main__':
unittest.main() | 782 | 24.258065 | 64 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/bvae.py | import torch
import unittest
from models import BetaVAE
from torchsummary import summary
class TestVAE(unittest.TestCase):
    """Smoke tests for the Beta-VAE ('H' objective) on the GPU."""

    def setUp(self) -> None:
        self.model = BetaVAE(3, 10, loss_type='H').cuda()

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Forward a random batch and report the reconstruction shape."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random GPU batch."""
        batch = torch.randn(16, 3, 64, 64).cuda()
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005)
        print(losses)
if __name__ == '__main__':
unittest.main() | 846 | 25.46875 | 64 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/test_iwae.py | import torch
import unittest
from models import IWAE
from torchsummary import summary
class TestIWAE(unittest.TestCase):
    """Smoke tests for the importance-weighted autoencoder."""

    def setUp(self) -> None:
        self.model = IWAE(3, 10)

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Forward a random batch and report the reconstruction shape."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random batch."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005)
        print(losses)

    def test_sample(self):
        """Draw prior samples on the GPU."""
        self.model.cuda()
        self.model.sample(144, 0)
if __name__ == '__main__':
unittest.main() | 904 | 24.138889 | 64 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/test_lvae.py | import torch
import unittest
from models import LVAE
from torchsummary import summary
class TestLVAE(unittest.TestCase):
    """Smoke tests for the ladder VAE with a five-rung hierarchy."""

    def setUp(self) -> None:
        self.model = LVAE(3, [4, 8, 16, 32, 128], hidden_dims=[32, 64, 128, 256, 512])

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Forward a random batch and report the reconstruction shape."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random batch."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005)
        print(losses)

    def test_sample(self):
        """Draw prior samples on the GPU and report their shape."""
        self.model.cuda()
        samples = self.model.sample(144, 0)
        print(samples.shape)
if __name__ == '__main__':
unittest.main() | 977 | 24.736842 | 81 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/test_vae.py | import torch
import unittest
from models import VanillaVAE
from torchsummary import summary
class TestVAE(unittest.TestCase):
    """Smoke tests for the vanilla VAE."""

    def setUp(self) -> None:
        self.model = VanillaVAE(3, 10)

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Forward a random batch and report the reconstruction shape."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random batch."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005)
        print(losses)
if __name__ == '__main__':
unittest.main() | 823 | 24.75 | 64 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/text_vamp.py | import torch
import unittest
from models import VampVAE
from torchsummary import summary
class TestVVAE(unittest.TestCase):
    """Smoke tests for the VampPrior VAE on the GPU."""

    def setUp(self) -> None:
        self.model = VampVAE(3, latent_dim=10).cuda()

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Forward a random batch and report the reconstruction shape."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random GPU batch."""
        batch = torch.randn(144, 3, 64, 64).cuda()
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005)
        print(losses)
if __name__ == '__main__':
unittest.main() | 844 | 25.40625 | 64 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/test_miwae.py | import torch
import unittest
from models import MIWAE
from torchsummary import summary
class TestMIWAE(unittest.TestCase):
    """Smoke tests for the multiply-importance-weighted autoencoder."""

    def setUp(self) -> None:
        self.model = MIWAE(3, 10)

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Forward a random batch and report the reconstruction shape."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random batch."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005)
        print(losses)

    def test_sample(self):
        """Draw prior samples on the GPU and report their shape."""
        self.model.cuda()
        samples = self.model.sample(144, 0)
        print(samples.shape)

    def test_generate(self):
        """Reconstruct a random batch and report the output shape."""
        batch = torch.randn(16, 3, 64, 64)
        recons = self.model.generate(batch)
        print(recons.shape)
if __name__ == '__main__':
unittest.main() | 1,057 | 24.190476 | 64 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/test_dipvae.py | import torch
import unittest
from models import DIPVAE
from torchsummary import summary
class TestDIPVAE(unittest.TestCase):
    """Smoke tests for the disentangled-inferred-prior VAE."""

    def setUp(self) -> None:
        self.model = DIPVAE(3, 64)

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Report parameter count, then forward a random batch."""
        print(sum(p.numel() for p in self.model.parameters() if p.requires_grad))
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random batch."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005)
        print(losses)

    def test_sample(self):
        """Draw prior samples on the GPU and report their shape."""
        self.model.cuda()
        samples = self.model.sample(8, 'cuda')
        print(samples.shape)

    def test_generate(self):
        """Reconstruct a random batch and report the output shape."""
        batch = torch.randn(16, 3, 64, 64)
        recons = self.model.generate(batch)
        print(recons.shape)
if __name__ == '__main__':
unittest.main() | 1,145 | 25.651163 | 81 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/test_gvae.py | import torch
import unittest
from models import GammaVAE
from torchsummary import summary
class TestGammaVAE(unittest.TestCase):
    """Smoke tests for the Gamma-prior VAE."""

    def setUp(self) -> None:
        self.model = GammaVAE(3, 10)

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Forward a random batch and report the reconstruction shape."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random batch."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005)
        print(losses)

    def test_sample(self):
        """Draw prior samples on the GPU."""
        self.model.cuda()
        self.model.sample(144, 0)
if __name__ == '__main__':
unittest.main() | 920 | 22.025 | 64 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/test_fvae.py | import torch
import unittest
from models import FactorVAE
from torchsummary import summary
class TestFAE(unittest.TestCase):
    """Smoke tests for the FactorVAE and its discriminator optimizer."""

    def setUp(self) -> None:
        self.model = FactorVAE(3, 10)

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Forward a random batch and report the reconstruction shape."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Run both optimizer passes (VAE and discriminator) of the loss."""
        batch = torch.randn(16, 3, 64, 64)
        second_batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005, optimizer_idx=0, secondary_input=second_batch)
        losses = self.model.loss_function(*outputs, M_N=0.005, optimizer_idx=1, secondary_input=second_batch)
        print(losses)

    def test_optim(self):
        """The model and its discriminator expose separate parameter groups."""
        torch.optim.Adam(self.model.parameters(), lr=0.001)
        torch.optim.Adam(self.model.discrminator.parameters(), lr=0.001)

    def test_sample(self):
        """Draw prior samples on the GPU."""
        self.model.cuda()
        self.model.sample(144, 0)
if __name__ == '__main__':
unittest.main() | 1,368 | 27.520833 | 98 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/test_dfc.py | import torch
import unittest
from models import DFCVAE
from torchsummary import summary
class TestDFCVAE(unittest.TestCase):
    """Smoke tests for the deep-feature-consistent VAE."""

    def setUp(self) -> None:
        self.model = DFCVAE(3, 10)

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Forward a random batch and report the reconstruction shape."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random batch."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005)
        print(losses)

    def test_sample(self):
        """Draw prior samples on the GPU."""
        self.model.cuda()
        self.model.sample(144, 0)
if __name__ == '__main__':
unittest.main() | 914 | 21.875 | 64 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/test_logcosh.py | import torch
import unittest
from models import LogCoshVAE
from torchsummary import summary
class TestVAE(unittest.TestCase):
    """Smoke tests for the log-cosh reconstruction VAE."""

    def setUp(self) -> None:
        self.model = LogCoshVAE(3, 10, alpha=10)

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Forward a random batch and report the reconstruction shape."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the loss on a uniform-random (non-Gaussian) batch."""
        batch = torch.rand(16, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005)
        print(losses)
if __name__ == '__main__':
unittest.main() | 832 | 25.03125 | 64 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/test_betatcvae.py | import torch
import unittest
from models import BetaTCVAE
from torchsummary import summary
class TestBetaTCVAE(unittest.TestCase):
    """Smoke tests for the total-correlation Beta-VAE."""

    def setUp(self) -> None:
        self.model = BetaTCVAE(3, 64, anneal_steps=100)

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Report parameter count, then forward a random batch."""
        print(sum(p.numel() for p in self.model.parameters() if p.requires_grad))
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random batch."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005)
        print(losses)

    def test_sample(self):
        """Draw prior samples on the GPU and report their shape."""
        self.model.cuda()
        samples = self.model.sample(8, 'cuda')
        print(samples.shape)

    def test_generate(self):
        """Reconstruct a random batch and report the output shape."""
        batch = torch.randn(16, 3, 64, 64)
        recons = self.model.generate(batch)
        print(recons.shape)
if __name__ == '__main__':
unittest.main() | 1,173 | 26.302326 | 81 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/test_hvae.py | import torch
import unittest
from models import HVAE
from torchsummary import summary
class TestHVAE(unittest.TestCase):
    """Smoke tests for the two-level hierarchical VAE."""

    def setUp(self) -> None:
        self.model = HVAE(3, latent1_dim=10, latent2_dim=20)

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Forward a random batch and report the reconstruction shape."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random batch."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005)
        print(losses)
if __name__ == '__main__':
unittest.main() | 840 | 25.28125 | 64 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/test_mssimvae.py | import torch
import unittest
from models import MSSIMVAE
from torchsummary import summary
class TestMSSIMVAE(unittest.TestCase):
    """Smoke tests for the MS-SSIM reconstruction VAE."""

    def setUp(self) -> None:
        self.model = MSSIMVAE(3, 10)

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Forward a random batch and report the reconstruction shape."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random batch."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005)
        print(losses)

    def test_sample(self):
        """Draw prior samples on the GPU."""
        self.model.cuda()
        self.model.sample(144, 0)
if __name__ == '__main__':
unittest.main() | 920 | 22.025 | 64 | py |
PyTorch-VAE | PyTorch-VAE-master/tests/test_vq_vae.py | import torch
import unittest
from models import VQVAE
from torchsummary import summary
class TestVQVAE(unittest.TestCase):
    """Smoke tests for the vector-quantized VAE."""

    def setUp(self) -> None:
        self.model = VQVAE(3, 64, 512)

    def test_summary(self):
        """Print a layer-by-layer summary of the network."""
        print(summary(self.model, (3, 64, 64), device='cpu'))

    def test_forward(self):
        """Report parameter count, then forward a random batch."""
        print(sum(p.numel() for p in self.model.parameters() if p.requires_grad))
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        print("Model Output size:", outputs[0].size())

    def test_loss(self):
        """Evaluate the training loss on a random batch."""
        batch = torch.randn(16, 3, 64, 64)
        outputs = self.model(batch)
        losses = self.model.loss_function(*outputs, M_N=0.005)
        print(losses)

    def test_sample(self):
        """Draw prior samples on the GPU and report their shape."""
        self.model.cuda()
        samples = self.model.sample(8, 'cuda')
        print(samples.shape)

    def test_generate(self):
        """Reconstruct a random batch and report the output shape."""
        batch = torch.randn(16, 3, 64, 64)
        recons = self.model.generate(batch)
        print(recons.shape)
if __name__ == '__main__':
unittest.main() | 1,147 | 25.697674 | 81 | py |
SGDL | SGDL-main/code/main.py | import torch
import time
import training
import model
import pickle
import utils
import dataloader
import parse
from parse import args, log_file
from prettytable import PrettyTable
utils.set_seed(args.seed)
mem_manager = dataloader.MemLoader(args)
train_dataset = dataloader.Loader(args)
Recmodel = model.LightGCN(train_dataset)
Recmodel = Recmodel.to(parse.device)
ltw = model.LTW(args.input, args.hidden1, args.output).cuda()
utils.Logging(log_file, str(args))
results = []
args.lr /= 5
opt = torch.optim.Adam(Recmodel.params(), lr=args.lr)
# ========== Phase I: Memorization ========== #
for epoch in range(args.epochs):
time_train = time.time()
output_information = training.memorization_train(train_dataset, Recmodel, opt)
train_log = PrettyTable()
train_log.field_names = ['Epoch', 'Loss', 'Time', 'Estimated Clean Ratio', 'Memory ratio']
clean_ratio = training.estimate_noise(mem_manager, Recmodel)
mem_ratio = training.memorization_test(mem_manager, Recmodel)
train_log.add_row(
[f'{epoch + 1}/{args.epochs}', output_information, f'{(time.time() - time_train):.3f}',
f'{clean_ratio:.5f}', f'{mem_ratio:.5f}']
)
utils.Logging(log_file, str(train_log))
# memorization point
if mem_ratio >= clean_ratio:
utils.Logging(log_file, f'==================Memorization Point==================')
break
trans_epoch = epoch
clean_dataset = dataloader.CleanLoader(args)
args.lr *= 5
best_epoch = epoch
# ========== Phase II: Self-Guided Learning ========== #
for epoch in range(trans_epoch, args.epochs):
if epoch % args.eval_freq == 0:
utils.Logging(log_file, f'======================Validation======================')
valid_log = PrettyTable()
valid_log.field_names = ['Precision', 'Recall', 'NDCG', 'Current Best Epoch']
valid_result = training.test(train_dataset, Recmodel, valid=True, multicore=args.multicore)
results.append(valid_result)
with open('./{}/results_{}_{}.pkl'.format(
args.dataset,
args.lr,
args.meta_lr
), 'wb') as f:
pickle.dump(results, f)
is_stop, is_save = utils.EarlyStop(results)
# save current best model
if is_save:
best_epoch = epoch
torch.save(Recmodel.state_dict(), './{}/model_{}_{}_{}_{}_{}_schedule_{}.pth'.format(
args.dataset,
args.lr,
args.meta_lr,
args.model,
args.schedule_type,
args.tau,
args.schedule_lr
))
valid_log.add_row(
[valid_result['precision'][0], valid_result['recall'][0], valid_result['ndcg'][0], best_epoch]
)
utils.Logging(log_file, str(valid_log))
if is_stop:
break
time_train = time.time()
if args.schedule_type == 'reinforce':
output_information = training.self_guided_train_schedule_reinforce(train_dataset, clean_dataset, Recmodel, ltw)
elif args.schedule_type == 'gumbel':
output_information = training.self_guided_train_schedule_gumbel(train_dataset, clean_dataset, Recmodel, ltw)
else:
utils.Logging(log_file, 'Invalid scheduler type !')
exit()
train_log = PrettyTable()
train_log.field_names = ['Epoch', 'Train Loss', "Meta Loss", "Time"]
train_log.add_row(
[f'{epoch + 1}/{args.epochs}', output_information[0], output_information[1], f'{(time.time()-time_train):.3f}']
)
utils.Logging(log_file, str(train_log))
# ========== Test ========== #
utils.Logging(log_file, f'=========================Test=========================')
state = torch.load('./{}/model_{}_{}_{}_{}_{}_schedule_{}.pth'.format(
args.dataset,
args.lr,
args.meta_lr,
args.model,
args.schedule_type,
args.tau,
args.schedule_lr
))
Recmodel.load_state_dict(state)
training.test(train_dataset, Recmodel, valid=False, multicore=args.multicore)
| 4,100 | 35.292035 | 119 | py |
SGDL | SGDL-main/code/dataloader.py | import torch
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from scipy.sparse import csr_matrix
import scipy.sparse as sp
from time import time
import parse
class MemLoader(Dataset):
    '''
    Memorization management.
    Function: generate and update memorized data.

    Tracks, per training interaction, whether the model "memorized" it in each
    of the last `history_len` epochs (sliding window), and exports the stably
    memorized subset as clean data.
    '''

    def __init__(self, config):
        self.path = f'../data/{config.dataset}'
        self.dataset = config.dataset
        # Length of the sliding memorization-history window (in epochs).
        self.history_len = config.history_len
        self.n_user = 0
        self.m_item = 0
        self.config = config
        print('Preparing memloader...')
        train_file = self.path + f'/{self.dataset}.train.rating'
        train_data = pd.read_csv(
            train_file,
            sep='\t', header=None, names=['user', 'item', 'noisy'],
            usecols=[0, 1, 2], dtype={0: np.int32, 1: np.int32, 2: np.int32}
        )
        if self.dataset == 'adressa':
            # adressa uses fixed, precomputed user/item counts.
            self.n_user = 212231
            self.m_item = 6596
        else:
            self.n_user = train_data['user'].max() + 1
            self.m_item = train_data['item'].max() + 1
        # record number of interactions of each user (index-aligned with user id)
        self.user_pos_counts = pd.value_counts(train_data['user']).sort_index()
        self.trainUniqueUsers = np.array(range(self.n_user))
        self.trainUser = train_data['user'].values
        self.trainItem = train_data['item'].values
        self.traindataSize = len(self.trainItem)
        # memorization history matrix, 1 for memorized and 0 for non-memorized;
        # one row per training interaction, one column per window slot
        self.mem_dict = np.zeros((self.traindataSize, self.history_len), dtype=np.int8)
        # loop pointer that indicates current epoch, incremented at the beginning of each epoch
        self.mem_dict_p = -1
        # map from (u, i) to row position of the memorization history matrix;
        # -1 marks pairs that are not training interactions
        self.index_map = np.zeros((self.n_user, self.m_item), dtype=np.int32)
        self.index_map[:, :] = -1
        for ii in range(self.traindataSize):
            u = self.trainUser[ii]
            i = self.trainItem[ii]
            self.index_map[u][i] = ii
        self.UserItemNet = csr_matrix((np.ones(len(self.trainUser)), (self.trainUser, self.trainItem)),
                                      shape=(self.n_user, self.m_item))
        # Degree vectors with zeros clamped to 1 (avoids division by zero downstream).
        self.users_D = np.array(self.UserItemNet.sum(axis=1)).squeeze()
        self.users_D[self.users_D == 0.] = 1
        self.items_D = np.array(self.UserItemNet.sum(axis=0)).squeeze()
        self.items_D[self.items_D == 0.] = 1.
        self._allPos = self.getUserPosItems(list(range(self.n_user)))

    def updateMemDict(self, users, items):
        '''
        Record this epoch's memorized (user, item) pairs in the current
        window slot.

        users and items: memorized pairs (parallel sequences).
        '''
        # increment pointer
        self.mem_dict_p += 1
        # loop pointer (wrap around the sliding window)
        self.mem_dict_p %= self.history_len
        # initialize (clear) memorization record of current epoch
        self.mem_dict[:, self.mem_dict_p] = 0
        indexes = []
        for i in range(len(users)):
            index = self.index_map[users[i]][items[i]]
            # pairs outside the training set (index -1) are ignored
            if index != -1:
                indexes.append(index)
        self.mem_dict[indexes, self.mem_dict_p] = 1
        self.UserItemNet = csr_matrix((np.ones(len(self.trainUser)), (self.trainUser, self.trainItem)),
                                      shape=(self.n_user, self.m_item))
        self.users_D = np.array(self.UserItemNet.sum(axis=1)).squeeze()
        self.users_D[self.users_D == 0.] = 1
        self.items_D = np.array(self.UserItemNet.sum(axis=0)).squeeze()
        self.items_D[self.items_D == 0.] = 1.
        self._allPos = self.getUserPosItems(list(range(self.n_user)))

    def generate_clean_data(self):
        '''
        Generate memorized data: an interaction counts as memorized when it was
        memorized in at least half of the window slots. Writes the memorized
        pairs to clean_data_{model}_{lr}.txt and returns the memorized ratio,
        or False when nothing is memorized yet.
        '''
        ismem_dict = np.sum(self.mem_dict, axis=1) >= self.history_len / 2
        mem_num = np.sum(ismem_dict)
        if mem_num > 0:
            indexes = np.argwhere(ismem_dict == True).reshape(1, -1)[0]
            clean_us = np.array(self.trainUser)[indexes]
            clean_is = np.array(self.trainItem)[indexes]
            clean_data = {'user': clean_us, 'item': clean_is}
            df = pd.DataFrame(clean_data)
            df.to_csv('./{}/clean_data_{}_{}.txt'.format(
                self.dataset, self.config.model, self.config.lr),
                header=False, index=False, sep='\t')
            return mem_num / self.traindataSize
        else:
            return False

    @property
    def n_users(self):
        return self.n_user

    @property
    def m_items(self):
        return self.m_item

    @property
    def trainDataSize(self):
        return self.traindataSize

    @property
    def allPos(self):
        return self._allPos

    def getUserPosItems(self, users):
        # Positive item ids per user, read from the sparse interaction matrix.
        posItems = []
        for user in users:
            posItems.append(self.UserItemNet[user].nonzero()[1])
        return posItems
class Loader(Dataset):
    """Train/valid/test dataset loader.

    Parses `<dataset>.train.rating`, `<dataset>.valid.rating` and
    `<dataset>.test.negative`, builds the sparse user-item interaction matrix,
    and lazily builds the normalized bipartite adjacency for graph models.

    Bug fix vs. original: the `evalDict` property referenced `self.__evalDict`,
    which was never initialized (so every access raised AttributeError); it is
    now built lazily on first access via `__build_eval`.
    """

    def __init__(self, config):
        self.path = f'../data/{config.dataset}'
        self.dataset = config.dataset
        print(f'loading [{self.path}]...')
        self.split = config.A_split
        self.folds = config.A_n_fold
        self.n_user = 0
        self.m_item = 0
        self.config = config
        train_file = self.path + f'/{self.dataset}.train.rating'
        test_file = self.path + f'/{self.dataset}.test.negative'
        valid_file = self.path + f'/{self.dataset}.valid.rating'
        trainItem, trainUser = [], []
        testUniqueUsers, testItem, testUser = [], [], []
        # loading training file (tab-separated "user\titem" lines)
        with open(train_file, 'r') as f:
            line = f.readline()
            while line and line != '':
                arr = line.split('\t')
                u = int(arr[0])
                i = int(arr[1])
                self.m_item = max(self.m_item, i)
                self.n_user = max(self.n_user, u)
                trainUser.append(u)
                trainItem.append(i)
                line = f.readline()
        self.trainUser = np.array(trainUser)
        self.trainItem = np.array(trainItem)
        self.trainUniqueUsers = np.array(list(set(trainUser)))
        self.traindataSize = len(trainUser)
        # loading validation file
        validUser, validItem, validUniqueusers = [], [], []
        with open(valid_file, 'r') as f:
            line = f.readline()
            while line and line != '':
                arr = line.split('\t')
                u = int(arr[0])
                i = int(arr[1])
                self.m_item = max(self.m_item, i)
                self.n_user = max(self.n_user, u)
                validUser.append(u)
                validItem.append(i)
                line = f.readline()
        self.validUser = np.array(validUser)
        self.validItem = np.array(validItem)
        self.validUniqueUsers = np.array(list(set(validUser)))
        self.validdataSize = len(self.validItem)
        # loading test file
        with open(test_file, 'r') as f:
            line = f.readline()
            while line and line != '':
                arr = line.split('\t')
                if self.dataset == 'adressa':
                    # adressa stores the pair as a "(u, i)" tuple literal in column 0
                    u = eval(arr[0])[0]
                    i = eval(arr[0])[1]
                else:
                    u = int(arr[0])
                    i = int(arr[1])
                self.m_item = max(self.m_item, i)
                self.n_user = max(self.n_user, u)
                testUser.append(u)
                testItem.append(i)
                line = f.readline()
        # ids are 0-based, so counts are max id + 1
        self.m_item += 1
        self.n_user += 1
        self.testUser = np.array(testUser)
        self.testItem = np.array(testItem)
        self.testUniqueUsers = np.array(list(set(testUser)))
        self.testdataSize = len(self.testItem)
        self.Graph = None
        self.UserItemNet = csr_matrix((np.ones(len(self.trainUser)), (self.trainUser, self.trainItem)),
                                      shape=(self.n_user, self.m_item))
        # Degree vectors with zeros clamped to 1 (avoids division by zero downstream).
        self.users_D = np.array(self.UserItemNet.sum(axis=1)).squeeze()
        self.users_D[self.users_D == 0.] = 1
        self.items_D = np.array(self.UserItemNet.sum(axis=0)).squeeze()
        self.items_D[self.items_D == 0.] = 1.
        self._allPos = self.getUserPosItems(list(range(self.n_user)))
        self.__testDict = self.__build_test()
        self.__validDict = self.__build_valid()

    @property
    def n_users(self):
        return self.n_user

    @property
    def m_items(self):
        return self.m_item

    @property
    def trainDataSize(self):
        return self.traindataSize

    @property
    def testDict(self):
        return self.__testDict

    @property
    def validDict(self):
        return self.__validDict

    @property
    def evalDict(self):
        # Fix: __evalDict was never assigned in __init__, so this property used
        # to raise AttributeError on every access. Build and cache it lazily.
        try:
            return self.__evalDict
        except AttributeError:
            self.__evalDict = self.__build_eval()
            return self.__evalDict

    @property
    def allPos(self):
        return self._allPos

    def _split_A_hat(self, A):
        # Split the (n+m) x (n+m) adjacency row-wise into `folds` sparse tensors.
        A_fold = []
        fold_len = (self.n_users + self.m_items) // self.folds
        for i_fold in range(self.folds):
            start = i_fold * fold_len
            if i_fold == self.folds - 1:
                end = self.n_users + self.m_items
            else:
                end = (i_fold + 1) * fold_len
            A_fold.append(self._convert_sp_mat_to_sp_tensor(A[start:end]).coalesce().to(parse.device))
        return A_fold

    def _convert_sp_mat_to_sp_tensor(self, X):
        # scipy sparse matrix -> torch sparse float tensor (COO layout).
        coo = X.tocoo().astype(np.float32)
        row = torch.Tensor(coo.row).long()
        col = torch.Tensor(coo.col).long()
        index = torch.stack([row, col])
        data = torch.FloatTensor(coo.data)
        return torch.sparse.FloatTensor(index, data, torch.Size(coo.shape))

    def getSparseGraph(self):
        # Symmetrically normalized adjacency D^-1/2 A D^-1/2 of the bipartite
        # user-item graph; cached on disk as s_pre_adj_mat.npz.
        print("loading adjacency matrix")
        if self.Graph is None:
            try:
                pre_adj_mat = sp.load_npz(f'{self.path}/s_pre_adj_mat.npz')
                print("successfully loaded...")
                norm_adj = pre_adj_mat
            except:
                print("generating adjacency matrix")
                s = time()
                adj_mat = sp.dok_matrix((self.n_users + self.m_items, self.n_users + self.m_items), dtype=np.float32)
                adj_mat = adj_mat.tolil()
                R = self.UserItemNet.tolil()
                adj_mat[:self.n_users, self.n_users:] = R
                adj_mat[self.n_users:, :self.n_users] = R.T
                adj_mat = adj_mat.todok()
                # adj_mat = adj_mat + sp.eye(adj_mat.shape[0])
                rowsum = np.array(adj_mat.sum(axis=1))
                d_inv = np.power(rowsum, -0.5).flatten()
                d_inv[np.isinf(d_inv)] = 0.
                d_mat = sp.diags(d_inv)
                norm_adj = d_mat.dot(adj_mat)
                norm_adj = norm_adj.dot(d_mat)
                norm_adj = norm_adj.tocsr()
                end = time()
                print(f"costing {end - s}s, saved norm_mat...")
                sp.save_npz(f'{self.path}/s_pre_adj_mat.npz', norm_adj)
            if self.split == True:
                self.Graph = self._split_A_hat(norm_adj)
                print("done split matrix")
            else:
                self.Graph = self._convert_sp_mat_to_sp_tensor(norm_adj)
                self.Graph = self.Graph.coalesce().to(parse.device)
                print("don't split the matrix")
        return self.Graph

    def __build_test(self):
        """
        return:
            dict: {user: [items]} over the test split
        """
        test_data = {}
        for i, item in enumerate(self.testItem):
            user = self.testUser[i]
            if test_data.get(user):
                test_data[user].append(item)
            else:
                test_data[user] = [item]
        return test_data

    def __build_valid(self):
        # {user: [items]} over the validation split
        valid_data = {}
        for i, item in enumerate(self.validItem):
            user = self.validUser[i]
            if valid_data.get(user):
                valid_data[user].append(item)
            else:
                valid_data[user] = [item]
        return valid_data

    def __build_eval(self):
        # {user: [items]} over the training split (used via evalDict)
        eval_data = {}
        for i, item in enumerate(self.trainItem):
            user = self.trainUser[i]
            if eval_data.get(user):
                eval_data[user].append(item)
            else:
                eval_data[user] = [item]
        return eval_data

    def getUserItemFeedback(self, users, items):
        """
        users:
            shape [-1]
        items:
            shape [-1]
        return:
            feedback [-1] (0/1 uint8 per pair)
        """
        return np.array(self.UserItemNet[users, items]).astype('uint8').reshape((-1,))

    def getUserPosItems(self, users):
        # Positive item ids per user, read from the sparse interaction matrix.
        posItems = []
        for user in users:
            posItems.append(self.UserItemNet[user].nonzero()[1])
        return posItems
class CleanLoader(Dataset):
    """Dataset over the memorized ("clean") interactions exported in Phase I.

    Reads clean_data_{model}_{lr}.txt (tab-separated "user\titem" lines) and
    builds the sparse user-item interaction matrix plus degree vectors.
    """

    def __init__(self, config):
        self.path = f'{config.dataset}'
        self.split = config.A_split
        self.folds = config.A_n_fold
        self.n_user = 0
        self.m_item = 0
        self.config = config
        train_file = self.path + '/clean_data_{}_{}.txt'.format(config.model, config.lr)
        user_list, item_list = [], []
        with open(train_file, 'r') as fh:
            for record in fh:
                fields = record.split('\t')
                uid, iid = int(fields[0]), int(fields[1])
                self.n_user = max(self.n_user, uid)
                self.m_item = max(self.m_item, iid)
                user_list.append(uid)
                item_list.append(iid)
        self.trainUser = np.array(user_list)
        self.trainItem = np.array(item_list)
        self.trainUniqueUsers = np.array(list(set(user_list)))
        self.traindataSize = len(user_list)
        # ids are 0-based, so counts are max id + 1
        self.m_item += 1
        self.n_user += 1
        self.Graph = None
        ones = np.ones(len(self.trainUser))
        self.UserItemNet = csr_matrix((ones, (self.trainUser, self.trainItem)),
                                      shape=(self.n_user, self.m_item))
        # Degree vectors with zeros clamped to 1 (avoids division by zero downstream).
        self.users_D = np.array(self.UserItemNet.sum(axis=1)).squeeze()
        self.users_D[self.users_D == 0.] = 1
        self.items_D = np.array(self.UserItemNet.sum(axis=0)).squeeze()
        self.items_D[self.items_D == 0.] = 1.
        self._allPos = self.getUserPosItems(list(range(self.n_user)))
        self.__testDict = None
        self.__validDict = None

    @property
    def n_users(self):
        return self.n_user

    @property
    def m_items(self):
        return self.m_item

    @property
    def trainDataSize(self):
        return self.traindataSize

    @property
    def testDict(self):
        return self.__testDict

    @property
    def allPos(self):
        return self._allPos

    def getUserItemFeedback(self, users, items):
        """Return a 0/1 uint8 vector: whether each (user, item) pair was observed.

        users, items: parallel index sequences of shape [-1].
        """
        feedback = np.array(self.UserItemNet[users, items])
        return feedback.astype('uint8').reshape((-1,))

    def getUserPosItems(self, users):
        # Positive item ids per user, read from the sparse interaction matrix.
        return [self.UserItemNet[u].nonzero()[1] for u in users]
SGDL | SGDL-main/code/training.py | import numpy as np
import torch
import utils
import dataloader
from utils import timer
import model
import multiprocessing
from sklearn.mixture import GaussianMixture as GMM
from parse import args, log_file
import parse
from scheduler import Scheduler
from copy import deepcopy
CORES = multiprocessing.cpu_count() // 2
def memorization_train(dataset, recommend_model, opt):
    """Run one epoch of plain BPR training (Phase I).

    Samples one (user, pos, neg) triple per observed interaction, optimizes
    the model batch by batch, and returns the mean loss formatted as a string.
    """
    recommend_model.train()
    # sampling
    triples = utils.UniformSample(dataset)
    users = torch.Tensor(triples[:, 0]).long().to(parse.device)
    pos_items = torch.Tensor(triples[:, 1]).long().to(parse.device)
    neg_items = torch.Tensor(triples[:, 2]).long().to(parse.device)
    users, pos_items, neg_items = utils.shuffle(users, pos_items, neg_items)
    n_batches = len(users) // args.batch_size + 1
    running_loss = 0.
    batches = utils.minibatch(users, pos_items, neg_items, batch_size=args.batch_size)
    for batch_users, batch_pos, batch_neg in batches:
        loss, reg_loss = recommend_model.loss(batch_users, batch_pos, batch_neg)
        opt.zero_grad()
        loss.backward()
        opt.step()
        running_loss += loss.cpu().item()
    mean_loss = running_loss / n_batches
    timer.zero()
    return f"{mean_loss:.5f}"
def estimate_noise(dataset, recommend_model):
    '''
    Fit a two-component GMM to the (per-user min-max normalized) BPR losses
    and return the estimated fraction of *clean* interactions, i.e.
    1 - mean posterior probability of the high-loss (noisy) component.
    '''
    Recmodel: model.LightGCN = recommend_model
    Recmodel.eval()
    dataset: dataloader.MemLoader
    # sampling: one (user, pos, neg) triple per interaction
    S = utils.UniformSample(dataset)
    users_origin = torch.Tensor(S[:, 0]).long()
    posItems_origin = torch.Tensor(S[:, 1]).long()
    negItems_origin = torch.Tensor(S[:, 2]).long()
    users_origin = users_origin.to(parse.device)
    posItems_origin = posItems_origin.to(parse.device)
    negItems_origin = negItems_origin.to(parse.device)
    with torch.no_grad():
        losses = []
        for (batch_i,
             (batch_users,
              batch_pos,
              batch_neg)) in enumerate(utils.minibatch(users_origin,
                                                       posItems_origin,
                                                       negItems_origin,
                                                       batch_size=args.batch_size)):
            # reduce=False: keep per-sample losses
            loss, _ = Recmodel.loss(batch_users, batch_pos, batch_neg, reduce=False)
            # concat all losses
            if len(losses) == 0:
                losses = loss
            else:
                losses = torch.cat((losses, loss), dim=0)
        # split losses of each user; UniformSample iterates users in id order,
        # so consecutive slices of length user_pos_counts[u] belong to user u
        losses_u = []
        st, ed = 0, 0
        for count in dataset.user_pos_counts:
            ed = st + count
            losses_u.append(losses[st:ed])
            st = ed
        # min-max normalize losses of each user (skipped for single-interaction users)
        for i in range(len(losses_u)):
            if len(losses_u[i]) > 1:
                losses_u[i] = (losses_u[i] - losses_u[i].min()) / (losses_u[i].max() - losses_u[i].min())
        losses = torch.cat(losses_u, dim=0)
        losses = losses.reshape(-1, 1).cpu().detach().numpy()
        gmm = GMM(n_components=2, max_iter=100, tol=1e-2, reg_covar=5e-4)
        gmm.fit(losses)
        prob = gmm.predict_proba(losses)
        # posterior of the component with the larger mean loss, i.e. the noisy one
        prob = prob[:, gmm.means_.argmax()]
        return 1 - np.mean(prob)
def self_guided_train_schedule_reinforce(train_dataset, clean_dataset, recmodel, ltw:model.LTW):
    """One epoch of Phase II training with a REINFORCE-trained scheduler.

    Per noisy-data batch: (1) score each clean sample via the per-parameter
    cosine similarity between the gradients of its loss before and after a
    virtual model update; (2) sample a clean meta-batch from the scheduler's
    distribution; (3) meta-update the LTW weighting net (and the scheduler via
    REINFORCE with the meta-loss as reward); (4) update the recommender with
    the LTW-weighted noisy losses. Returns [train_loss, meta_loss] as strings.
    """
    train_loss, meta_loss = 0, 0
    scheduler = Scheduler(len(recmodel.state_dict())).cuda()
    recmodel.train()
    train_opt = torch.optim.Adam(recmodel.params(), lr=args.lr)
    meta_opt = torch.optim.Adam(ltw.params(), lr=args.meta_lr)
    schedule_opt = torch.optim.Adam(scheduler.parameters(), lr=args.schedule_lr)
    # sampling
    with timer(name='Train Sample'):
        train_data = utils.UniformSample(train_dataset)
    with timer(name='Clean Sample'):
        clean_data = utils.UniformSample(clean_dataset)
    users = torch.Tensor(train_data[:, 0]).long().to(parse.device)
    posItems = torch.Tensor(train_data[:, 1]).long().to(parse.device)
    negItems = torch.Tensor(train_data[:, 2]).long().to(parse.device)
    users_clean = torch.Tensor(clean_data[:, 0]).long().to(parse.device)
    posItems_clean = torch.Tensor(clean_data[:, 1]).long().to(parse.device)
    negItems_clean = torch.Tensor(clean_data[:, 2]).long().to(parse.device)
    users, posItems, negItems = utils.shuffle(users, posItems, negItems)
    users_clean, posItems_clean, negItems_clean = utils.shuffle(users_clean, posItems_clean, negItems_clean)
    total_batch = len(users) // args.batch_size + 1
    clean_data_iter = iter(
        utils.minibatch(users_clean, posItems_clean, negItems_clean, batch_size=args.batch_size))
    for batch_i, (batch_users, batch_pos, batch_neg) in enumerate(utils.minibatch(users,
                                                                                  posItems,
                                                                                  negItems,
                                                                                  batch_size=args.batch_size)):
        # cycle through the (typically smaller) clean dataset
        try:
            batch_users_clean, batch_pos_clean, batch_neg_clean = next(clean_data_iter)
        except StopIteration:
            clean_data_iter = iter(utils.minibatch(users_clean,
                                                   posItems_clean,
                                                   negItems_clean,
                                                   batch_size=args.batch_size))
            batch_users_clean, batch_pos_clean, batch_neg_clean = next(clean_data_iter)
        meta_model = deepcopy(recmodel)
        # ============= get input of the scheduler ============= #
        # L_theta: per-sample clean losses under the current parameters theta
        L_theta, _ = meta_model.loss(batch_users_clean, batch_pos_clean, batch_neg_clean, reduce=False)
        L_theta = torch.reshape(L_theta, (len(L_theta), 1))
        grads_theta_list = []
        for k in range(len(batch_users_clean)):
            grads_theta_list.append(torch.autograd.grad(L_theta[k], (meta_model.params()), create_graph=True,
                                                        retain_graph=True))
        v_L_theta = ltw(L_theta.data)
        # assumed (virtual) update theta -> theta_hat on the LTW-weighted clean loss
        L_theta_meta = torch.sum(L_theta * v_L_theta) / len(batch_users_clean)
        meta_model.zero_grad()
        grads = torch.autograd.grad(L_theta_meta, (meta_model.params()), create_graph=True, retain_graph=True)
        meta_model.update_params(lr_inner=args.lr, source_params=grads)
        del grads
        L_theta_hat, _ = meta_model.loss(batch_users_clean, batch_pos_clean, batch_neg_clean, reduce=False)
        # for each sample, calculate gradients of the 2 losses (before/after the
        # virtual update) and their per-parameter cosine similarity
        input_embedding_cos = []
        for k in range(len(batch_users_clean)):
            task_grad_cos = []
            grads_theta = grads_theta_list[k]
            grads_theta_hat = torch.autograd.grad(L_theta_hat[k], (meta_model.params()), create_graph=True,
                                                  retain_graph=True)
            # calculate cosine similarity for each parameter
            for j in range(len(grads_theta)):
                task_grad_cos.append(scheduler.cosine(grads_theta[j].flatten().unsqueeze(0),
                                                      grads_theta_hat[j].flatten().unsqueeze(0))[0])
            del grads_theta
            del grads_theta_hat
            # stack similarity of each parameter
            task_grad_cos = torch.stack(task_grad_cos)
            # stack similarity of each sample
            input_embedding_cos.append(task_grad_cos.detach())
        # sample a clean meta-batch (with replacement) from the scheduler's distribution
        weight = scheduler(L_theta, torch.stack(input_embedding_cos).cuda())
        task_prob = torch.softmax(weight.reshape(-1), dim=-1)
        sample_idx = scheduler.sample_task(task_prob, len(batch_users_clean))
        batch_users_clean = batch_users_clean[sample_idx]
        batch_pos_clean = batch_pos_clean[sample_idx]
        batch_neg_clean = batch_neg_clean[sample_idx]
        # ============= training ============= #
        meta_model = deepcopy(recmodel)
        # assumed update of theta (theta -> theta') on the LTW-weighted noisy loss
        cost, reg_loss = meta_model.loss(batch_users, batch_pos, batch_neg, reduce=False)
        cost_v = torch.reshape(cost, (len(cost), 1))
        v_lambda = ltw(cost_v.data)
        l_f_meta = torch.sum(cost_v * v_lambda) / len(batch_users)
        meta_model.zero_grad()
        grads = torch.autograd.grad(l_f_meta, (meta_model.params()), create_graph=True)
        # load theta' and update params of ltw via the clean meta-loss below
        meta_model.update_params(lr_inner=args.lr, source_params=grads)
        del grads
        l_g_meta, _ = meta_model.loss(batch_users_clean, batch_pos_clean, batch_neg_clean)
        # REINFORCE: log-prob of the sampled indices, scaled by the meta-loss reward
        loss_schedule = 0
        for idx in sample_idx:
            loss_schedule += scheduler.m.log_prob(idx.cuda())
        reward = l_g_meta
        loss_schedule *= reward
        meta_opt.zero_grad()
        l_g_meta.backward(retain_graph=True)
        meta_opt.step()
        schedule_opt.zero_grad()
        loss_schedule.backward()
        schedule_opt.step()
        # reload and actually update theta with the (now frozen) LTW weights
        cost_w, _ = recmodel.loss(batch_users, batch_pos, batch_neg, reduce=False)
        cost_v = torch.reshape(cost_w, (len(cost_w), 1))
        with torch.no_grad():
            w_new = ltw(cost_v)
        loss = torch.sum(cost_v * w_new) / len(batch_users)
        train_opt.zero_grad()
        loss.backward()
        train_opt.step()
        recmodel.store_params()
        train_loss += loss.cpu().item()
        meta_loss += l_g_meta.cpu().item()
    train_loss /= total_batch
    meta_loss /= total_batch
    timer.zero()
    return [f'{train_loss:.5f}', f'{meta_loss:.5f}']
def self_guided_train_schedule_gumbel(train_dataset, clean_dataset, recmodel, ltw:model.LTW):
    """One epoch of Phase II training with a Gumbel-softmax scheduler.

    Same structure as the REINFORCE variant, but the clean meta-batch is
    selected differentiably: Gumbel-softmax (hard=True) rows mix the clean
    batch embeddings, so the scheduler receives gradients from the meta-loss
    directly instead of a REINFORCE reward. Returns [train_loss, meta_loss]
    formatted as strings.
    """
    train_loss, meta_loss = 0, 0
    scheduler = Scheduler(len(recmodel.state_dict())).cuda()
    recmodel.train()
    train_opt = torch.optim.Adam(recmodel.params(), lr=args.lr)
    meta_opt = torch.optim.Adam(ltw.params(), lr=args.meta_lr)
    schedule_opt = torch.optim.Adam(scheduler.parameters(), lr=args.schedule_lr)
    # sampling
    train_data = utils.UniformSample(train_dataset)
    clean_data = utils.UniformSample(clean_dataset)
    users = torch.Tensor(train_data[:, 0]).long().to(parse.device)
    posItems = torch.Tensor(train_data[:, 1]).long().to(parse.device)
    negItems = torch.Tensor(train_data[:, 2]).long().to(parse.device)
    users_clean = torch.Tensor(clean_data[:, 0]).long().to(parse.device)
    posItems_clean = torch.Tensor(clean_data[:, 1]).long().to(parse.device)
    negItems_clean = torch.Tensor(clean_data[:, 2]).long().to(parse.device)
    users, posItems, negItems = utils.shuffle(users, posItems, negItems)
    users_clean, posItems_clean, negItems_clean = utils.shuffle(users_clean, posItems_clean, negItems_clean)
    total_batch = len(users) // args.batch_size + 1
    clean_data_iter = iter(
        utils.minibatch(users_clean, posItems_clean, negItems_clean, batch_size=args.batch_size))
    for batch_i, (batch_users, batch_pos, batch_neg) in enumerate(utils.minibatch(users,
                                                                                  posItems,
                                                                                  negItems,
                                                                                  batch_size=args.batch_size)):
        # cycle through the (typically smaller) clean dataset
        try:
            batch_users_clean, batch_pos_clean, batch_neg_clean = next(clean_data_iter)
        except StopIteration:
            clean_data_iter = iter(utils.minibatch(users_clean,
                                                   posItems_clean,
                                                   negItems_clean,
                                                   batch_size=args.batch_size))
            batch_users_clean, batch_pos_clean, batch_neg_clean = next(clean_data_iter)
        meta_model = deepcopy(recmodel)
        # ============= get input of the scheduler ============= #
        # L_theta: per-sample clean losses under the current parameters theta
        L_theta, _ = meta_model.loss(batch_users_clean, batch_pos_clean, batch_neg_clean, reduce=False)
        L_theta = torch.reshape(L_theta, (len(L_theta), 1))
        grads_theta_list = []
        for k in range(len(batch_users_clean)):
            grads_theta_list.append(torch.autograd.grad(L_theta[k], (meta_model.params()), create_graph=True,
                                                        retain_graph=True))
        v_L_theta = ltw(L_theta.data)
        # assumed (virtual) update theta -> theta_hat on the LTW-weighted clean loss
        L_theta_meta = torch.sum(L_theta * v_L_theta) / len(batch_users_clean)
        meta_model.zero_grad()
        grads = torch.autograd.grad(L_theta_meta, (meta_model.params()), create_graph=True, retain_graph=True)
        meta_model.update_params(lr_inner=args.lr, source_params=grads)
        del grads
        L_theta_hat, _ = meta_model.loss(batch_users_clean, batch_pos_clean, batch_neg_clean, reduce=False)
        # for each sample, calculate gradients of the 2 losses (before/after the
        # virtual update) and their per-parameter cosine similarity
        input_embedding_cos = []
        for k in range(len(batch_users_clean)):
            task_grad_cos = []
            grads_theta = grads_theta_list[k]
            grads_theta_hat = torch.autograd.grad(L_theta_hat[k], (meta_model.params()), create_graph=True,
                                                  retain_graph=True)
            # calculate cosine similarity for each parameter
            for j in range(len(grads_theta)):
                task_grad_cos.append(scheduler.cosine(grads_theta[j].flatten().unsqueeze(0),
                                                      grads_theta_hat[j].flatten().unsqueeze(0))[0])
            del grads_theta
            del grads_theta_hat
            # stack similarity of each parameter
            task_grad_cos = torch.stack(task_grad_cos)
            # stack similarity of each sample
            input_embedding_cos.append(task_grad_cos.detach())
        # sample clean data: Gumbel-softmax over the scheduler's distribution
        weight = scheduler(L_theta, torch.stack(input_embedding_cos).cuda())
        task_prob = torch.softmax(weight.reshape(-1), dim=-1)
        log_p = torch.log(task_prob + 1e-20)
        logits = log_p.repeat([len(log_p), 1])
        sample_idx = scheduler.gumbel_softmax(logits, temperature=args.tau, hard=True)
        # ============= training ============= #
        meta_model = deepcopy(recmodel)
        # assumed update of theta (theta -> theta') on the LTW-weighted noisy loss
        cost, reg_loss = meta_model.loss(batch_users, batch_pos, batch_neg, reduce=False)
        cost_v = torch.reshape(cost, (len(cost), 1))
        v_lambda = ltw(cost_v.data)
        l_f_meta = torch.sum(cost_v * v_lambda) / len(batch_users)
        meta_model.zero_grad()
        grads = torch.autograd.grad(l_f_meta, (meta_model.params()), create_graph=True)
        # load theta' and update params of ltw via the clean meta-loss below
        meta_model.update_params(lr_inner=args.lr, source_params=grads)
        del grads
        if args.model == 'lgn':
            user_emb, pos_emb, neg_emb, _, _, _ = meta_model.getEmbedding(batch_users_clean.long(),
                                                                          batch_pos_clean.long(), batch_neg_clean.long())
        else:
            user_emb, pos_emb, neg_emb = meta_model(batch_users_clean, batch_pos_clean, batch_neg_clean)
        # differentiable selection: sample_idx rows (presumably one-hot with
        # hard=True — verify against Scheduler.gumbel_softmax) mix the embeddings
        batch_users_clean = torch.mm(sample_idx, user_emb)
        batch_pos_clean = torch.mm(sample_idx, pos_emb)
        batch_neg_clean = torch.mm(sample_idx, neg_emb)
        l_g_meta = meta_model.loss_gumbel(batch_users_clean, batch_pos_clean, batch_neg_clean)
        meta_opt.zero_grad()
        l_g_meta.backward(retain_graph=True)
        meta_opt.step()
        # NOTE(review): backward() is called a second time on the same loss so
        # the scheduler also receives gradients; looks intentional, but the
        # second pass accumulates into already-stepped LTW grads — confirm.
        schedule_opt.zero_grad()
        l_g_meta.backward()
        schedule_opt.step()
        # reload and actually update theta with the (now frozen) LTW weights
        cost_w, _ = recmodel.loss(batch_users, batch_pos, batch_neg, reduce=False)
        cost_v = torch.reshape(cost_w, (len(cost_w), 1))
        with torch.no_grad():
            w_new = ltw(cost_v)
        loss = torch.sum(cost_v * w_new) / len(batch_users)
        train_opt.zero_grad()
        loss.backward()
        train_opt.step()
        recmodel.store_params()
        train_loss += loss.cpu().item()
        meta_loss += l_g_meta.cpu().item()
    train_loss /= total_batch
    meta_loss /= total_batch
    timer.zero()
    return [f'{train_loss:.5f}', f'{meta_loss:.5f}']
def test_one_batch(X):
    """Compute precision/recall/NDCG at each cutoff in parse.topks for one batch.

    X: a pair of (top-ranked item tensor, list of ground-truth item lists).
    Returns a dict of numpy arrays keyed by metric name.
    """
    ranked, ground_truth = X[0].numpy(), X[1]
    labels = utils.getLabel(ground_truth, ranked)
    precisions, recalls, ndcgs = [], [], []
    for k in parse.topks:
        at_k = utils.RecallPrecision_ATk(ground_truth, labels, k)
        precisions.append(at_k['precision'])
        recalls.append(at_k['recall'])
        ndcgs.append(utils.NDCGatK_r(ground_truth, labels, k))
    return {'recall': np.array(recalls),
            'precision': np.array(precisions),
            'ndcg': np.array(ndcgs)}
def test(dataset, Recmodel, valid=True, multicore=0):
    """Ranking evaluation on the validation (valid=True) or test split.

    Computes precision/recall/NDCG at each cutoff in parse.topks, masking each
    user's known positives out of the ranking first. Metrics are averaged over
    evaluated users; test-split results are additionally written to the log.
    """
    u_batch_size = args.test_u_batch_size
    dataset: dataloader.Loader
    if valid:
        testDict = dataset.validDict
    else:
        testDict = dataset.testDict
    Recmodel = Recmodel.eval()
    max_K = max(parse.topks)
    if multicore == 1:
        pool = multiprocessing.Pool(CORES)
    results = {'precision': np.zeros(len(parse.topks)),
               'recall': np.zeros(len(parse.topks)),
               'ndcg': np.zeros(len(parse.topks))}
    with torch.no_grad():
        users = list(testDict.keys())
        try:
            assert u_batch_size <= len(users) / 10
        except AssertionError:
            print(f"test_u_batch_size is too big for this dataset, try a small one {len(users) // 10}")
        users_list = []
        rating_list = []
        groundTrue_list = []
        total_batch = len(users) // u_batch_size + 1
        for batch_users in utils.minibatch(users, batch_size=u_batch_size):
            allPos = dataset.getUserPosItems(batch_users)
            # when testing, also mask each user's validation positives
            if not valid:
                validDict = dataset.validDict
                for i, user in enumerate(batch_users):
                    try:
                        allPos[i] = np.concatenate((allPos[i], validDict[user]))
                    except KeyError:
                        # user has no validation interactions
                        pass
            groundTrue = [testDict[u] for u in batch_users]
            batch_users_gpu = torch.Tensor(batch_users).long()
            batch_users_gpu = batch_users_gpu.to(parse.device)
            rating = Recmodel.getUsersRating(batch_users_gpu)
            exclude_index = []
            exclude_items = []
            for range_i, items in enumerate(allPos):
                exclude_index.extend([range_i] * len(items))
                exclude_items.extend(items)
            # push known positives to the bottom of the ranking
            rating[exclude_index, exclude_items] = -(1 << 10)
            _, rating_K = torch.topk(rating, k=max_K)
            rating = rating.cpu().numpy()
            del rating
            users_list.append(batch_users)
            rating_list.append(rating_K.cpu())
            groundTrue_list.append(groundTrue)
        assert total_batch == len(users_list)
        X = zip(rating_list, groundTrue_list)
        if multicore == 1:
            pre_results = pool.map(test_one_batch, X)
        else:
            pre_results = []
            for x in X:
                pre_results.append(test_one_batch(x))
        # aggregate per-batch sums, then average over all evaluated users
        for result in pre_results:
            results['recall'] += result['recall']
            results['precision'] += result['precision']
            results['ndcg'] += result['ndcg']
        results['recall'] /= float(len(users))
        results['precision'] /= float(len(users))
        results['ndcg'] /= float(len(users))
        if multicore == 1:
            pool.close()
        if not valid:
            utils.Logging(log_file, str(results))
        return results
def memorization_test(dataset, Recmodel):
    '''
    Memorization procedure: for each user, rank a candidate set consisting of
    their positives plus an equal number of sampled negatives; the top
    |positives| items of that set count as memorized this epoch. Updates the
    memorization history matrix and returns the memorized ratio from
    MemLoader.generate_clean_data() (False when nothing is memorized yet).
    '''
    u_batch_size = args.test_u_batch_size
    with torch.no_grad():
        users = dataset.trainUniqueUsers
        users_list = []
        items_list = []
        # K random negatives per user, K = number of that user's positives
        S = utils.sample_K_neg(dataset)
        for batch_users in utils.minibatch(users, batch_size=u_batch_size):
            allPos = dataset.getUserPosItems(batch_users)
            batch_users_gpu = torch.Tensor(batch_users).long()
            batch_users_gpu = batch_users_gpu.to(parse.device)
            rating = Recmodel.getUsersRating(batch_users_gpu)
            excluded_users = []
            excluded_items = []
            k_list = []
            for range_i, u in enumerate(batch_users):
                neg_items = S[u]
                items = allPos[range_i]
                k_list.append(len(items))
                neg_items.extend(items)
                excluded_items.extend(neg_items)
                excluded_users.extend([range_i] * (len(neg_items)))
            # boost the candidate set (positives + sampled negatives) so the
            # top-k ranking is effectively restricted to those items
            rating[excluded_users, excluded_items] += 100
            # rating_K: [batch_size, K]
            max_K = max(k_list)
            _, rating_K = torch.topk(rating, k=max_K)
            for i in range(len(rating_K)):
                user = batch_users[i]
                # keep only each user's own top-|positives| slice
                items = rating_K[i].tolist()[:k_list[i]]
                users_list.extend([user] * len(items))
                items_list.extend(items)
            try:
                assert len(users_list) == len(items_list)
            except AssertionError:
                print('len(users_list) != len(items_list)')
            del rating
        # non-positive pairs are filtered out inside updateMemDict (index -1)
        dataset.updateMemDict(users_list, items_list)
        return dataset.generate_clean_data()
| 21,779 | 39.634328 | 119 | py |
SGDL | SGDL-main/code/utils.py | import numpy as np
from sklearn.metrics import roc_auc_score
from parse import args
import torch
def EarlyStop(results, loss=False):
    """Decide whether to stop training and whether to save a checkpoint.

    With loss=True, *results* is a sequence of loss values (lower is
    better); otherwise each entry is a metrics dict and entry['recall'][0]
    is compared (higher is better). Training stops when the best value is
    more than args.stop_step evaluations old; the model is saved when the
    latest value ties or beats the best so far.

    Returns:
        (is_stop, is_save) booleans.
    """
    if loss:
        best_idx = results.index(min(results))
        last_idx = len(results) - 1
        should_stop = (last_idx - best_idx) >= args.stop_step
        should_save = results[-1] <= results[best_idx]
        return should_stop, should_save
    recall_vals = [entry['recall'][0] for entry in results]
    best_idx = recall_vals.index(max(recall_vals))
    last_idx = len(recall_vals) - 1
    should_stop = (last_idx - best_idx) >= args.stop_step
    should_save = recall_vals[-1] >= recall_vals[best_idx]
    return should_stop, should_save
def UniformSample(dataset, valid=False):
    """Build (user, pos_item, neg_item) BPR triples.

    Every observed positive interaction is paired with one negative item
    drawn uniformly at random (rejection-sampled until it is not one of
    the user's positives). With valid=True the validation dict supplies
    both the user set and their positives.

    Returns:
        np.ndarray of shape [num_interactions, 3].
    """
    if valid:
        # validation split
        user_ids = list(dataset.validDict.keys())
        pos_lookup = dataset.validDict
    else:
        # training split
        user_ids = dataset.trainUniqueUsers
        pos_lookup = dataset.allPos
    triples = []
    for uid in user_ids:
        pos_items = pos_lookup[uid]
        if len(pos_items) == 0:
            continue
        for pos in pos_items:
            # Rejection sampling: redraw until the candidate is a true negative.
            neg = np.random.randint(0, dataset.m_items)
            while neg in pos_items:
                neg = np.random.randint(0, dataset.m_items)
            triples.append([uid, pos, neg])
    return np.array(triples)
def sample_K_neg(dataset):
    '''
    Randomly sample K negatives for each training user, where K equals the
    number of that user's positive interactions. Candidates are redrawn
    until they are not positives; duplicates among the negatives of one
    user are possible.

    Returns:
        dict mapping user id -> list of sampled negative item ids.
    '''
    negatives = {}
    for uid in dataset.trainUniqueUsers:
        pos_items = dataset.allPos[uid]
        drawn = []
        while len(drawn) < len(pos_items):
            candidate = np.random.randint(0, dataset.m_items)
            if candidate not in pos_items:
                drawn.append(candidate)
        negatives[uid] = drawn
    return negatives
def minibatch(*tensors, **kwargs):
    """Yield successive fixed-size slices of one or more sequences.

    With a single positional sequence, yields its slices directly; with
    several, yields tuples of aligned slices (the sequences are assumed to
    have equal length). Accepts an optional ``batch_size`` keyword,
    defaulting to the project-wide ``args.batch_size``.

    Fix: the original ``kwargs.get('batch_size', args.batch_size)`` always
    evaluated ``args.batch_size``, even when the caller supplied the kwarg;
    the fallback is now only touched when actually needed.
    """
    if 'batch_size' in kwargs:
        batch_size = kwargs['batch_size']
    else:
        batch_size = args.batch_size  # project-level default
    if len(tensors) == 1:
        tensor = tensors[0]
        for i in range(0, len(tensor), batch_size):
            yield tensor[i:i + batch_size]
    else:
        for i in range(0, len(tensors[0]), batch_size):
            yield tuple(x[i:i + batch_size] for x in tensors)
def shuffle(*arrays, **kwargs):
    """Shuffle one or more equal-length arrays in unison.

    A single permutation is drawn and applied to every array. Pass
    ``indices=True`` to also receive the permutation used.

    Raises:
        ValueError: if the arrays differ in length.
    """
    want_indices = kwargs.get('indices', False)
    distinct_lengths = set(len(arr) for arr in arrays)
    if len(distinct_lengths) != 1:
        raise ValueError('All inputs to shuffle must have '
                         'the same length.')
    perm = np.arange(len(arrays[0]))
    np.random.shuffle(perm)
    if len(arrays) == 1:
        shuffled = arrays[0][perm]
    else:
        shuffled = tuple(arr[perm] for arr in arrays)
    return (shuffled, perm) if want_indices else shuffled
class timer:
    """
    Time context manager for code block
        with timer():
            do something
        timer.get()
    """
    # Bound once at class-creation time; used as timer.time() below.
    from time import time
    TAPE = [-1] # global time record
    NAMED_TAPE = {}
    @staticmethod
    def get():
        # Pop the most recent anonymous timing, or -1 when the tape is empty
        # (the initial -1 sentinel is never popped).
        if len(timer.TAPE) > 1:
            return timer.TAPE.pop()
        else:
            return -1
    @staticmethod
    def dict(select_keys=None):
        # Format accumulated named timings as "|name:secs|..."; with
        # select_keys, only those names are reported (KeyError if missing).
        hint = "|"
        if select_keys is None:
            for key, value in timer.NAMED_TAPE.items():
                hint = hint + f"{key}:{value:.2f}|"
        else:
            for key in select_keys:
                value = timer.NAMED_TAPE[key]
                hint = hint + f"{key}:{value:.2f}|"
        return hint
    @staticmethod
    def zero(select_keys=None):
        # Reset accumulated named timings (all of them, or just select_keys).
        if select_keys is None:
            for key, value in timer.NAMED_TAPE.items():
                timer.NAMED_TAPE[key] = 0
        else:
            for key in select_keys:
                timer.NAMED_TAPE[key] = 0
    def __init__(self, tape=None, **kwargs):
        # With name=...: accumulate into the shared NAMED_TAPE under that key
        # (initialized to 0. on first use). Without a name: append each
        # elapsed time to `tape` (or the global TAPE).
        if kwargs.get('name'):
            timer.NAMED_TAPE[kwargs['name']] = timer.NAMED_TAPE[
                kwargs['name']] if timer.NAMED_TAPE.get(kwargs['name']) else 0.
            self.named = kwargs['name']
            if kwargs.get("group"):
                #TODO: add group function
                pass
        else:
            self.named = False
            self.tape = tape or timer.TAPE
    def __enter__(self):
        self.start = timer.time()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Record elapsed wall-clock time on exit (even when an exception
        # propagates; the exception is not suppressed).
        if self.named:
            timer.NAMED_TAPE[self.named] += timer.time() - self.start
        else:
            self.tape.append(timer.time() - self.start)
def set_seed(seed):
    """Seed the numpy and torch RNGs (including all CUDA devices) for
    reproducibility."""
    np.random.seed(seed)
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    torch.manual_seed(seed)
# ====================Metrics==============================
# =========================================================
def RecallPrecision_ATk(test_data, r, k):
    """
    Batch recall@k and precision@k from a binary hit matrix.

    test_data: list of per-user ground-truth item lists (users may have
        different numbers of positives), aligned with the rows of r.
    r: hit matrix of shape (test_batch, >=k); r[u, j] == 1 iff the j-th
        ranked item of user u is relevant (rows pre-sorted by score).
    k: cut-off rank.

    Returns a dict with the *summed* (not averaged) recall and precision
    over the batch.
    """
    hits_at_k = r[:, :k].sum(1)
    truth_sizes = np.array([len(items) for items in test_data])
    recall_sum = np.sum(hits_at_k / truth_sizes)
    precision_sum = np.sum(hits_at_k) / k
    return {'recall': recall_sum, 'precision': precision_sum}
def MRRatK_r(r, k):
    """
    Mean Reciprocal Rank (summed over the batch, not averaged).

    r: binary hit matrix of shape (test_batch, >=k), rows pre-sorted by
        predicted score.
    k: cut-off rank.

    Each hit at rank i (1-based) contributes 1/i. Fix: the previous
    weighting divided by ``np.log2(1./np.arange(1, k+1))``, whose first
    entry is log2(1) == 0, raising a divide-by-zero and producing inf for
    any rank-1 hit; reciprocal-rank weights are used instead.
    """
    pred_data = r[:, :k]
    # 1/rank weights: [1, 1/2, ..., 1/k]
    scores = 1. / np.arange(1, k + 1)
    pred_data = pred_data * scores
    pred_data = pred_data.sum(1)
    return np.sum(pred_data)
def NDCGatK_r(test_data, r, k):
    """
    Normalized Discounted Cumulative Gain at k with binary relevance
    (rel_i in {0, 1}, so 2^rel_i - 1 == rel_i), summed over the batch.

    test_data: per-user ground-truth item lists aligned with rows of r.
    r: binary hit matrix of shape (test_batch, >=k), rows pre-sorted.
    """
    assert len(r) == len(test_data)
    hits = r[:, :k]
    # Ideal ranking places all |truth| (capped at k) relevant items first.
    ideal = np.zeros((len(hits), k))
    for row, items in enumerate(test_data):
        ideal[row, :min(k, len(items))] = 1
    discounts = 1. / np.log2(np.arange(2, k + 2))
    idcg = (ideal * discounts).sum(axis=1)
    idcg[idcg == 0.] = 1.  # avoid 0/0 for users with no ground truth
    dcg = (hits * discounts).sum(axis=1)
    ndcg = dcg / idcg
    ndcg[np.isnan(ndcg)] = 0.
    return np.sum(ndcg)
def AUC(all_item_scores, dataset, test_data):
    """
    ROC-AUC over all scored items, designed for a single user.

    Items with score < 0 are treated as "not scored" and excluded;
    test_data holds the indices of the user's relevant items.
    """
    labels = np.zeros((dataset.m_items, ))
    labels[test_data] = 1
    scored_mask = all_item_scores >= 0
    return roc_auc_score(labels[scored_mask], all_item_scores[scored_mask])
def getLabel(test_data, pred_data):
    """Build a binary hit matrix: entry (u, j) is 1.0 iff user u's j-th
    predicted item appears in that user's ground-truth list."""
    hit_rows = []
    for truth, ranked in zip(test_data, pred_data):
        row = np.array([float(item in truth) for item in ranked])
        hit_rows.append(row)
    return np.array(hit_rows).astype('float')
# ===============Logging=============== #
def Logging(file, log):
    """Echo *log* to stdout and append it, newline-terminated, to *file*."""
    print(log)
    line = log + '\n'
    with open(file, 'a+') as sink:
        sink.write(line)
class BPRLoss:
    """Thin training wrapper around a recommender: holds an Adam optimizer
    and performs one BPR update per stageOne() call."""
    def __init__(self,
                 recmodel):
        # recmodel must expose .parameters() and .loss(users, pos, neg, reduce).
        self.model = recmodel
        self.weight_decay = args.weight_decay
        self.lr = args.lr
        self.opt = torch.optim.Adam(recmodel.parameters(), lr=self.lr)
    def stageOne(self, users, pos, neg):
        """Run one optimization step on a batch of BPR triples and return
        the scalar loss value."""
        loss, reg_loss = self.model.loss(users, pos, neg, reduce=True)
        if reg_loss:
            # reg_loss is scaled but deliberately NOT added to the training
            # loss (see the commented-out line) — only the BPR term is
            # backpropagated.
            reg_loss = reg_loss*self.weight_decay
            #loss = loss + reg_loss
        self.opt.zero_grad()
        loss.backward()
        self.opt.step()
        return loss.cpu().item() | 7,742 | 28.441065 | 105 | py |
SGDL | SGDL-main/code/model.py | from parse import args
import torch
from torch import nn
from copy import deepcopy
from collections import OrderedDict
from torch.autograd import Variable
def to_var(x, requires_grad=True):
    """Return *x* wrapped as an autograd Variable, moved to GPU when CUDA
    is available."""
    target = x.cuda() if torch.cuda.is_available() else x
    return Variable(target, requires_grad=requires_grad)
class MetaModule(nn.Module):
    """nn.Module variant whose parameters can be enumerated and re-bound by
    dotted name, enabling differentiable (meta-learning) weight updates."""
    # adopted from: Adrien Ecoffet https://github.com/AdrienLE
    def params(self):
        # Yield parameter tensors only (names discarded).
        for name, param in self.named_params(self):
            yield param
    def named_leaves(self):
        # Subclasses (e.g. buffer-backed layers) override this to expose
        # their (name, tensor) leaf pairs.
        return []
    def named_submodules(self):
        return []
    def named_params(self, curr_module=None, memo=None, prefix=''):
        # Recursively yield (dotted_name, tensor) pairs over the module tree;
        # `memo` prevents yielding a shared tensor twice.
        # NOTE(review): callers always pass curr_module explicitly; with the
        # default None this would fail on None._parameters — verify before
        # relying on the default.
        if memo is None:
            memo = set()
        if hasattr(curr_module, 'named_leaves'):
            for name, p in curr_module.named_leaves():
                if p is not None and p not in memo:
                    memo.add(p)
                    yield prefix + ('.' if prefix else '') + name, p
        else:
            for name, p in curr_module._parameters.items():
                if p is not None and p not in memo:
                    memo.add(p)
                    yield prefix + ('.' if prefix else '') + name, p
        for mname, module in curr_module.named_children():
            submodule_prefix = prefix + ('.' if prefix else '') + mname
            for name, p in self.named_params(module, memo, submodule_prefix):
                yield name, p
    def update_params(self, lr_inner, first_order=False, source_params=None, detach=False):
        # One SGD-style step: param <- param - lr_inner * grad. When
        # source_params is given, its entries are used as the gradients
        # (zipped positionally with named_params); the updated tensors are
        # re-bound via set_param, keeping the computation graph intact.
        if source_params is not None:
            for tgt, src in zip(self.named_params(self), source_params):
                name_t, param_t = tgt
                # name_s, param_s = src
                # grad = param_s.grad
                # name_s, param_s = src
                grad = src
                tmp = param_t - lr_inner * grad
                self.set_param(self, name_t, tmp)
        else:
            for name, param in self.named_params(self):
                if not detach:
                    grad = param.grad
                    tmp = param - lr_inner * grad
                    self.set_param(self, name, tmp)
                else:
                    param = param.detach_() # https://blog.csdn.net/qq_39709535/article/details/81866686
                    self.set_param(self, name, param)
    def set_param(self, curr_mod, name, param):
        # Walk the dotted path to the owning submodule, then setattr the leaf.
        if '.' in name:
            n = name.split('.')
            module_name = n[0]
            rest = '.'.join(n[1:])
            for name, mod in curr_mod.named_children():
                if module_name == name:
                    self.set_param(mod, rest, param)
                    break
        else:
            setattr(curr_mod, name, param)
    def detach_params(self):
        # Re-bind every parameter as a detached tensor (cuts the graph).
        for name, param in self.named_params(self):
            self.set_param(self, name, param.detach())
    def copy(self, other, same_var=False):
        # NOTE(review): calls other.named_params() without a module argument
        # and self.set_param with two args — looks unused/stale; confirm
        # before invoking.
        for name, param in other.named_params():
            # if not same_var:
            # param = to_var(param.data.clone(), requires_grad=True)
            self.set_param(name, param)
class MetaEmbed(MetaModule):
    """Embedding table whose weight is registered as a buffer (via to_var,
    with requires_grad=True) so MetaModule can re-bind it with fast,
    meta-updated weights."""
    def __init__(self, dim_1, dim_2):
        super().__init__()
        # A throwaway nn.Embedding supplies the initialized weight data.
        template = nn.Embedding(dim_1, dim_2)
        self.register_buffer('weight', to_var(template.weight.data, requires_grad=True))
        self.register_buffer('bias', None)
    def forward(self, index):
        # Plain tensor indexing stands in for the usual embedding lookup.
        return self.weight[index]
    def named_leaves(self):
        # Expose the buffer-backed tensors to MetaModule.named_params.
        return [('weight', self.weight), ('bias', self.bias)]
class LightGCN(MetaModule):
    """LightGCN recommender with meta-learnable (buffer-backed) embeddings.

    User/item embeddings are propagated over the normalized interaction
    graph and averaged across layers; scores are inner products.
    """
    def __init__(self, dataset):
        super(LightGCN, self).__init__()
        self.config = args
        self.dataset = dataset
        self.__init_weight()
        self.store_params()
    def __init_weight(self):
        # Sizes and hyper-parameters come from the dataset and global args.
        self.num_users = self.dataset.n_users
        self.num_items = self.dataset.m_items
        self.latent_dim = self.config.latent_dim_rec
        self.n_layers = self.config.lightGCN_n_layers
        self.keep_prob = self.config.keep_prob
        self.A_split = self.config.A_split
        # MetaEmbed keeps weights in buffers so they can be re-bound.
        self.embedding_user = MetaEmbed(self.num_users,self.latent_dim)
        self.embedding_item = MetaEmbed(self.num_items,self.latent_dim)
        nn.init.normal_(self.embedding_user.weight, std=0.1)
        nn.init.normal_(self.embedding_item.weight, std=0.1)
        self.f = nn.Sigmoid()
        self.Graph = self.dataset.getSparseGraph()
    def __dropout_x(self, x, keep_prob):
        # Drop edges of a sparse adjacency with prob 1-keep_prob and
        # rescale surviving values by 1/keep_prob.
        size = x.size()
        index = x.indices().t()
        values = x.values()
        random_index = torch.rand(len(values)) + keep_prob
        random_index = random_index.int().bool()
        index = index[random_index]
        values = values[random_index] / keep_prob
        g = torch.sparse.FloatTensor(index.t(), values, size)
        return g
    def __dropout(self, keep_prob):
        # Apply edge dropout to each fold when the adjacency is split.
        if self.A_split:
            graph = []
            for g in self.Graph:
                graph.append(self.__dropout_x(g, keep_prob))
        else:
            graph = self.__dropout_x(self.Graph, keep_prob)
        return graph
    def computer(self):
        """
        propagate methods for lightGCN
        """
        # [num_users, emb_dim]
        users_emb = self.embedding_user.weight
        items_emb = self.embedding_item.weight
        # [M + N, emb_dim]
        all_emb = torch.cat([users_emb, items_emb])
        embs = [all_emb]
        # [M + N, M + N] adjacency matrix
        g_droped = self.Graph
        for layer in range(self.n_layers):
            if self.A_split:
                temp_emb = []
                for f in range(len(g_droped)):
                    temp_emb.append(torch.sparse.mm(g_droped[f], all_emb))
                side_emb = torch.cat(temp_emb, dim=0)
                all_emb = side_emb
            else:
                # [M + N, emb_dim] E(k+1) = D^(-1/2)AD^(1/2)E(k)
                all_emb = torch.sparse.mm(g_droped, all_emb)
            embs.append(all_emb)
        # [M + N, num_layer, emb_dim]
        embs = torch.stack(embs, dim=1)
        # mean of all layers
        light_out = torch.mean(embs, dim=1)
        users, items = torch.split(light_out, [self.num_users, self.num_items])
        return users, items
    def getUsersRating(self, users):
        # Sigmoid-squashed scores of the given users against all items.
        all_users, all_items = self.computer()
        users_emb = all_users[users.long()]
        items_emb = all_items
        rating = self.f(torch.matmul(users_emb, items_emb.t()))
        return rating
    def getEmbedding(self, users, pos_items, neg_items):
        # Returns both propagated embeddings and the raw ("ego") layer-0
        # embeddings (the latter feed the L2 regularizer).
        all_users, all_items = self.computer()
        users_emb = all_users[users]
        pos_emb = all_items[pos_items]
        neg_emb = all_items[neg_items]
        users_emb_ego = self.embedding_user(users)
        pos_emb_ego = self.embedding_item(pos_items)
        neg_emb_ego = self.embedding_item(neg_items)
        return users_emb, pos_emb, neg_emb, users_emb_ego, pos_emb_ego, neg_emb_ego
    def loss(self, users, pos, neg, reduce=True):
        """BPR loss (softplus of neg-pos score gap) plus an L2 reg term on
        the ego embeddings; reduce=False returns per-example losses."""
        # [batch_size, emb_dim]
        (users_emb, pos_emb, neg_emb,
        userEmb0, posEmb0, negEmb0) = self.getEmbedding(users.long(), pos.long(), neg.long())
        reg_loss = (1 / 2) * (userEmb0.norm(2).pow(2) +
                              posEmb0.norm(2).pow(2) +
                              negEmb0.norm(2).pow(2)) / float(len(users))
        # [batch_size, emb_dim]
        pos_scores = torch.mul(users_emb, pos_emb)
        # [batch_size, ]
        pos_scores = torch.sum(pos_scores, dim=1)
        neg_scores = torch.mul(users_emb, neg_emb)
        neg_scores = torch.sum(neg_scores, dim=1)
        if reduce:
            loss = torch.mean(torch.nn.functional.softplus(neg_scores - pos_scores))
        else:
            loss = torch.nn.functional.softplus(neg_scores - pos_scores)
        return loss, reg_loss
    def loss_gumbel(self, users, pos, neg, reduce=True):
        """BPR loss computed directly from pre-gathered embeddings (no
        propagation, no reg term) — used on the gumbel/meta path."""
        # [batch_size, emb_dim]
        users_emb = users
        pos_emb = pos
        neg_emb = neg
        # [batch_size, emb_dim]
        pos_scores = torch.mul(users_emb, pos_emb)
        # [batch_size, ]
        pos_scores = torch.sum(pos_scores, dim=1)
        neg_scores = torch.mul(users_emb, neg_emb)
        neg_scores = torch.sum(neg_scores, dim=1)
        if reduce:
            loss = torch.mean(torch.nn.functional.softplus(neg_scores - pos_scores))
        else:
            loss = torch.nn.functional.softplus(neg_scores - pos_scores)
        return loss
    def forward(self, users, items):
        # Inner-product score for each (user, item) pair.
        # compute embedding
        all_users, all_items = self.computer()
        users_emb = all_users[users]
        items_emb = all_items[items]
        inner_pro = torch.mul(users_emb, items_emb)
        gamma = torch.sum(inner_pro, dim=1)
        return gamma
    def store_params(self):
        # Snapshot current weights so they can be restored after fast
        # (meta) updates overwrite them.
        self.keep_weight = deepcopy(self.state_dict())
        self.fast_weights = OrderedDict()
        self.weight_names = list(self.keep_weight.keys())
class LTW(MetaModule):
    '''
    Learning-to-weight module: a two-layer MLP that maps each (u, i)
    pair's loss value to a weight in (0, 1).
    input: loss
    output: weight of each (u, i) pair
    '''
    def __init__(self, input, hidden1, output):
        super(LTW, self).__init__()
        # ReLU bottleneck between two affine layers.
        self.linear1 = nn.Linear(input, hidden1)
        self.relu1 = nn.ReLU(inplace=True)
        self.linear2 = nn.Linear(hidden1, output)
    def forward(self, x):
        hidden = self.relu1(self.linear1(x))
        return torch.sigmoid(self.linear2(hidden))
SGDL | SGDL-main/code/scheduler.py | import torch
import torch.nn as nn
import numpy as np
from torch.distributions.categorical import Categorical
class Scheduler(nn.Module):
    """Meta-scheduler that scores candidate samples from their gradient and
    loss sequences (two bi-LSTMs + a small MLP) and samples among them,
    either categorically (REINFORCE) or via Gumbel-Softmax.

    NOTE(review): sample_task/sample_gumbel call .cuda() unconditionally, so
    this class requires a CUDA device at runtime.
    """
    def __init__(self, N):
        super(Scheduler, self).__init__()
        # Bi-LSTMs over the gradient features (width N) and scalar losses;
        # each contributes a 20-dim summed output, hence input_dim = 40.
        self.grad_lstm = nn.LSTM(N, 10, 1, bidirectional=True)
        self.loss_lstm = nn.LSTM(1, 10, 1, bidirectional=True)
        self.cosine = torch.nn.CosineSimilarity(dim=-1, eps=1e-8)
        input_dim = 40
        self.fc1 = nn.Linear(input_dim, 20)
        self.fc2 = nn.Linear(20, 1)
    def forward(self, loss, input):
        # Encode gradients and losses separately, concatenate, and map to a
        # single unnormalized score per sample.
        grad_output, (hn, cn) = self.grad_lstm(input.reshape(1, len(input), -1))
        grad_output = grad_output.sum(0)
        loss_output, (hn, cn) = self.loss_lstm(loss.reshape(1, len(loss), 1))
        loss_output = loss_output.sum(0)
        x = torch.cat((grad_output, loss_output), dim=1)
        z = torch.tanh(self.fc1(x))
        z = self.fc2(z)
        return z
    def sample_task(self, prob, size, replace=True):
        # Draw `size` indices proportionally to `prob`; if fewer than `size`
        # entries have positive probability, return all of them instead.
        self.m = Categorical(prob)
        p = prob.detach().cpu().numpy()
        if len(np.where(p > 0)[0]) < size:
            actions = torch.tensor(np.where(p > 0)[0])
        else:
            actions = np.random.choice(np.arange(len(prob)), p=p / np.sum(p), size=size,
                                       replace=replace)
        actions = [torch.tensor(x).cuda() for x in actions]
        return torch.LongTensor(actions)
    def sample_gumbel(self, shape, eps=1e-20):
        """Sample from Gumbel(0, 1)"""
        U = torch.rand(shape)
        return -torch.log(-torch.log(U + eps) + eps).cuda()
    def gumbel_softmax_sample(self, logits, temperature):
        """ Draw a sample from the Gumbel-Softmax distribution"""
        y = logits + self.sample_gumbel(logits.shape)
        return torch.softmax(y / temperature, dim=-1)
    def gumbel_softmax(self, logits, temperature, hard=False):
        """Sample from the Gumbel-Softmax distribution and optionally discretize.
        Args:
        logits: [batch_size, n_class] unnormalized log-probs
        temperature: non-negative scalar
        hard: if True, take argmax, but differentiate w.r.t. soft sample y
        Returns:
        [batch_size, n_class] sample from the Gumbel-Softmax distribution.
        If hard=True, then the returned sample will be one-hot, otherwise it will
        be a probabilitiy distribution that sums to 1 across classes
        """
        y = self.gumbel_softmax_sample(logits, temperature)
        if hard:
            # Straight-through estimator: forward pass is one-hot, backward
            # pass flows through the soft sample y.
            y_hard = torch.eq(y, torch.max(y, 1, keepdim=True).values).long().cuda()
            y = (y_hard - y).detach() + y
            #y = torch.nonzero(y)[:, 1]
        return y | 2,684 | 39.074627 | 88 | py |
SGDL | SGDL-main/code/parse.py | import argparse
import os
from os.path import join
import sys
import torch
import utils
import multiprocessing
# Command-line interface and global run-time configuration for SGDL.
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--lr', type=float, default=0.0005, help='learning rate')
parser.add_argument('--test_u_batch_size', type=int, default=100,
                    help="the batch size of users for testing")
parser.add_argument('--multicore', type=int, default=1,
                    help='whether we use multiprocessing or not in test')
parser.add_argument('--weight_decay', type=float, default=1e-4,
                    help="the weight decay for l2 normalizaton")
parser.add_argument('--history_len', type=int, default=10,
                    help='length of memorization history')
parser.add_argument('--meta_lr', type=float, default=0.0005,
                    help="the learning rate of meta-learning procedure")
parser.add_argument('--schedule_lr', type=float, default=0.005,
                    help="the learning rate of scheduler")
parser.add_argument('--model', type=str, default='lgn', help='backbone model')
parser.add_argument('--eval_freq', type=int, default=10, help='validation frequency')
parser.add_argument('--stop_step', type=int, default=4, help='for early stop')
parser.add_argument('--cuda_device', type=str, default='0')
parser.add_argument('--seed', type=int, default=2020, help='random seed')
parser.add_argument('--topks', nargs='?', default="[5,20]",
                    help="@k test list")
parser.add_argument('--dataset', type=str, default='ml100k',
                    help="available datasets: [yelp, ml100k, adressa]")
parser.add_argument('--epochs', type=int, default=1000)
# ============= Params for LightGCN =============== #
parser.add_argument('--latent_dim_rec', type=int, default=64,
                    help='the embedding size of lightGCN')
parser.add_argument('--lightGCN_n_layers', type=int, default=3,
                    help='the layer num of lightGCN')
parser.add_argument('--dropout', type=int, default=0,
                    help="using the dropout or not")
parser.add_argument('--A_split', type=bool, default=False)
parser.add_argument('--keep_prob', type=float, default=0.6,
                    help="the batch size for bpr loss training procedure")
parser.add_argument('--A_n_fold', type=int, default=100,
                    help="the fold num used to split large adj matrix, like gowalla")
# ============= Params for LTW =============== #
parser.add_argument('--input', type=int, default=1, help='input size of LTW')
parser.add_argument('--hidden1', type=int, default=100, help='hidden size of LTW')
parser.add_argument('--output', type=int, default=1, help='output size of LTW')
# ============= Params for Scheduler =============== #
parser.add_argument('--schedule_type', type=str, default='gumbel', help='training strategy of scheduler: reinforce, gumbel')
parser.add_argument('--tau', type=float, default=1.0, help='temperature of gumbel softmax')
args = parser.parse_args()
# Pin the visible GPU(s) before torch initializes CUDA.
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_device
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
ROOT_PATH = "./"
CODE_PATH = join(ROOT_PATH, 'code')
FILE_PATH = join(CODE_PATH, 'checkpoints')
sys.path.append(join(CODE_PATH, 'sources'))
if not os.path.exists(FILE_PATH):
    os.makedirs(FILE_PATH, exist_ok=True)
GPU = torch.cuda.is_available()
device = torch.device('cuda' if GPU else "cpu")
CORES = multiprocessing.cpu_count() // 2
# NOTE(review): eval() on a CLI string — fine for trusted local use, but
# ast.literal_eval would be the safe choice for parsing "[5,20]".
topks = eval(args.topks)
log_file = f'./log/{args.dataset}_{args.model}_lr{args.lr}_metalr{args.meta_lr}_{args.schedule_type}_tau{args.tau}_schedule_{args.schedule_lr}.txt'
#log_file = f'./log/debug.txt'
# Truncate (reset) the log file at startup; utils.Logging later appends.
f = open(log_file, 'w')
f.close()
from warnings import simplefilter
simplefilter(action="ignore", category=FutureWarning) | 3,840 | 46.419753 | 147 | py |
ElasticBERT | ElasticBERT-main/finetune-static/evaluations.py | import logging
import os
import sys
sys.path.append('../')
import numpy as np
import torch
from torch.utils.data import DataLoader, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from transformers import glue_compute_metrics
from elue import elue_compute_metrics, elue_processors
from load_data import (
load_and_cache_examples_glue,
load_and_cache_examples_elue,
)
logger = logging.getLogger(__name__)
def evaluate_glue(args, model, tokenizer):
    """Evaluate *model* on the GLUE dev set(s) for ``args.task_name`` and
    return a dict of metric name -> value. MNLI is evaluated twice
    (matched and mismatched); later results overwrite shared keys."""
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples_glue(args, eval_task, tokenizer, data_type='dev')
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation *****")
        logger.info(" Num examples = %d", len(eval_dataset))
        logger.info(" Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                # Batch layout: input_ids, attention_mask, token_type_ids, labels.
                inputs = {
                    "input_ids": batch[0],
                    "attention_mask": batch[1],
                    "labels": batch[3],
                }
                inputs["token_type_ids"] = batch[2]
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            # Accumulate logits and gold labels across the whole dev set.
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs["labels"].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        eval_loss = eval_loss / nb_eval_steps
        if args.output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif args.output_mode == "regression":
            preds = np.squeeze(preds)
        result = glue_compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)
        logger.info("***** Eval results *****")
        for key in sorted(result.keys()):
            logger.info(" %s = %s", key, str(result[key]))
            print(" %s = %s" % (key, str(result[key])))
    return results
def evaluate_elue(args, model, tokenizer):
    """Evaluate *model* on the ELUE dev set for ``args.task_name`` and
    return a dict of metric name -> value (single task, no MNLI-style
    double evaluation)."""
    results = {}
    eval_task = args.task_name
    eval_output_dir = args.output_dir
    eval_dataset = load_and_cache_examples_elue(args, eval_task, tokenizer, data_type='dev')
    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # multi-gpu eval
    if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
        model = torch.nn.DataParallel(model)
    # Eval!
    logger.info("***** Running evaluation *****")
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            # Batch layout: input_ids, attention_mask, token_type_ids, labels.
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "labels": batch[3],
            }
            inputs["token_type_ids"] = batch[2]
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]
            eval_loss += tmp_eval_loss.mean().item()
        nb_eval_steps += 1
        # Accumulate logits and gold labels across the whole dev set.
        if preds is None:
            preds = logits.detach().cpu().numpy()
            out_label_ids = inputs["labels"].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
    eval_loss = eval_loss / nb_eval_steps
    if args.output_mode == "classification":
        preds = np.argmax(preds, axis=1)
    elif args.output_mode == "regression":
        preds = np.squeeze(preds)
    result = elue_compute_metrics(eval_task, preds, out_label_ids)
    results.update(result)
    logger.info("***** Eval results *****")
    for key in sorted(result.keys()):
        logger.info(" %s = %s", key, str(result[key]))
        print(" %s = %s" % (key, str(result[key])))
    return results
| 5,779 | 34.900621 | 118 | py |
ElasticBERT | ElasticBERT-main/finetune-static/inferences.py | import os
import csv
import sys
import logging
sys.path.append('../')
import numpy as np
import torch
from torch.utils.data import DataLoader, SequentialSampler
from tqdm import tqdm
from transformers import glue_processors
from elue import elue_compute_metrics, elue_processors
from load_data import (
load_and_cache_examples_glue,
load_and_cache_examples_elue,
)
logger = logging.getLogger(__name__)
def inference_glue(args, model, tokenizer):
    """Run label-free inference on the GLUE test set(s) and write one
    "<task>.tsv" prediction file (index, prediction) per task into the
    output dir. MNLI produces both matched and mismatched files."""
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        # label_list maps predicted class indices back to label strings.
        processor = glue_processors[eval_task]()
        label_list = processor.get_labels()
        eval_dataset = load_and_cache_examples_glue(args, eval_task, tokenizer, data_type="test")
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running Inference *****")
        logger.info(" Num examples = %d", len(eval_dataset))
        logger.info(" Batch size = %d", args.eval_batch_size)
        preds = None
        for batch in tqdm(eval_dataloader, desc="Infering"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                # No labels on the test split; only forward for logits.
                inputs = {
                    "input_ids": batch[0],
                    "attention_mask": batch[1],
                }
                inputs["token_type_ids"] = batch[2]
                outputs = model(**inputs)
                logits = outputs[1]
            if preds is None:
                preds = logits.detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
        if args.output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif args.output_mode == "regression":
            preds = np.squeeze(preds)
        output_infer_file = os.path.join(eval_output_dir, "{}.tsv".format(eval_task))
        with open(output_infer_file, "w", encoding='utf-8') as fout:
            writer = csv.writer(fout, delimiter='\t', quotechar=None)
            writer.writerow(["index", "prediction"])
            for i, pred in enumerate(preds):
                if args.output_mode == "classification":
                    prediction = label_list[pred]
                elif args.output_mode == "regression":
                    prediction = str(pred)
                writer.writerow([i, prediction])
def inference_elue(args, model, tokenizer):
    """Run label-free inference on the ELUE test set and write a
    "<task>.tsv" prediction file (index, prediction) into the output dir."""
    eval_task = args.task_name
    eval_output_dir = args.output_dir
    # label_list maps predicted class indices back to label strings.
    processor = elue_processors[eval_task]()
    label_list = processor.get_labels()
    eval_dataset = load_and_cache_examples_elue(args, eval_task, tokenizer, data_type="test")
    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # multi-gpu eval
    if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
        model = torch.nn.DataParallel(model)
    # Eval!
    logger.info("***** Running Inference *****")
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    preds = None
    for batch in tqdm(eval_dataloader, desc="Infering"):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            # No labels on the test split; only forward for logits.
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
            }
            inputs["token_type_ids"] = batch[2]
            outputs = model(**inputs)
            logits = outputs[1]
        if preds is None:
            preds = logits.detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
    if args.output_mode == "classification":
        preds = np.argmax(preds, axis=1)
    elif args.output_mode == "regression":
        preds = np.squeeze(preds)
    output_infer_file = os.path.join(eval_output_dir, "{}.tsv".format(eval_task))
    with open(output_infer_file, "w", encoding='utf-8') as fout:
        writer = csv.writer(fout, delimiter='\t', quotechar=None)
        writer.writerow(["index", "prediction"])
        for i, pred in enumerate(preds):
            if args.output_mode == "classification":
                prediction = label_list[pred]
            elif args.output_mode == "regression":
                prediction = str(pred)
            writer.writerow([i, prediction])
| 5,492 | 35.865772 | 118 | py |
ElasticBERT | ElasticBERT-main/finetune-static/run_glue.py | import argparse
import glob
import json
import logging
import os
import random
import time
from arguments import get_args
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import fitlog
import transformers
from transformers import BertTokenizer as ElasticBertTokenizer
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
from transformers.trainer_utils import is_main_process
from models.configuration_elasticbert import ElasticBertConfig
from models.modeling_elasticbert import ElasticBertForSequenceClassification
from evaluations import evaluate_glue
from inferences import inference_glue
from load_data import load_and_cache_examples_glue
logger = logging.getLogger(__name__)
def get_metric_key(task_name):
    """Return the metric name used to score *task_name* on GLUE.

    Args:
        task_name: lower-case GLUE task identifier (e.g. "cola", "mnli").

    Returns:
        The metric key string (e.g. "mcc", "acc_and_f1").

    Raises:
        KeyError: if the task is unknown (KeyError(task_name), matching
            the original if/elif chain's explicit raise).
    """
    # Lookup table replaces the long if/elif constant chain.
    metric_keys = {
        "cola": "mcc",
        "sst-2": "acc",
        "mrpc": "acc_and_f1",
        "sts-b": "corr",
        "qqp": "acc_and_f1",
        "mnli": "mnli/acc",
        "mnli-mm": "mnli-mm/acc",
        "qnli": "acc",
        "rte": "acc",
        "wnli": "acc",
        "hans": "acc",
    }
    return metric_keys[task_name]
def set_seed(args):
    """Seed python's, numpy's and torch's RNGs from args.seed; also seed
    all CUDA devices when args.n_gpu > 0."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def train(args, train_dataset, model, tokenizer):
    """Fine-tune `model` on `train_dataset` for a GLUE task.

    Standard transformers-style loop: AdamW with linear warmup/decay,
    optional fp16 via apex, multi-GPU (DataParallel) and distributed
    (DDP) training.  Periodically evaluates on the dev set, tracks the
    task's primary metric, saves the best checkpoint under
    ``<output_dir>/best_model`` (unless ``args.not_save_model``) and
    early-stops after ``args.early_stop_steps`` evaluations without
    improvement.  Losses/metrics are reported through fitlog.

    Returns:
        tuple ``(global_step, tr_loss / global_step, best_all_metric)``
        where ``best_all_metric`` holds the dev metrics recorded at the
        best evaluation step.
    """
    # fitlog bookkeeping happens on the main process only.
    if args.debug:
        fitlog.debug()
    if args.local_rank in [-1, 0]:
        fitlog.set_log_dir(args.log_dir)
        fitlog.add_hyper(args)
    if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(args.output_dir)
    # Effective per-node batch size scales with the number of GPUs.
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # Either train a fixed number of optimizer steps (max_steps wins) or
    # derive the total optimizer-step count from the epoch count.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay)
    # Biases and LayerNorm weights are excluded from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    # An explicit warmup step count takes precedence over the warmup rate.
    if args.warmup_steps > 0:
        num_warmup_steps = args.warmup_steps
    else:
        num_warmup_steps = args.warmup_rate * t_total
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total
    )
    # Check if saved optimizer or scheduler states exist
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True,
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        "  Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    best_all_metric = {}  # dev metrics captured at the best step so far
    keep_best_step = 0  # evaluations since last improvement (early stopping)
    tr_loss, logging_loss, best = 0.0, 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained,
        int(args.num_train_epochs),
        desc="Epoch",
        disable=args.local_rank not in [-1, 0],
    )
    set_seed(args)  # Added here for reproductibility
    metric_key = get_metric_key(args.task_name)
    # MNLI is scored on the average over its (matched/mismatched) metrics.
    if args.task_name == 'mnli':
        metric_key = 'avg_acc'
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "labels": batch[3],
            }
            inputs["token_type_ids"] = batch[2]
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Only step the optimizer every `gradient_accumulation_steps` batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                # Periodic dev-set evaluation every `logging_steps` updates.
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    logs = {}
                    keep_best_step += 1
                    if (
                        args.local_rank == -1 and args.evaluate_during_training
                    ): # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate_glue(args, model, tokenizer)
                        res_for_display = {}
                        num_metric = 0
                        avg_metric = 0
                        for k, v in results.items():
                            num_metric += 1
                            avg_metric += v
                            res_for_display[k.replace("-", "_")] = v
                        # For MNLI, the tracked metric is the average of all returned metrics.
                        if args.task_name == 'mnli':
                            results[metric_key] = avg_metric / num_metric
                            res_for_display[metric_key] = avg_metric / num_metric
                        fitlog.add_metric({"dev": res_for_display}, step=global_step)
                        if results[metric_key] > best:
                            keep_best_step = 0
                            best = results[metric_key]
                            best_all_metric.update(results)
                            fitlog.add_best_metric({"dev": {metric_key.replace("-", "_"): best}})
                            # save the best model
                            if not args.not_save_model:
                                output_dir = os.path.join(args.output_dir, "best_model")
                                model_to_save = (
                                    model.module if hasattr(model, "module") else model
                                )  # Take care of distributed/parallel training
                                model_to_save.save_pretrained(output_dir)
                                tokenizer.save_pretrained(output_dir)
                                torch.save(args, os.path.join(output_dir, "training_args.bin"))
                                logger.info("Saving model checkpoint to %s", output_dir)
                                torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                                torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                                logger.info("Saving optimizer and scheduler states to %s", output_dir)
                        for key, value in results.items():
                            eval_key = "eval_{}".format(key)
                            logs[eval_key] = value
                    loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                    learning_rate_scalar = scheduler.get_lr()[0]
                    logs["learning_rate"] = learning_rate_scalar
                    logs["loss"] = loss_scalar
                    logging_loss = tr_loss
                    fitlog.add_loss(loss_scalar, name="Loss", step=global_step)
                    print(json.dumps({**logs, **{"step": global_step}}))
                    # Early stopping: no improvement for `early_stop_steps` evals.
                    if keep_best_step >= args.early_stop_steps:
                        epoch_iterator.close()
                        break
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        # When logging_steps == 0, evaluate once at the end of each epoch instead.
        if (
            args.local_rank == -1 and args.evaluate_during_training and args.logging_steps == 0
        ):
            keep_best_step += 1
            logs = {}
            results = evaluate_glue(args, model, tokenizer)
            res_for_display = {}
            for k, v in results.items():
                res_for_display[k.replace("-", "_")] = v
            fitlog.add_metric({"dev": res_for_display}, step=global_step)
            if results[metric_key] > best:
                keep_best_step = 0
                best = results[metric_key]
                best_all_metric.update(results)
                fitlog.add_best_metric({"dev": {metric_key.replace("-", "_"): best}})
                # save the best model
                if not args.not_save_model:
                    output_dir = os.path.join(args.output_dir, "best_model")
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)
            for key, value in results.items():
                eval_key = "eval_{}".format(key)
                logs[eval_key] = value
            learning_rate_scalar = scheduler.get_lr()[0]
            logs["learning_rate"] = learning_rate_scalar
            print(json.dumps({**logs, **{"step": global_step}}))
        if keep_best_step >= args.early_stop_steps:
            train_iterator.close()
            # NOTE(review): uses the root `logging` module here, not `logger`.
            logging.info("The task stops early at step {}.".format(global_step))
            break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    logs = {}
    # Final evaluation for runs that ended between two periodic evals.
    if (
        args.local_rank == -1 and args.evaluate_during_training and args.logging_steps > 0 and
        global_step % args.logging_steps != 0 and keep_best_step < args.early_stop_steps
    ):
        results = evaluate_glue(args, model, tokenizer)
        res_for_display = {}
        for k, v in results.items():
            res_for_display[k.replace("-", "_")] = v
        fitlog.add_metric({"dev": res_for_display}, step=global_step)
        if results[metric_key] > best:
            best = results[metric_key]
            best_all_metric.update(results)
            fitlog.add_best_metric({"dev": {metric_key.replace("-", "_"): best}})
            # save the best model
            if not args.not_save_model:
                output_dir = os.path.join(args.output_dir, "best_model")
                model_to_save = (
                    model.module if hasattr(model, "module") else model
                )  # Take care of distributed/parallel training
                model_to_save.save_pretrained(output_dir)
                tokenizer.save_pretrained(output_dir)
                torch.save(args, os.path.join(output_dir, "training_args.bin"))
                logger.info("Saving model checkpoint to %s", output_dir)
                torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                logger.info("Saving optimizer and scheduler states to %s", output_dir)
        for key, value in results.items():
            eval_key = "eval_{}".format(key)
            logs[eval_key] = value
        learning_rate_scalar = scheduler.get_lr()[0]
        logs["learning_rate"] = learning_rate_scalar
        print(json.dumps({**logs, **{"step": global_step}}))
    if args.local_rank in [-1, 0]:
        fitlog.finish()
    return global_step, tr_loss / global_step, best_all_metric
def main():
    """Entry point for GLUE fine-tuning of ElasticBERT.

    Parses arguments, sets up device/distributed state, logging and
    seeding, loads the ElasticBERT model/tokenizer, optionally trains
    (``--do_train``) and runs test-set inference with the best saved
    checkpoint (``--do_infer``).

    Returns:
        dict of best dev metrics from training, or None if ``--do_train``
        was not given.
    """
    args = get_args()
    # Best-effort directory creation; may race with a sibling process.
    if not os.path.exists(args.log_dir):
        try:
            os.makedirs(args.log_dir)
        except:
            pass
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    # Set seed
    set_seed(args)
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    config = ElasticBertConfig.from_pretrained(
        args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        num_hidden_layers=args.num_hidden_layers,
        num_output_layers=args.num_output_layers,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = ElasticBertTokenizer.from_pretrained(
        args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = ElasticBertForSequenceClassification.from_pretrained(
        args.model_name_or_path,
        config=config,
        add_pooling_layer=True,
    )
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    print("Total Model Parameters:", sum(param.numel() for param in model.parameters()))
    logger.info("Training/evaluation parameters %s", args)
    train_dataset = None
    best_all_metric = None
    if args.do_train:
        train_dataset = load_and_cache_examples_glue(args, args.task_name, tokenizer, data_type='train')
        global_step, tr_loss, best_all_metric = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Inference reloads the best checkpoint saved during training.
    if args.do_infer:
        best_model_path = os.path.join(args.output_dir, "best_model")
        if os.path.exists(best_model_path):
            model = ElasticBertForSequenceClassification.from_pretrained(best_model_path)
            model.to(args.device)
            inference_glue(args, model, tokenizer)
        else:
            raise Exception("There is no best model path.")
    return best_all_metric
if __name__ == "__main__":
    best = main()  # best dev metrics of the run (None when --do_train is not set)
| 19,979 | 38.253438 | 123 | py |
ElasticBERT | ElasticBERT-main/finetune-static/load_data.py | import os
import sys
import logging
sys.path.append('../')
import torch
from torch.utils.data import TensorDataset
from transformers import glue_convert_examples_to_features
from transformers import glue_output_modes
from transformers import glue_processors
from elue import (
elue_output_modes,
elue_processors,
elue_convert_examples_to_features,
)
logger = logging.getLogger(__name__)
def load_and_cache_examples_glue(args, task, tokenizer, data_type="train"):
    """Build (or load from cache) a TensorDataset for a GLUE task split.

    Tokenized features are cached next to the data as
    ``cached_<split>_<model>_<seqlen>_<task>`` and reused on later calls
    unless ``args.overwrite_cache`` is set.  In distributed training only
    the first process builds the train cache; the others wait on a
    barrier and then read it.

    Args:
        args: parsed command-line arguments (data_dir, max_seq_length, ...).
        task: GLUE task name, key into ``glue_processors``.
        tokenizer: tokenizer used to convert examples to features.
        data_type: one of "train", "dev" or "test".

    Returns:
        TensorDataset of (input_ids, attention_mask, token_type_ids[, labels]);
        labels are omitted for the "test" split.
    """
    if args.local_rank not in [-1, 0] and data_type == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    processor = glue_processors[task]()
    output_mode = glue_output_modes[task]
    # Load data features from cache or dataset file
    cached_features_file = os.path.join(
        args.data_dir,
        "cached_{}_{}_{}_{}".format(
            data_type,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
            str(task),
        ),
    )
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        if data_type == "train":
            examples = processor.get_train_examples(args.data_dir)
        elif data_type == "dev":
            examples = processor.get_dev_examples(args.data_dir)
        elif data_type == "test":
            examples = processor.get_test_examples(args.data_dir)
        else:
            raise NotImplementedError
        features = glue_convert_examples_to_features(
            examples,
            tokenizer,
            label_list=label_list,
            max_length=args.max_seq_length,
            output_mode=output_mode,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)
    # BUG FIX: the release barrier must mirror the entry barrier above.
    # The entry barrier makes ranks != 0 wait when data_type == "train",
    # but the old condition here (`not data_type == "train"`) meant rank 0
    # never called barrier() for the train split, deadlocking those ranks.
    if args.local_rank == 0 and data_type == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_labels = None
    if data_type != "test":
        if output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
    else:
        dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids)
    return dataset
def load_and_cache_examples_elue(args, task, tokenizer, data_type="train"):
    """Build (or load from cache) a TensorDataset for an ELUE task split.

    Tokenized features are cached next to the data as
    ``cached_<split>_<model>_<seqlen>_<task>`` and reused on later calls
    unless ``args.overwrite_cache`` is set.  In distributed training only
    the first process builds the train cache; the others wait on a
    barrier and then read it.

    Args:
        args: parsed command-line arguments (data_dir, max_seq_length, ...).
        task: ELUE task name, key into ``elue_processors``.
        tokenizer: tokenizer used to convert examples to features.
        data_type: one of "train", "dev" or "test".

    Returns:
        TensorDataset of (input_ids, attention_mask, token_type_ids[, labels]);
        labels are omitted for the "test" split.
    """
    if args.local_rank not in [-1, 0] and data_type == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    processor = elue_processors[task]()
    output_mode = elue_output_modes[task]
    # Load data features from cache or dataset file
    cached_features_file = os.path.join(
        args.data_dir,
        "cached_{}_{}_{}_{}".format(
            data_type,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
            str(task),
        ),
    )
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        if data_type == "train":
            examples = processor.get_train_examples(args.data_dir)
        elif data_type == "dev":
            examples = processor.get_dev_examples(args.data_dir)
        elif data_type == "test":
            examples = processor.get_test_examples(args.data_dir)
        else:
            raise NotImplementedError
        # BUG FIX: this module imports `elue_convert_examples_to_features`;
        # the previous call to the undefined name
        # `elue_sentence_level_convert_examples_to_features` raised a
        # NameError whenever the cache had to be (re)built.
        features = elue_convert_examples_to_features(
            examples,
            tokenizer,
            label_list=label_list,
            max_length=args.max_seq_length,
            output_mode=output_mode,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)
    # BUG FIX: release barrier mirrors the entry barrier above (train split),
    # instead of the old inverted condition `not data_type == "train"` which
    # left ranks != 0 waiting forever during distributed training.
    if args.local_rank == 0 and data_type == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_labels = None
    if data_type != "test":
        if output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
    else:
        dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids)
    return dataset
| 6,271 | 41.378378 | 150 | py |
ElasticBERT | ElasticBERT-main/finetune-static/run_elue.py | import argparse
from genericpath import exists
import glob
import json
import logging
import os
import random
import time
import sys
sys.path.append('../')
from arguments import get_args
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import fitlog
import transformers
from transformers import BertTokenizer as ElasticBertTokenizer
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers.trainer_utils import is_main_process
from models.configuration_elasticbert import ElasticBertConfig
from models.modeling_elasticbert import ElasticBertForSequenceClassification
from models.modeling_elasticbert import ElasticBertForTokenClassification
from evaluations import evaluate_elue
from inferences import inference_elue
from load_data import load_and_cache_examples_elue
from elue import elue_output_modes, elue_processors
logger = logging.getLogger(__name__)
def get_metric_key(task_name):
    """Return the primary dev-set metric key for an ELUE task.

    Raises:
        KeyError: if `task_name` is not a supported ELUE task.
    """
    metric_by_task = {
        "sst-2": "acc",
        "mrpc": "acc_and_f1",
        "sts-b": "corr",
        "imdb": "acc",
        "snli": "acc",
        "scitail": "acc",
    }
    if task_name not in metric_by_task:
        raise KeyError(task_name)
    return metric_by_task[task_name]
def set_seed(args):
    """Seed the python, numpy and torch RNGs (and CUDA, when GPUs are
    in use) with `args.seed` for reproducible runs."""
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer):
    """Fine-tune `model` on `train_dataset` for an ELUE task.

    Standard transformers-style loop: AdamW with linear warmup/decay,
    optional fp16 via apex, multi-GPU (DataParallel) and distributed
    (DDP) training.  Periodically evaluates on the dev set with
    `evaluate_elue`, tracks the task's primary metric, saves the best
    checkpoint under ``<output_dir>/best_model`` (unless
    ``args.not_save_model``) and early-stops after
    ``args.early_stop_steps`` evaluations without improvement.

    Returns:
        tuple ``(global_step, tr_loss / global_step, best_all_metric)``
        where ``best_all_metric`` holds the dev metrics recorded at the
        best evaluation step.
    """
    # fitlog bookkeeping happens on the main process only.
    if args.debug:
        fitlog.debug()
    if args.local_rank in [-1, 0]:
        fitlog.set_log_dir(args.log_dir)
        fitlog.add_hyper(args)
    if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(args.output_dir)
    # Effective per-node batch size scales with the number of GPUs.
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # Either train a fixed number of optimizer steps (max_steps wins) or
    # derive the total optimizer-step count from the epoch count.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay)
    # Biases and LayerNorm weights are excluded from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    # An explicit warmup step count takes precedence over the warmup rate.
    if args.warmup_steps > 0:
        num_warmup_steps = args.warmup_steps
    else:
        num_warmup_steps = args.warmup_rate * t_total
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total
    )
    # Check if saved optimizer or scheduler states exist
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True,
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        "  Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    best_all_metric = {}  # dev metrics captured at the best step so far
    keep_best_step = 0  # evaluations since last improvement (early stopping)
    tr_loss, logging_loss, best = 0.0, 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained,
        int(args.num_train_epochs),
        desc="Epoch",
        disable=args.local_rank not in [-1, 0],
    )
    set_seed(args)  # Added here for reproductibility
    metric_key = get_metric_key(args.task_name)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "labels": batch[3],
            }
            inputs["token_type_ids"] = batch[2]
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Only step the optimizer every `gradient_accumulation_steps` batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                # Periodic dev-set evaluation every `logging_steps` updates.
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    logs = {}
                    keep_best_step += 1
                    if (
                        args.local_rank == -1 and args.evaluate_during_training
                    ): # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate_elue(args, model, tokenizer)
                        # results = evaluate(args, model, tokenizer)
                        res_for_display = {}
                        for k, v in results.items():
                            res_for_display[k.replace("-", "_")] = v
                        fitlog.add_metric({"dev": res_for_display}, step=global_step)
                        if results[metric_key] > best:
                            keep_best_step = 0
                            best = results[metric_key]
                            best_all_metric.update(results)
                            fitlog.add_best_metric({"dev": {metric_key.replace("-", "_"): best}})
                            # save the best model
                            if not args.not_save_model:
                                output_dir = os.path.join(args.output_dir, "best_model")
                                model_to_save = (
                                    model.module if hasattr(model, "module") else model
                                )  # Take care of distributed/parallel training
                                model_to_save.save_pretrained(output_dir)
                                tokenizer.save_pretrained(output_dir)
                                torch.save(args, os.path.join(output_dir, "training_args.bin"))
                                logger.info("Saving model checkpoint to %s", output_dir)
                                torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                                torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                                logger.info("Saving optimizer and scheduler states to %s", output_dir)
                        for key, value in results.items():
                            eval_key = "eval_{}".format(key)
                            logs[eval_key] = value
                    loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                    learning_rate_scalar = scheduler.get_lr()[0]
                    logs["learning_rate"] = learning_rate_scalar
                    logs["loss"] = loss_scalar
                    logging_loss = tr_loss
                    fitlog.add_loss(loss_scalar, name="Loss", step=global_step)
                    print(json.dumps({**logs, **{"step": global_step}}))
                    # Early stopping: no improvement for `early_stop_steps` evals.
                    if keep_best_step >= args.early_stop_steps:
                        epoch_iterator.close()
                        break
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        # When logging_steps == 0, evaluate once at the end of each epoch instead.
        if (
            args.local_rank == -1 and args.evaluate_during_training and args.logging_steps == 0
        ):
            keep_best_step += 1
            logs = {}
            results = evaluate_elue(args, model, tokenizer)
            res_for_display = {}
            for k, v in results.items():
                res_for_display[k.replace("-", "_")] = v
            fitlog.add_metric({"dev": res_for_display}, step=global_step)
            if results[metric_key] > best:
                keep_best_step = 0
                best = results[metric_key]
                best_all_metric.update(results)
                fitlog.add_best_metric({"dev": {metric_key.replace("-", "_"): best}})
                # save the best model
                if not args.not_save_model:
                    output_dir = os.path.join(args.output_dir, "best_model")
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)
            for key, value in results.items():
                eval_key = "eval_{}".format(key)
                logs[eval_key] = value
            learning_rate_scalar = scheduler.get_lr()[0]
            logs["learning_rate"] = learning_rate_scalar
            print(json.dumps({**logs, **{"step": global_step}}))
        if keep_best_step >= args.early_stop_steps:
            train_iterator.close()
            # NOTE(review): uses the root `logging` module here, not `logger`.
            logging.info("The task stops early at step {}.".format(global_step))
            break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    logs = {}
    # Final evaluation for runs that ended between two periodic evals.
    if (
        args.local_rank == -1 and args.evaluate_during_training and args.logging_steps > 0 and
        global_step % args.logging_steps != 0 and keep_best_step < args.early_stop_steps
    ):
        results = evaluate_elue(args, model, tokenizer)
        res_for_display = {}
        for k, v in results.items():
            res_for_display[k.replace("-", "_")] = v
        fitlog.add_metric({"dev": res_for_display}, step=global_step)
        if results[metric_key] > best:
            best = results[metric_key]
            best_all_metric.update(results)
            fitlog.add_best_metric({"dev": {metric_key.replace("-", "_"): best}})
            # save the best model
            if not args.not_save_model:
                output_dir = os.path.join(args.output_dir, "best_model")
                model_to_save = (
                    model.module if hasattr(model, "module") else model
                )  # Take care of distributed/parallel training
                model_to_save.save_pretrained(output_dir)
                tokenizer.save_pretrained(output_dir)
                torch.save(args, os.path.join(output_dir, "training_args.bin"))
                logger.info("Saving model checkpoint to %s", output_dir)
                torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                logger.info("Saving optimizer and scheduler states to %s", output_dir)
        for key, value in results.items():
            eval_key = "eval_{}".format(key)
            logs[eval_key] = value
        learning_rate_scalar = scheduler.get_lr()[0]
        logs["learning_rate"] = learning_rate_scalar
        print(json.dumps({**logs, **{"step": global_step}}))
    if args.local_rank in [-1, 0]:
        fitlog.finish()
    return global_step, tr_loss / global_step, best_all_metric
def main():
    """Entry point for ELUE fine-tuning of ElasticBERT.

    Parses arguments, sets up device/distributed state, logging and
    seeding, loads the ElasticBERT model/tokenizer, optionally trains
    (``--do_train``) and runs test-set inference with the best saved
    checkpoint (``--do_infer``).

    Returns:
        dict of best dev metrics from training, or None if ``--do_train``
        was not given.
    """
    args = get_args()
    # Best-effort directory creation; may race with a sibling process.
    if not os.path.exists(args.log_dir):
        try:
            os.makedirs(args.log_dir)
        except:
            pass
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    # Set seed
    set_seed(args)
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in elue_processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = elue_processors[args.task_name]()
    args.output_mode = elue_output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    config = ElasticBertConfig.from_pretrained(
        args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        num_hidden_layers=args.num_hidden_layers,
        num_output_layers=args.num_output_layers,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = ElasticBertTokenizer.from_pretrained(
        args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = ElasticBertForSequenceClassification.from_pretrained(
        args.model_name_or_path,
        config=config,
        add_pooling_layer=True,
    )
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    print("Total Model Parameters:", sum(param.numel() for param in model.parameters()))
    logger.info("Training/evaluation parameters %s", args)
    train_dataset = None
    best_all_metric = None
    if args.do_train:
        train_dataset = load_and_cache_examples_elue(args, args.task_name, tokenizer, data_type="train")
        global_step, tr_loss, best_all_metric = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Inference reloads the best checkpoint saved during training.
    if args.do_infer:
        best_model_path = os.path.join(args.output_dir, "best_model")
        if os.path.exists(best_model_path):
            model = ElasticBertForSequenceClassification.from_pretrained(best_model_path)
            model.to(args.device)
            results = inference_elue(args, model, tokenizer)
        else:
            raise Exception("There is no best model path.")
    return best_all_metric
if __name__ == "__main__":
    best = main()  # best dev metrics of the run (None when --do_train is not set)
| 19,442 | 38.3583 | 123 | py |
ElasticBERT | ElasticBERT-main/finetune-static/models/modeling_elasticbert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ElasticBERT model. """
import math
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import LayerNorm
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from transformers.utils import logging
from .configuration_elasticbert import ElasticBertConfig
logger = logging.get_logger(__name__)
# Identifiers used by the transformers docstring decorators below.
_CHECKPOINT_FOR_DOC = "fnlp/elasticbert-base"
_CONFIG_FOR_DOC = "ElasticBertConfig"
_TOKENIZER_FOR_DOC = "ElasticBertTokenizer"
# Hub checkpoints known to be compatible with this architecture.
ELASTICBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "fnlp/elasticbert-base",
    "fnlp/elasticbert-large",
]
class GradientRescaleFunction(torch.autograd.Function):
    """Identity in the forward pass; multiplies the incoming gradient by a
    constant ``weight`` in the backward pass.

    Used by the multi-exit encoder to rebalance gradients flowing into
    intermediate exit layers. ``weight`` is treated as a non-differentiable
    constant (its gradient is always ``None``).
    """

    @staticmethod
    def forward(ctx, input, weight):
        # The saved tensor is not needed to compute the gradient, but keep the
        # original save_for_backward call so autograd bookkeeping is unchanged.
        ctx.save_for_backward(input)
        ctx.gd_scale_weight = weight
        return input

    @staticmethod
    def backward(ctx, grad_outputs):
        # BUG FIX: ctx.saved_tensors is a *tuple*; the original bound the whole
        # tuple to `input`. Unpack it so the name actually refers to the tensor.
        (input,) = ctx.saved_tensors
        grad_input = grad_weight = None
        if ctx.needs_input_grad[0]:
            # Rescale the gradient by the constant factor; no grad for `weight`.
            grad_input = ctx.gd_scale_weight * grad_outputs
        return grad_input, grad_weight
gradient_rescale = GradientRescaleFunction.apply
class ElasticBertEmbeddings(nn.Module):
    """Sum of word, position and token-type embeddings, followed by
    LayerNorm and dropout (standard BERT embedding stack)."""
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # Attribute is capitalized ("LayerNorm") to stay compatible with
        # TensorFlow checkpoint variable names.
        self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Persistent (1, max_len) index buffer so position ids serialize with the model.
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None
    ):
        # Derive (batch, seq_len) from whichever input was provided.
        shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        seq_len = shape[1]
        if position_ids is None:
            position_ids = self.position_ids[:, :seq_len]
        if token_type_ids is None:
            # Default: everything belongs to segment 0.
            token_type_ids = torch.zeros(shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        out = inputs_embeds + self.token_type_embeddings(token_type_ids)
        if self.position_embedding_type == "absolute":
            out = out + self.position_embeddings(position_ids)
        return self.dropout(self.LayerNorm(out))
class ElasticBertSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention (BERT formulation),
    with optional relative position scores ("relative_key" /
    "relative_key_query") added to the attention logits."""
    def __init__(self, config):
        super().__init__()
        # hidden_size must split evenly across heads unless an explicit
        # embedding_size is configured.
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = config.hidden_size // config.num_attention_heads
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, config.hidden_size)
        self.key = nn.Linear(config.hidden_size, config.hidden_size)
        self.value = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        # Defaults to absolute positions; the relative variants embed
        # pairwise distances in [-(L-1), L-1].
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
    def transpose_for_scores(self, x):
        # (batch, seq, hidden) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            # Pairwise signed distances, shifted to non-negative indices.
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility
            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                # Distance scores from both the query and the key side.
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in ElasticBertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        # (batch, heads, seq, head_size) -> (batch, seq, hidden)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
class ElasticBertSelfOutput(nn.Module):
    """Post-attention projection with dropout and a residual LayerNorm."""
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # Capitalized to match TF checkpoint variable names.
        self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection around the attention block, then normalize.
        return self.LayerNorm(projected + input_tensor)
class ElasticBertAttention(nn.Module):
    """Self-attention + output projection, with support for head pruning."""
    def __init__(self, config):
        super().__init__()
        self.self = ElasticBertSelfAttention(config)
        self.output = ElasticBertSelfOutput(config)
        # Indices of heads already removed; kept so repeated pruning is consistent.
        self.pruned_heads = set()
    def prune_heads(self, heads):
        """Remove the given attention heads in place (no-op for an empty list)."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        # Output projection is pruned along its *input* dimension (dim=1).
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
    ):
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
class ElasticBertIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size + activation."""
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # hidden_act may be a registered name (e.g. "gelu") or a callable.
        self.intermediate_act_fn = (
            ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        )
    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class ElasticBertOutput(nn.Module):
    """Feed-forward contraction: intermediate_size -> hidden_size, with
    dropout and a residual LayerNorm."""
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        # Capitalized to match TF checkpoint variable names.
        self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        # Residual connection around the feed-forward block, then normalize.
        return self.LayerNorm(contracted + input_tensor)
class ElasticBertLayer(nn.Module):
    """One transformer encoder layer: attention block + feed-forward block,
    with the feed-forward optionally chunked along the sequence dimension."""
    def __init__(self, config):
        super().__init__()
        # chunk_size_feed_forward > 0 trades speed for memory in the FFN.
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ElasticBertAttention(config)
        self.intermediate = ElasticBertIntermediate(config)
        self.output = ElasticBertOutput(config)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
    ):
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
        # Run the FFN in sequence-dimension chunks (transformers utility).
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        return outputs
    def feed_forward_chunk(self, attention_output):
        # Expansion + contraction applied to one chunk of the sequence.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class ElasticBertPooler(nn.Module):
    """Sentence-level pooling: tanh(W · h_first) over the first token's
    hidden state."""
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()
    def forward(self, hidden_states):
        # Pooling here is simply the transformed hidden state of token 0
        # (the [CLS] position).
        first_token = hidden_states[:, 0]
        return self.activation(self.dense(first_token))
class ElasticBertEncoder(nn.Module):
    """Stack of transformer layers with multiple "exits": the last
    ``num_output_layers`` layers each expose a sequence output and (optionally)
    a pooled output, so shallow sub-models can be trained/evaluated jointly."""
    def __init__(self, config, add_pooling_layer=None):
        super().__init__()
        self.config = config
        self.add_pooling_layer = add_pooling_layer
        self.num_output_layers = config.num_output_layers
        self.num_hidden_layers = config.num_hidden_layers
        self.max_output_layers = config.max_output_layers
        self.layer = nn.ModuleList([ElasticBertLayer(config) for _ in range(config.num_hidden_layers)])
        assert self.num_output_layers <= self.num_hidden_layers, \
            "The total number of layers must be be greater than or equal to the number of the output layers. "
        # Pooler lists are padded with None up to max_output_layers so
        # checkpoints keep stable parameter names across configurations.
        self.start_output_layer = None
        self.current_pooler_num = None
        if self.num_output_layers > 1:
            # Multi-exit: exits span the last num_output_layers layers.
            self.start_output_layer = self.num_hidden_layers - self.num_output_layers
            start_pooler_num = self.start_output_layer
            end_pooler_num = self.num_hidden_layers - 1
            if add_pooling_layer:
                self.pooler = nn.ModuleList([ElasticBertPooler(config) if i >= start_pooler_num and \
                    i <= end_pooler_num else None for i in range(self.max_output_layers)])
        elif self.num_output_layers == 1:
            # Single exit at the top layer only.
            self.current_pooler_num = self.num_hidden_layers - 1
            if add_pooling_layer:
                self.pooler = nn.ModuleList([ElasticBertPooler(config) if i == self.current_pooler_num \
                    else None for i in range(self.max_output_layers)])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        final_pooled_output = None
        output_sequence_outputs = () if self.num_output_layers > 1 else None
        output_pooled_outputs = () if self.num_output_layers > 1 else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            if getattr(self.config, "gradient_checkpointing", False) and self.training:
                # Trade compute for memory: recompute activations in backward.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    output_attentions,
                )
            hidden_states = layer_outputs[0]
            if self.num_output_layers > 1:
                if i >= self.start_output_layer:
                    # Rescale gradients so each exit contributes a balanced
                    # share: down-weight before collecting the exit output,
                    # re-weight before feeding the next layer.
                    if self.training:
                        hidden_states = gradient_rescale(hidden_states, 1.0 / (self.num_hidden_layers - i))
                    output_sequence_outputs += (hidden_states, )
                    if self.add_pooling_layer:
                        pooled_output = self.pooler[i](hidden_states)
                        output_pooled_outputs += (pooled_output, )
                    else:
                        # No pooler: fall back to the raw first-token state.
                        output_pooled_outputs += (hidden_states[:, 0], )
                    if self.training:
                        hidden_states = gradient_rescale(hidden_states, (self.num_hidden_layers - i -1))
            elif self.num_output_layers == 1:
                if i == self.num_hidden_layers - 1:
                    if self.add_pooling_layer:
                        final_pooled_output = self.pooler[self.current_pooler_num](hidden_states)
                    else:
                        final_pooled_output = hidden_states[:, 0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        # Tuple of the non-None entries, in this fixed order.
        return tuple(
            v
            for v in [
                hidden_states,
                output_sequence_outputs,
                output_pooled_outputs,
                final_pooled_output,
                all_hidden_states,
                all_self_attentions,
            ]
            if v is not None
        )
class ElasticBertPreTrainedModel(PreTrainedModel):
    """
    Base class hooking ElasticBert into the transformers save/load machinery
    and providing the standard weight-initialization scheme.
    """
    config_class = ElasticBertConfig
    base_model_prefix = "elasticbert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    def _init_weights(self, module):
        """Initialize ``module`` in place according to its type."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # Plain normal init (the TF original uses truncated normal,
            # cf https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Keep the padding vector at exactly zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # Start as the identity affine transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
ELASTICBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~ElasticBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
ELASTICBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
"""
@add_start_docstrings(
    "The bare ElasticBert Model transformer outputting raw hidden-states without any specific head on top.",
    ELASTICBERT_START_DOCSTRING,
)
class ElasticBertModel(ElasticBertPreTrainedModel):
    """Backbone: embeddings + multi-exit encoder. Returns (sequence, pooled)
    pairs — tuples of per-exit outputs when num_output_layers > 1, single
    tensors when num_output_layers == 1."""
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.add_pooling_layer = add_pooling_layer
        self.num_output_layers = config.num_output_layers
        self.num_hidden_layers = config.num_hidden_layers
        self.max_output_layers = config.max_output_layers
        self.embeddings = ElasticBertEmbeddings(config)
        self.encoder = ElasticBertEncoder(config, add_pooling_layer=add_pooling_layer)
        self.init_weights()
    def get_input_embeddings(self):
        # Token embedding table; required accessor for the transformers API.
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(ELASTICBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        # Fall back to config-level flags when not provided per call.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            # Default: attend to every position.
            attention_mask = torch.ones(((batch_size, seq_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        # Multi-exit: encoder returns (last_hidden, seq_tuple, pooled_tuple, ...).
        if self.num_output_layers > 1:
            sequence_outputs = encoder_outputs[1]
            pooled_output = encoder_outputs[2]
            return (sequence_outputs, pooled_output)
        # Single exit: encoder returns (last_hidden, final_pooled, ...).
        elif self.num_output_layers == 1:
            sequence_outputs = encoder_outputs[0]
            pooled_output = encoder_outputs[1]
            return (sequence_outputs, pooled_output)
@add_start_docstrings(
    """
    ElasticBert Model transformer with a sequence classification/regression head on top
    (a linear layer on top of the pooled output) e.g. for GLUE tasks.
    """,
    ELASTICBERT_START_DOCSTRING,
)
class ElasticBertForSequenceClassification(ElasticBertPreTrainedModel):
    """Classification/regression head over the pooled output.
    NOTE(review): the head reads ``outputs[1]`` as a single pooled tensor,
    which matches num_output_layers == 1; with multiple exits outputs[1] is a
    tuple — presumably this class is only used with one exit. TODO confirm."""
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.num_labels = config.num_labels
        self.add_pooling_layer = add_pooling_layer
        self.elasticbert = ElasticBertModel(config, add_pooling_layer=add_pooling_layer)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_model_forward(ELASTICBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs = self.elasticbert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        # outputs[1] is the pooled representation (see class note above).
        output = outputs[1]
        output = self.dropout(output)
        logits = self.classifier(output)
        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.squeeze(), labels.squeeze())
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        # loss is None when no labels are supplied.
        return (loss, logits)
| 29,676 | 40.506294 | 119 | py |
ElasticBERT | ElasticBERT-main/FLOPs/flops_counter.py | '''
Copyright (C) 2019 Sovrasov V. - All Rights Reserved
* You may use, distribute and modify this code under the
* terms of the MIT license.
* You should have received a copy of the MIT license with
* this file. If not visit https://opensource.org/licenses/MIT
'''
import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from models.modeling_bert import BertSelfAttention
# from transformers.models.bert.modeling_bert import BertSelfAttention
from transformers.models.albert.modeling_albert import AlbertAttention
from transformers.models.roberta.modeling_roberta import RobertaSelfAttention
from models.modeling_elasticbert import ElasticBertSelfAttention
def get_model_complexity_info(model, input_res,
                              print_per_layer_stat=True,
                              as_strings=True,
                              input_constructor=None, ost=sys.stdout,
                              verbose=False, ignore_modules=None,
                              custom_modules_hooks=None):
    """Measure a model's MAC count and parameter count on one dummy forward pass.

    Args:
        model: the ``nn.Module`` to measure (put into eval mode here).
        input_res: tuple with the per-sample input shape (batch dim excluded).
        print_per_layer_stat: dump a per-module breakdown to ``ost``.
        as_strings: return human-readable strings instead of raw numbers.
        input_constructor: optional callable mapping ``input_res`` to a kwargs
            dict when the model needs more than a single tensor input.
        ost: output stream for the per-layer report.
        verbose: warn about modules treated as zero-op.
        ignore_modules: module *types* to exclude from counting.
        custom_modules_hooks: {class_name: hook} overrides for counting.

    Returns:
        (flops, params) — strings if ``as_strings`` else numbers.
    """
    # BUG FIX: the original used mutable default arguments ([] and {});
    # use None sentinels so call sites can never share state across calls.
    if ignore_modules is None:
        ignore_modules = []
    if custom_modules_hooks is None:
        custom_modules_hooks = {}
    assert type(input_res) is tuple
    assert len(input_res) >= 1
    assert isinstance(model, nn.Module)
    global CUSTOM_MODULES_MAPPING
    if len(custom_modules_hooks) != 0:
        CUSTOM_MODULES_MAPPING = custom_modules_hooks
    flops_model = add_flops_counting_methods(model)
    flops_model.eval()
    flops_model.start_flops_count(ost=ost, verbose=verbose,
                                  ignore_list=ignore_modules)
    if input_constructor:
        # Model expects keyword inputs (e.g. transformers-style forward).
        input = input_constructor(input_res)
        _ = flops_model(**input)
    else:
        try:
            # Match the model's own dtype/device for the dummy batch.
            batch = torch.ones(()).new_empty((1, *input_res),
                                             dtype=next(flops_model.parameters()).dtype,
                                             device=next(flops_model.parameters()).device)
        except StopIteration:
            # Parameter-less model: fall back to default dtype/device.
            batch = torch.ones(()).new_empty((1, *input_res))
        _ = flops_model(batch)
    flops_count, params_count = flops_model.compute_average_flops_cost()
    if print_per_layer_stat:
        print_model_with_flops(flops_model, flops_count, params_count, ost=ost)
    flops_model.stop_flops_count()
    if as_strings:
        return flops_to_string(flops_count), params_to_string(params_count)
    return flops_count, params_count
def flops_to_string(flops, units='MFlops', precision=2):
    """Render a MAC count as a human-readable string.

    With ``units=None`` the largest fitting MAC unit is auto-selected.
    The ``*Flops`` units report 2 * MACs (one multiply + one add).
    Unknown units fall back to raw ' Mac'.
    """
    if units is None:
        # Auto-scale: pick the biggest unit with a non-zero integer part.
        for divisor, suffix in ((10 ** 9, ' GMac'), (10 ** 6, ' MMac'), (10 ** 3, ' KMac')):
            if flops // divisor > 0:
                return str(round(flops / divisor, precision)) + suffix
        return str(flops) + ' Mac'
    mac_scales = {'GMac': 10. ** 9, 'MMac': 10. ** 6, 'KMac': 10. ** 3}
    flop_scales = {'GFlops': 10. ** 9, 'MFlops': 10. ** 6, 'KFlops': 10. ** 3}
    if units in mac_scales:
        return str(round(flops / mac_scales[units], precision)) + ' ' + units
    if units in flop_scales:
        # 1 MAC = 2 floating point operations.
        return str(round(2 * flops / flop_scales[units], precision)) + ' ' + units
    return str(flops) + ' Mac'
def params_to_string(params_num, units=None, precision=2):
    """Render a parameter count as a human-readable string.

    Args:
        params_num: raw parameter count.
        units: 'M', 'K', or None to auto-select ('M'/'k'/raw).
        precision: decimal places for rounding.

    Returns:
        Formatted string, e.g. '1.5 M'.
    """
    if units is None:
        # BUG FIX: the original hard-coded 2 decimal places here, silently
        # ignoring the `precision` parameter in the auto-scaling branch.
        if params_num // 10 ** 6 > 0:
            return str(round(params_num / 10 ** 6, precision)) + ' M'
        elif params_num // 10 ** 3:
            return str(round(params_num / 10 ** 3, precision)) + ' k'
        else:
            return str(params_num)
    else:
        if units == 'M':
            return str(round(params_num / 10.**6, precision)) + ' ' + units
        elif units == 'K':
            return str(round(params_num / 10.**3, precision)) + ' ' + units
        else:
            return str(params_num)
def accumulate_flops(self):
    """Recursively total __flops__ over this module's subtree.

    Counted (leaf) modules report their own __flops__; containers sum
    over their children (an empty container contributes 0).
    """
    if is_supported_instance(self):
        return self.__flops__
    return sum(child.accumulate_flops() for child in self.children())
def print_model_with_flops(model, total_flops, total_params, units='GMac',
                           precision=3, ost=sys.stdout):
    """Print the model's repr with per-module params/MACs annotations.

    Temporarily patches each submodule's ``extra_repr`` so ``repr(model)``
    shows absolute and relative cost, then restores the originals.
    """
    # Guard against division by zero in the percentage columns below.
    if total_flops < 1:
        total_flops = 1
    def accumulate_params(self):
        # Leaf (counted) modules carry __params__; containers sum children.
        if is_supported_instance(self):
            return self.__params__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_params()
            return sum
    def flops_repr(self):
        # One annotation line: params, params%, MACs (batch-averaged), MACs%,
        # followed by the module's original extra_repr.
        accumulated_params_num = self.accumulate_params()
        accumulated_flops_cost = self.accumulate_flops() / model.__batch_counter__
        return ', '.join([params_to_string(accumulated_params_num,
                                           units='M', precision=precision),
                          '{:.3%} Params'.format(accumulated_params_num / total_params),
                          flops_to_string(accumulated_flops_cost,
                                          units=units, precision=precision),
                          '{:.3%} MACs'.format(accumulated_flops_cost / total_flops),
                          self.original_extra_repr()])
    def add_extra_repr(m):
        # Bind helpers as methods and swap in the annotated extra_repr.
        m.accumulate_flops = accumulate_flops.__get__(m)
        m.accumulate_params = accumulate_params.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if m.extra_repr != flops_extra_repr:
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert m.extra_repr != m.original_extra_repr
    def del_extra_repr(m):
        # Undo the monkeypatching so the model is left untouched.
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops
    model.apply(add_extra_repr)
    print(repr(model), file=ost)
    model.apply(del_extra_repr)
def get_model_parameters_number(model):
    """Return the number of trainable parameters in ``model``."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
def add_flops_counting_methods(net_main_module):
    """Attach the flops-counting API to a module instance and reset counters.

    The free functions are bound via ``__get__`` so each behaves like a
    regular method of ``net_main_module`` (i.e. receives it as ``self``).
    """
    for attr_name, func in (
        ('start_flops_count', start_flops_count),
        ('stop_flops_count', stop_flops_count),
        ('reset_flops_count', reset_flops_count),
        ('compute_average_flops_cost', compute_average_flops_cost),
    ):
        setattr(net_main_module, attr_name, func.__get__(net_main_module))
    net_main_module.reset_flops_count()
    return net_main_module
def compute_average_flops_cost(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.
    Returns (mean MACs per processed sample, trainable parameter count).
    """
    # Temporarily bind the recursive accumulator to every submodule.
    for module in self.modules():
        module.accumulate_flops = accumulate_flops.__get__(module)
    total_flops = self.accumulate_flops()
    # Clean up the temporary bindings.
    for module in self.modules():
        if hasattr(module, 'accumulate_flops'):
            del module.accumulate_flops
    return total_flops / self.__batch_counter__, get_model_parameters_number(self)
def start_flops_count(self, **kwargs):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.
    Activates the computation of mean flops consumption per image.
    Call it before you run the network.
    """
    add_batch_counter_hook_function(self)
    # Shared across the recursive apply() below so each module type is only
    # warned about once.
    seen_types = set()
    def add_flops_counter_hook_function(module, ost, verbose, ignore_list):
        if type(module) in ignore_list:
            seen_types.add(type(module))
            # Ignored-but-countable modules get zero params so they don't
            # distort the per-layer percentage report.
            if is_supported_instance(module):
                module.__params__ = 0
        elif is_supported_instance(module):
            # Already hooked (e.g. start called twice): do nothing.
            if hasattr(module, '__flops_handle__'):
                return
            # User-supplied hooks (by class name) take precedence over the
            # built-in per-type mapping.
            if module.__class__.__name__ in CUSTOM_MODULES_MAPPING:
                handle = module.register_forward_hook(
                    CUSTOM_MODULES_MAPPING[module.__class__.__name__])
            else:
                handle = module.register_forward_hook(MODULES_MAPPING[type(module)])
            module.__flops_handle__ = handle
            seen_types.add(type(module))
        else:
            # print(module.__class__.__name__)
            if verbose and not type(module) in (nn.Sequential, nn.ModuleList) and \
               not type(module) in seen_types:
                print('Warning: module ' + type(module).__name__ +
                      ' is treated as a zero-op.', file=ost)
            seen_types.add(type(module))
    self.apply(partial(add_flops_counter_hook_function, **kwargs))
def stop_flops_count(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.
    Stops computing the mean flops consumption per image.
    Call whenever you want to pause the computation.
    """
    # Detach both the batch counter and every per-module forward hook.
    remove_batch_counter_hook_function(self)
    self.apply(remove_flops_counter_hook_function)
def reset_flops_count(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.
    Resets statistics computed so far.
    """
    # Zero the sample counter and every module's __flops__/__params__ fields.
    add_batch_counter_variables_or_reset(self)
    self.apply(add_flops_counter_variable_or_reset)
# ---- Internal functions
def empty_flops_counter_hook(module, input, output):
    """Zero-op hook: keeps the hook interface but contributes no MACs."""
    module.__flops__ += 0
def upsample_flops_counter_hook(module, input, output):
    """Count one operation per element of the first output tensor."""
    # Product over all dims (batch included) of the first output's shape.
    module.__flops__ += int(np.prod(output[0].shape))
def relu_flops_counter_hook(module, input, output):
    """Count one operation per activated output element."""
    module.__flops__ += int(output.numel())
def linear_flops_counter_hook(module, input, output):
    """Forward hook for nn.Linear: input elements times output features.

    pytorch has already validated the dimensions, so only the last output
    dimension matters here.
    """
    x = input[0]
    out_features = output.shape[-1]
    module.__flops__ += int(np.prod(x.shape) * out_features)
def pool_flops_counter_hook(module, input, output):
    """Forward hook for pooling layers: one FLOP per input element."""
    module.__flops__ += int(np.prod(input[0].shape))
def norm_flops_counter_hook(module, input, output):
    """Forward hook for batch/instance/layer/group normalization.

    Counts one FLOP per input element for the normalization itself, doubled
    when a learned affine transform (scale + shift) is applied.
    """
    x = input[0]
    flops = np.prod(x.shape)
    has_affine = (getattr(module, 'affine', False)
                  or getattr(module, 'elementwise_affine', False))
    if has_affine:
        flops *= 2
    module.__flops__ += int(flops)
def conv_flops_counter_hook(conv_module, input, output):
    """Forward hook for convolutions (incl. transposed convolutions).

    FLOPs = kernel volume * (in_channels/groups contribution) * out_channels
    per output position, plus one FLOP per output element for the bias when
    present.
    """
    x = input[0]  # hooks may receive several inputs; only the first matters
    batch_size = x.shape[0]
    spatial_out = list(output.shape[2:])

    kernel_volume = int(np.prod(list(conv_module.kernel_size)))
    filters_per_channel = conv_module.out_channels // conv_module.groups
    flops_per_position = kernel_volume * conv_module.in_channels * filters_per_channel

    positions = batch_size * int(np.prod(spatial_out))
    total_flops = flops_per_position * positions
    if conv_module.bias is not None:
        total_flops += conv_module.out_channels * positions
    conv_module.__flops__ += int(total_flops)
def batch_counter_hook(module, input, output):
    """Forward hook that accumulates the number of processed samples.

    The batch size is taken as ``len()`` of the first positional input; when
    the module is called with no positional inputs, a batch of 1 is assumed.
    """
    if len(input) > 0:
        # Can have multiple inputs, getting the first one
        batch_size = len(input[0])
    else:
        batch_size = 1
        # print('Warning! No positional inputs found for a module,'
        #       ' assuming batch size is 1.')
    module.__batch_counter__ += batch_size
def rnn_flops(flops, rnn_module, w_ih, w_hh, input_size):
    """Add the per-timestep FLOPs of one recurrent layer to ``flops``.

    Accounts for the input-to-hidden and hidden-to-hidden matrix products,
    plus the gate/elementwise arithmetic specific to vanilla RNN, GRU and
    LSTM cells.  ``input_size`` is unused but kept for interface
    compatibility with the callers.
    """
    # input-to-hidden and hidden-to-hidden matrix multiplications
    flops += w_ih.shape[0] * w_ih.shape[1]
    flops += w_hh.shape[0] * w_hh.shape[1]
    hidden = rnn_module.hidden_size
    if isinstance(rnn_module, (nn.RNN, nn.RNNCell)):
        # single elementwise add of the two projections
        flops += hidden
    elif isinstance(rnn_module, (nn.GRU, nn.GRUCell)):
        # hadamard of r, the two state adds, and the final interpolation
        flops += hidden + hidden * 3 + hidden * 3
    elif isinstance(rnn_module, (nn.LSTM, nn.LSTMCell)):
        # gate adds, C-state hadamards/add, and the final hadamard
        flops += hidden * 4 + hidden * 3 + hidden * 3
    return flops
def rnn_flops_counter_hook(rnn_module, input, output):
    """
    Takes into account batch goes at first position, contrary
    to pytorch common rule (but actually it doesn't matter).
    IF sigmoid and tanh are made hard, only a comparison FLOPS should be accurate
    """
    flops = 0
    # input is a tuple containing a sequence to process and (optionally) hidden state
    inp = input[0]
    # NOTE(review): assumes (batch, seq, ...) input layout — confirm callers
    # construct the RNN with batch_first=True.
    batch_size = inp.shape[0]
    seq_length = inp.shape[1]
    num_layers = rnn_module.num_layers
    for i in range(num_layers):
        w_ih = rnn_module.__getattr__('weight_ih_l' + str(i))
        w_hh = rnn_module.__getattr__('weight_hh_l' + str(i))
        if i == 0:
            input_size = rnn_module.input_size
        else:
            input_size = rnn_module.hidden_size
        flops = rnn_flops(flops, rnn_module, w_ih, w_hh, input_size)
        if rnn_module.bias:
            b_ih = rnn_module.__getattr__('bias_ih_l' + str(i))
            b_hh = rnn_module.__getattr__('bias_hh_l' + str(i))
            flops += b_ih.shape[0] + b_hh.shape[0]
    # per-timestep cost scaled by batch and sequence length
    flops *= batch_size
    flops *= seq_length
    if rnn_module.bidirectional:
        # forward and backward directions cost the same
        flops *= 2
    rnn_module.__flops__ += int(flops)
def rnn_cell_flops_counter_hook(rnn_cell_module, input, output):
    """Forward hook for RNN/GRU/LSTM *cells*: one timestep over the batch."""
    flops = 0
    inp = input[0]
    batch_size = inp.shape[0]
    w_ih = rnn_cell_module.__getattr__('weight_ih')
    w_hh = rnn_cell_module.__getattr__('weight_hh')
    input_size = inp.shape[1]
    flops = rnn_flops(flops, rnn_cell_module, w_ih, w_hh, input_size)
    if rnn_cell_module.bias:
        b_ih = rnn_cell_module.__getattr__('bias_ih')
        b_hh = rnn_cell_module.__getattr__('bias_hh')
        flops += b_ih.shape[0] + b_hh.shape[0]
    flops *= batch_size
    rnn_cell_module.__flops__ += int(flops)
def multihead_attention_counter_hook(multihead_attention_module, input, output):
    """Forward hook for nn.MultiheadAttention.

    Counts the Q/K/V input projections, the per-head scale/QK^T/softmax/AV
    chain, and the output projection.  Inputs are read as (seq_len, batch,
    embed): ``q.shape[1]`` is used as the batch dimension below.
    """
    q, k, v = input
    batch_size = q.shape[1]
    num_heads = multihead_attention_module.num_heads
    embed_dim = multihead_attention_module.embed_dim
    kdim = multihead_attention_module.kdim
    vdim = multihead_attention_module.vdim
    if kdim is None:
        kdim = embed_dim
    if vdim is None:
        vdim = embed_dim
    q_len, k_len, v_len = q.shape[0], k.shape[0], v.shape[0]
    # initial Q/K/V projections
    flops = q_len * q.shape[2] * embed_dim \
        + k_len * k.shape[2] * kdim \
        + v_len * v.shape[2] * vdim
    if multihead_attention_module.in_proj_bias is not None:
        flops += (q_len + k_len + v_len) * embed_dim
    # per head: scaling, QK^T, softmax, attention-weighted sum
    head_dim = embed_dim // num_heads
    per_head = q_len * head_dim \
        + head_dim * q_len * k_len \
        + q_len * k_len \
        + q_len * k_len * head_dim
    flops += num_heads * per_head
    # final projection, bias is always enabled
    flops += q_len * embed_dim * (embed_dim + 1)
    multihead_attention_module.__flops__ += int(flops * batch_size)
# For bert roberta elasticbert attn flops
def bert_self_attention_counter_hook(bert_self_attention_module, input, output):
    """Forward hook for BERT/RoBERTa/ElasticBERT self-attention FLOPs.

    Counts the Q/K/V projections (bias not considered) plus the per-head
    scale/QK^T/softmax/context chain.  A module whose heads were all pruned
    (num_attention_heads == 0) contributes nothing.
    """
    hidden_states = input[0]
    batch_size, seq_len, hidden_size = hidden_states.shape
    num_heads = bert_self_attention_module.num_attention_heads
    if num_heads == 0:
        # fully pruned attention layer: no computation to count
        return
    proj_dim = bert_self_attention_module.all_head_size
    # Q, K and V input projections, biases not considered
    flops = seq_len * hidden_size * proj_dim * 3
    head_dim = bert_self_attention_module.attention_head_size
    per_head = seq_len * seq_len \
        + head_dim * seq_len * seq_len \
        + seq_len * seq_len \
        + seq_len * seq_len * head_dim
    flops += num_heads * per_head
    bert_self_attention_module.__flops__ += int(flops * batch_size)
def albert_attention_counter_hook(albert_attention_module, input, output):
    """Forward hook for ALBERT attention FLOPs.

    Counts the Q/K/V projections (bias not considered), the per-head
    attention chain, the output dense projection, and the trailing LayerNorm
    (scale + shift).
    """
    hidden_states = input[0]
    batch_size, seq_len, hidden_size = hidden_states.shape
    num_heads = albert_attention_module.num_attention_heads
    proj_dim = albert_attention_module.all_head_size
    flops = seq_len * hidden_size * proj_dim * 3  # Q/K/V projections, no bias
    head_dim = albert_attention_module.attention_head_size
    per_head = seq_len * seq_len \
        + head_dim * seq_len * seq_len \
        + seq_len * seq_len \
        + seq_len * seq_len * head_dim
    flops += num_heads * per_head
    # output dense projection followed by LayerNorm
    flops += seq_len * hidden_size * hidden_size
    flops += seq_len * hidden_size * 2
    albert_attention_module.__flops__ += int(flops * batch_size)
def embedding_counter_hook(embedding_module, input, output):
    """Forward hook for nn.Embedding: a lookup costs no FLOPs here, so only
    the table size (rows * dim) is accumulated into the parameter counter."""
    table_size = embedding_module.num_embeddings * embedding_module.embedding_dim
    embedding_module.__params__ += table_size
def add_batch_counter_variables_or_reset(module):
    """Create (or zero) the sample counter used by batch_counter_hook."""
    module.__batch_counter__ = 0
def add_batch_counter_hook_function(module):
    """Install batch_counter_hook on ``module`` exactly once, keeping the
    returned handle so it can be detached later."""
    if hasattr(module, '__batch_counter_handle__'):
        return
    module.__batch_counter_handle__ = module.register_forward_hook(batch_counter_hook)
def remove_batch_counter_hook_function(module):
    """Undo add_batch_counter_hook_function: remove the hook, drop the handle."""
    if hasattr(module, '__batch_counter_handle__'):
        handle = module.__batch_counter_handle__
        handle.remove()
        del module.__batch_counter_handle__
def add_flops_counter_variable_or_reset(module):
    """Create (or zero) the ``__flops__``/``__params__`` accumulators on
    modules for which a counting hook exists."""
    if is_supported_instance(module):
        # if hasattr(module, '__flops__') or hasattr(module, '__params__'):
        #     print('Warning: variables __flops__ or __params__ are already '
        #           'defined for the module' + type(module).__name__ +
        #           ' ptflops can affect your code!')
        module.__flops__ = 0
        module.__params__ = get_model_parameters_number(module)
# Name-keyed hooks for project modules matched by class name rather than type.
CUSTOM_MODULES_MAPPING = {ElasticBertSelfAttention.__name__: bert_self_attention_counter_hook}
# Type-keyed dispatch table: module class -> FLOPs-counting forward hook.
MODULES_MAPPING = {
    # convolutions
    nn.Conv1d: conv_flops_counter_hook,
    nn.Conv2d: conv_flops_counter_hook,
    nn.Conv3d: conv_flops_counter_hook,
    # activations
    nn.ReLU: relu_flops_counter_hook,
    nn.PReLU: relu_flops_counter_hook,
    nn.ELU: relu_flops_counter_hook,
    nn.LeakyReLU: relu_flops_counter_hook,
    nn.ReLU6: relu_flops_counter_hook,
    # poolings
    nn.MaxPool1d: pool_flops_counter_hook,
    nn.AvgPool1d: pool_flops_counter_hook,
    nn.AvgPool2d: pool_flops_counter_hook,
    nn.MaxPool2d: pool_flops_counter_hook,
    nn.MaxPool3d: pool_flops_counter_hook,
    nn.AvgPool3d: pool_flops_counter_hook,
    nn.AdaptiveMaxPool1d: pool_flops_counter_hook,
    nn.AdaptiveAvgPool1d: pool_flops_counter_hook,
    nn.AdaptiveMaxPool2d: pool_flops_counter_hook,
    nn.AdaptiveAvgPool2d: pool_flops_counter_hook,
    nn.AdaptiveMaxPool3d: pool_flops_counter_hook,
    nn.AdaptiveAvgPool3d: pool_flops_counter_hook,
    # BNs
    nn.BatchNorm1d: norm_flops_counter_hook,
    nn.BatchNorm2d: norm_flops_counter_hook,
    nn.BatchNorm3d: norm_flops_counter_hook,
    nn.InstanceNorm1d: norm_flops_counter_hook,
    nn.InstanceNorm2d: norm_flops_counter_hook,
    nn.InstanceNorm3d: norm_flops_counter_hook,
    nn.GroupNorm: norm_flops_counter_hook,
    # LNs
    nn.LayerNorm: norm_flops_counter_hook,
    # FC
    nn.Linear: linear_flops_counter_hook,
    # Upscale
    nn.Upsample: upsample_flops_counter_hook,
    # Deconvolution
    nn.ConvTranspose1d: conv_flops_counter_hook,
    nn.ConvTranspose2d: conv_flops_counter_hook,
    nn.ConvTranspose3d: conv_flops_counter_hook,
    # RNN
    nn.RNN: rnn_flops_counter_hook,
    nn.GRU: rnn_flops_counter_hook,
    nn.LSTM: rnn_flops_counter_hook,
    nn.RNNCell: rnn_cell_flops_counter_hook,
    nn.LSTMCell: rnn_cell_flops_counter_hook,
    nn.GRUCell: rnn_cell_flops_counter_hook,
    # Transformer
    nn.Embedding: embedding_counter_hook,
    nn.MultiheadAttention: multihead_attention_counter_hook,
    BertSelfAttention: bert_self_attention_counter_hook,
    AlbertAttention: albert_attention_counter_hook,
    RobertaSelfAttention: bert_self_attention_counter_hook,
}
def is_supported_instance(module):
    """Return True when a counting hook exists for this module, matched
    either by exact type or by class name."""
    return (type(module) in MODULES_MAPPING
            or module.__class__.__name__ in CUSTOM_MODULES_MAPPING)
def remove_flops_counter_hook_function(module):
    """Detach the FLOPs hook installed by add_flops_counter_hook_function,
    if the module has one."""
    if is_supported_instance(module) and hasattr(module, '__flops_handle__'):
        module.__flops_handle__.remove()
        del module.__flops_handle__
| 22,023 | 34.125997 | 94 | py |
ElasticBERT | ElasticBERT-main/finetune-dynamic/run_elue_entropy.py | import argparse
import csv
import glob
import json
import logging
import os
import random
import time
import sys
sys.path.append('../')
from arguments import get_args
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import fitlog
import transformers
from transformers import WEIGHTS_NAME
from transformers import BertTokenizer as ElasticBertTokenizer
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers.trainer_utils import is_main_process
from models.configuration_elasticbert import ElasticBertConfig
from models.modeling_elasticbert_entropy import ElasticBertForSequenceClassification
from evaluations import evaluate_elue_entropy
from inferences import inference_elue_entropy
from load_data import load_and_cache_examples_elue
from elue import elue_output_modes, elue_processors
logger = logging.getLogger(__name__)
def get_metric_key(task_name):
    """Return the name of the headline metric for a supported task.

    Replaces a five-way duplicated if/elif chain with a lookup table; the
    four accuracy tasks previously had identical branches.

    Args:
        task_name: lower-cased task identifier (e.g. "sst-2", "sts-b").

    Returns:
        The key under which the main metric is reported by the evaluator.

    Raises:
        KeyError: if ``task_name`` is not a known task (same contract as the
            original explicit ``raise KeyError(task_name)``).
    """
    metric_keys = {
        "sst-2": "acc",
        "mrpc": "acc_and_f1",
        "sts-b": "corr",
        "imdb": "acc",
        "snli": "acc",
        "scitail": "acc",
    }
    # dict indexing raises KeyError(task_name) for unknown tasks,
    # matching the original behavior exactly
    return metric_keys[task_name]
def set_seed(args):
    """Seed python, numpy and torch RNGs (all CUDA devices too) from args.seed."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def train(args, train_dataset, model, tokenizer):
    """Fine-tune ``model`` on ``train_dataset`` with periodic early-exit eval.

    Builds the dataloader, AdamW optimizer and linear-warmup schedule,
    optionally resumes optimizer/scheduler state and step counters from a
    checkpoint, then runs the epoch/step loop with gradient accumulation,
    optional fp16 (apex) and multi-gpu/distributed support.  Periodically
    logs to fitlog, evaluates all exit layers, and saves checkpoints.

    Returns:
        (global_step, average training loss, best average metric over exits).
    """
    if args.debug:
        fitlog.debug()
    if args.local_rank in [-1, 0]:
        fitlog.set_log_dir(args.log_dir)
        fitlog.add_hyper(args)
    if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(args.output_dir)
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    if args.max_steps > 0:
        # max_steps takes priority over num_train_epochs; derive epochs from it
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    if args.warmup_steps > 0:
        num_warmup_steps = args.warmup_steps
    else:
        # warmup_rate is interpreted as a fraction of the total steps
        assert args.warmup_rate != 0.0
        num_warmup_steps = args.warmup_rate * t_total
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total
    )
    # Check if saved optimizer or scheduler states exist
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True,
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if args.load is not None:
        if os.path.exists(args.load):
            # set global_step to gobal_step of last saved checkpoint from model path
            global_step = int(args.load.split("-")[-1].split("/")[0])
            epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
            steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
            logger.info(" Continuing training from checkpoint, will skip to saved global_step")
            logger.info(" Continuing training from epoch %d", epochs_trained)
            logger.info(" Continuing training from global step %d", global_step)
            logger.info(
                " Will skip the first %d steps in the first epoch",
                steps_trained_in_current_epoch,
            )
    tr_loss, logging_loss, best = 0.0, 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained,
        int(args.num_train_epochs),
        desc="Epoch",
        disable=args.local_rank not in [-1, 0],
    )
    set_seed(args)  # Added here for reproductibility
    metric_key = get_metric_key(args.task_name)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "labels": batch[3],
            }
            inputs["token_type_ids"] = batch[2]
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    logs = {}
                    if (
                        args.local_rank == -1 and args.evaluate_during_training
                    ):  # Only evaluate when single GPU otherwise metrics may not average well
                        # One result dict per exit layer; track their average
                        results_all = evaluate_elue_entropy(args, model, tokenizer)
                        evg_metric_all = 0
                        for i, results in enumerate(results_all):
                            res_for_display = {}
                            for k, v in results.items():
                                res_for_display[k.replace("-", "_")] = v
                            evg_metric_all += results_all[i][metric_key]
                            fitlog.add_metric({"exit_"+str(i): res_for_display}, step=global_step)
                        evg_metric_all /= args.num_output_layers
                        if evg_metric_all > best:
                            best = evg_metric_all
                            for i, results in enumerate(results_all):
                                fitlog.add_best_metric({"dev": {metric_key.replace("-", "_")+"_exit_"+str(i): results[metric_key]}})
                        eval_key = "eval_{}".format("avg"+metric_key)
                        logs[eval_key] = evg_metric_all
                    loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                    learning_rate_scalar = scheduler.get_lr()[0]
                    logs["learning_rate"] = learning_rate_scalar
                    logs["loss"] = loss_scalar
                    logging_loss = tr_loss
                    fitlog.add_loss(loss_scalar, name="Loss", step=global_step)
                    print(json.dumps({**logs, **{"step": global_step}}))
                if args.local_rank in [-1,
                                       0] and args.save_steps > 0 and global_step % args.save_steps == 0 and not args.not_save_model:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0] and not args.not_save_model and global_step % args.save_steps != 0:
        # Save model checkpoint
        output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        model_to_save = (
            model.module if hasattr(model, "module") else model
        )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(output_dir)
        tokenizer.save_pretrained(output_dir)
        torch.save(args, os.path.join(output_dir, "training_args.bin"))
        logger.info("Saving model checkpoint to %s", output_dir)
        torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
        torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
        logger.info("Saving optimizer and scheduler states to %s", output_dir)
    if args.local_rank in [-1, 0]:
        # tb_writer.close()
        fitlog.finish()
    return global_step, tr_loss / global_step, best
def main():
    """Entry point: parse args, set up devices/logging/seed, build the
    ElasticBERT model + tokenizer, optionally train, then evaluate with a
    sweep over early-exit entropy thresholds.

    Returns:
        dict of evaluation results keyed by metric name (suffixed with the
        checkpoint's global step).
    """
    args = get_args()
    if args.do_train:
        args.log_dir = os.path.join(args.log_dir, args.task_name, "train")
    else:
        args.log_dir = os.path.join(args.log_dir, args.task_name, "eval")
    if not os.path.exists(args.log_dir):
        try:
            os.makedirs(args.log_dir)
        except:
            # best-effort: another process may create the dir concurrently
            pass
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    # Set seed
    set_seed(args)
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in elue_processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = elue_processors[args.task_name]()
    args.output_mode = elue_output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)
    # if args.per_gpu_eval_batch_size != 1:
    #     raise ValueError("The eval batch size must be 1 with Dynamic inference.")
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    config = ElasticBertConfig.from_pretrained(
        args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        num_hidden_layers=args.num_hidden_layers,
        num_output_layers=args.num_output_layers,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = ElasticBertTokenizer.from_pretrained(
        args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = ElasticBertForSequenceClassification.from_pretrained(
        args.model_name_or_path,
        config=config,
        add_pooling_layer=True,
    )
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    print("Total Model Parameters:", sum(param.numel() for param in model.parameters()))
    logger.info("Training/evaluation parameters %s", args)
    train_dataset = None
    if args.do_train:
        train_dataset = load_and_cache_examples_elue(args, args.task_name, tokenizer, data_type='train')
        global_step, tr_loss, _ = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        metric_key = get_metric_key(args.task_name)
        # one evaluation pass per early-exit entropy threshold
        entropy_list = [float(x) for x in args.early_exit_entropy.split(',')]
        tokenizer = ElasticBertTokenizer.from_pretrained(args.model_name_or_path, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for entropy in entropy_list:
            if args.debug:
                fitlog.debug()
            if args.local_rank in [-1, 0]:
                fitlog.set_log_dir(args.log_dir, new_log=True)
                fitlog.add_hyper(args)
                fitlog.add_hyper(value=entropy, name='entropy')
            best = 0.
            for checkpoint in checkpoints:
                global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
                prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
                model = ElasticBertForSequenceClassification.from_pretrained(checkpoint)
                model.to(args.device)
                print(f'Evaluation for checkpoint {prefix}')
                result, speed_up, exit_layer_every_ins = evaluate_elue_entropy(args, model, tokenizer, prefix=prefix, eval_highway=True, entropy=entropy)
                res_for_display = {}
                for k, v in result.items():
                    res_for_display[k.replace("-", "_")] = v
                fitlog.add_metric({"dev": res_for_display, "speed_up": speed_up}, step=int(global_step))
                if result[metric_key] > best:
                    best = result[metric_key]
                    fitlog.add_best_metric({"dev": {metric_key.replace("-", "_"): result[metric_key], "speed_up": speed_up}})
                # dump the per-instance exit layer choices for this entropy
                output_eval_file = os.path.join(args.output_dir, "{}_{}_{}_{}.tsv".format(args.task_name, 'eval', 'exit_layer', str(entropy)))
                with open(output_eval_file, "w", encoding='utf-8') as fout:
                    writer = csv.writer(fout, delimiter='\t', quotechar=None)
                    writer.writerow(["index", "exit_layer"])
                    for i, exit_layer in enumerate(exit_layer_every_ins[0]):
                        writer.writerow([i, exit_layer])
                infer_speed_up = inference_elue_entropy(args, model, tokenizer, prefix=prefix, eval_highway=True, entropy=entropy)
                fitlog.add_metric({"test_speed_up": infer_speed_up}, step=int(global_step))
                fitlog.add_best_metric({"test": {"speed_up": infer_speed_up}})
                result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
                results.update(result)
            if args.local_rank in [-1, 0]:
                fitlog.finish()
    return results
if __name__ == "__main__":
    # Script entry point; main() returns the evaluation results dict.
    best = main()
| 20,558 | 39.954183 | 153 | py |
ElasticBERT | ElasticBERT-main/finetune-dynamic/evaluations.py | import logging
import os
import csv
import sys
sys.path.append('../')
import numpy as np
import torch
from torch.utils.data import DataLoader, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from transformers import glue_compute_metrics
from elue import elue_compute_metrics
from load_data import (
load_and_cache_examples_glue,
load_and_cache_examples_elue,
)
logger = logging.getLogger(__name__)
def evaluate_glue_patience(args, model, tokenizer, prefix="", patience=0):
    """Evaluate on GLUE dev set(s) with patience-based early exiting.

    With ``patience == 0`` the model returns logits for every exit layer and
    a list of per-layer metric dicts is returned.  With ``patience > 0`` the
    model exits early and ``(results, speed_up, exit_layer)`` is returned,
    where ``exit_layer`` records the exit layer chosen for each instance.
    """
    model.elasticbert.set_regression_threshold(args.regression_threshold)
    model.elasticbert.set_patience(patience)
    model.elasticbert.reset_stats()
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
    results = {}
    results_all = []
    exit_layer = []
    for i in range(args.num_hidden_layers):
        results_all.append({})
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples_glue(args, eval_task, tokenizer, data_type='dev')
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info(" Num examples = %d", len(eval_dataset))
        logger.info(" Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        # with patience disabled, one prediction buffer per exit layer
        preds_all = []
        for i in range(args.num_hidden_layers):
            preds_all.append(None)
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {
                    "input_ids": batch[0],
                    "attention_mask": batch[1],
                    "labels": batch[3],
                }
                inputs["token_type_ids"] = batch[2]
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if out_label_ids is None:
                out_label_ids = inputs["labels"].detach().cpu().numpy()
            else:
                out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
            if patience == 0:
                # logits is a sequence with one entry per exit layer
                for i, pred in enumerate(preds_all):
                    if pred is None:
                        preds_all[i] = logits[i].detach().cpu().numpy()
                    else:
                        preds_all[i] = np.append(pred, logits[i].detach().cpu().numpy(), axis=0)
            else:
                if preds is None:
                    preds = logits.detach().cpu().numpy()
                else:
                    preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
        eval_loss = eval_loss / nb_eval_steps
        if args.output_mode == "classification":
            if patience == 0:
                for i, pred in enumerate(preds_all):
                    preds_all[i] = np.argmax(pred, axis=1)
            else:
                preds = np.argmax(preds, axis=1)
        elif args.output_mode == "regression":
            if patience == 0:
                for i, pred in enumerate(preds_all):
                    preds_all[i] = np.squeeze(pred)
            else:
                preds = np.squeeze(preds)
        if patience == 0:
            for i, pred in enumerate(preds_all):
                result = glue_compute_metrics(eval_task, pred, out_label_ids)
                results_all[i].update(result)
        else:
            result = glue_compute_metrics(eval_task, preds, out_label_ids)
            results.update(result)
            logger.info("***** Eval results {} *****".format(prefix))
            for key in sorted(result.keys()):
                logger.info(" %s = %s", key, str(result[key]))
                print(" %s = %s" % (key, str(result[key])))
        exiting_layer_every_ins = model.elasticbert.exiting_layer_every_ins
        exit_layer.append(exiting_layer_every_ins)
        if args.task_name == "mnli":
            # reset so the mnli-mm pass records its own exit layers
            model.elasticbert.exiting_layer_every_ins = []
    if patience != 0:
        speed_up = model.elasticbert.log_stats()
        return results, speed_up, exit_layer
    return results_all
def evaluate_elue_patience(args, model, tokenizer, prefix="", patience=0):
    """Evaluate a patience-based early-exit ElasticBERT classifier on the ELUE dev set.

    Args:
        args: parsed command-line namespace (task_name, output_dir, device, ...).
        model: sequence-classification model exposing an ``elasticbert`` backbone
            with patience / early-exit controls.
        tokenizer: passed through to dataset loading/caching only.
        prefix: label inserted into log messages.
        patience: PABEE-style patience; 0 disables early exit and evaluates
            every internal exit classifier instead.

    Returns:
        If ``patience == 0``: a list with one metrics dict per hidden layer.
        If ``patience != 0``: a tuple ``(results, speed_up, exit_layer)``.
    """
    # Configure the backbone's exit behaviour and clear its counters before this run.
    model.elasticbert.set_regression_threshold(args.regression_threshold)
    model.elasticbert.set_patience(patience)
    model.elasticbert.reset_stats()
    eval_task = args.task_name
    eval_output_dir = args.output_dir
    results = {}
    # One metrics dict per exit layer; only filled when patience == 0.
    results_all = []
    exit_layer = []
    for i in range(args.num_hidden_layers):
        results_all.append({})
    eval_dataset = load_and_cache_examples_elue(args, eval_task, tokenizer, data_type='dev')
    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # multi-gpu eval
    if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
        model = torch.nn.DataParallel(model)
    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    # Per-exit-layer logit accumulators (patience == 0 path only).
    preds_all = []
    for i in range(args.num_hidden_layers):
        preds_all.append(None)
    out_label_ids = None
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            # Cached ELUE dev tensors: input_ids, attention_mask, token_type_ids, labels.
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "labels": batch[3],
            }
            inputs["token_type_ids"] = batch[2]
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]
            eval_loss += tmp_eval_loss.mean().item()
        nb_eval_steps += 1
        if out_label_ids is None:
            out_label_ids = inputs["labels"].detach().cpu().numpy()
        else:
            out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        if patience == 0:
            # No early exit: `logits` is one tensor per exit layer; accumulate each layer separately.
            for i, pred in enumerate(preds_all):
                if pred is None:
                    preds_all[i] = logits[i].detach().cpu().numpy()
                else:
                    preds_all[i] = np.append(pred, logits[i].detach().cpu().numpy(), axis=0)
        else:
            # Early exit active: `logits` is a single prediction tensor for the batch.
            if preds is None:
                preds = logits.detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
    eval_loss = eval_loss / nb_eval_steps
    if args.output_mode == "classification":
        if patience == 0:
            for i, pred in enumerate(preds_all):
                preds_all[i] = np.argmax(pred, axis=1)
        else:
            preds = np.argmax(preds, axis=1)
    elif args.output_mode == "regression":
        if patience == 0:
            for i, pred in enumerate(preds_all):
                preds_all[i] = np.squeeze(pred)
        else:
            preds = np.squeeze(preds)
    if patience == 0:
        for i, pred in enumerate(preds_all):
            result = elue_compute_metrics(eval_task, pred, out_label_ids)
            results_all[i].update(result)
    else:
        result = elue_compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)
    logger.info("***** Eval results {} *****".format(prefix))
    # NOTE: `result` is whatever the branch above last assigned — with patience == 0
    # this logs only the metrics of the FINAL exit layer.
    for key in sorted(result.keys()):
        logger.info(" %s = %s", key, str(result[key]))
        print(" %s = %s" % (key, str(result[key])))
    # Per-instance exit layers recorded by the backbone during this run.
    exiting_layer_every_ins = model.elasticbert.exiting_layer_every_ins
    exit_layer.append(exiting_layer_every_ins)
    if patience != 0:
        speed_up = model.elasticbert.log_stats()
        return results, speed_up, exit_layer
    return results_all
def evaluate_glue_entropy(args, model, tokenizer, prefix="", eval_highway=False, entropy=0.):
    """Evaluate an entropy-based early-exit ElasticBERT classifier on the GLUE dev set.

    Args:
        args: parsed command-line namespace (task_name, output_dir, device, ...).
        model: sequence-classification model exposing an ``elasticbert`` backbone
            with entropy-threshold early-exit controls.
        tokenizer: passed through to dataset loading/caching only.
        prefix: label inserted into log messages.
        eval_highway: when True, exit early at internal ("highway") classifiers;
            when False, collect logits from every exit layer.
        entropy: entropy threshold below which an internal classifier may exit.

    Returns:
        If ``eval_highway`` is False: a list with one metrics dict per hidden layer.
        If ``eval_highway`` is True: a tuple ``(results, speed_up, exit_layer)``.
    """
    # Configure the backbone's exit behaviour and clear its counters before this run.
    model.elasticbert.set_early_exit_entropy(entropy)
    model.elasticbert.set_eval_state(eval_highway)
    model.elasticbert.reset_stats()
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
    results = {}
    # One metrics dict per exit layer; only filled when eval_highway is False.
    results_all = []
    exit_layer = []
    for i in range(args.num_hidden_layers):
        results_all.append({})
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples_glue(args, eval_task, tokenizer, data_type='dev')
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info(" Num examples = %d", len(eval_dataset))
        logger.info(" Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        # Per-exit-layer logit accumulators (eval_highway == False path only).
        preds_all = []
        for i in range(args.num_hidden_layers):
            preds_all.append(None)
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                # GLUE caches put labels in the last slot (batch[-1]).
                inputs = {
                    "input_ids": batch[0],
                    "attention_mask": batch[1],
                    "labels": batch[-1],
                }
                inputs["token_type_ids"] = batch[2]
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if out_label_ids is None:
                out_label_ids = inputs["labels"].detach().cpu().numpy()
            else:
                out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
            if not eval_highway:
                # No early exit: `logits` is one tensor per exit layer; accumulate each separately.
                for i, pred in enumerate(preds_all):
                    if pred is None:
                        preds_all[i] = logits[i].detach().cpu().numpy()
                    else:
                        preds_all[i] = np.append(pred, logits[i].detach().cpu().numpy(), axis=0)
            else:
                # Early exit active: `logits` is a single prediction tensor for the batch.
                if preds is None:
                    preds = logits.detach().cpu().numpy()
                else:
                    preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
        eval_loss = eval_loss / nb_eval_steps
        if args.output_mode == "classification":
            if not eval_highway:
                for i, pred in enumerate(preds_all):
                    preds_all[i] = np.argmax(pred, axis=1)
            else:
                preds = np.argmax(preds, axis=1)
        elif args.output_mode == "regression":
            if not eval_highway:
                for i, pred in enumerate(preds_all):
                    preds_all[i] = np.squeeze(pred)
            else:
                preds = np.squeeze(preds)
        if not eval_highway:
            for i, pred in enumerate(preds_all):
                result = glue_compute_metrics(eval_task, pred, out_label_ids)
                results_all[i].update(result)
        else:
            result = glue_compute_metrics(eval_task, preds, out_label_ids)
            results.update(result)
        logger.info("***** Eval results {} *****".format(prefix))
        # NOTE: `result` is whatever the branch above last assigned — with
        # eval_highway == False this logs only the FINAL exit layer's metrics.
        for key in sorted(result.keys()):
            logger.info(" %s = %s", key, str(result[key]))
            print(" %s = %s" % (key, str(result[key])))
        # Per-instance exit layers recorded by the backbone during this run.
        exiting_layer_every_ins = model.elasticbert.exiting_layer_every_ins
        exit_layer.append(exiting_layer_every_ins)
        if args.task_name == "mnli":
            # Reset between MNLI matched / mismatched passes so counts don't mix.
            model.elasticbert.exiting_layer_every_ins = []
    if eval_highway:
        speed_up = model.elasticbert.log_stats()
        return results, speed_up, exit_layer
    return results_all
def evaluate_elue_entropy(args, model, tokenizer, prefix="", eval_highway=False, entropy=0.):
    """Evaluate an entropy-based early-exit ElasticBERT classifier on the ELUE dev set.

    Args:
        args: parsed command-line namespace (task_name, output_dir, device, ...).
        model: sequence-classification model exposing an ``elasticbert`` backbone
            with entropy-threshold early-exit controls.
        tokenizer: passed through to dataset loading/caching only.
        prefix: label inserted into log messages.
        eval_highway: when True, exit early at internal ("highway") classifiers;
            when False, collect logits from every exit layer.
        entropy: entropy threshold below which an internal classifier may exit.

    Returns:
        If ``eval_highway`` is False: a list with one metrics dict per hidden layer.
        If ``eval_highway`` is True: a tuple ``(results, speed_up, exit_layer)``.
    """
    # Configure the backbone's exit behaviour and clear its counters before this run.
    model.elasticbert.set_early_exit_entropy(entropy)
    model.elasticbert.set_eval_state(eval_highway)
    model.elasticbert.reset_stats()
    eval_task = args.task_name
    eval_output_dir = args.output_dir
    results = {}
    # One metrics dict per exit layer; only filled when eval_highway is False.
    results_all = []
    exit_layer = []
    for i in range(args.num_hidden_layers):
        results_all.append({})
    eval_dataset = load_and_cache_examples_elue(args, eval_task, tokenizer, data_type='dev')
    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # multi-gpu eval
    if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
        model = torch.nn.DataParallel(model)
    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    # Per-exit-layer logit accumulators (eval_highway == False path only).
    preds_all = []
    for i in range(args.num_hidden_layers):
        preds_all.append(None)
    out_label_ids = None
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "labels": batch[-1],
            }
            inputs["token_type_ids"] = batch[2]
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]
            eval_loss += tmp_eval_loss.mean().item()
        nb_eval_steps += 1
        if out_label_ids is None:
            out_label_ids = inputs["labels"].detach().cpu().numpy()
        else:
            out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        if not eval_highway:
            # No early exit: `logits` is one tensor per exit layer; accumulate each separately.
            for i, pred in enumerate(preds_all):
                if pred is None:
                    preds_all[i] = logits[i].detach().cpu().numpy()
                else:
                    preds_all[i] = np.append(pred, logits[i].detach().cpu().numpy(), axis=0)
        else:
            # Early exit active: `logits` is a single prediction tensor for the batch.
            if preds is None:
                preds = logits.detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
    eval_loss = eval_loss / nb_eval_steps
    if args.output_mode == "classification":
        if not eval_highway:
            for i, pred in enumerate(preds_all):
                preds_all[i] = np.argmax(pred, axis=1)
        else:
            preds = np.argmax(preds, axis=1)
    elif args.output_mode == "regression":
        if not eval_highway:
            for i, pred in enumerate(preds_all):
                preds_all[i] = np.squeeze(pred)
        else:
            preds = np.squeeze(preds)
    if not eval_highway:
        for i, pred in enumerate(preds_all):
            result = elue_compute_metrics(eval_task, pred, out_label_ids)
            results_all[i].update(result)
    else:
        result = elue_compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)
    logger.info("***** Eval results {} *****".format(prefix))
    # NOTE: `result` is whatever the branch above last assigned — with
    # eval_highway == False this logs only the FINAL exit layer's metrics.
    for key in sorted(result.keys()):
        logger.info(" %s = %s", key, str(result[key]))
        print(" %s = %s" % (key, str(result[key])))
    # Per-instance exit layers recorded by the backbone during this run.
    exiting_layer_every_ins = model.elasticbert.exiting_layer_every_ins
    exit_layer.append(exiting_layer_every_ins)
    if eval_highway:
        speed_up = model.elasticbert.log_stats()
        return results, speed_up, exit_layer
    return results_all
import os
import csv
import sys
import logging
sys.path.append('../')
import numpy as np
import torch
from torch.utils.data import DataLoader, SequentialSampler
from tqdm import tqdm
from transformers import glue_compute_metrics
from transformers import glue_processors
from elue import elue_compute_metrics, elue_processors
from load_data import (
load_and_cache_examples_glue,
load_and_cache_examples_elue,
)
logger = logging.getLogger(__name__)
def inference_elue_patience(args, model, tokenizer, prefix="", patience=0):
    """Run patience-based early-exit inference on the ELUE test split.

    Writes two TSV files into ``args.output_dir``: one mapping instance index
    to prediction, one mapping instance index to the exit layer chosen for it.

    Args:
        args: parsed command-line namespace (task_name, output_dir, device, ...).
        model: sequence-classification model exposing an ``elasticbert`` backbone.
        tokenizer: passed through to dataset loading/caching only.
        prefix: label inserted into log messages.
        patience: PABEE-style patience for early exit.

    Returns:
        The speed-up value reported by ``model.elasticbert.log_stats()``.
    """
    # Configure the backbone's exit behaviour and clear its counters before this run.
    model.elasticbert.set_regression_threshold(args.regression_threshold)
    model.elasticbert.set_patience(patience)
    model.elasticbert.reset_stats()
    eval_task = args.task_name
    eval_output_dir = args.output_dir
    processor = elue_processors[eval_task]()
    label_list = processor.get_labels()
    eval_dataset = load_and_cache_examples_elue(args, eval_task, tokenizer, data_type='test')
    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # multi-gpu eval
    if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
        model = torch.nn.DataParallel(model)
    # Eval!
    logger.info("***** Running inference {} *****".format(prefix))
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    preds = None
    for batch in tqdm(eval_dataloader, desc="Infering"):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            # Test split has no labels: only input_ids, attention_mask, token_type_ids.
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
            }
            inputs["token_type_ids"] = batch[2]
            outputs = model(**inputs)
            logits = outputs[0]
            if preds is None:
                preds = logits.detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
    if args.output_mode == "classification":
        preds = np.argmax(preds, axis=1)
    elif args.output_mode == "regression":
        preds = np.squeeze(preds)
    # Predictions file: "<task>_<patience>.tsv" with (index, prediction) rows.
    output_infer_file = os.path.join(eval_output_dir, "{}_{}.tsv".format(eval_task, str(patience)))
    with open(output_infer_file, "w", encoding='utf-8') as fout:
        writer = csv.writer(fout, delimiter='\t', quotechar=None)
        writer.writerow(["index", "prediction"])
        for i, pred in enumerate(preds):
            if args.output_mode == "classification":
                prediction = label_list[pred]
            elif args.output_mode == "regression":
                prediction = str(pred)
            writer.writerow([i, prediction])
    # Exit-layer file: which layer each instance exited at during inference.
    exiting_layer_every_ins = model.elasticbert.exiting_layer_every_ins
    output_infer_file = os.path.join(eval_output_dir, "{}_{}_{}_{}.tsv".format(eval_task, 'infer', 'exit_layer', str(patience)))
    with open(output_infer_file, "w", encoding='utf-8') as fout:
        writer = csv.writer(fout, delimiter='\t', quotechar=None)
        writer.writerow(["index", "exit_layer"])
        for i, exit_layer in enumerate(exiting_layer_every_ins):
            writer.writerow([i, exit_layer])
    speed_up = model.elasticbert.log_stats()
    return speed_up
def inference_elue_entropy(args, model, tokenizer, prefix="", eval_highway=False, entropy=0.):
    """Run entropy-based early-exit inference on the ELUE test split.

    Configures the ElasticBERT backbone with the given entropy threshold,
    predicts over the test set, and writes two TSV files into
    ``args.output_dir``: (index, prediction) rows and (index, exit_layer)
    rows.  Returns the speed-up reported by ``log_stats()``.
    """
    # Arm the backbone's entropy-based exit and reset its counters.
    model.elasticbert.set_early_exit_entropy(entropy)
    model.elasticbert.set_eval_state(eval_highway)
    model.elasticbert.reset_stats()

    task = args.task_name
    out_dir = args.output_dir
    dataset = load_and_cache_examples_elue(args, task, tokenizer, data_type='test')
    if args.local_rank in [-1, 0] and not os.path.exists(out_dir):
        os.makedirs(out_dir)
    label_list = elue_processors[task]().get_labels()

    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    loader = DataLoader(dataset, sampler=SequentialSampler(dataset), batch_size=args.eval_batch_size)
    if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
        model = torch.nn.DataParallel(model)

    logger.info("***** Running inference {} *****".format(prefix))
    logger.info(" Num examples = %d", len(dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)

    preds = None
    for batch in tqdm(loader, desc="Infering"):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            # Test split has no labels; feed only the three input tensors.
            model_inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "token_type_ids": batch[2],
            }
            batch_logits = model(**model_inputs)[0]
            chunk = batch_logits.detach().cpu().numpy()
            preds = chunk if preds is None else np.append(preds, chunk, axis=0)

    if args.output_mode == "classification":
        preds = np.argmax(preds, axis=1)
    elif args.output_mode == "regression":
        preds = np.squeeze(preds)

    # Predictions file: "<task>_<entropy>.tsv" with (index, prediction) rows.
    pred_path = os.path.join(out_dir, "{}_{}.tsv".format(task, str(entropy)))
    with open(pred_path, "w", encoding='utf-8') as fout:
        writer = csv.writer(fout, delimiter='\t', quotechar=None)
        writer.writerow(["index", "prediction"])
        for idx, pred in enumerate(preds):
            if args.output_mode == "classification":
                prediction = label_list[pred]
            elif args.output_mode == "regression":
                prediction = str(pred)
            writer.writerow([idx, prediction])

    # Exit-layer file: which layer each instance exited at during inference.
    per_ins_layers = model.elasticbert.exiting_layer_every_ins
    layer_path = os.path.join(out_dir, "{}_{}_{}_{}.tsv".format(task, 'infer', 'exit_layer', str(entropy)))
    with open(layer_path, "w", encoding='utf-8') as fout:
        writer = csv.writer(fout, delimiter='\t', quotechar=None)
        writer.writerow(["index", "exit_layer"])
        for idx, layer in enumerate(per_ins_layers):
            writer.writerow([idx, layer])

    return model.elasticbert.log_stats()
def inference_glue_patience(args, model, tokenizer, prefix="", patience=0):
    """Run patience-based early-exit inference on the GLUE test split(s).

    For MNLI this runs twice (matched and mismatched).  For each task it
    writes two TSV files into the task's output dir: (index, prediction)
    rows and (index, exit_layer) rows.

    Args:
        args: parsed command-line namespace (task_name, output_dir, device, ...).
        model: sequence-classification model exposing an ``elasticbert`` backbone.
        tokenizer: passed through to dataset loading/caching only.
        prefix: label inserted into log messages.
        patience: PABEE-style patience for early exit.

    Returns:
        The speed-up value reported by ``model.elasticbert.log_stats()``.
    """
    # Configure the backbone's exit behaviour and clear its counters before this run.
    model.elasticbert.set_regression_threshold(args.regression_threshold)
    model.elasticbert.set_patience(patience)
    model.elasticbert.reset_stats()
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples_glue(args, eval_task, tokenizer, data_type='test')
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        processor = glue_processors[eval_task]()
        label_list = processor.get_labels()
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running inference {} *****".format(prefix))
        logger.info(" Num examples = %d", len(eval_dataset))
        logger.info(" Batch size = %d", args.eval_batch_size)
        preds = None
        for batch in tqdm(eval_dataloader, desc="Infering"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                # Test split has no labels: only input_ids, attention_mask, token_type_ids.
                inputs = {
                    "input_ids": batch[0],
                    "attention_mask": batch[1],
                }
                inputs["token_type_ids"] = batch[2]
                outputs = model(**inputs)
                logits = outputs[0]
                if preds is None:
                    preds = logits.detach().cpu().numpy()
                else:
                    preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
        if args.output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif args.output_mode == "regression":
            preds = np.squeeze(preds)
        # Predictions file: "<task>_<patience>.tsv" with (index, prediction) rows.
        output_infer_file = os.path.join(eval_output_dir, "{}_{}.tsv".format(eval_task, str(patience)))
        with open(output_infer_file, "w", encoding='utf-8') as fout:
            writer = csv.writer(fout, delimiter='\t', quotechar=None)
            writer.writerow(["index", "prediction"])
            for i, pred in enumerate(preds):
                if args.output_mode == "classification":
                    prediction = label_list[pred]
                elif args.output_mode == "regression":
                    prediction = str(pred)
                writer.writerow([i, prediction])
        # Exit-layer file: which layer each instance exited at during inference.
        exiting_layer_every_ins = model.elasticbert.exiting_layer_every_ins
        output_infer_file = os.path.join(eval_output_dir, "{}_{}_{}_{}.tsv".format(eval_task, 'infer', 'exit_layer', str(patience)))
        with open(output_infer_file, "w", encoding='utf-8') as fout:
            writer = csv.writer(fout, delimiter='\t', quotechar=None)
            writer.writerow(["index", "exit_layer"])
            for i, exit_layer in enumerate(exiting_layer_every_ins):
                writer.writerow([i, exit_layer])
        if args.task_name == 'mnli':
            # Reset between MNLI matched / mismatched passes so counts don't mix.
            model.elasticbert.exiting_layer_every_ins = []
    speed_up = model.elasticbert.log_stats()
    return speed_up
def inference_glue_entropy(args, model, tokenizer, prefix="", eval_highway=False, entropy=0.):
    """Run entropy-based early-exit inference on the GLUE test split(s).

    For MNLI this runs twice (matched and mismatched).  For each task it
    writes two TSV files into the task's output dir: (index, prediction)
    rows and (index, exit_layer) rows.

    Args:
        args: parsed command-line namespace (task_name, output_dir, device, ...).
        model: sequence-classification model exposing an ``elasticbert`` backbone.
        tokenizer: passed through to dataset loading/caching only.
        prefix: label inserted into log messages.
        eval_highway: when True, exit early at internal ("highway") classifiers.
        entropy: entropy threshold below which an internal classifier may exit.

    Returns:
        The speed-up value reported by ``model.elasticbert.log_stats()``.
    """
    # Configure the backbone's exit behaviour and clear its counters before this run.
    model.elasticbert.set_early_exit_entropy(entropy)
    model.elasticbert.set_eval_state(eval_highway)
    model.elasticbert.reset_stats()
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples_glue(args, eval_task, tokenizer, data_type='test')
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        processor = glue_processors[eval_task]()
        label_list = processor.get_labels()
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running inference {} *****".format(prefix))
        logger.info(" Num examples = %d", len(eval_dataset))
        logger.info(" Batch size = %d", args.eval_batch_size)
        preds = None
        for batch in tqdm(eval_dataloader, desc="Infering"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                # Test split has no labels: only input_ids, attention_mask, token_type_ids.
                inputs = {
                    "input_ids": batch[0],
                    "attention_mask": batch[1],
                }
                inputs["token_type_ids"] = batch[2]
                outputs = model(**inputs)
                logits = outputs[0]
                if preds is None:
                    preds = logits.detach().cpu().numpy()
                else:
                    preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
        if args.output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif args.output_mode == "regression":
            preds = np.squeeze(preds)
        # Predictions file: "<task>_<entropy>.tsv" with (index, prediction) rows.
        output_infer_file = os.path.join(eval_output_dir, "{}_{}.tsv".format(eval_task, str(entropy)))
        with open(output_infer_file, "w", encoding='utf-8') as fout:
            writer = csv.writer(fout, delimiter='\t', quotechar=None)
            writer.writerow(["index", "prediction"])
            for i, pred in enumerate(preds):
                if args.output_mode == "classification":
                    prediction = label_list[pred]
                elif args.output_mode == "regression":
                    prediction = str(pred)
                writer.writerow([i, prediction])
        # Exit-layer file: which layer each instance exited at during inference.
        exiting_layer_every_ins = model.elasticbert.exiting_layer_every_ins
        output_infer_file = os.path.join(eval_output_dir, "{}_{}_{}_{}.tsv".format(eval_task, 'infer', 'exit_layer', str(entropy)))
        with open(output_infer_file, "w", encoding='utf-8') as fout:
            writer = csv.writer(fout, delimiter='\t', quotechar=None)
            writer.writerow(["index", "exit_layer"])
            for i, exit_layer in enumerate(exiting_layer_every_ins):
                writer.writerow([i, exit_layer])
        if args.task_name == 'mnli':
            # Reset between MNLI matched / mismatched passes so counts don't mix.
            model.elasticbert.exiting_layer_every_ins = []
    speed_up = model.elasticbert.log_stats()
    return speed_up
import argparse
import csv
import glob
import json
import logging
import os
import random
import time
import sys
sys.path.append('../')
from arguments import get_args
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import fitlog
import transformers
from transformers import WEIGHTS_NAME
from transformers import BertTokenizer as ElasticBertTokenizer
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers.trainer_utils import is_main_process
from models.configuration_elasticbert import ElasticBertConfig
from models.modeling_elasticbert_patience import ElasticBertForSequenceClassification
from evaluations import evaluate_elue_patience
from inferences import inference_elue_patience
from load_data import load_and_cache_examples_elue
from elue import elue_output_modes, elue_processors
logger = logging.getLogger(__name__)
def get_metric_key(task_name):
    """Return the dev-set metric name used for model selection on *task_name*.

    Args:
        task_name: lower-cased task identifier (e.g. ``"sst-2"``, ``"snli"``).

    Returns:
        The key under which the primary metric appears in the metrics dict.

    Raises:
        KeyError: if *task_name* is not one of the supported tasks.
    """
    # Dispatch table instead of an if/elif ladder.  A missing task raises
    # KeyError(task_name), matching the original explicit `raise KeyError`.
    metric_keys = {
        "sst-2": "acc",
        "mrpc": "acc_and_f1",
        "sts-b": "corr",
        "imdb": "acc",
        "snli": "acc",
        "scitail": "acc",
    }
    return metric_keys[task_name]
def set_seed(args):
    """Seed Python, NumPy and PyTorch RNGs from ``args.seed`` for reproducibility.

    Also seeds every CUDA device when ``args.n_gpu > 0``.
    """
    seed_value = args.seed
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed_value)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed_value)
def train(args, train_dataset, model, tokenizer):
    """Fine-tune an early-exit ElasticBERT model on an ELUE task.

    Sets up the dataloader, AdamW optimizer and linear warmup schedule,
    optionally resumes from a checkpoint, trains with gradient accumulation
    (and optional apex fp16 / DataParallel / DDP), logs metrics to fitlog,
    periodically evaluates via ``evaluate_elue_patience`` and saves
    checkpoints.

    Returns:
        ``(global_step, average training loss, best average dev metric)``.
    """
    if args.debug:
        fitlog.debug()
    if args.local_rank in [-1, 0]:
        fitlog.set_log_dir(args.log_dir)
        fitlog.add_hyper(args)
    if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(args.output_dir)
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # max_steps > 0 overrides num_train_epochs; otherwise derive total steps from epochs.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    # Warmup: either an absolute step count or a fraction of total steps.
    if args.warmup_steps > 0:
        num_warmup_steps = args.warmup_steps
    else:
        assert args.warmup_rate != 0.0
        num_warmup_steps = args.warmup_rate * t_total
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total
    )
    # Check if saved optimizer or scheduler states exist
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True,
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if args.load is not None:
        if os.path.exists(args.load):
            # set global_step to global_step of last saved checkpoint from model path
            # (checkpoint dirs are named "checkpoint-<step>")
            global_step = int(args.load.split("-")[-1].split("/")[0])
            epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
            steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
            logger.info(" Continuing training from checkpoint, will skip to saved global_step")
            logger.info(" Continuing training from epoch %d", epochs_trained)
            logger.info(" Continuing training from global step %d", global_step)
            logger.info(
                " Will skip the first %d steps in the first epoch",
                steps_trained_in_current_epoch,
            )
    best_all_metric = {}  # NOTE(review): assigned but never used below
    tr_loss, logging_loss, best = 0.0, 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained,
        int(args.num_train_epochs),
        desc="Epoch",
        disable=args.local_rank not in [-1, 0],
    )
    set_seed(args)  # Added here for reproducibility
    metric_key = get_metric_key(args.task_name)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "labels": batch[3],
            }
            inputs["token_type_ids"] = batch[2]
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Only step the optimizer once every gradient_accumulation_steps micro-batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    logs = {}
                    if (
                        args.local_rank == -1 and args.evaluate_during_training
                    ):  # Only evaluate when single GPU otherwise metrics may not average well
                        # Per-exit-layer dev metrics; model selection uses the average over exits.
                        results_all = evaluate_elue_patience(args, model, tokenizer)
                        evg_metric_all = 0
                        for i, results in enumerate(results_all):
                            res_for_display = {}
                            for k, v in results.items():
                                res_for_display[k.replace("-", "_")] = v
                            evg_metric_all += results_all[i][metric_key]
                            fitlog.add_metric({"exit_"+str(i): res_for_display}, step=global_step)
                        evg_metric_all /= args.num_output_layers
                        if evg_metric_all > best:
                            best = evg_metric_all
                            for i, results in enumerate(results_all):
                                fitlog.add_best_metric({"dev": {metric_key.replace("-", "_")+"_exit_"+str(i): results[metric_key]}})
                        eval_key = "eval_{}".format("avg"+metric_key)
                        logs[eval_key] = evg_metric_all
                    loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                    learning_rate_scalar = scheduler.get_lr()[0]
                    logs["learning_rate"] = learning_rate_scalar
                    logs["loss"] = loss_scalar
                    logging_loss = tr_loss
                    fitlog.add_loss(loss_scalar, name="Loss", step=global_step)
                    print(json.dumps({**logs, **{"step": global_step}}))
                if args.local_rank in [-1,
                                       0] and args.save_steps > 0 and global_step % args.save_steps == 0 and not args.not_save_model:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    # Final save, unless a step-aligned checkpoint was just written above.
    if args.local_rank in [-1, 0] and not args.not_save_model and global_step % args.save_steps != 0:
        # Save model checkpoint
        output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        model_to_save = (
            model.module if hasattr(model, "module") else model
        )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(output_dir)
        tokenizer.save_pretrained(output_dir)
        torch.save(args, os.path.join(output_dir, "training_args.bin"))
        logger.info("Saving model checkpoint to %s", output_dir)
        torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
        torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
        logger.info("Saving optimizer and scheduler states to %s", output_dir)
    if args.local_rank in [-1, 0]:
        # tb_writer.close()
        fitlog.finish()
    return global_step, tr_loss / global_step, best
def main():
    """Fine-tune ElasticBERT on an ELUE task and/or evaluate saved
    checkpoints with patience-based early exit.

    Returns:
        dict: evaluation results keyed by ``<metric>_<global_step>``
        (empty when ``--do_eval`` is off or this is a non-main rank).
    """
    args = get_args()

    # Keep separate fitlog directories for training and evaluation runs.
    if args.do_train:
        args.log_dir = os.path.join(args.log_dir, args.task_name, "train")
    else:
        args.log_dir = os.path.join(args.log_dir, args.task_name, "eval")
    # exist_ok=True makes concurrent creation by several distributed
    # processes safe; the previous bare `except: pass` also swallowed real
    # failures such as permission errors.
    os.makedirs(args.log_dir, exist_ok=True)

    # Refuse to clobber an existing non-empty output directory unless the
    # user explicitly opted in.
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device

    # Setup logging (INFO only on the main process to avoid duplicated lines)
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()

    # Set seed
    set_seed(args)

    # Prepare ELUE task: processor, output mode and label set.
    args.task_name = args.task_name.lower()
    if args.task_name not in elue_processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = elue_processors[args.task_name]()
    args.output_mode = elue_output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    config = ElasticBertConfig.from_pretrained(
        args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        num_hidden_layers=args.num_hidden_layers,
        num_output_layers=args.num_output_layers,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = ElasticBertTokenizer.from_pretrained(
        args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = ElasticBertForSequenceClassification.from_pretrained(
        args.model_name_or_path,
        config=config,
        add_pooling_layer=True,
    )
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    model.to(args.device)
    print("Total Model Parameters:", sum(param.numel() for param in model.parameters()))
    logger.info("Training/evaluation parameters %s", args)

    # Training
    train_dataset = None
    if args.do_train:
        train_dataset = load_and_cache_examples_elue(args, args.task_name, tokenizer, data_type='train')
        global_step, tr_loss, _ = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)

    # Evaluation: sweep every requested patience value over every checkpoint.
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        metric_key = get_metric_key(args.task_name)
        patience_list = [int(x) for x in args.patience.split(',')]
        tokenizer = ElasticBertTokenizer.from_pretrained(args.model_name_or_path, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for patience in patience_list:
            if args.debug:
                fitlog.debug()
            if args.local_rank in [-1, 0]:
                # A fresh fitlog run per patience value.
                fitlog.set_log_dir(args.log_dir, new_log=True)
                fitlog.add_hyper(args)
                fitlog.add_hyper(value=patience, name='patience')
            best = 0.
            for checkpoint in checkpoints:
                global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
                prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""

                model = ElasticBertForSequenceClassification.from_pretrained(checkpoint)
                model.to(args.device)
                print(f'Evaluation for checkpoint {prefix}')
                result, speed_up, exit_layers = evaluate_elue_patience(args, model, tokenizer, prefix=prefix, patience=patience)
                # fitlog forbids '-' in metric names, hence the rename.
                res_for_display = {}
                for k, v in result.items():
                    res_for_display[k.replace("-", "_")] = v
                fitlog.add_metric({"dev": res_for_display, "speed_up": speed_up}, step=int(global_step))
                if result[metric_key] > best:
                    best = result[metric_key]
                    fitlog.add_best_metric({"dev": {metric_key.replace("-", "_"): result[metric_key], "speed_up": speed_up}})
                # Dump the per-example exit layer for offline analysis.
                output_eval_file = os.path.join(args.output_dir, "{}_{}_{}_{}.tsv".format(args.task_name, 'eval', 'exit_layer', str(patience)))
                with open(output_eval_file, "w", encoding='utf-8') as fout:
                    writer = csv.writer(fout, delimiter='\t', quotechar=None)
                    writer.writerow(["index", "exit_layer"])
                    for i, exit_layer in enumerate(exit_layers[0]):
                        writer.writerow([i, exit_layer])
                infer_speed_up = inference_elue_patience(args, model, tokenizer, prefix=prefix, patience=patience)
                fitlog.add_metric({"test_speed_up": infer_speed_up}, step=int(global_step))
                fitlog.add_best_metric({"test": {"speed_up": infer_speed_up}})
                result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
                results.update(result)
            if args.local_rank in [-1, 0]:
                fitlog.finish()

    return results
if __name__ == "__main__":
    # NOTE(review): main() returns the evaluation-results dict, not a best
    # metric — the name `best` looks stale; confirm nothing relies on it.
    best = main()
| 20,662 | 39.835968 | 147 | py |
ElasticBERT | ElasticBERT-main/finetune-dynamic/load_data.py | import os
import sys
import logging
sys.path.append('../')
import torch
from torch.utils.data import TensorDataset
from transformers import glue_convert_examples_to_features
from transformers import glue_output_modes
from transformers import glue_processors
from transformers.trainer_utils import is_main_process
from elue import (
elue_output_modes,
elue_processors,
elue_convert_examples_to_features,
)
logger = logging.getLogger(__name__)
def load_and_cache_examples_glue(args, task, tokenizer, data_type="train"):
    """Build (or load from cache) a :class:`TensorDataset` for a GLUE task.

    Args:
        args: namespace with at least data_dir, model_name_or_path,
            max_seq_length, overwrite_cache and local_rank.
        task: lowercase GLUE task name (key into ``glue_processors``).
        tokenizer: tokenizer used to featurize examples.
        data_type: one of ``"train"``/``"dev"``/``"test"``; the test split
            carries no labels.

    Features are cached per (split, model, max_seq_length, task), so repeated
    runs skip tokenization. In distributed training only rank 0 builds the
    cache while the other ranks wait at a barrier.
    """
    if args.local_rank not in [-1, 0] and data_type == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache

    processor = glue_processors[task]()
    output_mode = glue_output_modes[task]
    # Load data features from cache or dataset file
    cached_features_file = os.path.join(
        args.data_dir,
        "cached_{}_{}_{}_{}".format(
            data_type,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
            str(task),
        ),
    )
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        if data_type == "train":
            examples = processor.get_train_examples(args.data_dir)
        elif data_type == "dev":
            examples = processor.get_dev_examples(args.data_dir)
        elif data_type == "test":
            examples = processor.get_test_examples(args.data_dir)
        else:
            raise NotImplementedError

        features = glue_convert_examples_to_features(
            examples,
            tokenizer,
            label_list=label_list,
            max_length=args.max_seq_length,
            output_mode=output_mode,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)

    # BUGFIX: this barrier must mirror the one at the top of the function,
    # which is taken when data_type == "train". The original condition
    # (`not data_type == "train"`) meant rank 0 never reached the barrier the
    # other ranks were waiting on during distributed training, deadlocking
    # the run (cf. the reference run_glue.py pattern).
    if args.local_rank == 0 and data_type == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache

    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_labels = None
    if data_type != "test":
        if output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
    else:
        dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids)
    return dataset
def load_and_cache_examples_elue(args, task, tokenizer, data_type="train"):
    """Build (or load from cache) a :class:`TensorDataset` for an ELUE task.

    Args:
        args: namespace with at least data_dir, model_name_or_path,
            max_seq_length, overwrite_cache and local_rank.
        task: lowercase ELUE task name (key into ``elue_processors``).
        tokenizer: tokenizer used to featurize examples.
        data_type: one of ``"train"``/``"dev"``/``"test"``; the test split
            carries no labels.

    Features are cached per (split, model, max_seq_length, task), so repeated
    runs skip tokenization. In distributed training only rank 0 builds the
    cache while the other ranks wait at a barrier.
    """
    if args.local_rank not in [-1, 0] and data_type == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache

    processor = elue_processors[task]()
    output_mode = elue_output_modes[task]
    # Load data features from cache or dataset file
    cached_features_file = os.path.join(
        args.data_dir,
        "cached_{}_{}_{}_{}".format(
            data_type,
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
            str(task),
        ),
    )
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        if data_type == "train":
            examples = processor.get_train_examples(args.data_dir)
        elif data_type == "dev":
            examples = processor.get_dev_examples(args.data_dir)
        elif data_type == "test":
            examples = processor.get_test_examples(args.data_dir)
        else:
            raise NotImplementedError

        features = elue_convert_examples_to_features(
            examples,
            tokenizer,
            label_list=label_list,
            max_length=args.max_seq_length,
            output_mode=output_mode,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)

    # BUGFIX: this barrier must mirror the one at the top of the function,
    # which is taken when data_type == "train". The original condition
    # (`not data_type == "train"`) meant rank 0 never reached the barrier the
    # other ranks were waiting on during distributed training, deadlocking
    # the run (cf. the reference run_glue.py pattern).
    if args.local_rank == 0 and data_type == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache

    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_labels = None
    if data_type != "test":
        if output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
    else:
        dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids)
    return dataset
| 6,173 | 39.618421 | 150 | py |
ElasticBERT | ElasticBERT-main/finetune-dynamic/models/modeling_elasticbert_entropy.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ElasticBERT model for Early Exit with Entropy. """
import math
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import LayerNorm
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from transformers.utils import logging
from .configuration_elasticbert import ElasticBertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "fnlp/elasticbert-base"
_CONFIG_FOR_DOC = "ElasticBertConfig"
_TOKENIZER_FOR_DOC = "ElasticBertTokenizer"
ELASTICBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"fnlp/elasticbert-base",
"fnlp/elasticbert-large",
]
def entropy(x):
    """Shannon entropy of ``softmax(x)`` along dim 1, computed from raw logits.

    Uses the identity H = log(sum_j exp(x_j)) - sum_j x_j*exp(x_j) / sum_j exp(x_j).
    Logits are shifted by their row-wise maximum before exponentiation: the
    entropy of a softmax is shift-invariant, so the result is unchanged, but
    this prevents exp() overflow (inf/nan) for large logits, which the
    unshifted form suffered from.

    Args:
        x: 2-D tensor of pre-softmax logits, shape (batch, num_classes).

    Returns:
        1-D tensor of per-row entropies, shape (batch,).
    """
    shifted = x - x.max(dim=1, keepdim=True).values
    exp_x = torch.exp(shifted)
    A = torch.sum(exp_x, dim=1)            # sum of exp(x_i - max)
    B = torch.sum(shifted * exp_x, dim=1)  # sum of (x_i - max) * exp(x_i - max)
    return torch.log(A) - B / A
class GradientRescaleFunction(torch.autograd.Function):
    """Identity in the forward pass; multiplies the incoming gradient by a
    constant factor in the backward pass. Used to rebalance gradients from
    multiple exit layers.

    Fixes vs. the original: the input tensor was saved with
    ``save_for_backward`` but never used (holding it alive through backward
    for nothing), and ``input = ctx.saved_tensors`` mis-assigned the tuple
    rather than unpacking it. Only the non-tensor scale factor is needed.
    """

    @staticmethod
    def forward(ctx, input, weight):
        # Stash the (plain-number) scale factor on the context; no tensors
        # are required for the backward computation.
        ctx.gd_scale_weight = weight
        return input

    @staticmethod
    def backward(ctx, grad_outputs):
        grad_input = grad_weight = None
        # No gradient flows to `weight`; it is treated as a constant.
        if ctx.needs_input_grad[0]:
            grad_input = ctx.gd_scale_weight * grad_outputs
        return grad_input, grad_weight
gradient_rescale = GradientRescaleFunction.apply
class ElasticBertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.layernorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None
    ):
        """Sum word + token-type (+ absolute position) embeddings, then
        LayerNorm and dropout.

        Exactly one of `input_ids` / `inputs_embeds` must be provided;
        `token_type_ids` and `position_ids` default to zeros and [0..seq_len).
        Returns a tensor of shape (batch, seq_len, hidden_size).
        """
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        if position_ids is None:
            # Slice the pre-registered [0..max_pos) buffer to this sequence length.
            position_ids = self.position_ids[:, :seq_length]
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        # Position embeddings are only added for the "absolute" scheme;
        # relative schemes are handled inside self-attention instead.
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class ElasticBertSelfAttention(nn.Module):
    """Multi-head self-attention with optional relative position embeddings
    (standard BERT-style attention)."""

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = config.hidden_size // config.num_attention_heads
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, config.hidden_size)
        self.key = nn.Linear(config.hidden_size, config.hidden_size)
        self.value = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            # Relative schemes embed the (clipped) pairwise distance between
            # positions; 2*max_pos - 1 distinct distances are possible.
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
    ):
        """Returns (context,) or (context, attention_probs) when
        `output_attentions` is set. `attention_mask` is expected to already
        be in additive form (large negative values at masked positions)."""
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            # Shift distances from [-(max-1), max-1] into valid embedding indices.
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility
            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in ElasticBertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        # (batch, heads, seq, head_size) -> (batch, seq, all_head)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
class ElasticBertSelfOutput(nn.Module):
    """Post-attention projection: dense -> dropout, then a residual add with
    the attention input followed by LayerNorm."""

    def __init__(self, config):
        super().__init__()
        hidden = config.hidden_size
        self.dense = nn.Linear(hidden, hidden)
        self.LayerNorm = LayerNorm(hidden, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # Project and regularize, then normalize around the residual branch.
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class ElasticBertAttention(nn.Module):
    """Self-attention block (ElasticBertSelfAttention + output projection)
    with support for pruning attention heads."""

    def __init__(self, config):
        super().__init__()
        self.self = ElasticBertSelfAttention(config)
        self.output = ElasticBertSelfOutput(config)
        # Indices of heads already pruned, kept so repeated pruning calls
        # compute the right remaining-head indices.
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads in place (no-op for empty input)."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
    ):
        """Returns (attention_output,) plus attention probs when requested."""
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
class ElasticBertIntermediate(nn.Module):
    """Position-wise feed-forward expansion: hidden_size ->
    intermediate_size followed by the configured activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # A string names an activation in ACT2FN; anything else is assumed
        # to already be a callable.
        act = config.hidden_act
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class ElasticBertOutput(nn.Module):
    """Feed-forward contraction: intermediate_size -> hidden_size with
    dropout, then residual add + LayerNorm against the block input."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        down_projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(down_projected + input_tensor)
class ElasticBertLayer(nn.Module):
    """One transformer encoder layer: self-attention followed by the
    position-wise feed-forward sub-block (intermediate + output)."""

    def __init__(self, config):
        super().__init__()
        # Feed-forward can be chunked along the sequence dim to save memory.
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ElasticBertAttention(config)
        self.intermediate = ElasticBertIntermediate(config)
        self.output = ElasticBertOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
    ):
        """Returns (layer_output,) plus attention probs when requested."""
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        return outputs

    def feed_forward_chunk(self, attention_output):
        # Applied per chunk by apply_chunking_to_forward above.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class ElasticBertPooler(nn.Module):
    """Pools a sequence by passing the first token's hidden state through a
    dense layer with tanh (standard BERT pooling)."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class ElasticBertEncoder(nn.Module):
    """Stack of ElasticBertLayers with one or more exit points.

    When ``num_output_layers > 1`` the last ``num_output_layers`` layers each
    produce a (sequence, pooled) output pair; with a single output layer only
    the final layer is pooled. During training, gradients flowing back from
    each exit are rescaled (see ``gradient_rescale``) to balance the
    contributions of the multiple exit losses on the shared backbone.
    """

    def __init__(self, config, add_pooling_layer=None):
        super().__init__()
        self.config = config
        self.add_pooling_layer = add_pooling_layer
        self.num_output_layers = config.num_output_layers
        self.num_hidden_layers = config.num_hidden_layers
        self.max_output_layers = config.max_output_layers
        self.layer = nn.ModuleList([ElasticBertLayer(config) for _ in range(config.num_hidden_layers)])
        assert self.num_output_layers <= self.num_hidden_layers, \
            "The total number of layers must be be greater than or equal to the number of the output layers. "
        # First layer index that produces an exit output (multi-exit mode).
        self.start_output_layer = None
        # Index of the single pooler used in single-exit mode.
        self.current_pooler_num = None
        # The pooler ModuleList is sized max_output_layers and indexed by the
        # absolute layer index; entries outside the exit range are None.
        # NOTE(review): this assumes num_hidden_layers <= max_output_layers,
        # otherwise the pooler index would go out of range — confirm configs.
        if self.num_output_layers > 1:
            self.start_output_layer = self.num_hidden_layers - self.num_output_layers
            start_pooler_num = self.start_output_layer
            end_pooler_num = self.num_hidden_layers - 1
            if add_pooling_layer:
                self.pooler = nn.ModuleList([ElasticBertPooler(config) if i >= start_pooler_num and \
                    i <= end_pooler_num else None for i in range(self.max_output_layers)])
        elif self.num_output_layers == 1:
            self.current_pooler_num = self.num_hidden_layers - 1
            if add_pooling_layer:
                self.pooler = nn.ModuleList([ElasticBertPooler(config) if i == self.current_pooler_num \
                    else None for i in range(self.max_output_layers)])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
    ):
        """Run all layers; returns a tuple of the non-None entries among
        (last hidden state, per-exit sequence outputs, per-exit pooled
        outputs, final pooled output, all hidden states, all attentions)."""
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        final_pooled_output = None
        output_sequence_outputs = () if self.num_output_layers > 1 else None
        output_pooled_outputs = () if self.num_output_layers > 1 else None
        for i, layer_module in enumerate(self.layer):
            if getattr(self.config, "gradient_checkpointing", False) and self.training:
                # Trade compute for memory: recompute this layer in backward.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    output_attentions,
                )
            hidden_states = layer_outputs[0]
            if self.num_output_layers > 1:
                if i >= self.start_output_layer:
                    # Gradient rescaling is identity in the forward pass; it
                    # scales the backward gradient down by 1/(L-i) before the
                    # exit head and back up by (L-i-1) afterwards, so each
                    # exit's loss contributes a balanced share to the backbone.
                    if self.training:
                        hidden_states = gradient_rescale(hidden_states, 1.0 / (self.num_hidden_layers - i))
                    output_sequence_outputs += (hidden_states, )
                    if self.add_pooling_layer:
                        pooled_output = self.pooler[i](hidden_states)
                        output_pooled_outputs += (pooled_output, )
                    else:
                        # No pooler: fall back to the raw first-token state.
                        output_pooled_outputs += (hidden_states[:, 0], )
                    if self.training:
                        hidden_states = gradient_rescale(hidden_states, (self.num_hidden_layers - i -1))
            elif self.num_output_layers == 1:
                if i == self.num_hidden_layers - 1:
                    if self.add_pooling_layer:
                        final_pooled_output = self.pooler[self.current_pooler_num](hidden_states)
                    else:
                        final_pooled_output = hidden_states[:, 0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        return tuple(
            v
            for v in [
                hidden_states,
                output_sequence_outputs,
                output_pooled_outputs,
                final_pooled_output,
                all_hidden_states,
                all_self_attentions,
            ]
            if v is not None
        )

    def adaptive_forward(
        self,
        hidden_states=None,
        current_layer=None,
        attention_mask=None,
    ):
        """Run a single layer (used for early-exit inference, one layer at a
        time); returns (hidden_states, pooled_output-or-None)."""
        layer_outputs = self.layer[current_layer](
            hidden_states,
            attention_mask,
            output_attentions=False,
        )
        hidden_states = layer_outputs[0]
        if self.training:
            hidden_states = gradient_rescale(hidden_states, 1.0 / (self.num_hidden_layers - current_layer))
        pooled_output = None
        if self.add_pooling_layer:
            # NOTE(review): indexes the pooler list by absolute layer index —
            # assumes a pooler exists at every layer used for early exit.
            pooled_output = self.pooler[current_layer](
                hidden_states,
            )
        return hidden_states, pooled_output
class ElasticBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ElasticBertConfig
    base_model_prefix = "elasticbert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize one submodule following BERT conventions."""
        std = self.config.initializer_range
        if isinstance(module, nn.LayerNorm):
            # LayerNorm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Keep the padding embedding at exactly zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
ELASTICBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~ElasticBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
ELASTICBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
"""
@add_start_docstrings(
    "The bare ElasticBert Model transformer outputting raw hidden-states without any specific head on top.",
    ELASTICBERT_START_DOCSTRING,
)
class ElasticBertModel(ElasticBertPreTrainedModel):
    """ElasticBERT backbone with entropy-based early exit.

    In training mode every layer's representation is fed to the matching
    classifier in ``output_layers`` and all logits are returned (with gradient
    rescaling to balance the per-layer losses). In normal evaluation the full
    stack runs and one logit per output layer is returned. In "highway" mode
    (``eval_highway=True``) inference stops at the first layer whose prediction
    entropy drops below ``self.early_exit_entropy``.
    """
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.add_pooling_layer = add_pooling_layer
        self.num_output_layers = config.num_output_layers
        self.num_hidden_layers = config.num_hidden_layers
        self.max_output_layers = config.max_output_layers
        self.embeddings = ElasticBertEmbeddings(config)
        self.encoder = ElasticBertEncoder(config, add_pooling_layer=add_pooling_layer)
        self.init_weights()
        # Early-exit switch and inference-time statistics.
        self.eval_highway = False
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.exiting_layer_every_ins = []
    def reset_stats(self):
        """Clear the accumulated early-exit inference statistics."""
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.exiting_layer_every_ins = []
    def set_eval_state(self, eval_highway=False):
        """Enable/disable the entropy-based early-exit evaluation path."""
        self.eval_highway = eval_highway
    def set_early_exit_entropy(self, x):
        """Set the entropy threshold below which inference exits early.

        NOTE(review): ``early_exit_entropy`` only exists after this call;
        ``forward`` in highway mode and ``log_stats`` assume it has been set.
        """
        self.early_exit_entropy = x
    def log_stats(self):
        """Print and return the speed-up implied by the recorded exit layers."""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        speed_up = self.config.num_hidden_layers / avg_inf_layers
        message = f'*** Early_exit_entropy = {self.early_exit_entropy} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up = {speed_up:.2f} ***'
        print(message)
        return speed_up
    def get_input_embeddings(self):
        """Return the word-embedding table (transformers API hook)."""
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        """Replace the word-embedding table (transformers API hook)."""
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(ELASTICBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        output_dropout=None,
        output_layers=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        """Run the backbone and the per-layer classifiers.

        Returns a list of logits: one entry per output layer in training and
        full-evaluation mode, or a single-entry list (the exiting layer's
        logits) in highway mode.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
        )
        encoder_outputs = embedding_output
        if self.training:
            # Layer-by-layer pass: collect logits from every exit classifier.
            res = []
            for i in range(self.num_hidden_layers):
                encoder_outputs, pooled_output = self.encoder.adaptive_forward(
                    encoder_outputs,
                    current_layer=i,
                    attention_mask=extended_attention_mask,
                )
                logits = None
                if self.add_pooling_layer:
                    assert pooled_output is not None
                    logits = output_layers[i](output_dropout(pooled_output))
                else:
                    assert pooled_output is None
                    logits = output_layers[i](output_dropout(encoder_outputs[:, 0]))
                # Rescale so earlier layers are not dominated by later exits.
                encoder_outputs = gradient_rescale(encoder_outputs, (self.num_hidden_layers - i -1))
                res.append(logits)
            assert len(res) == self.num_output_layers
        elif not self.eval_highway:
            # Full evaluation: run the whole encoder and classify every exit.
            encoder_outputs = self.encoder(
                encoder_outputs,
                attention_mask=extended_attention_mask,
            )
            pooled_outputs = encoder_outputs[2]
            assert len(pooled_outputs) == len(output_layers)
            res = []
            for i, pooled_output in enumerate(pooled_outputs):
                logit = output_layers[i](pooled_output)
                res.append(logit)
        else:
            # Highway evaluation: stop at the first sufficiently-confident layer.
            middle_result = None
            calculated_layer_num = 0
            highway_entropy = None
            for i in range(self.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs, pooled_output = self.encoder.adaptive_forward(
                    encoder_outputs,
                    current_layer=i,
                    attention_mask=extended_attention_mask,
                )
                logits = None
                if self.add_pooling_layer:
                    assert pooled_output is not None
                    logits = output_layers[i](pooled_output)
                else:
                    assert pooled_output is None
                    logits = output_layers[i](encoder_outputs[:, 0])
                middle_result = logits
                # NOTE(review): `entropy` is assumed to be a helper defined
                # elsewhere in this module — confirm.
                highway_entropy = entropy(logits)
                if highway_entropy < self.early_exit_entropy:
                    self.exiting_layer_every_ins.append(i + 1)
                    break
            res = [middle_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
            # No early exit happened: record a full-depth pass.
            if highway_entropy >= self.early_exit_entropy:
                self.exiting_layer_every_ins.append(self.num_hidden_layers)
        return res
@add_start_docstrings(
    """
    ElasticBert Model transformer with a sequence classification/regression head on top
    (a linear layer on top of the pooled output) e.g. for GLUE tasks.
    """,
    ELASTICBERT_START_DOCSTRING,
)
class ElasticBertForSequenceClassification(ElasticBertPreTrainedModel):
    """Wraps ``ElasticBertModel`` with one linear classifier per output layer;
    during training the per-layer losses are summed."""
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.num_labels = config.num_labels
        self.add_pooling_layer = add_pooling_layer
        self.elasticbert = ElasticBertModel(config, add_pooling_layer=add_pooling_layer)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One classification head per exit layer.
        self.classifiers = nn.ModuleList([nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_output_layers)])
        self.init_weights()
    @add_start_docstrings_to_model_forward(ELASTICBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        # `logits` is a list with one tensor per exit layer (single entry in
        # highway mode).
        logits = self.elasticbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        if not self.elasticbert.eval_highway:
            outputs = (logits, )
        else:
            # Highway mode returns a single-entry list; expose that tensor.
            outputs = (logits[-1], )
        if labels is not None:
            # Sum the loss over all exit layers.
            total_loss = None
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss
            outputs = (total_loss, ) + outputs
        return outputs
| 34,058 | 39.838129 | 146 | py |
ElasticBERT | ElasticBERT-main/finetune-dynamic/models/modeling_elasticbert_patience.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ElasticBERT model for Early Exit with Patience. """
import math
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import LayerNorm
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from transformers.utils import logging
from .configuration_elasticbert import ElasticBertConfig
# Module-level logger (transformers' logging wrapper).
logger = logging.get_logger(__name__)
# Identifiers used only to render the auto-generated docstrings below.
_CHECKPOINT_FOR_DOC = "fnlp/elasticbert-base"
_CONFIG_FOR_DOC = "ElasticBertConfig"
_TOKENIZER_FOR_DOC = "ElasticBertTokenizer"
# Hub checkpoints known to be compatible with this architecture.
ELASTICBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "fnlp/elasticbert-base",
    "fnlp/elasticbert-large",
]
class GradientRescaleFunction(torch.autograd.Function):
    """Identity in the forward pass; scales the gradient by ``weight`` on the
    way back.

    Used to re-balance gradients flowing into the shared lower transformer
    layers, which otherwise accumulate gradients from every exit classifier.
    """

    @staticmethod
    def forward(ctx, input, weight):
        """Return ``input`` unchanged, remembering ``weight`` for backward."""
        ctx.save_for_backward(input)
        ctx.gd_scale_weight = weight
        return input

    @staticmethod
    def backward(ctx, grad_outputs):
        """Scale the incoming gradient by the stored weight.

        The gradient w.r.t. ``weight`` is ``None`` because ``weight`` is a
        plain Python scalar, not a tensor.
        """
        # Fix: ``saved_tensors`` is a tuple — unpack the tensor instead of
        # binding the tuple itself (the original bound the tuple to ``input``).
        (input,) = ctx.saved_tensors
        grad_input = grad_weight = None
        if ctx.needs_input_grad[0]:
            grad_input = ctx.gd_scale_weight * grad_outputs
        return grad_input, grad_weight


# Convenience alias: gradient_rescale(x, w) applies the function above.
gradient_rescale = GradientRescaleFunction.apply
class ElasticBertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.layernorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None
    ):
        """Sum word and token-type embeddings (plus position embeddings when
        the embedding type is "absolute"), then apply LayerNorm and dropout.
        Exactly one of ``input_ids`` / ``inputs_embeds`` must be given.
        """
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        if position_ids is None:
            # Default to positions 0..seq_length-1 from the registered buffer.
            position_ids = self.position_ids[:, :seq_length]
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class ElasticBertSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention with optional relative
    position embeddings ("relative_key" / "relative_key_query")."""
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = config.hidden_size // config.num_attention_heads
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, config.hidden_size)
        self.key = nn.Linear(config.hidden_size, config.hidden_size)
        self.value = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            # One embedding per possible (query - key) distance.
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
    def transpose_for_scores(self, x):
        """Reshape (batch, seq, hidden) into (batch, heads, seq, head_size)."""
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
    ):
        """Return ``(context_layer,)`` or ``(context_layer, attention_probs)``.

        ``attention_mask`` is expected in additive form (large negative values
        on masked positions), as produced by ``get_extended_attention_mask``.
        """
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility
            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in ElasticBertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        # Merge heads back: (batch, heads, seq, head_size) -> (batch, seq, hidden).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
class ElasticBertSelfOutput(nn.Module):
    """Projects the self-attention output back to ``hidden_size``, then applies
    dropout and a residual LayerNorm (post-LN, BERT-style)."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        """Return ``LayerNorm(dropout(dense(hidden_states)) + input_tensor)``."""
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class ElasticBertAttention(nn.Module):
    """Self-attention plus its output projection, with head-pruning support."""
    def __init__(self, config):
        super().__init__()
        self.self = ElasticBertSelfAttention(config)
        self.output = ElasticBertSelfOutput(config)
        self.pruned_heads = set()
    def prune_heads(self, heads):
        """Remove the given attention heads from this layer in place."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
    ):
        """Return ``(attention_output,)`` plus attention probs when requested."""
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
class ElasticBertIntermediate(nn.Module):
    """Feed-forward expansion: ``hidden_size -> intermediate_size`` followed by
    the configured activation (a string name looked up in ACT2FN, or a callable)."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        act = config.hidden_act
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act

    def forward(self, hidden_states):
        """Apply the linear expansion followed by the activation."""
        return self.intermediate_act_fn(self.dense(hidden_states))
class ElasticBertOutput(nn.Module):
    """Feed-forward contraction: projects ``intermediate_size`` back down to
    ``hidden_size``, then applies dropout and a residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        """Return ``LayerNorm(dropout(dense(hidden_states)) + input_tensor)``."""
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
class ElasticBertLayer(nn.Module):
    """One transformer block: self-attention followed by the feed-forward
    sub-layer, applied with optional chunking over the sequence dimension."""
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ElasticBertAttention(config)
        self.intermediate = ElasticBertIntermediate(config)
        self.output = ElasticBertOutput(config)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
    ):
        """Return ``(layer_output,)`` plus attention probs when requested."""
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        return outputs
    def feed_forward_chunk(self, attention_output):
        """Feed-forward sub-layer applied to one chunk of the sequence."""
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class ElasticBertPooler(nn.Module):
    """Pools a sequence by passing its first token's hidden state through a
    Linear + Tanh projection."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        """Return ``tanh(dense(hidden_states[:, 0]))`` of shape (batch, hidden)."""
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class ElasticBertEncoder(nn.Module):
    """Stack of ``ElasticBertLayer`` modules with optional per-layer poolers.

    When ``num_output_layers > 1``, the last ``num_output_layers`` layers each
    get a pooler (earlier slots hold ``None`` placeholders) so intermediate
    exits can classify; when ``num_output_layers == 1`` only the final layer is
    pooled.
    """
    def __init__(self, config, add_pooling_layer=None):
        super().__init__()
        self.config = config
        self.add_pooling_layer = add_pooling_layer
        self.num_output_layers = config.num_output_layers
        self.num_hidden_layers = config.num_hidden_layers
        self.max_output_layers = config.max_output_layers
        self.layer = nn.ModuleList([ElasticBertLayer(config) for _ in range(config.num_hidden_layers)])
        assert self.num_output_layers <= self.num_hidden_layers, \
            "The total number of layers must be be greater than or equal to the number of the output layers. "
        self.start_output_layer = None
        self.current_pooler_num = None
        if self.num_output_layers > 1:
            # First (0-based) layer index that produces an exit output.
            self.start_output_layer = self.num_hidden_layers - self.num_output_layers
            start_pooler_num = self.start_output_layer
            end_pooler_num = self.num_hidden_layers - 1
            if add_pooling_layer:
                # NOTE(review): poolers are later indexed by absolute layer
                # index, which assumes max_output_layers covers the full layer
                # range — confirm against the config.
                self.pooler = nn.ModuleList([ElasticBertPooler(config) if i >= start_pooler_num and \
                                             i <= end_pooler_num else None for i in range(self.max_output_layers)])
        elif self.num_output_layers == 1:
            self.current_pooler_num = self.num_hidden_layers - 1
            if add_pooling_layer:
                self.pooler = nn.ModuleList([ElasticBertPooler(config) if i == self.current_pooler_num \
                                             else None for i in range(self.max_output_layers)])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
    ):
        """Run every layer, collecting exit representations along the way.

        Returns a tuple built from the non-``None`` entries of:
        (last hidden states, per-exit sequence outputs, per-exit pooled
        outputs, final pooled output, all hidden states, all attentions).
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        output_sequence_outputs = () if self.num_output_layers > 1 else None
        output_pooled_outputs = () if self.num_output_layers > 1 else None
        final_pooled_output = None
        for i, layer_module in enumerate(self.layer):
            if getattr(self.config, "gradient_checkpointing", False) and self.training:
                # Trade compute for memory with activation checkpointing.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    output_attentions,
                )
            hidden_states = layer_outputs[0]
            if self.num_output_layers > 1:
                if i >= self.start_output_layer:
                    # Rescale gradients so shared lower layers are not
                    # dominated by the sum of all exit losses.
                    if self.training:
                        hidden_states = gradient_rescale(hidden_states, 1.0 / (self.num_hidden_layers - i))
                    output_sequence_outputs += (hidden_states, )
                    if self.add_pooling_layer:
                        pooled_output = self.pooler[i](hidden_states)
                        output_pooled_outputs += (pooled_output, )
                    else:
                        # No pooler: use the raw first-token representation.
                        output_pooled_outputs += (hidden_states[:, 0], )
                    if self.training:
                        hidden_states = gradient_rescale(hidden_states, (self.num_hidden_layers - i -1))
            elif self.num_output_layers == 1:
                if i == self.num_hidden_layers - 1:
                    if self.add_pooling_layer:
                        final_pooled_output = self.pooler[self.current_pooler_num](hidden_states)
                    else:
                        final_pooled_output = hidden_states[:, 0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        return tuple(
            v
            for v in [
                hidden_states,
                output_sequence_outputs,
                output_pooled_outputs,
                final_pooled_output,
                all_hidden_states,
                all_self_attentions,
            ]
            if v is not None
        )
    def adaptive_forward(
        self,
        hidden_states=None,
        current_layer=None,
        attention_mask=None,
    ):
        """Run a single layer (used by the early-exit inference loop).

        Returns ``(hidden_states, pooled_output)``; ``pooled_output`` is
        ``None`` when pooling is disabled.
        NOTE(review): indexes ``self.pooler[current_layer]`` directly, so it
        assumes every visited layer has a non-``None`` pooler — confirm.
        """
        layer_outputs = self.layer[current_layer](
            hidden_states,
            attention_mask,
            output_attentions=False,
        )
        hidden_states = layer_outputs[0]
        if self.training:
            hidden_states = gradient_rescale(hidden_states, 1.0 / (self.num_hidden_layers - current_layer))
        pooled_output = None
        if self.add_pooling_layer:
            pooled_output = self.pooler[current_layer](
                hidden_states,
            )
        return hidden_states, pooled_output
class ElasticBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ElasticBertConfig
    base_model_prefix = "elasticbert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize weights BERT-style: normal(0, initializer_range) for
        Linear/Embedding weights, zeros for biases and the padding row, and
        identity (weight=1, bias=0) for LayerNorm."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
ELASTICBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~ElasticBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
ELASTICBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
"""
@add_start_docstrings(
    "The bare ElasticBert Model transformer outputting raw hidden-states without any specific head on top.",
    ELASTICBERT_START_DOCSTRING,
)
class ElasticBertModel(ElasticBertPreTrainedModel):
    """ElasticBERT backbone with patience-based (PABEE-style) early exit.

    Training mode runs every layer, feeds each layer's representation to the
    matching classifier in ``output_layers`` and returns all logits (with
    gradient rescaling to balance the per-layer losses). With ``patience == 0``
    evaluation runs the full stack and returns one logit per output layer.
    With ``patience > 0`` inference stops as soon as ``patience`` consecutive
    internal classifiers agree on the prediction.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.add_pooling_layer = add_pooling_layer
        self.num_output_layers = config.num_output_layers
        self.num_hidden_layers = config.num_hidden_layers
        self.max_output_layers = config.max_output_layers
        self.embeddings = ElasticBertEmbeddings(config)
        self.encoder = ElasticBertEncoder(config, add_pooling_layer=add_pooling_layer)
        self.init_weights()
        # Early-exit configuration and inference-time statistics.
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.exiting_layer_every_ins = []
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        """Set the agreement tolerance used by the regression patience check."""
        self.regression_threshold = threshold

    def set_patience(self, patience):
        """Set how many consecutive agreeing classifiers trigger an early exit."""
        self.patience = patience

    def reset_stats(self):
        """Clear the accumulated early-exit inference statistics."""
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.exiting_layer_every_ins = []

    def log_stats(self):
        """Print and return the speed-up implied by the recorded exit layers."""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        speed_up = self.config.num_hidden_layers / avg_inf_layers
        message = f'*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up = {speed_up:.2f} ***'
        print(message)
        return speed_up

    def get_input_embeddings(self):
        """Return the word-embedding table (transformers API hook)."""
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        """Replace the word-embedding table (transformers API hook)."""
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(ELASTICBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
        output_attentions=None,
        output_hidden_states=None,
    ):
        """Run the backbone and the per-layer classifiers.

        Returns a list of logits: one entry per output layer in training mode
        and with ``patience == 0``, or a single-entry list (the exit layer's
        logits) when patience-based early exit is active.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
        )
        encoder_outputs = embedding_output
        if self.training:
            # Layer-by-layer pass: collect logits from every exit classifier.
            res = []
            for i in range(self.num_hidden_layers):
                encoder_outputs, pooled_output = self.encoder.adaptive_forward(
                    encoder_outputs,
                    current_layer=i,
                    attention_mask=extended_attention_mask,
                )
                logits = None
                if self.add_pooling_layer:
                    assert pooled_output is not None
                    logits = output_layers[i](output_dropout(pooled_output))
                else:
                    assert pooled_output is None
                    logits = output_layers[i](output_dropout(encoder_outputs[:, 0]))
                # Rescale so earlier layers are not dominated by later exits.
                encoder_outputs = gradient_rescale(encoder_outputs, (self.num_hidden_layers - i - 1))
                res.append(logits)
            assert len(res) == self.num_output_layers
        elif self.patience == 0:
            # Full evaluation: run the whole encoder and classify every exit.
            encoder_outputs = self.encoder(
                encoder_outputs,
                attention_mask=extended_attention_mask,
            )
            pooled_outputs = encoder_outputs[2]
            assert len(pooled_outputs) == len(output_layers)
            res = []
            for i, pooled_output in enumerate(pooled_outputs):
                logit = output_layers[i](pooled_output)
                res.append(logit)
        else:
            # Patience-based early exit: stop once `patience` consecutive
            # classifiers agree on the prediction.
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs, pooled_output = self.encoder.adaptive_forward(
                    encoder_outputs,
                    current_layer=i,
                    attention_mask=extended_attention_mask,
                )
                logits = None
                if self.add_pooling_layer:
                    assert pooled_output is not None
                    logits = output_layers[i](pooled_output)
                else:
                    assert pooled_output is None
                    logits = output_layers[i](encoder_outputs[:, 0])
                if regression:
                    labels = logits.detach()
                    # Fix: reduce the element-wise comparison with torch.all so
                    # the condition is a Python bool for any batch size. The
                    # original relied on implicit bool() of the comparison
                    # tensor, which raises for batch sizes > 1; this mirrors
                    # the classification branch below.
                    if (patient_result is not None) and torch.all(
                        torch.abs(patient_result - labels) < self.regression_threshold
                    ):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                patient_result = logits
                if patient_counter == self.patience:
                    self.exiting_layer_every_ins.append(i + 1)
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
            # No early exit happened: record a full-depth pass.
            if patient_counter != self.patience:
                self.exiting_layer_every_ins.append(self.num_hidden_layers)
        return res
@add_start_docstrings(
    """
    ElasticBert Model transformer with a sequence classification/regression head on top
    (a linear layer on top of the pooled output) e.g. for GLUE tasks.
    """,
    ELASTICBERT_START_DOCSTRING,
)
class ElasticBertForSequenceClassification(ElasticBertPreTrainedModel):
    # Multi-exit sequence classifier: one linear head per output layer of the
    # ElasticBert backbone. With num_labels == 1 the heads do regression (MSE),
    # otherwise classification (cross-entropy).
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.num_labels = config.num_labels
        self.add_pooling_layer = add_pooling_layer
        self.elasticbert = ElasticBertModel(config, add_pooling_layer=add_pooling_layer)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One classification head per exit layer of the backbone.
        self.classifiers = nn.ModuleList([nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_output_layers)])
        self.init_weights()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        # The backbone returns one logits tensor per exit layer (a list); with
        # patience-based early exit it returns only the final patient result.
        logits = self.elasticbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels==1,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        # patience == 0: keep every exit's logits; otherwise expose the last
        # (patience-stable) prediction only.
        if self.elasticbert.patience == 0:
            outputs = (logits, )
        else:
            outputs = (logits[-1], )
        if labels is not None:
            # Sum the per-exit losses so every head is trained jointly.
            total_loss = None
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = nn.MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = nn.CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss
            outputs = (total_loss, ) + outputs
        return outputs
| 34,299 | 40.028708 | 138 | py |
N-JetNet | N-JetNet-main/demo.py | from __future__ import print_function
import argparse
import os
import shutil
import time
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from pytorch_classification.train_and_test import *
from pytorch_classification.dataset import *
import models as models
# For tensorboard
from torch.utils.tensorboard import SummaryWriter
import torchvision
# Load the model names
# Discover the architectures exported by the models package (lowercase callables).
model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models.__dict__[name]))
parser = argparse.ArgumentParser()
# N-Jet options
parser.add_argument('--srf-init-k', default=2.0, type=float,
                    help='spatial extent of the filters')
parser.add_argument('--srf-init-scale', default=0.0, type=float,
                    help='the initial scale, if not learned')
parser.add_argument('--srf-init-order', default=4.0, type=float,
                    help='the order of the approximation')
parser.add_argument('--srf-learn-sigma', action='store_true', default=True,
                    help='If the scale/sigma are fixed or learned.')
# Optimization options
parser.add_argument('--epochs', default=90, type=int,
                    help='The number of epochs.')
parser.add_argument('--start-epoch', default=0, type=int,
                    help='Epoch for restarting runs.')
parser.add_argument('--schedule', type=int, nargs='+', default=[30, 60],
                    help='Epoch at which the learning rate decreases.')
parser.add_argument('--lr', default=0.1, type=float,
                    help='Initial learning rate.')
parser.add_argument('--train-batch', default=128, type=int,
                    help='Training batch size.')
parser.add_argument('--test-batch', default=100, type=int,
                    help='Test batch size.')
parser.add_argument('--momentum', default=0.9, type=float,
                    help='Optimizer momentum.')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
                    help='Weight regularization.')
parser.add_argument('--optim', default='sgd', type=str,
                    help='The optimizer.')
parser.add_argument('--gamma', type=float, default=0.1,
                    help='LR is multiplied by gamma on schedule.')
# Checkpoints
parser.add_argument('--checkpoint', default='checkpoints', type=str,
                    help='Path where to save the checkpoint.')
parser.add_argument('--resume', default='', type=str,
                    help='Path to load the checkpoint.')
# Architecture and data
parser.add_argument('--dataset', default='cifar10', type=str,
                    help='The name of the dataset.')
parser.add_argument('--arch', default='nin', choices=model_names,
                    help='Model architecture: ' +
                    ' | '.join(model_names) + ' (default: nin)')
# Seed
parser.add_argument('--manualSeed', type=int, default=0, help='Manual seed')
parser.add_argument('--evaluate', action='store_true',
                    help='Only evaluate the model.')
parser.add_argument('--workers', default=4, type=int,
                    help='The number of data loading workers.')
args = parser.parse_args()
# Snapshot of all CLI options; main() mutates state['lr'] during LR decay.
state = {k: v for k, v in args._get_kwargs()}
assert args.dataset == 'cifar10' or args.dataset == 'cifar100',\
    'Dataset can only be cifar10 or cifar100.'
# Use CUDA
use_cuda = torch.cuda.is_available()
# Random seed
# NOTE(review): --manualSeed defaults to 0 (not None), so the randint branch
# below never fires unless the default is changed — confirm this is intended.
if args.manualSeed is None:
    args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
if use_cuda:
    torch.cuda.manual_seed_all(args.manualSeed)
""" Save the checkpoint """
def save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar'):
filepath = os.path.join(checkpoint, filename)
torch.save(state, filepath)
if is_best:
shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))
""" Adjust the learning rate at the wanted epochs """
def adjust_learning_rate(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr'] *= args.gamma
for param_group in optimizer.param_groups:
param_group['lr'] = state['lr']
""" The main training loop. """
def main():
start_epoch = args.start_epoch
if not os.path.isdir(args.checkpoint):
os.mkdir(os.path.normpath(args.checkpoint))
# For tensorboard
writer = SummaryWriter(args.checkpoint)
# Create data splits:
trainset = dataCIFAR(args.dataset, args.train_batch, train=True, val=False,
workers=args.workers)
valset = dataCIFAR(args.dataset, args.train_batch, train=True, val=True,
workers=args.workers)
testset = dataCIFAR(args.dataset, args.test_batch, train=False, val=False,
workers=args.workers)
# Loading the model
if args.arch.endswith('nin'):
model = models.__dict__[args.arch](
num_classes=trainset.num_classes)
elif args.arch.endswith('nin_shared_srf'):
model = models.__dict__[args.arch](
num_classes=trainset.num_classes,
init_k=args.srf_init_k,
init_order=args.srf_init_order,
init_scale=args.srf_init_scale,
learn_sigma=args.srf_learn_sigma,
use_cuda=use_cuda)
else:
print("Model not implemented")
# Get model summary to list the number of parameters.
summary(model, input_size=(1, 3, 32, 32))
model = torch.nn.DataParallel(model)
if use_cuda:
model = model.cuda()
cudnn.benchmark = True
# Define the criterion and the optimizer
criterion = nn.CrossEntropyLoss()
if args.optim.endswith('sgd'):
optimizer = optim.SGD(model.parameters(), lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
elif args.optim.endswith('adam'):
optimizer = optim.Adam(model.parameters(), lr=args.lr,
weight_decay=args.weight_decay)
# Resume the training from he checkpoint
title = args.dataset + args.arch
if args.resume:
print('==> Resuming from checkpoint..', args.resume)
assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
args.checkpoint = os.path.dirname(args.resume)
checkpoint = torch.load(args.resume)
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
# If we only want to evaluate a pretrained model
if args.evaluate:
print('\nEvaluation only')
test_loss, test_acc = test(testset, model, criterion, start_epoch, use_cuda)
print(' Test Loss: %.8f, Test Acc: %.2f' % (test_loss, test_acc))
return
# Train and validation
best_acc = 0
for epoch in range(start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
train_loss, train_acc = train(trainset, model, criterion, optimizer, epoch, use_cuda, writer, args)
val_loss, val_acc = test(valset, model, criterion, epoch, use_cuda, args)
# Logging the data
writer.add_scalar("Train/Loss", train_loss, epoch)
writer.add_scalar("Val/Loss", val_loss, epoch)
writer.add_scalar("Train/acc", train_acc, epoch)
writer.add_scalar("Val/acc", val_acc, epoch)
# Saving model
is_best = val_acc > best_acc
best_acc = max(val_acc, best_acc)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'acc': val_acc,
'best_acc': best_acc,
'optimizer' : optimizer.state_dict(),
}, is_best, checkpoint=args.checkpoint)
test_loss, test_acc = test(testset, model, criterion, start_epoch, use_cuda, args)
print(' Final test loss: %.8f, test Acc: %.2f' % (test_loss, test_acc))
writer.flush()
writer.close()
if __name__ == '__main__':
main()
| 8,426 | 37.834101 | 107 | py |
N-JetNet | N-JetNet-main/models/nin.py | import torch.nn as nn
import math
import torch.nn.functional as F
__all__ = ['nin']
class NiN(nn.Module):
def __init__(self,
num_classes):
super(NiN, self).__init__()
self.classifier = nn.Sequential(
nn.Conv2d(3, 192, kernel_size=5, stride=1, padding=2, \
bias=False),
nn.BatchNorm2d(192),
nn.ReLU(inplace=True),
nn.Conv2d(192, 160, kernel_size=1, stride=1, padding=0, \
bias=False),
nn.BatchNorm2d(160),
nn.ReLU(inplace=True),
nn.Conv2d(160, 96, kernel_size=1, stride=1, padding=0, \
bias=False),
nn.BatchNorm2d(96),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
nn.Dropout(0.5),
nn.Conv2d(96, 192, kernel_size=5, stride=1, padding=2, \
bias=False),
nn.BatchNorm2d(192),
nn.ReLU(inplace=True),
nn.Conv2d(192, 192, kernel_size=1, stride=1, padding=0, \
bias=False),
nn.BatchNorm2d(192),
nn.ReLU(inplace=True),
nn.Conv2d(192, 192, kernel_size=1, stride=1, padding=0, \
bias=False),
nn.BatchNorm2d(192),
nn.ReLU(inplace=True),
nn.AvgPool2d(kernel_size=3, stride=2, padding=1),
nn.Dropout(0.5),
nn.Conv2d(192, 192, kernel_size=3, stride=1, padding=1, \
bias=False),
nn.BatchNorm2d(192),
nn.ReLU(inplace=True),
nn.Conv2d(192, 192, kernel_size=1, stride=1, padding=0, \
bias=False),
nn.BatchNorm2d(192),
nn.ReLU(inplace=True),
nn.Conv2d(192, num_classes, kernel_size=1, stride=1, \
padding=0, bias=False),
nn.BatchNorm2d(num_classes),
nn.ReLU(inplace=True),
nn.AvgPool2d(kernel_size=8, stride=1, padding=0)
)
self.num_classes = num_classes
self.extra_reg = 0
def forward(self, x):
x = self.classifier(x)
x = x.view(x.size(0), self.num_classes)
return x
def nin(**kwargs):
    """Factory for the standard NiN (M. Lin et. al., ICLR 2014)."""
    model = NiN(**kwargs)
    return model
| 2,535 | 31.512821 | 73 | py |
N-JetNet | N-JetNet-main/models/nin_shared_srf.py | import torch.nn as nn
import math
from srf.structured_conv_layer import *
__all__ = ['nin_shared_srf']
class NiN_shared_srf(nn.Module):
    # NiN variant whose three spatial convolutions are replaced by N-Jet
    # (structured receptive field) layers; the 1x1 convs stay standard.
    # Attribute names / registration order must stay stable for checkpoints.
    def __init__(self,
                 num_classes,
                 init_k,
                 init_order,
                 init_scale,
                 learn_sigma,
                 use_cuda):
        super(NiN_shared_srf, self).__init__()
        """ In our model we removed all padding and pooling """
        # Block 1: learned-scale SRF layer followed by 1x1 mlpconv stack.
        self.srf1 = Srf_layer_shared(
                        inC=3,
                        outC=192,
                        init_k=init_k,
                        init_order=init_order,
                        init_scale=init_scale,
                        learn_sigma=learn_sigma,
                        use_cuda=use_cuda)
        self.classifier1 = nn.Sequential(
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 160, kernel_size=1, stride=1, padding=0, \
                    bias=False),
            nn.BatchNorm2d(160),
            nn.ReLU(inplace=True),
            nn.Conv2d(160, 96, kernel_size=1, stride=1, padding=0, \
                    bias=False),
            nn.BatchNorm2d(96),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5)
            )
        # Block 2.
        self.srf2 = Srf_layer_shared(
                        inC=96,
                        outC=192,
                        init_k=init_k,
                        init_order=init_order,
                        init_scale=init_scale,
                        learn_sigma=learn_sigma,
                        use_cuda=use_cuda)
        self.classifier2 = nn.Sequential(
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=1, stride=1, padding=0, \
                    bias=False),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=1, stride=1, padding=0, \
                    bias=False),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5)
            )
        # Block 3: last 1x1 conv maps to class scores.
        self.srf3 = Srf_layer_shared(
                        inC=192,
                        outC=192,
                        init_k=init_k,
                        init_order=init_order,
                        init_scale=init_scale,
                        learn_sigma=learn_sigma,
                        use_cuda=use_cuda)
        self.classifier3 = nn.Sequential(
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=1, stride=1, padding=0, \
                    bias=False),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, num_classes, kernel_size=1, stride=1, padding=0, \
                    bias=False),
            nn.BatchNorm2d(num_classes),
            nn.ReLU(inplace=True),
            )
        self.num_classes = num_classes
        # Sum of the SRF regularizers; recomputed on every forward pass and
        # added to the loss by the training loop.
        self.extra_reg = 0
    def forward(self, x):
        x = self.srf1(x)
        x = self.classifier1(x)
        x = self.srf2(x)
        x = self.classifier2(x)
        x = self.srf3(x)
        x = self.classifier3(x)
        # Aggregate the sigma/alpha regularization terms of all SRF layers.
        self.extra_reg = self.srf1.extra_reg + self.srf2.extra_reg + \
            self.srf3.extra_reg
        # Global average pooling over the spatial dimensions.
        x = torch.mean(x, dim=(2,3))
        x = x.view(x.size(0), self.num_classes)
        return x
def nin_shared_srf(**kwargs):
    """Factory for the NiN variant built from N-Jet (SRF) layers."""
    model = NiN_shared_srf(**kwargs)
    return model
| 3,704 | 31.787611 | 81 | py |
N-JetNet | N-JetNet-main/pytorch_classification/dataset.py | import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.utils.data as data
import random
import numpy as np
class dataCIFAR:
    """Build a DataLoader over CIFAR-10/100 train, validation, or test data.

    The validation split is a deterministic 10% of the training set (shuffled
    with a fixed seed so train/val never overlap across runs).

    Improvements over the original: the two SubsetRandomSampler branches
    duplicated the DataLoader construction verbatim (only the index slice
    differed) and used non-idiomatic ``val==True`` comparisons.
    """
    def __init__(self, dataset, batch, train=True, val=True, workers=4):
        # A validation split only makes sense inside the training data.
        if val:
            assert train, 'val=True requires train=True'
        if dataset == 'cifar10':
            dataloader = datasets.CIFAR10
            self.num_classes = 10
        else:
            dataloader = datasets.CIFAR100
            self.num_classes = 100
        # Data transformations: augmentation for training, plain normalization
        # otherwise (same statistics in both cases).
        normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
        if train:
            transform = transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])
        else:
            transform = transforms.Compose([
                transforms.ToTensor(),
                normalize,
            ])
        dataset = dataloader(root='./data', train=train, download=True,
                             transform=transform)
        if train:
            # Deterministic shuffle so the val split is reproducible.
            indices = list(range(len(dataset)))
            np.random.RandomState(10).shuffle(indices)
            split = len(indices) // 10
            # First 10% -> validation, remaining 90% -> training.
            indices = indices[:split] if val else indices[split:]
            self.loader = data.DataLoader(dataset, batch_size=batch,
                                          sampler=data.sampler.SubsetRandomSampler(indices),
                                          num_workers=workers)
        else:
            self.loader = data.DataLoader(dataset, batch_size=batch,
                                          shuffle=train,
                                          num_workers=workers)
| 2,119 | 35.551724 | 89 | py |
N-JetNet | N-JetNet-main/pytorch_classification/train_and_test.py | from __future__ import print_function
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from pytorch_classification.utils.eval import AverageMeter, accuracy
from torchinfo import summary
# For tensorboard
from torch.utils.tensorboard import SummaryWriter
import torchvision
""" The training loop.
Input:
- trainset: train data with loader for the training,
- model: the network model,
- criterion: the loss criterion,
- optimizer: the optimizer used,
- epoch: the epoch used,
- use_cuda: use the GPU or not,
- writer: the tensorboard logger
"""
def train(trainset, model, criterion, optimizer, epoch, use_cuda, writer, args):
""" Use training mode"""
model.train()
""" Averaged training estimates"""
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
""" Loop over the training data """
for batch_idx, (inputs, targets) in enumerate(trainset.loader):
try:
inputs, targets = inputs.cuda(), targets.cuda()
except:
print("No cuda available")
inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)
""" Measure data loading time """
data_time.update(time.time() - end)
""" Get predictions and loss"""
outputs = model(inputs)
loss = criterion(outputs, targets) + args.weight_decay * model.module.extra_reg
""" Measure accuracy and loss """
prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
losses.update(loss.data.cpu().detach().numpy(), inputs.size(0))
top1.update(prec1, inputs.size(0))
top5.update(prec5, inputs.size(0))
""" Backward pass"""
optimizer.zero_grad()
loss.backward()
optimizer.step()
""" Measure the time for 1 training step"""
batch_time.update(time.time() - end)
end = time.time()
if batch_idx%50==0:
print("({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | "\
.format(
batch=batch_idx + 1,
size=len(trainset.loader),
data=data_time.avg,
bt=batch_time.avg)+
"Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}"\
.format(
loss=losses.avg,
top1=top1.avg,
top5=top5.avg))
return (losses.avg, top1.avg)
""" Running the test loop.
Input:
- testset: test set with data loader,
- model: the networks
- criterion: the loss criterion,
- epoch: the current epoch number,
- use_cuda: on GPU or not
"""
def test(testset, model, criterion, epoch, use_cuda, args):
""" Set the model to evaluate mode """
model.eval()
""" Averaged training estimates"""
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
""" Loop over the test data batches"""
end = time.time()
for batch_idx, (inputs, targets) in enumerate(testset.loader):
try:
inputs, targets = inputs.cuda(), targets.cuda()
except:
print("No cuda available")
inputs, targets = torch.autograd.Variable(inputs, volatile=True), torch.autograd.Variable(targets)
""" Measure the data loading time """
data_time.update(time.time() - end)
""" Get network predictions and loss"""
outputs = model(inputs)
loss = criterion(outputs, targets)
""" Estimate accuracy and loss """
prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
losses.update(loss.data.cpu().detach().numpy(), inputs.size(0))
top1.update(prec1, inputs.size(0))
top5.update(prec5, inputs.size(0))
""" Measure time per batch at test time """
batch_time.update(time.time() - end)
end = time.time()
if batch_idx%20==0:
print("({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | "\
.format(
batch=batch_idx + 1,
size=len(testset.loader),
data=data_time.avg,
bt=batch_time.avg)+
"Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}"\
.format(
loss=losses.avg,
top1=top1.avg,
top5=top5.avg))
return (losses.avg, top1.avg)
| 4,838 | 31.26 | 106 | py |
N-JetNet | N-JetNet-main/pytorch_classification/utils/eval.py | from __future__ import print_function, absolute_import
__all__ = ['accuracy']
"""
From https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
"""
class AverageMeter(object):
    """Track the latest value, sum, count, and running mean of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the mean."""
        self.val = val
        self.count += n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Compute precision@k (as a percentage) for each k in ``topk``."""
    batch_size = target.size(0)
    # One top-maxk lookup covers every requested k.
    _, pred = output.topk(max(topk), 1, True, True)
    pred = pred.t()
    # hits[r, b] is True when the rank-r prediction for sample b is correct.
    hits = pred.eq(target.view(1, -1).expand_as(pred))

    def pct(k):
        # Correct predictions within the top-k ranks, as a percentage.
        n_correct = hits[:k].reshape(-1).float().sum(0).cpu().detach().numpy()
        return n_correct * (100.0 / batch_size)

    return [pct(k) for k in topk]
| 967 | 24.473684 | 81 | py |
N-JetNet | N-JetNet-main/srf/structured_conv_layer.py | # Import general dependencies
import numpy as np
import math
import torch
from torch.autograd import Variable
import torch.nn as nn
from torchvision import transforms
from torch.autograd import Function
from torch.distributions import normal
from srf.gaussian_basis_filters import *
import torch.nn.functional as F
import time
""" The N-Jet convolutional-layer using a linear combination of
Gaussian derivative filters.
Inputs:
- inC: input channels
- outC: output channels
- init_k: the spatial extent of the kernels (default: 2)
- init_order: the order of the approximation (default: 3)
- init_scale: the initial starting scale, where: sigma=2^scale (default: 0)
- learn_sigma: whether sigma is learnable
- use_cuda: running on GPU or not
- groups: groups for the convolution (default: 1)
- ssample: if we subsample the featuremaps based on sigma (default: False)
"""
class Srf_layer_shared(nn.Module):
def __init__(self,
inC,
outC,
init_k,
init_order,
init_scale,
learn_sigma,
use_cuda,
groups=1,
ssample=False):
super(Srf_layer_shared, self).__init__()
self.init_k = init_k
self.init_order = init_order
self.init_scale = init_scale
self.inC = inC
self.ssample = ssample
assert(outC % groups == 0)
self.outC = outC
self.groups = groups
""" Define the number of basis based on order. """
F = int((self.init_order + 1) * (self.init_order + 2) / 2)
""" Create weight variables. """
self.use_cuda = use_cuda
self.device = torch.device("cuda" if use_cuda else "cpu")
self.alphas = torch.nn.Parameter(torch.zeros([F, int(inC/groups), outC], \
device=self.device), requires_grad=True)
""" Define the scale parameter. """
torch.nn.init.normal_(self.alphas, mean=0.0, std=1)
if learn_sigma:
self.scales = torch.nn.Parameter(torch.tensor(np.full((1), \
self.init_scale), device=self.device,\
dtype=torch.float32), requires_grad=True)
else:
self.scales = torch.nn.Parameter(torch.tensor(np.full((1), \
self.init_scale), device=self.device,\
dtype=torch.float32), requires_grad=False)
self.extra_reg = 0
self.sigma = torch.zeros((1,))
self.filtersize = torch.zeros((1,))
self.hermite = torch.Tensor()
self.x = torch.Tensor()
self.filters = torch.Tensor()
self.basis = torch.Tensor()
self.gauss = torch.Tensor()
""" Forward pass without inputs to return the filters only. """
def forward_no_input(self):
""" Define sigma from the scale: sigma = 2^scale """
self.sigma = 2.0**self.scales
self.filtersize = torch.ceil(self.init_k*self.sigma[0]+0.5)
""" Define the grid on which the filter is created. """
try:
self.x = torch.arange(start=-self.filtersize.detach().cpu().float(), \
end=self.filtersize.detach().cpu().float()+1, step=1)
self.hermite = torch.arange(start=-self.filtersize.detach().cpu().float(), \
end=self.filtersize.detach().cpu().float()+1, step=1)
except:
print("Sigma value is off:", self.sigma)
""" Create the Gaussian derivative filters. """
self.filters, self.basis, self.gauss, self.hermite = gaussian_basis_filters_shared(
x=self.x,\
hermite=self.hermite,\
order=self.init_order, \
sigma=self.sigma, \
alphas=self.alphas,\
use_cuda=self.use_cuda)
return self.filters
""" Forward pass with inputs: creates the filters and performs the convolution. """
def forward(self, data):
""" Define sigma from the scale: sigma = 2^scale """
self.sigma = 2.0**self.scales
self.filtersize = torch.ceil(self.init_k*self.sigma[0]+0.5)
""" Define the grid on which the filter is created. """
try:
self.x = torch.arange(start=-self.filtersize.detach().cpu().float(), \
end=self.filtersize.detach().cpu().float()+1, step=1)
self.hermite = torch.arange(start=-self.filtersize.detach().cpu().float(), \
end=self.filtersize.detach().cpu().float()+1, step=1)
except:
print("Sigma value is off:", self.sigma, "filter size:", self.filtersize)
""" Create the Gaussian derivative filters. """
self.filters, self.basis, self.gauss, self.hermite = gaussian_basis_filters_shared(
x=self.x,\
hermite=self.hermite,\
order=self.init_order, \
sigma=self.sigma, \
alphas=self.alphas,\
use_cuda=self.use_cuda)
""" Subsample based on sigma if wanted. """
if self.ssample:
data = safe_sample(data, self.sigma)
""" Perform the convolution. """
final_conv = F.conv2d(
input=data, # NCHW
weight=self.filters, # KCHW
bias=None,
stride=1,
padding=int(self.filters.shape[2]/2),
groups=self.groups)
self.extra_reg = (self.sigma[0] + torch.norm(self.alphas)).item()
return final_conv
""" List the parameters. """
def num_params(self):
return (sum(p.numel() for p in self.parameters() if p.requires_grad))
""" Subsampling of the featuremaps based on the learned sigma.
Input:
- current: input featuremap
- sigma: the learned sigma values
- r: the hyperparameter controlling how fast the subsampling goes as a function of sigma.
"""
def safe_sample(current, sigma, r=4.0):
update_val = max(1.0, torch.div(2**sigma, r))
shape = current.shape
shape_out = max([1,1], [int(float(shape[2])/update_val), \
int(float(shape[3])/update_val)])
current_out = F.interpolate(current, shape_out)
return current_out
| 7,051 | 40.97619 | 93 | py |
N-JetNet | N-JetNet-main/srf/gaussian_basis_filters.py | import torch
from scipy import ndimage
import math
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
""" Create the Gaussian derivative basis.
Input:
- x: the input grid
- hermite: a temporary variable (initialized as the grid x)
- order: the order of the basis
- sigma: the sigma of the Gaussian
- alphas: the coefficients for combining the basis
- use_cuda: on GPU or not
"""
def gaussian_basis_filters_shared(
x,
hermite,
order,
sigma,
alphas,
use_cuda):
basis_filters = []
basis_tensors = []
""" Define the 0th order Gaussian vector for the current scale. """
try:
x = x.cuda()
except:
print("No cuda available")
gauss = torch.div(1.0, (math.sqrt(2.0 * math.pi) * sigma[0])) \
* torch.exp( torch.div( x*x, (-2.0*sigma[0]*sigma[0])) )
gauss = gauss / torch.sum(gauss)
""" Define the rest of the Gaussian derivatives. """
basis = []
for i in range(0, int(order)+1):
basis_x, hermite = get_basis(x, i, gauss, sigma[0], hermite)
basis_x = torch.pow(sigma[0], i) * basis_x
for j in range(int(order)-i, -1, -1):
basis_y, hermite = get_basis(x, j, gauss, sigma[0], hermite)
basis_y = torch.pow(sigma[0], j) * basis_y
""" Create square filters from the 1D vectors. """
basis.append(torch.einsum("i,j->ij", basis_x, basis_y))
basis_tensor = torch.stack(basis, dim=0) # FHW
""" Create the filter by combining the basis with the coefficients, alpha."""
basis_filter = None
try:
# [out_channels,in_channels,kernel_size[0],kernel_size[1]]
basis_filter = torch.einsum("fck,fhw->kchw", alphas, basis_tensor)
except:
print("No alphas given")
return basis_filter, basis_tensor, gauss, hermite
""" Hermite, recursive implementation. It's slow and recursivity does not work well with pytorch.
Inputs:
- x: the input grid
- order: the order of the derivative
"""
def hermite_recursive(x, order): # Physicists hermite
assert(order>=0.0)
if order==0.0:
return (x * 0.0 + 1.0)
elif order==1.0:
# H{1}(x) = 2 x
return 2.0 * x
else:
# H{n}(x) = 2x H{n-1}(x) - 2(n-1) H{n-2}(x)
return 2.0*x*hermite_recursive(x, order-1.0) - 2.0*(order-1.0) \
* hermite_recursive(x, order-2.0)
""" 0-order Hermite polynomial. """
def hermite_0(x):
return (x*0.0+1.0)
""" 1-order Hermite polynomial. """
def hermite_1(x):
# H{1}(x) = x
return 2.0*x
""" 2-order Hermite polynomial. """
def hermite_2(x):
# H{2}(x) = 4 x^2 - 2
return (4.0*torch.pow(x,2.0) - 2.0)
""" 3-order Hermite polynomial. """
def hermite_3(x):
# H{3}(x) = 8 x^3 - 12x
return (8.0*torch.pow(x,3.0) - 12.0 * x)
""" 4-order Hermite polynomial. """
def hermite_4(x):
# H{4}(x) = 16 x^4 - 48 x^2 + 12
return (16.0*torch.pow(x,4.0) - 48.0*torch.pow(x,2.0) + 12.0)
""" 5-order Hermite polynomial. """
def hermite_5(x):
# H{5}(x) = 32 x^5 - 160 x^3 + 120 x
return (32.0*torch.pow(x,5.0) - 160.0*torch.pow(x,3.0) + 120.0*x)
""" 6-order Hermite polynomial. """
def hermite_6(x):
# H{6}(x) = 64 x^6 - 480 x^4 + 720 x^2 - 120
return (64.0*torch.pow(x,6.0) - 480.0*torch.pow(x,4.0) \
+ 720.0*torch.pow(x,2.0) - 120.0)
""" 7-order Hermite polynomial. """
def hermite_7(x):
# H{7}(x) = 128 x^7 - 1344 x^5 + 3360 x^3 - 1680 x
return (128.0*torch.pow(x,7.0) - 1344.0*torch.pow(x,5.0) \
+ 3360.0*torch.pow(x,3.0) - 1680.0*x)
""" 8-order Hermite polynomial. """
def hermite_8(x):
# H{8}(x) = 256 x^8 - 3584 x^6 + 13440 x^4 - 13440 x^2 + 1680
return (256.0*torch.pow(x,8.0) - 3584.0*torch.pow(x,6.0) \
+ 13440.0*torch.pow(x,4.0) - 13440.0*torch.pow(x,2.0) + 1680.0)
""" 9-order Hermite polynomial. """
def hermite_9(x):
# H{9}(x) = 512 x^9 - 9216 x^7 + 48384 x^5 - 80640 x^3 + 30240 x
return (512.0*torch.pow(x,9.0) - 9216.0*torch.pow(x,7.0) \
+ 48384.0*torch.pow(x,5.0) - 80640.0*torch.pow(x,3.0) \
+ 30240.0*x)
""" 10-order Hermite polynomial. """
def hermite_10(x):
# H{10}(x) = 1024 x^10 - 23040 x^8 - 161280 x^6 - 403200 x^4 \
# + 302400 x^2 - 30240
return (1024.0*torch.pow(x,10.0) - 23040.0*torch.pow(x,8.0) \
+ 161280.0*torch.pow(x,6.0) - 403200.0*torch.pow(x,4.0) \
+ 302400.0*torch.pow(x,2.0) - 30240.0)
""" Switching between the Hermite orders."""
switcher = {
0: hermite_0,
1: hermite_1,
2: hermite_2,
3: hermite_3,
4: hermite_4,
5: hermite_5,
6: hermite_6,
7: hermite_7,
8: hermite_8,
9: hermite_9,
10: hermite_10 }
""" Calls the Hermite polynomial computation of a certain order.
Input:
- x: the grid
- order: the order of the derivative.
"""
def get_hermite(x, order):
assert(order>=0.0)
try:
func = switcher.get(int(order))
except:
func = hermite_recursive
return func(x,order)
return func(x)
""" Get the Gaussian basis using Hermite polynomials.
Input:
- x: the grid
- order: the order of the derivative.
- sigma: The sigma of the Gaussian
- hermite: temporary variable, initialized as the grid.
"""
def get_basis(
x,
order,
gauss,
sigma,
hermite):
# dg^n / dx^n = ( -1/(sqrt(2)sigma) ) ^n H(x / (sqrt(2) sigma)) g
hermite = get_hermite(torch.div(x, math.sqrt(2.0)*sigma), order)
return torch.pow(torch.div(-1.0, math.sqrt(2.0) * sigma), order) \
* hermite * gauss, hermite
""" Plotting function for plotting the Gaussian derivatives
(Visually compared to the scipy derivatives).
Input:
- g_srf: the Gaussian basis
- sigma: The sigma of the Gaussian
- order: the order of the derivative.
- truncate: the spatial extent
- title: plot title
"""
def plot2g(g_srf, sigma, order, truncate=2, title=''):
filtersize = int(math.ceil(truncate*sigma+0.5))
x = np.zeros(shape=((2*filtersize+1),(2*filtersize+1)))
x[filtersize, filtersize] = 1
g_scipy = []
for i in range(0, int(order)+1):
for j in range(int(order)-i, -1, -1):
g_scipy.append(ndimage.filters.gaussian_filter(\
x, sigma=sigma, order=(i,j), truncate=truncate))
import matplotlib.pyplot as plt
for i in range(0, len(g_scipy)):
plt.subplot(1, 2, 1)
plt.title('SRF - '+str(i)+" "+title)
plt.imshow(g_srf[i].numpy())
plt.axis('off')
plt.subplot(1, 2, 2)
plt.title('Scipy - '+str(i)+" "+title)
plt.imshow(g_scipy[i])
plt.axis('off')
plt.show()
| 6,915 | 28.181435 | 97 | py |
N-JetNet | N-JetNet-main/srf/tests/test_basis.py | import torch
import scipy
import math
import numpy as np
import matplotlib.pyplot as plt
import argparse
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import sys
sys.path.append("../")
from gaussian_basis_filters import *
def main():
    # Compare the analytically-built SRF Gaussian-derivative basis against
    # scipy's reference implementation (visual check via plot2g).
    # Training settings
    parser = argparse.ArgumentParser()
    parser.add_argument('--use-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--order', type=int, default=2, \
                        help='Derivatives order')
    parser.add_argument('--sigma', type=float, default=1.0, \
                        help='Derivatives sigma')
    parser.add_argument('--k', type=float, default=2.0, \
                        help='Spatial extent')
    args = parser.parse_args()
    torch.manual_seed(args.seed)
    """ The order of the Gaussian basis. """
    # Triangular number: one basis filter per (i, j) with i + j <= order.
    F = int((args.order + 1) * (args.order + 2) / 2)
    """ Get the Gaussian derivative filters. """
    filtersize = math.ceil(args.k*args.sigma+0.5)
    x = torch.arange(start=-filtersize,end=filtersize+1,step=1)
    hermite = x
    # Unit alphas: we only want the raw basis (second return value).
    _, g_srf, _,_ = gaussian_basis_filters_shared(
        x=x,
        hermite=hermite,
        order=args.order,
        sigma=torch.tensor([args.sigma], requires_grad=False),
        alphas=torch.tensor(np.ones([F,1,1]), dtype=torch.float32),
        use_cuda=args.use_cuda)
    plot2g(g_srf, args.sigma, args.order, truncate=args.k)
if __name__ == '__main__':
    main()
| 1,728 | 31.018519 | 75 | py |
N-JetNet | N-JetNet-main/srf/tests/test_alexnet.py | import torch
import scipy
import math
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim as optim
import sys
sys.path.append("../")
sys.path.append("../../")
from gaussian_basis_filters import *
from structured_conv_layer import *
import argparse
""" Plot the pretrained filters and their approximation.
Input:
- normal: the pretrained filters
- structured: the N-Jet approximated filters
- errors: the errors per filter
- scales: the learned N-Jet scale
- alphas: the learned N-Jet coefficients
- cols: maximum suplots columns
- lattice_size: default 1.0
"""
def plot_comparison(
    normal,
    structured,
    errors,
    scales,
    alphas,
    cols=12,
    lattice_size=1.0):
    """Plot pretrained filters above their N-Jet approximations.

    Each filter gets one grid column: the upper cell shows the pretrained
    filter, the cell directly below shows its approximation. Per-filter
    statistics are printed to stdout and the matplotlib figure is returned.
    Note: ``lattice_size`` is accepted but never used in this function.
    """
    # [K, C, H, W] --> [H W C K]
    structured = np.transpose(structured, (2, 3, 1, 0))
    N = normal.shape[3]
    C = normal.shape[2]
    assert(C == 3) # plotting below assumes RGB, i.e. exactly 3 input channels
    # A falsy cols requests a roughly square grid.
    if not cols:
        cols = int(np.ceil(np.sqrt(N)))
    cols = min(cols, N)
    rows = int(np.ceil(N / cols))
    """ Define the plotting grid """
    fig, axes = plt.subplots(2*rows, cols, squeeze=False)
    fig.suptitle('Average L2 approximation error '+str(errors.mean()))
    i = 0
    for row in range(rows):
        for col in range(cols):
            axes[2*row, col].axis('off')
            axes[2*row+1, col].axis('off')
            # Grid cells beyond the last filter stay blank.
            if i >= N: continue;
            """ Rescale pixel intensities for visualization"""
            image = (normal[:, :, :, i] - normal[:, :, :, i].min())\
                    /(normal[:, :, :, i].max() - normal[:, :, :, i].min())
            axes[2*row, col].imshow(image)
            """ Rescale pixel intensities for visualization"""
            image = (structured[:, :, :, i] - structured[:, :, :, i].min())\
                    /(structured[:, :, :, i].max() - structured[:, :, :, i].min())
            axes[2*row+1, col].imshow(image)
            print("Filter {:3d} - error = {:.5f}, \
            mean sigma = {:.3f}, std sigma = {:.3f}, \
            mean alphas = {:+.3f}, std alphas = {:.3f}".format(i, \
            errors[i], scales[i].mean(), scales[i].std(), \
            alphas[i, :, :].mean(), alphas[i, :, :].std()))
            i += 1
    return fig
""" MSE loss between the pretrained filter and the N-Jet approximation.
Input:
- normal: pretrained filters (1 channel)
- structured: the N-Jet filter
"""
def compute_mse(normal, structured):
    """Mean-squared error between pretrained filters and their N-Jet fit.

    The structured (N-Jet) filters may differ in spatial size from the
    pretrained ones; each structured filter is center-cropped or zero-padded
    so the two stacks align before the loss is taken. Assumes square filters
    (H == W), matching the original implementation.

    Args:
        normal: pretrained filter bank, shape [K, C, H, W].
        structured: N-Jet filter bank, shape [K, C, H', W'].

    Returns:
        (loss, structured_tensor): scalar mean MSE and the size-matched
        structured filters with the same shape as ``normal``.
    """
    resized = []
    for idx in range(structured.shape[0]):
        one = structured[idx:idx + 1, :, :, :]
        size_diff = structured.shape[2] - normal.shape[2]
        if size_diff > 0:
            # Structured filter is larger: center-crop it down.
            margin = int(size_diff / 2)
            one = one[:, :, margin:structured.shape[2] - margin,
                      margin:structured.shape[2] - margin]
        elif size_diff < 0:
            # Structured filter is smaller: zero-pad it up.
            pad = int(abs(size_diff) / 2)
            one = F.pad(one, (pad, pad, pad, pad, 0, 0), 'constant', 0)
        resized.append(one)

    # Stack [1, C, H, W] pieces along a new axis and drop the leading 1:
    # result is [K, C, H, W], matching ``normal``.
    structured_tensor = torch.squeeze(torch.stack(resized, dim=1), dim=0)

    loss_fn = nn.MSELoss(reduction='mean')
    loss = loss_fn(normal, structured_tensor)
    return loss, structured_tensor
"""
Performs pretrained filter approximation using N-Jet filters.
Input:
- normal_filters: the filter to approximate (1 channel)
- init_order: the N-Jet order
- init_scale: the N-Jet scale initialization (default: 0)
- init_k: the N-Jet filter spatial extent (default: 2)
- learn_sigma: default True
- max_steps: training steps to take
- lr: learning rate used
- weight_decay: weights regularization
- use_cuda: on the GPU or not
Output:
structured_filters [H W C K]
errors [N]
scales [C, N]
alphas [C, F, N]
"""
def approximate_normal_filters(
    normal_filters,
    init_order,
    init_scale,
    init_k,
    learn_sigma,
    max_steps,
    lr,
    weight_decay,
    use_cuda):
    """Fit an N-Jet (structured) layer to a bank of pretrained filters.

    Trains the layer's scales and coefficients with Adam so that its
    generated filters minimize the MSE against the (standardized)
    pretrained filters.

    Args:
        normal_filters: pretrained filters, shape [H, W, C, K].
        init_order: order of the N-Jet basis.
        init_scale: initial N-Jet scale.
        init_k: spatial extent multiplier of the N-Jet filters.
        learn_sigma: whether sigma is trainable.
        max_steps: number of optimization steps (loop runs max_steps - 1).
        lr: Adam learning rate.
        weight_decay: Adam weight decay.
        use_cuda: run on GPU if True.

    Returns:
        (structured_tensor, loss, scales, alphas): size-matched fitted
        filters, final MSE, and the learned layer parameters.
    """
    # One structured filter per output channel of the pretrained bank.
    srf = Srf_layer_shared(
        inC=normal_filters.shape[2],
        outC=normal_filters.shape[3],
        init_k=init_k,
        init_order=init_order,
        init_scale=init_scale,
        learn_sigma=learn_sigma,
        use_cuda=use_cuda)
    if use_cuda:
        srf = srf.cuda()

    optimizer = optim.Adam(
        srf.parameters(),
        lr=lr,
        weight_decay=weight_decay)

    # [H, W, C, K] -> [K, C, H, W]: the layout the N-Jet layer produces.
    device = torch.device("cuda" if use_cuda else "cpu")
    targets = torch.tensor(np.transpose(normal_filters, (3, 2, 0, 1)),
                           dtype=torch.float32, device=device)
    # Standardize the target so the fit is invariant to scale/offset.
    targets = (targets - targets.mean()) / (targets.std())

    for _ in range(1, max_steps):
        fitted = srf.forward_no_input()
        step_loss, _ = compute_mse(targets, fitted)
        optimizer.zero_grad()
        step_loss.backward()
        optimizer.step()

    # Final evaluation with the trained parameters.
    fitted = srf.forward_no_input()
    final_loss, structured_tensor = compute_mse(targets, fitted)
    return structured_tensor, final_loss, srf.scales, srf.alphas
""" The script implementing the test: selects a few channels of an alexnet pretrained
filter and approximates it with the N-Jet filters.
Inputs:
- args: the optimizer arguments
- use_cuda: if on the GPU or not
- channelNo: the input channel number
- rangeStart: the selected starting channel
- rangeEnd: the select end channel
"""
def run_test(args, use_cuda, channelNo=0, rangeStart=0,
             rangeEnd=5):
    """Approximate a slice of AlexNet's first-layer filters with N-Jet
    filters and plot originals next to their approximations.

    Args:
        args: parsed CLI options (init_order, init_scale, init_k,
            learn_sigma, lr, weight_decay, epochs).
        use_cuda: run the fit on GPU if True.
        channelNo: first input channel of the 3-channel slice to fit.
        rangeStart: first output-filter index to process.
        rangeEnd: one past the last output-filter index to process.
    """
    # First conv layer of AlexNet: 11x11x3x96.
    weights = np.load(open("alexnet_conv1.npy", "rb"), encoding="latin1",
                      allow_pickle=True)

    originals = []
    approximations = []
    fit_errors = []
    fit_scales = []
    fit_alphas = []

    for idx in range(rangeStart, rangeEnd):
        target = weights[0][:, :, channelNo:channelNo+3, idx:idx+1]
        # Fit one N-Jet filter to this pretrained filter slice.
        fitted, err, sigmas, coeffs = approximate_normal_filters(
            normal_filters=target,
            init_order=args.init_order,
            init_scale=args.init_scale,
            init_k=args.init_k,
            learn_sigma=args.learn_sigma,
            lr=args.lr,
            weight_decay=args.weight_decay,
            max_steps=args.epochs,
            use_cuda=use_cuda)
        originals.append(weights[0][:, :, :, idx:idx+1])
        approximations.append(fitted.detach().cpu().numpy())
        fit_errors.append(err.detach().cpu().numpy())
        fit_scales.append(sigmas.detach().cpu().numpy())
        fit_alphas.append(coeffs.detach().cpu().numpy())

    # Collect everything into arrays for plotting.
    normal = np.stack(originals, axis=3)
    structured = np.squeeze(np.stack(approximations, axis=0), axis=1)
    errors = np.stack(fit_errors, axis=0)
    scales = np.stack(fit_scales, axis=0)
    alphas = np.stack(fit_alphas, axis=0)
    fig = plot_comparison(
        weights[0][:, :, :, rangeStart:rangeEnd],
        structured,
        errors,
        scales,
        alphas)
    fig.savefig('conv1-order-'+str(args.init_order)+'.pdf',
                bbox_inches='tight', pad_inches=0)
    plt.show()
""" Main loop calling the test. """
def main():
    """Parse CLI settings, seed torch, and launch the AlexNet
    approximation test."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--no-cuda', action='store_true', default=False,
                     help='disables CUDA training')
    cli.add_argument('--seed', type=int, default=0, metavar='S',
                     help='random seed (default: 1)')
    cli.add_argument('--init_order', type=int, default=3,
                     help='Derivatives order')
    cli.add_argument('--init_scale', type=float, default=0.0,
                     help='If not lernable')
    cli.add_argument('--init_k', type=float, default=2.0,
                     help='Spatial extent')
    cli.add_argument('--learn_sigma', action='store_true', default=True,
                     help='If we learn of fix sigma')
    cli.add_argument('--epochs', type=int, default=1000, metavar='N',
                     help='number of epochs to train (default: 10)')
    cli.add_argument('--optim', type=str, default="adam", metavar='O',
                     help='the optimizer choice')
    cli.add_argument('--lr', type=float, default=1.0e-1, metavar='LR',
                     help='learning rate')
    cli.add_argument('--weight-decay', type=float, default=0.0,
                     metavar='W', help='the weight decay')
    args = cli.parse_args()

    torch.manual_seed(args.seed)
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    run_test(args, use_cuda)
if __name__ == '__main__':
main()
| 9,885 | 34.689531 | 86 | py |
S2AFF | S2AFF-main/s2aff/model.py | import os
import torch
# have to do this to avoid weird bugs
os.environ["TOKENIZERS_PARALLELISM"] = "false"
torch.multiprocessing.set_sharing_strategy("file_system")
import gc
import numpy as np
import lightgbm as lgb
import kenlm
from s2aff.text import fix_text
from s2aff.features import make_lightgbm_features, parse_ror_entry_into_single_string_lightgbm, FEATURE_NAMES
from simpletransformers.ner import NERModel, NERArgs
from blingfire import text_to_words
from s2aff.consts import PATHS
from s2aff.text import fix_text, CERTAINLY_MAIN
FEATURE_NAMES = list(FEATURE_NAMES)
def parse_ner_prediction(ner_prediction, ror_index):
    """Parse the NER prediction that comes out of a NERPredictor
    into main entity, child entities, address, and early candidates for the
    next retrieval stage.

    Args:
        ner_prediction (list of dicts): Output of a NERPredictor.predict. Looks like this:
            [{'Chinese': 'I-MAIN'},
            {'Academy': 'I-MAIN'},
            {'of': 'I-MAIN'},
            {'Sciences': 'I-MAIN'},
            {'PRC': 'I-ADDRESS'}]
            Note that it's one prediction and not a list of predictions.
        ror_index: RORIndex with `ror_name_direct_lookup` and
            `ror_address_counter` lookup tables.

    Returns:
        main (list[str]): Main entities of the raw affiliation string.
        child (list[str]): Child entities of the raw affiliation string.
        address (list[str]): Address parts of the raw affiliation string.
        early_candidates (list[str]): candidates based off child entities that
            appear in the direct ROR look-up table.
    """
    # Accumulate words into per-role strings; "BEGIN" is inserted as a
    # sentinel whenever a new entity of the same role starts, so the strings
    # can later be split back into separate entities.
    main = ""
    child = ""
    main_child = ""
    address = ""
    previous_tag_suffix = "not a tag"
    for d in ner_prediction:
        word, tag = list(d.items())[0]
        # if a second affiliation starts, we'll just take the first one only
        # TODO: actually return the second affiliation?
        if "SPLIT" in tag and word == "and":  # sometimes the model decides other words are split words
            break
        elif "SPLIT" in tag:
            tag = "O"
        if tag != "O":
            tag_suffix = tag.split("-")[1]
            # An entity begins on a B- tag or when the role suffix changes.
            begin = tag.startswith("B") or (previous_tag_suffix != tag_suffix)
            if word != ",":
                word_to_append = word + " "
            else:
                word_to_append = ""
            if "MAIN" in tag or "CHILD" in tag:
                # main_child keeps MAIN+CHILD words together as a fallback
                # in case no MAIN entity is ever found.
                main_child += word_to_append
                if "MAIN" in tag:
                    main += ("BEGIN" if begin else "") + word_to_append
                else:
                    child += ("BEGIN" if begin else "") + word_to_append
            elif "ADDRESS" in tag:
                address += ("BEGIN" if begin else "") + word_to_append
            previous_tag_suffix = tag_suffix
        else:
            previous_tag_suffix = "O"  # this is how we break up multiple continuous children
    # Split the accumulated strings on the BEGIN sentinel into entity lists,
    # falling back to main_child and then address when no MAIN was tagged.
    address = [i.strip() for i in address.split("BEGIN") if len(i) > 1]
    if len(main) > 0:
        main = [i.strip() for i in main.split("BEGIN") if len(i) > 1]
        child = [i.strip() for i in child.split("BEGIN") if len(i) > 1]
    elif len(main_child) > 0:
        main = [main_child.strip()]
        child = []
    elif len(address) > 0:
        main = address
        child = []
        address = []
    # check if any of the children have a "main" word and move to main
    child_new = []
    for i in child:
        child_split = set(i.split())
        if len(child_split.intersection(CERTAINLY_MAIN)) > 0:
            main.append(i)
        else:
            child_new.append(i)
    # we have direct look-up tables for both grid names and addresses
    # here we: check the children. if any appear in grid names lookup
    # we can add to early grid candidates
    early_candidates = []
    for c in child_new:
        c = c.lower()
        if c in ror_index.ror_name_direct_lookup:
            early_candidates.extend(list(ror_index.ror_name_direct_lookup[c]))
    # if something that is a main appears in grid addresses lookup
    # but not in the direct name look up
    # it is a sign that main was possibly incorrectly extracted.
    # To mitigate, we (a) remove these from the mains and
    # (b) add the children into mains to supplement
    main_in_address = False
    mains_to_remove = set()
    for m in main:
        if m.lower() in ror_index.ror_address_counter:
            main_in_address = True
            if m.lower() not in ror_index.ror_name_direct_lookup:
                mains_to_remove.add(m)
    if main_in_address:
        main.extend(child_new)
    # if a main is in the address lookup AND not in name lookup
    # we remove it and swap into address and children. it's probably just a mislabeled address
    for m in mains_to_remove:
        main.remove(m)
        if m not in child_new:
            child_new.append(m)
        if m not in address:
            address.append(m)
    # we might not have a main still. it just becomes child or address
    if len(main) == 0:
        if len(child_new) > 0:
            main = child_new
            child_new = []
        elif len(address) > 0:
            main = address
            address = []
    # join it all together
    return main, child_new, address, early_candidates
class NERPredictor:
    """Named Entity Recognition for affiliation strings.

    Uses SimpleTransformers under the hood. Tags each word of an
    affiliation string with roles such as MAIN, CHILD, ADDRESS.
    """

    def __init__(self, model_path=PATHS["ner_model"], model_type="roberta", use_cuda=True):
        """Create the predictor and, if a path is given, load the model.

        Args:
            model_path (str, optional): Location of the saved NER model.
                Pass None to skip loading. Defaults to PATHS["ner_model"].
            model_type (str, optional): Model type such as roberta or bert.
                Defaults to "roberta".
            use_cuda (bool, optional): Run on GPU if available. Defaults to True.
        """
        self.model_path = model_path
        self.model_type = model_type
        self.use_cuda = use_cuda
        if self.model_path is not None:
            self.load_model(self.model_path, self.model_type)

    def load_model(self, model_path=PATHS["ner_model"], model_type="roberta"):
        """Load a model from disk.

        model_path (str, optional): Location of the saved NER model.
            Should be what is saved by SimpleTransformers NERModel. Defaults to PATHS["ner_model"].
        model_type (str, optional): Model type such as roberta or bert.
            If you don't know, check the config.json in the model directory. Defaults to "roberta".
        """
        self.model = NERModel(
            model_type,
            model_path,
            use_cuda=self.use_cuda,
            args={
                "use_multiprocessing": False,
                "use_multiprocessing_for_evaluation": False,
                "process_count": 1,
                "eval_batch_size": 8,
            },
        )

    def save_model(self, model_path=PATHS["ner_model"]):
        """Save model to disk.

        Args:
            model_path (str, optional): Where to save the model.
                Defaults to PATHS["ner_model"].
        """
        self.model.save_model(output_dir=model_path, model=self.model)

    def delete_model(self):
        """Clears the model from GPU memory."""
        del self.model
        gc.collect()
        torch.cuda.empty_cache()

    def fit(
        self,
        df_train,
        df_validation=None,
        model_type="roberta",
        model_name="roberta-large",
        num_train_epochs=3,
        learning_rate=1e-5,
    ):
        """Fit the NER model.

        Uses SimpleTransformers under the hood.

        Args:
            df_train (pd.DataFrame): Training data. Assumes data has already been preprocessed
                with `fix_text` and tokenized with `blingfire.text_to_words`, and then shaped
                into a dataframe with 3 columns: ['sentence_id', 'words', 'labels'].
            df_validation (pd.DataFrame): Same as above but for validation. None by default.
            model_type (str, optional): Model type such as roberta or bert. Defaults to "roberta".
            model_name (str, optional): Specific model name of the model_class. Defaults to "roberta-large".
            num_train_epochs (int, optional): Number of training epochs. Defaults to 3.
            learning_rate (float, optional): Learning rate. Defaults to 1e-5.

        Returns:
            result_vl: Validation metrics provided by SimpleTransformers, or
                None when no validation data is given.
        """
        # most of these are not part of the input params, but they could be if we wanted
        model_args = NERArgs()
        model_args.scheduler = "linear_schedule_with_warmup"
        model_args.num_train_epochs = num_train_epochs
        model_args.learning_rate = learning_rate
        model_args.hidden_dropout_prob = 0.3
        model_args.attention_probs_dropout_prob = 0.3
        model_args.evaluate_during_training = False
        model_args.reprocess_input_data = True
        model_args.overwrite_output_dir = True
        model_args.manual_seed = 4
        model_args.train_batch_size = 32
        model_args.eval_batch_size = 128
        model_args.use_multiprocessing = False
        model_args.use_multiprocessing_for_evaluation = False
        model_args.process_count = 1

        # train and save model
        custom_labels = list(set(df_train.labels))
        model = NERModel(model_type, model_name, labels=custom_labels, args=model_args, use_cuda=self.use_cuda)
        model.train_model(df_train)
        # NOTE(review): this stores the inner torch module, while load_model
        # stores the NERModel wrapper that eval()/predict() expect — confirm
        # this is intentional before calling eval/predict on a freshly fit model.
        self.model = model.model
        if df_validation is not None:
            result_vl, _ = self.eval(df_validation)
        else:
            result_vl = None
        return result_vl

    def eval(self, df):
        """Evaluate the model on a labeled dataframe; returns (metrics, outputs)."""
        result, model_outputs, _ = self.model.eval_model(df)
        return result, model_outputs

    def predict(self, texts):
        """Do NER on input affiliation string(s).

        Args:
            texts (str or list[str]): Affiliation string. Can be a list of strings, in which case
                the output will be a list of predictions.

        Returns:
            predictions: List of dicts if you pass in a single string. Looks like this:
                [{'Chinese': 'I-MAIN'},
                {'Academy': 'I-MAIN'},
                {'of': 'I-MAIN'},
                {'Sciences': 'I-MAIN'},
                {'PRC': 'I-ADDRESS'}]
                and will be a list of such predictions if the input is a list of strings.
        """
        # isinstance instead of type(...) == str so str subclasses work too.
        if isinstance(texts, str):
            texts = [texts]
            input_was_single_text = True
        else:
            input_was_single_text = False
        texts = [text_to_words(fix_text(text)) for text in texts]
        predictions = self.model.predict(texts)[0]  # [1] are the scores
        if input_was_single_text:
            return predictions[0]
        else:
            return predictions
class PairwiseRORLightGBMReranker:
    """Reranks candidate ROR records for a raw affiliation string.

    Scores each (affiliation, ROR entry) pair with a LightGBM model over
    hand-crafted text-match features, using a KenLM language model for some
    of the features, then adjusts and sorts the scores.
    """

    def __init__(
        self, ror_index, model_path=PATHS["lightgbm_model"], kenlm_model_path=PATHS["kenlm_model"], num_threads=0
    ):
        """Load the LightGBM booster and KenLM language model.

        Args:
            ror_index: RORIndex used to render candidate entries into text.
            model_path (str, optional): Path to the LightGBM model file.
            kenlm_model_path (str, optional): Path to the KenLM model file.
            num_threads (int, optional): Threads for LightGBM prediction;
                0 lets LightGBM decide.
        """
        self.ror_index = ror_index
        self.model_path = model_path
        self.kenlm_model_path = kenlm_model_path
        self.load_model(model_path)
        self.lm = kenlm.LanguageModel(kenlm_model_path)
        self.num_threads = num_threads
        # Feature columns that indicate whether any name/acronym text matched;
        # an all-zero row here means the candidate matched nothing textual.
        self.inds_to_check = [
            FEATURE_NAMES.index("names_frac_of_query_matched_in_text"),
            FEATURE_NAMES.index("acronyms_frac_of_query_matched_in_text"),
        ]
        self.city_ind = FEATURE_NAMES.index("city_frac_of_query_matched_in_text")

    def load_model(self, model_path=PATHS["lightgbm_model"]):
        """Load the LightGBM booster from disk.

        Args:
            model_path (str, optional): Path to the saved LightGBM model file.
                Defaults to PATHS["lightgbm_model"].
        """
        self.model = lgb.Booster(model_file=model_path)

    def save_model(self, model_path=PATHS["lightgbm_model"]):
        """Save the LightGBM booster to disk.

        Args:
            model_path (str, optional): Destination filename for the model.
                Defaults to PATHS["lightgbm_model"].
        """
        # Fix: lgb.Booster.save_model takes the filename as its first
        # positional argument; the previous output_dir=/model= keywords
        # (copied from the NER model) raised a TypeError.
        self.model.save_model(model_path)

    def delete_model(self):
        """Clears the model and reclaims memory."""
        del self.model
        gc.collect()

    def predict(self, raw_affiliation, candidates, scores):
        """
        Given a list of candidates that are ROR ids, re-rank them using the trained model.

        Args:
            raw_affiliation (str): Raw affiliation string.
            candidates (list[str]): List of candidate ROR ids.
            scores (list[float]): First-stage retrieval scores, one per candidate.

        Returns:
            reranked_candidates (np.array[str]): Array of candidate ROR ids
            reranked_scores (np.array[float]): Array of candidate scores
        """
        fixed_affiliation_string = fix_text(raw_affiliation).lower().replace(",", "")
        X = []
        for i, s in zip(candidates, scores):
            ror_entry = parse_ror_entry_into_single_string_lightgbm(i, self.ror_index)
            x = make_lightgbm_features(fixed_affiliation_string, ror_entry, self.lm)
            # Last three features: first-stage score plus flags for its two
            # sentinel values.
            x[-3:] = [s, int(s == -0.15), int(s == -0.1)]
            X.append(x)
        X = np.array(X)
        scores = self.model.predict(X, num_threads=self.num_threads)
        # penalty when no match across fields
        has_no_match = X[:, self.inds_to_check].sum(1) == 0
        scores -= 0.05 * has_no_match  # magic number!
        # small bonus for matching the city
        scores += 0.05 * X[:, self.city_ind]
        scores_argsort = np.argsort(scores)[::-1]
        reranked = np.vstack([np.array(candidates), scores]).T[scores_argsort]
        return reranked[:, 0], reranked[:, 1].astype(float)
| 13,588 | 36.852368 | 113 | py |
S2AFF | S2AFF-main/s2aff/timo/interface.py | """
This file contains the classes required by Semantic Scholar's
TIMO tooling.
You must provide a wrapper around your model, as well
as a definition of the objects it expects, and those it returns.
"""
from typing import List
from os.path import join, basename
import torch
from pydantic import BaseModel, BaseSettings, Field
from s2aff import S2AFF
from s2aff.ror import RORIndex
from s2aff.model import NERPredictor, PairwiseRORLightGBMReranker
from s2aff.consts import PATHS
class Instance(BaseModel):
    """
    Describes one Instance over which the model performs inference.

    For S2AFF an instance is simply the raw, unnormalized affiliation
    string as it appears on a paper.

    To learn more about declaring pydantic model fields, please see:
    https://pydantic-docs.helpmanual.io/
    """

    raw_affiliation: str = Field(description="Raw affiliation string")
class Prediction(BaseModel):
    """
    Describes the outcome of inference for one Instance.

    Carries the top-ranked ROR match (id, display name, reranker score)
    together with the NER decomposition of the input affiliation string.

    To learn more about declaring pydantic model fields, please see:
    https://pydantic-docs.helpmanual.io/
    """

    ror_id: str = Field(description="ROR id for the top result")
    display_name: str = Field(description="Standardized name for the top result")
    score: float = Field(description="Score from the LightGBM stage model for the top result")
    main: List[str] = Field(description="Main affiliation strings from NER step")
    child: List[str] = Field(description="Child affiliation strings from NER step")
    address: List[str] = Field(description="Address affiliation strings from NER step")
class PredictorConfig(BaseSettings):
    """
    Configuration required by the model to do its work.
    Uninitialized fields will be set via Environment variables.

    S2AFF currently declares no configuration fields; this empty settings
    class exists to satisfy the TIMO predictor interface.
    """
class Predictor:
    """
    Interface on to your underlying model.

    Instantiated at application startup as a singleton. Loads the three
    S2AFF stages (NER model, ROR index, LightGBM reranker) from the
    extracted `artifacts_dir` and exposes batch prediction.
    """

    _config: PredictorConfig
    _artifacts_dir: str

    def __init__(self, config: PredictorConfig, artifacts_dir: str):
        self._config = config
        self._artifacts_dir = artifacts_dir
        self._load_model()

    def _load_model(self) -> None:
        """
        Perform whatever start-up operations are required to get your
        model ready for inference. This operation is performed only once
        during the application life-cycle.
        """
        # Artifacts are addressed inside artifacts_dir by the basename of
        # their canonical PATHS entry.
        ner_predictor = NERPredictor(
            model_path=join(self._artifacts_dir, basename(PATHS["ner_model"])), use_cuda=torch.cuda.is_available()
        )
        ror_index = RORIndex(
            ror_data_path=join(self._artifacts_dir, basename(PATHS["ror_data"])),
            country_info_path=join(self._artifacts_dir, basename(PATHS["country_info"])),
            works_counts_path=join(self._artifacts_dir, basename(PATHS["openalex_works_counts"])),
        )
        pairwise_model = PairwiseRORLightGBMReranker(
            ror_index,
            model_path=join(self._artifacts_dir, basename(PATHS["lightgbm_model"])),
            kenlm_model_path=join(self._artifacts_dir, basename(PATHS["kenlm_model"])),
        )
        self.s2aff = S2AFF(ner_predictor, ror_index, pairwise_model)

    @staticmethod
    def convert_raw_prediction_to_Prediction(prediction) -> Prediction:
        """Convert one raw S2AFF prediction dict into a Prediction model."""
        if len(prediction["stage2_candidates"]) == 0:
            # Fix: the previous empty-result construction omitted the required
            # `display_name` field and passed "" strings for the List[str]
            # NER fields, which failed pydantic validation.
            prediction_instance = Prediction(
                ror_id="",
                display_name="",
                score=0.0,
                main=prediction.get("main_from_ner", []),
                child=prediction.get("child_from_ner", []),
                address=prediction.get("address_from_ner", []),
            )
        else:
            prediction_instance = Prediction(
                ror_id=prediction["stage2_candidates"][0],
                display_name=prediction["top_candidate_display_name"],
                score=prediction["stage2_scores"][0],
                main=prediction["main_from_ner"],
                child=prediction["child_from_ner"],
                address=prediction["address_from_ner"],
            )
        return prediction_instance

    def predict_batch(self, instances: List[Instance]) -> List[Prediction]:
        """
        Method called by the client application. One or more Instances will
        be provided, and the caller expects a corresponding Prediction for
        each one.

        S2AFF handles its own batching internally, so the whole batch is
        passed through in one call.

        The size of the batches passed into this method is configurable
        via environment variable by the calling application.
        """
        predictions = self.s2aff.predict([i.raw_affiliation for i in instances])
        return [self.convert_raw_prediction_to_Prediction(i) for i in predictions]
| 5,308 | 36.652482 | 114 | py |
GTA-RL | GTA-RL-master/reinforce_baselines.py | import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from scipy.stats import ttest_rel
import copy
from train import rollout, get_inner_model
class Baseline(object):
    """Interface for REINFORCE baselines.

    All hooks default to pass-throughs / no-ops so concrete baselines only
    override what they need; only `eval` must be implemented.
    """

    def wrap_dataset(self, dataset):
        # By default the training dataset is used unchanged.
        return dataset

    def unwrap_batch(self, batch):
        # Returns (data, precomputed baseline values); none by default.
        return batch, None

    def eval(self, x, c):
        raise NotImplementedError("Override this method")

    def get_learnable_parameters(self):
        # No trainable parameters unless a subclass adds some.
        return []

    def epoch_callback(self, model, epoch):
        pass

    def state_dict(self):
        return {}

    def load_state_dict(self, state_dict):
        pass
class WarmupBaseline(Baseline):
    """Blends a wrapped baseline in gradually over the first `n_epochs`.

    While warming up, an exponential moving-average baseline supplies the
    estimate; the mixing weight `alpha` moves from 0 (pure warmup baseline)
    to 1 (pure wrapped baseline) as `epoch_callback` advances the epoch.
    """

    def __init__(self, baseline, n_epochs=1, warmup_exp_beta=0.8):
        super().__init__()
        self.baseline = baseline
        assert n_epochs > 0, "n_epochs to warmup must be positive"
        self.warmup_baseline = ExponentialBaseline(warmup_exp_beta)
        self.alpha = 0
        self.n_epochs = n_epochs

    def wrap_dataset(self, dataset):
        # Delegate to whichever baseline is currently active.
        if self.alpha > 0:
            return self.baseline.wrap_dataset(dataset)
        return self.warmup_baseline.wrap_dataset(dataset)

    def unwrap_batch(self, batch):
        if self.alpha > 0:
            return self.baseline.unwrap_batch(batch)
        return self.warmup_baseline.unwrap_batch(batch)

    def eval(self, x, c):
        """Return the convex combination of wrapped and warmup baselines.

        Both the baseline values and the auxiliary losses are mixed with
        weight alpha.
        """
        if self.alpha == 1:
            return self.baseline.eval(x, c)
        if self.alpha == 0:
            return self.warmup_baseline.eval(x, c)
        v, l = self.baseline.eval(x, c)
        vw, lw = self.warmup_baseline.eval(x, c)
        # Return convex combination of baseline and of loss.
        # Fix: the loss term previously computed `1 - alpha * lw` instead of
        # `(1 - alpha) * lw` due to a misplaced parenthesis.
        return self.alpha * v + (1 - self.alpha) * vw, self.alpha * l + (1 - self.alpha) * lw

    def epoch_callback(self, model, epoch):
        # Need to call epoch callback of inner model (also after first epoch if we have not used it)
        self.baseline.epoch_callback(model, epoch)
        self.alpha = (epoch + 1) / float(self.n_epochs)
        if epoch < self.n_epochs:
            print("Set warmup alpha = {}".format(self.alpha))

    def state_dict(self):
        # Checkpointing within warmup stage makes no sense, only save inner baseline
        return self.baseline.state_dict()

    def load_state_dict(self, state_dict):
        # Checkpointing within warmup stage makes no sense, only load inner baseline
        self.baseline.load_state_dict(state_dict)
class NoBaseline(Baseline):
    """Baseline that contributes nothing: zero value, zero auxiliary loss."""

    def eval(self, x, c):
        # No baseline estimate and no trainable loss term.
        return 0, 0  # No baseline, no loss
class ExponentialBaseline(Baseline):
    """Exponential moving average of the observed batch costs."""

    def __init__(self, beta):
        super().__init__()
        self.beta = beta
        self.v = None  # running average; None until the first batch

    def eval(self, x, c):
        """Update the running average with the batch mean cost and return it."""
        batch_mean = c.mean()
        if self.v is None:
            new_v = batch_mean
        else:
            new_v = self.beta * self.v + (1. - self.beta) * batch_mean
        # Detach: the baseline must never receive gradients.
        self.v = new_v.detach()
        return self.v, 0  # no auxiliary loss

    def state_dict(self):
        return {
            'v': self.v
        }

    def load_state_dict(self, state_dict):
        self.v = state_dict['v']
class CriticBaseline(Baseline):
    """Learned baseline: a critic network predicts the expected cost."""

    def __init__(self, critic):
        super().__init__()
        self.critic = critic

    def eval(self, x, c):
        """Predict the baseline for x; also return the critic's MSE loss."""
        prediction = self.critic(x)
        # The actor must not backprop through the baseline value, so it is
        # detached; the MSE term is what trains the critic itself.
        return prediction.detach(), F.mse_loss(prediction, c.detach())

    def get_learnable_parameters(self):
        return list(self.critic.parameters())

    def epoch_callback(self, model, epoch):
        pass

    def state_dict(self):
        return {
            'critic': self.critic.state_dict()
        }

    def load_state_dict(self, state_dict):
        critic_state_dict = state_dict.get('critic', {})
        if not isinstance(critic_state_dict, dict):  # backwards compatibility
            critic_state_dict = critic_state_dict.state_dict()
        self.critic.load_state_dict({**self.critic.state_dict(), **critic_state_dict})
class RolloutBaseline(Baseline):
    """Greedy-rollout baseline: a frozen copy of the policy evaluates each
    instance, and the copy is replaced when the current policy beats it with
    statistical significance (one-sided paired t-test)."""

    def __init__(self, model, problem, opts, epoch=0):
        super(Baseline, self).__init__()

        self.problem = problem
        self.opts = opts

        self._update_model(model, epoch)

    def _update_model(self, model, epoch, dataset=None):
        """Freeze a copy of `model` as the new baseline and re-evaluate it
        on a (possibly regenerated) evaluation dataset."""
        self.model = copy.deepcopy(model)
        # Always generate baseline dataset when updating model to prevent overfitting to the baseline dataset

        if dataset is not None:
            if len(dataset) != self.opts.val_size:
                print("Warning: not using saved baseline dataset since val_size does not match")
                dataset = None
            elif (dataset[0] if self.problem.NAME == 'tsp' else dataset[0]['loc']).size(0) != self.opts.graph_size:
                print("Warning: not using saved baseline dataset since graph_size does not match")
                dataset = None

        if dataset is None:
            self.dataset = self.problem.make_dataset(
                size=self.opts.graph_size, num_samples=self.opts.val_size, distribution=self.opts.data_distribution)
        else:
            self.dataset = dataset
        print("Evaluating baseline model on evaluation dataset")
        self.bl_vals = rollout(self.model, self.dataset, self.opts).cpu().numpy()
        self.mean = self.bl_vals.mean()
        self.epoch = epoch

    def wrap_dataset(self, dataset):
        """Precompute baseline values for every item of the dataset."""
        print("Evaluating baseline on dataset...")
        # Need to convert baseline to 2D to prevent converting to double, see
        # https://discuss.pytorch.org/t/dataloader-gives-double-instead-of-float/717/3
        return BaselineDataset(dataset, rollout(self.model, dataset, self.opts).view(-1, 1))

    def unwrap_batch(self, batch):
        return batch['data'], batch['baseline'].view(-1)  # Flatten result to undo wrapping as 2D

    def eval(self, x, c):
        """Evaluate the frozen baseline model on a single batch."""
        # Use volatile mode for efficient inference (single batch so we do not use rollout function)
        with torch.no_grad():
            v, _ = self.model(x)

        # There is no loss
        return v, 0

    def epoch_callback(self, model, epoch):
        """
        Challenges the current baseline with the model and replaces the baseline model if it is improved.
        :param model: The model to challenge the baseline by
        :param epoch: The current epoch
        """
        print("Evaluating candidate model on evaluation dataset")
        candidate_vals = rollout(model, self.dataset, self.opts).cpu().numpy()
        candidate_mean = candidate_vals.mean()

        print("Epoch {} candidate mean {}, baseline epoch {} mean {}, difference {}".format(
            epoch, candidate_mean, self.epoch, self.mean, candidate_mean - self.mean))
        if candidate_mean - self.mean < 0:
            # Calc p value
            t, p = ttest_rel(candidate_vals, self.bl_vals)

            p_val = p / 2  # one-sided
            assert t < 0, "T-statistic should be negative"
            print("p-value: {}".format(p_val))
            if p_val < self.opts.bl_alpha:
                print('Update baseline')
                self._update_model(model, epoch)

    def state_dict(self):
        return {
            'model': self.model,
            'dataset': self.dataset,
            'epoch': self.epoch
        }

    def load_state_dict(self, state_dict):
        # We make it such that it works whether model was saved as data parallel or not
        load_model = copy.deepcopy(self.model)
        get_inner_model(load_model).load_state_dict(get_inner_model(state_dict['model']).state_dict())
        self._update_model(load_model, state_dict['epoch'], state_dict['dataset'])
class BaselineDataset(Dataset):
    """Pairs each dataset item with its precomputed baseline value."""

    def __init__(self, dataset=None, baseline=None):
        super().__init__()
        self.dataset = dataset
        self.baseline = baseline
        # Every item must have exactly one baseline value.
        assert (len(self.dataset) == len(self.baseline))

    def __getitem__(self, item):
        return {'data': self.dataset[item], 'baseline': self.baseline[item]}

    def __len__(self):
        return len(self.dataset)
| 8,213 | 32.120968 | 116 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.