python_code stringlengths 0 83.2k |
|---|
# first 3 functions taken from:
# http://www.johnvinyard.com/blog/?p=268
import numpy as np
from numpy.lib.stride_tricks import as_strided as ast
# from .arrays import normalizeMat
def norm_shape(shape):
    '''
    Normalize a shape specification into a tuple of ints.

    Accepts either a single number (anything `int()` accepts) or any
    iterable of dimension sizes, and always returns a plain tuple so that
    downstream tuple arithmetic (concatenation) works uniformly.

    Parameters
    shape - an int, or a tuple of ints
    Returns
    a shape tuple
    '''
    # Scalar case: a lone dimension becomes a 1-tuple.
    try:
        return (int(shape),)
    except TypeError:
        pass  # not a number; fall through to the iterable case
    # Iterable case: materialize as a tuple.
    try:
        return tuple(shape)
    except TypeError:
        pass  # not iterable either
    raise TypeError('shape must be an int, or a tuple of ints')
def sliding_window(a, ws, ss=None, flatten=True):
    '''
    Return a sliding window over a in any number of dimensions.

    Parameters:
    a - an n-dimensional numpy array
    ws - an int (a is 1D) or tuple (a is 2D or greater) representing the size
         of each dimension of the window
    ss - an int (a is 1D) or tuple (a is 2D or greater) representing the
         amount to slide the window in each dimension. If not specified, it
         defaults to ws.
    flatten - if True, all slices are flattened, otherwise, there is an
         extra dimension for each dimension of the input.

    Returns
    an array containing each n-dimensional window from a

    Raises
    ValueError - if ws/ss/a.shape disagree in length, or a window dimension
         exceeds the corresponding dimension of a.

    NOTE: the result is a strided *view* into a (when flatten=False);
    windows share memory with a.
    '''
    if ss is None:
        # ss was not provided; the windows will not overlap in any direction.
        ss = ws
    ws = norm_shape(ws)
    ss = norm_shape(ss)
    # convert ws, ss, and a.shape to numpy arrays so that we can do math in
    # every dimension at once.
    ws = np.array(ws)
    ss = np.array(ss)
    shape = np.array(a.shape)
    # ensure that ws, ss, and a.shape all have the same number of dimensions
    ls = [len(shape), len(ws), len(ss)]
    if 1 != len(set(ls)):
        raise ValueError(
            'a.shape, ws and ss must all have the same length. They were %s' % str(ls))
    # ensure that ws is smaller than a in every dimension
    if np.any(ws > shape):
        raise ValueError(
            'ws cannot be larger than a in any dimension.'
            'a.shape was %s and ws was %s' % (str(a.shape), str(ws)))
    # how many slices will there be in each dimension?
    newshape = norm_shape(((shape - ws) // ss) + 1)
    # the shape of the strided array will be the number of slices in each
    # dimension plus the shape of the window (tuple addition)
    newshape += norm_shape(ws)
    # the strides tuple will be the array's strides multiplied by step size,
    # plus the array's strides (tuple addition)
    newstrides = norm_shape(np.array(a.strides) * ss) + a.strides
    strided = ast(a, shape=newshape, strides=newstrides)
    if not flatten:
        return strided
    # Collapse strided so that it has one more dimension than the window.
    # I.e., the new array is a flat list of slices.
    meat = len(ws) if ws.shape else 0
    # NOTE: np.product was removed in NumPy 2.0; np.prod is the supported
    # spelling and behaves identically here.
    firstdim = (int(np.prod(newshape[:-meat])),) if ws.shape else ()
    dim = firstdim + (newshape[-meat:])
    return strided.reshape(dim)
def sliding_windows_of_elements(a, ss, ws=None, flatten=False):
    """Apply sliding_window() to each row of a, returning one result per row."""
    out = []
    for seq in a:
        out.append(sliding_window(seq, ss, ws, flatten))
    return out
def sliding_windows_of_rows(a, ss, ws=None, flatten=True):
    """Slide a window along each row of a and stack all windows vertically."""
    per_row_windows = sliding_windows_of_elements(a, ss, ws, flatten)
    return np.vstack(per_row_windows)
def _compute_from_seq(allSubseqs, n):
seqLens = np.array(map(lambda subseqs: subseqs.shape[0], allSubseqs))
startIdxs = np.r_[0, np.cumsum(seqLens)[:-1]]
endIdxs = np.r_[startIdxs[1:], n]
fromSeq = np.zeros(n)
for i in range(len(startIdxs)):
startIdx, endIdx = startIdxs[i], endIdxs[i]
fromSeq[startIdx:endIdx] = i
return fromSeq
# def flattened_subseqs_of_length(seqs, m, norm=None, return_from_seq=False):
# # TODO should have flags for returning X and allSubseqs, not just fromSeq
# # each element of seqs is assumed to be a 1D or 2D array
# origM = m
# step = 1
# origDims = len(seqs[0].shape)
# if origDims > 1:
# sampleDimensions = np.prod(seqs[0].shape[1:]) # num cols in mat
# m *= sampleDimensions # TODO don't enforce stepping in only one direction
# step *= sampleDimensions
# for i, seq in enumerate(seqs):
# seqs[i] = seq.flatten()
# allSubseqs = sliding_windows_of_elements(seqs, m, step)
# X = np.asarray(allSubseqs, dtype=np.float).reshape((-1, m)) # -1 = compute it
# Xnorm = normalizeMat(X, origM, how=norm)
# if not return_from_seq:
# return Xnorm, X, allSubseqs
# fromSeq = _compute_from_seq(allSubseqs, Xnorm.shape[0])
# return Xnorm, X, allSubseqs, fromSeq
# simple function for common case
def sliding_window_1D(x, windowLen, step=1):
    """Convenience wrapper for the common 1D case of sliding_window()."""
    return sliding_window(x, ws=windowLen, ss=step)
class InputTooSmallException(Exception):
    """Raised by extract_conv2d_windows() when the input array is smaller
    than the filter in some spatial dimension."""
    pass
def extract_conv2d_windows(
        X, filt_shape, strides=(1, 1), flatten_spatial_dims=False,
        flatten_examples_dim=False, padding='valid'):
    """Extract the patches a 2D convolution with the given filter shape,
    strides, and padding would see, in NHWC layout.

    Parameters
    X - 3D (H, W, C) or 4D (N, H, W, C) array
    filt_shape - (filter_height, filter_width)
    strides - length-2 (row, col) or length-4 strides
    flatten_spatial_dims - if True, reshape to nexamples x npositions x filt_size
    flatten_examples_dim - if True, merge the examples dim into the first axis
    padding - 'valid' or 'same' (zero padding, mirroring scipy/tf behavior)

    Returns
    an array of filter-shaped windows; for MNIST-like input (200, 28, 28, 1)
    with a 5x5 filter and no flattening, shape is (200, 24, 24, 5, 5, 1).

    Raises
    InputTooSmallException - if X is smaller than the filter spatially.
    """
    # TODO support NCHW format
    orig_X_ndim = X.ndim
    if X.ndim == 3:
        X = X[np.newaxis, ...]
    assert X.ndim == 4
    assert len(filt_shape) == 2
    assert len(strides) in (2, 4)
    filt_shape = int(filt_shape[0]), int(filt_shape[1])
    if filt_shape[0] > X.shape[1]:  # TODO rm after debug
        raise InputTooSmallException(
            "filt_shape[0] ({}) > X.shape[1] ({})".format(
                filt_shape[0], X.shape[1]))
    if filt_shape[1] > X.shape[2]:
        # BUG FIX: message previously formatted filt_shape[0] here
        raise InputTooSmallException(
            "filt_shape[1] ({}) > X.shape[2] ({})".format(
                filt_shape[1], X.shape[2]))
    padding = padding.lower()
    assert padding in ('same', 'valid')
    pad_nrows = filt_shape[0] - 1
    pad_ncols = filt_shape[1] - 1
    if padding == 'same' and (pad_nrows > 0 or pad_ncols > 0):
        # BUG FIX: column dim previously used X.shape[1], which only worked
        # for square inputs.
        # NOTE(review): np.zeros defaults to float64, so 'same' padding
        # upcasts non-float64 inputs — confirm whether dtype=X.dtype is wanted.
        padded = np.zeros((X.shape[0], X.shape[1] + pad_nrows,
                           X.shape[2] + pad_ncols, X.shape[3]))
        # NOTE: this should mirror the padding used by scipy and tensorflow;
        # however, since their exact behavior is only vaguely documented, it
        # may diverge from their behavior at any time. See the source code for
        # scipy.signal.convolve2d or https://stackoverflow.com/a/38111069
        row_start = int(pad_nrows) // 2
        row_end = row_start + X.shape[1]
        col_start = int(pad_ncols) // 2
        col_end = col_start + X.shape[2]
        padded[:, row_start:row_end, col_start:col_end, :] = X
        X = padded
    # expand filter/strides to 4D so sliding_window sees the NHWC layout
    filt_shape = (1, filt_shape[0], filt_shape[1], X.shape[3])
    if len(strides) == 2:
        strides = (1, strides[0], strides[1], X.shape[3])
    windows = sliding_window(X, filt_shape, strides, flatten=False)
    # strip out dims 3 and 4, since these are always 1; dim 3 is filter
    # position across channels (only one position, since doing 2D conv),
    # and dim 4 is all filter data across examples (not actually
    # convolving across examples); e.g., for first 200 examples from
    # MNIST with a 5x5 filter, goes from shape:
    #   (200, 24, 24, 1, 1, 5, 5, 1)
    # to shape:
    #   (200, 24, 24, 5, 5, 1)
    windows = windows.reshape(windows.shape[:3] + windows.shape[5:])
    if flatten_spatial_dims:
        # nexamples x npositions x filt_size
        windows = windows.reshape(X.shape[0], -1, np.prod(filt_shape))
    if flatten_examples_dim:
        windows = windows.reshape(-1, *windows.shape[2:])
    if orig_X_ndim == 3:
        # input had no examples dim, so strip it from the output too
        windows = windows.reshape(windows.shape[1:])
    return windows
if __name__ == '__main__':
    # quick smoke test: window length 3, step 1, over each row of a 6x4 array
    demo = np.arange(24).reshape((6, 4))
    print(demo)
    print(sliding_windows_of_rows(demo, 3, 1))
|
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from .utils import kmeans
from joblib import Memory
_memory = Memory('.', verbose=0)
def _to_np(A):
return A.cpu().detach().numpy()
def _class_balanced_sampling(X, labels, k):
np.random.seed(123)
N, D = X.shape
# intialize centroids by sampling from each class in proportion to its
# relative frequency
uniq_lbls, counts = np.unique(labels, return_counts=True)
sort_idxs = np.argsort(counts)
uniq_lbls = uniq_lbls[sort_idxs]
counts = counts[sort_idxs]
remaining_counts = np.cumsum(counts[::-1])[::-1]
nremaining_samples = k
# C = np.empty((k, D), dtype=np.float32)
C = []
C_labels = []
# affinities = np.zeros((k, nclasses), dtype=np.float32)
for i, lbl in enumerate(uniq_lbls):
count = counts[i]
target_frac = count / remaining_counts[i]
target_nsamples = int(nremaining_samples * target_frac + .999)
target_nsamples = max(1, target_nsamples)
target_nsamples = min(target_nsamples, count)
nremaining_samples -= target_nsamples
lbl_idxs = np.where(labels == lbl)[0]
# print("lbl, count, num lbl idxs: ", lbl, count, len(lbl_idxs))
assert len(lbl_idxs) == count
use_idxs = np.random.choice(count, size=target_nsamples, replace=False)
keep_idxs = lbl_idxs[use_idxs]
C.append(X[keep_idxs])
C_labels.append(np.full(target_nsamples, lbl, dtype=np.int32))
# if len(C).shape[0] < k:
C = np.vstack(C).astype(np.float32)
# print("k, C shape", k, C.shape)
assert C.shape == (k, D)
C_labels = np.hstack(C_labels)
assert C_labels.shape == (k,)
return C, C_labels
def neighbor_compression(X, labels, k, niters=1000, rel_tol=.0001, verbose=1):
    """Learn k labeled prototype vectors for (soft) nearest-prototype
    classification of (X, labels) by gradient descent.

    Parameters
    X - (N, D) float32 data matrix
    labels - (N,) integer class labels
    k - number of prototypes to learn
    niters - max number of gradient steps
    rel_tol - relative loss change below which we declare convergence
    verbose - 0 = silent, 1 = print at convergence, 2 = print every 10 iters

    Returns
    (C, centroid_labels): C is (k, D) learned prototypes; centroid_labels
    is (k,) the argmax class of each prototype's learned affinities.
    """
    N, D = X.shape
    nclasses = len(np.unique(labels))

    # initialize centroids by class-balanced sampling from X
    C, C_labels = _class_balanced_sampling(X, labels, k)

    # torch.tensor (not from_numpy) so that C participates in autograd
    C = torch.tensor(C.T, requires_grad=True)
    X = torch.from_numpy(X)

    # One-hot encode the centroid labels. BUG FIX: this tensor must be built
    # WITHOUT requires_grad=True — in-place index assignment on a leaf tensor
    # that requires grad raises a RuntimeError in torch. The trainable copy
    # is created below via clone().detach().requires_grad_(True).
    affinities = torch.zeros((k, nclasses), dtype=torch.float32)
    for kk in range(k):
        affinities[kk, C_labels[kk]] = 1
    Z = affinities.clone().detach().requires_grad_(True)

    labels = torch.from_numpy(labels)
    loss_fn = torch.nn.CrossEntropyLoss()
    opt = optim.SGD([C, Z], lr=.1, momentum=.9)

    prev_loss = np.inf
    for t in range(niters):
        temperature = np.log2(t + 2)  # +2 so that it starts at 1 at t=0
        # soft assignment of each sample to each centroid
        similarities = F.softmax(X @ C, dim=1)  # N x k
        # sharpen centroid->class affinities over time via the temperature
        affinities = F.softmax(Z * temperature, dim=1)
        logits = similarities @ affinities

        # update params and print how we're doing
        loss = loss_fn(logits, labels)
        loss.backward()
        opt.step()
        opt.zero_grad()

        loss_pyfloat = loss.item()
        change = prev_loss - loss_pyfloat
        thresh = rel_tol * min(loss_pyfloat, prev_loss)
        if np.abs(change) < thresh:
            if verbose > 0:
                _, labels_hat = torch.max(logits, dim=1)
                acc = torch.mean((labels == labels_hat).type(torch.float))
                print("converged after {} iters with acc {:.3f}, loss: {:.4f}"
                      "".format(t + 1, acc.item(), loss_pyfloat))
            break  # converged
        prev_loss = loss_pyfloat
        if (verbose > 1) and ((t + 1) % 10 == 0):
            _, labels_hat = torch.max(logits, dim=1)
            acc = torch.mean((labels == labels_hat).type(torch.float)).item()
            print("acc: ", acc)
            print("{:.3f}".format(loss.item()))  # convert to python float

    centroid_labels = np.argmax(_to_np(Z), axis=1)
    return _to_np(C).T, centroid_labels
# or at least, ProtoNN without the L0 constraints; also with simultaneous
# updates to all param tensors instead of alternating
# def protonn(X, labels, k, niters=10000, verbose=1, gamma=1):
# or at least, ProtoNN without the L0 constraints; also with simultaneous
# updates to all param tensors instead of alternating
def protonn(X, labels, k, d=-1, niters=1000, verbose=1, gamma=-1):
    """ProtoNN-style prototype learning (without L0 constraints), optimizing
    centroids C, projection W, and class scores Z jointly with SGD.

    Parameters
    X - (N, D) float32 data matrix
    labels - (N,) integer class labels
    k - number of prototypes
    d - projection dim; defaults to D when < 1 (W is currently learned but
        unused in the forward pass — see NOTE below)
    niters - number of epochs
    verbose - > 0 prints accuracy/loss every 10 epochs
    gamma - RBF bandwidth; defaults to 1/sqrt(D) when < 1

    Returns
    (C, W, Z) as numpy arrays: (k, D) centroids, (D, d) projection,
    (k, nclasses) per-prototype class scores.
    """
    N, D = X.shape
    if gamma < 1:
        gamma = 1. / np.sqrt(D)  # makes it struggle less / not make NaNs
    if d < 1:
        d = D
    labels = torch.from_numpy(labels)
    nclasses = len(np.unique(labels))

    # initialize centroids with kmeans; random-init the projection
    C, _ = kmeans(X, k)
    W = np.random.randn(D, d).astype(np.float32)

    # torch.tensor (not from_numpy) so these participate in autograd
    C = torch.tensor(C.T, requires_grad=True)
    X = torch.from_numpy(X)
    W = torch.tensor(W, requires_grad=True)
    Z = torch.randn(k, nclasses, requires_grad=True)

    loss_fn = torch.nn.CrossEntropyLoss()
    opt = optim.SGD([C, W, Z], lr=.1, momentum=.9)

    nbatches = 1
    batch_sz = int(np.ceil(N / nbatches))
    for t in range(niters):
        perm = np.random.permutation(N)
        for b in range(nbatches):
            start_idx = b * batch_sz
            end_idx = min(start_idx + batch_sz, N)
            perm_idxs = perm[start_idx:end_idx]
            X_batch = X[perm_idxs]
            labels_batch = labels[perm_idxs]

            # NOTE(review): the learned projection W is not applied here
            # (embeddings = X_batch, not X_batch @ W) — confirm intended.
            embeddings = X_batch
            # squared euclidean distances to centroids via the expansion
            # ||x - c||^2 = ||x||^2 - 2<x, c> + ||c||^2
            embed_norms_sq = (embeddings * embeddings).sum(dim=1, keepdim=True)
            prods = embeddings @ C
            C_norms_sq = torch.sum(C * C, dim=0)
            dists_sq = -2 * prods
            dists_sq += embed_norms_sq
            dists_sq += C_norms_sq
            neg_dists_sq = -dists_sq

            # BUG FIX: the old exact check (>= 0) could fail on tiny negative
            # values from floating-point rounding; use the same -.01 tolerance
            # as stochastic_neighbor_compression.
            assert np.min(_to_np(dists_sq)) >= -.01

            similarities = torch.exp(gamma * neg_dists_sq)  # N x k
            logits = similarities @ Z

            # update params and print how we're doing
            loss = loss_fn(logits, labels_batch)
            loss.backward()
            opt.step()
            opt.zero_grad()

            if (verbose > 0) and ((t + 1) % 10 == 0) and b == 0:
                _, labels_hat = torch.max(logits, dim=1)
                acc = torch.mean(
                    (labels[perm_idxs] == labels_hat).type(torch.float))
                print("acc: ", acc)
                print("{:.3f}".format(loss.item()))  # convert to python float

    return _to_np(C).T, _to_np(W), _to_np(Z)
@_memory.cache
def stochastic_neighbor_compression(X, labels, k, niters=1000,
                                    gamma=-1, rel_tol=.0001, verbose=1):
    """Learn k prototypes for nearest-prototype classification by optimizing
    ONLY the prototype locations C with SGD; the prototype labels are fixed
    at initialization (class-balanced sampling) and returned unchanged.

    Parameters
    X - (N, D) float32 data matrix
    labels - (N,) integer class labels
    k - number of prototypes
    niters - max number of epochs
    gamma - RBF bandwidth; values < 1 are replaced by 1
    rel_tol - relative loss change below which we declare convergence
    verbose - 0 = silent, 1 = print at convergence, 2 = also print progress

    Returns
    (C, C_labels): (k, D) learned prototypes and their (k,) fixed labels.

    NOTE: results are cached on disk by joblib via @_memory.cache, keyed on
    the argument values.
    """
    N, D = X.shape
    nclasses = len(np.unique(labels))
    if gamma < 1:
        gamma = 1
        # gamma = 1. / np.sqrt(D) # makes it struggle less / not make NaNs
        # gamma = 1. / D
    # labels = torch.from_numpy(labels)
    # C = np.random.randn(k, D).astype(np.float32)
    # initialize prototypes by sampling real points, one class at a time
    C, C_labels = _class_balanced_sampling(X, labels, k)

    # one-hot encode labels; affinities stays fixed (not optimized below)
    affinities = torch.zeros((k, nclasses), dtype=torch.float32)
    for kk in range(k):
        affinities[kk, C_labels[kk]] = 1
    # so that there's actual gradient flow
    affinities += torch.randn(k, nclasses) * .1

    # W = np.random.randn(D, D).astype(np.float32)
    # C = C @ W
    # W = np.eye(D).astype(np.float32) # better than randn init

    # convert to torch tensors for optimization
    # Y = torch.from_numpy(Y)
    C = torch.tensor(C.T, requires_grad=True)  # not from_numpy to allow grad
    X = torch.from_numpy(X)
    labels = torch.from_numpy(labels)
    gamma = torch.tensor(np.array(gamma, dtype=np.float32))
    # affinities = torch.from_numpy(affinities)
    # print("labels shape: ", labels.shape)
    # print("uniq labels: ", uniq_lbls)
    # print("uniq label counts: ", counts)
    # labels = labels.reshape(-1, 1)
    # print("labels shape: ", labels.shape)
    # W = torch.tensor(W, requires_grad=True) # not from_numpy to allow grad
    # print("W", W[:10])
    # return None, None, None
    # Z = torch.randn(k, nclasses, requires_grad=True)
    loss_fn = torch.nn.CrossEntropyLoss()
    # only the prototype locations are trained
    opt = optim.SGD([C], lr=.1, momentum=.9)
    # opt = optim.SGD([C, Z], lr=.1, momentum=.9)
    # opt = optim.SGD([C, gamma], lr=.1, momentum=.9)
    nbatches = 1
    batch_sz = int(np.ceil(N / nbatches))
    # batch_sz = 1024
    # nbatches = int(np.ceil(N / batch_sz))
    # for t in range(50):
    prev_loss = np.inf
    converged = False
    t = 0
    while t < niters and not converged:
        perm = np.random.permutation(N)
        for b in range(nbatches):
            if nbatches > 1:
                start_idx = b * batch_sz
                end_idx = min(start_idx + batch_sz, N)
                perm_idxs = perm[start_idx:end_idx]
                X_batch = X[perm_idxs]
                labels_batch = labels[perm_idxs]
            else:
                # single batch: use everything, skip the permutation indexing
                X_batch = X
                labels_batch = labels
            # temperature = np.log2(t + 2) # +2 so that it starts at 1 at t=0
            # compute squared distances to all centroids via the expansion
            # ||x - c||^2 = ||x||^2 - 2<x, c> + ||c||^2
            # embeddings = X @ W
            # embeddings = X_batch @ W
            embeddings = X_batch
            embed_norms_sq = (embeddings * embeddings).sum(dim=1, keepdim=True)
            # prods = torch.mm(embeddings, C)
            prods = embeddings @ C
            C_norms_sq = torch.sum(C * C, dim=0)
            dists_sq = -2 * prods
            dists_sq += embed_norms_sq
            dists_sq += C_norms_sq
            neg_dists_sq = -dists_sq
            # print("min dist sq: ", torch.min(dists_sq).item())
            # sanity check: distances must be nonnegative up to fp rounding
            minval_dist_sq = torch.min(dists_sq).item()
            if minval_dist_sq < -.01:
                print("min dist sq: ", minval_dist_sq)
                print("min C_norms_sq", torch.min(C_norms_sq).item())
                print("min X_norms_sq", torch.min(embed_norms_sq).item())
                print("dists_sq: ", dists_sq[:10])
            assert minval_dist_sq >= -.01
            # assert np.min(_to_np(dists_sq)) >= -1e-3
            # assert np.max(_to_np(neg_dists_sq)) <= 1e-3
            similarities = torch.exp(gamma * neg_dists_sq)  # N x C
            logits = similarities @ affinities
            # logits = similarities @ Z
            # update params and print how we're doing
            loss = loss_fn(logits, labels_batch)
            # loss += gamma * gamma
            loss.backward()
            opt.step()
            opt.zero_grad()
            loss_pyfloat = loss.item()
            change = prev_loss - loss_pyfloat
            thresh = rel_tol * min(loss_pyfloat, prev_loss)
            if np.abs(change) < thresh:
                if verbose > 0:
                    _, labels_hat = torch.max(logits, dim=1)
                    labels_true = labels[perm_idxs] if nbatches > 1 else labels
                    acc = torch.mean(
                        (labels_true == labels_hat).type(torch.float))
                    print("converged after {} iters with acc {:.3f}, loss: {:.4f}"  # noqa
                          "".format(t + 1, acc.item(), loss_pyfloat))
                converged = True  # converged
                break
            prev_loss = loss_pyfloat
            # if (verbose > 0) and ((t + 1) % 10 == 0):
            # if (verbose > 0) and ((t + 1) % 10 == 0) and b == 0:
            if (verbose > 1) and (t % 10 == 0) and b == 0:
                _, labels_hat = torch.max(logits, dim=1)
                labels_true = labels[perm_idxs] if nbatches > 1 else labels
                acc = torch.mean(
                    (labels_true == labels_hat).type(torch.float))
                print("acc: {:.3f}".format(acc.item()))
                print("{:.3f}".format(loss.item()))  # convert to python float
                # print("gamma: ", gamma.item())
        t += 1
    return _to_np(C).T, C_labels
def linear_regression_log_loss(
        X, Y, lamda=1, max_niters=1000, rel_tol=.0001, verbose=2):
    """Fit W minimizing mean(log2(1 + |Y - XW|)) with SGD, warm-started at a
    ridge-regularized least-squares solution.

    Parameters
    X - (N, D) design matrix
    Y - (N, M) targets
    lamda - unused; kept for interface compatibility
    max_niters - max gradient steps
    rel_tol - relative loss change below which we stop
    verbose - 0 = silent, 1 = print at convergence, 2 = also print progress

    Returns
    W - (D, M) numpy array of learned weights.
    """
    N, D = X.shape
    N, M = Y.shape
    X = X.astype(np.float32)
    Y = Y.astype(np.float32)

    # warm start: ridge-regularized ordinary least squares
    gram = X.T @ X
    gram += np.eye(D) * np.std(X)
    W = np.linalg.solve(gram, X.T @ Y).astype(np.float32)

    X = torch.from_numpy(X)
    Y = torch.from_numpy(Y)
    W = torch.tensor(W, requires_grad=True)
    opt = optim.SGD([W], lr=.1, momentum=.9)

    # now optimize using pytorch
    prev_loss = np.inf
    for t in range(max_niters):
        resids = Y - (X @ W)
        loss = torch.mean(torch.log2(1 + torch.abs(resids)))
        loss.backward()
        opt.step()
        opt.zero_grad()

        cur_loss = loss.item()
        improvement = prev_loss - cur_loss
        if np.abs(improvement) < rel_tol * min(cur_loss, prev_loss):
            if verbose > 0:
                print("converged after {} iters with loss: {:.4f}".format(
                    t + 1, cur_loss))
            break  # converged
        prev_loss = cur_loss
        if (verbose > 1) and ((t + 1) % 10 == 0):
            print("loss: {:.4f}".format(cur_loss))
    return _to_np(W)
def main():
    """Run stochastic neighbor compression on a small synthetic problem."""
    N, D = 1000, 20
    niters = 10000
    X = np.random.randn(N, D).astype(np.float32)

    # ------------------------ neighbor compression
    K = 16
    nclasses = 5
    labels = np.random.randint(nclasses, size=(N,))
    C, centroid_labels = stochastic_neighbor_compression(X, labels, K, niters=niters)
    print("centroid_labels:", centroid_labels)
    print("C type, shape", type(C), C.shape)
    print("done")
# script entry point
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
import os
import numpy as np
import pandas as pd
# TODO this file is hideous (but necessarily so for deadline purposes...)
#
# Also, this file is tightly coupled to figs.py; it basically has a func
# for each figure func that spits out data in exactly the required form
MCQ_RESULTS_DIR = '../results/timing/'
MATMUL_RESULTS_DIR = '../results/matmul/'
def get_mcq_path(D, nbytes):
    """Path to the MCQ timing results file for dimensionality D and nbytes codes."""
    return os.path.join(MCQ_RESULTS_DIR, 'mcq_D={}_M={}.txt'.format(D, nbytes))
class McqResults(object):
    """Parsed contents of one MCQ timing results file.

    Exposes the raw "key: value" pairs as `stats` plus derived fields
    (nbytes, D) cross-checked between the bolt and pq entries.
    """

    def __init__(self, path=None, D=None, nbytes=None):
        if path is None:
            path = get_mcq_path(D=D, nbytes=nbytes)
        self.path = path
        with open(self.path, 'r') as f:
            self.lines = f.readlines()
        # every "key: value" line becomes a stats entry (later keys win)
        self.stats = {}
        for line in self.lines:
            if ':' not in line:
                continue
            pieces = line.split(':')
            self.stats[pieces[0].strip()] = pieces[1].strip()
        self.bolt_nbytes = int(self.stats['bolt M'])
        self.pq_nbytes = int(self.stats['pq M'])
        # bolt uses 4-bit codebooks, so twice as many subvects per byte
        self.bolt_D = int(self.stats['bolt subvect_len']) * self.bolt_nbytes * 2
        self.pq_D = int(self.stats['pq subvect_len']) * self.pq_nbytes
        assert self.bolt_nbytes == self.pq_nbytes
        assert self.bolt_D == self.pq_D
        self.nbytes = self.bolt_nbytes
        self.D = self.bolt_D
        # check that file was named properly
        expected_path = get_mcq_path(D=self.D, nbytes=self.nbytes)
        if expected_path != path:
            print("expected path, path = ", expected_path, path)
        assert expected_path == path

    def __str__(self):  # for debugging
        pieces = []
        for key in sorted(self.stats.keys()):
            pieces.append("'{}': '{}'\n".format(key, self.stats[key]))
        return "".join(pieces)
def _extract_thruput(profile_str):
result_strs = profile_str.split(':')[-1]
rep_strs = result_strs.strip(' ,').split(',')
thruput_parens = [s.strip(' ').split(' ')[1] for s in rep_strs]
return np.array([int(s.strip('()s/')) for s in thruput_parens])
def _extract_times(profile_str):
result_strs = profile_str.split(':')[-1]
rep_strs = result_strs.strip(' ,').split(',')
time_strs = [s.strip(' ').split(' ')[0] for s in rep_strs]
return np.array([float(s) for s in time_strs])
def popcount_results_256():
    """Hard-coded 256-bit popcount vs Bolt timing strings, returned as a tidy
    DataFrame with one row per (algo, nbytes, trial)."""
    LENGTH = 256
    popcnt_times = {}
    popcnt_times[8] = '2.456 (1302931596/s), 2.344 (1365187713/s), 2.125 (1505882352/s), 2.829 (1131141746/s), 2.148 (1489757914/s), 2.167 (1476695892/s), 2.327 (1375161151/s), 2.145 (1491841491/s), 2.12 (1509433962/s), 2.112 (1515151515/s)'
    popcnt_times[16] = '4.368 (732600732/s), 4.121 (776510555/s), 3.926 (815078960/s), 4.105 (779537149/s), 4.176 (766283524/s), 4.119 (776887594/s), 4.464 (716845878/s), 4.153 (770527329/s), 4.364 (733272227/s), 4.198 (762267746/s)'
    popcnt_times[32] = '7.612 (420388859/s), 7.347 (435551925/s), 7.694 (415908500/s), 9.122 (350800263/s), 7.343 (435789186/s), 9.344 (342465753/s), 8.148 (392734413/s), 9.046 (353747512/s), 8.455 (378474275/s), 7.685 (416395575/s)'
    bolt_times = {}
    bolt_times[8] = '0.461 (2169197396/s), 0.456 (2192982456/s), 0.539 (1855287569/s), 0.53 (1886792452/s), 0.456 (2192982456/s), 0.452 (2212389380/s), 0.442 (2262443438/s), 0.438 (2283105022/s), 0.434 (2304147465/s), 0.547 (1828153564/s)'
    bolt_times[16] = '0.894 (1118568232/s), 1.08 (925925925/s), 0.88 (1136363636/s), 0.877 (1140250855/s), 0.881 (1135073779/s), 0.847 (1180637544/s), 1.011 (989119683/s), 0.866 (1154734411/s), 0.984 (1016260162/s), 0.838 (1193317422/s)'
    bolt_times[32] = '2.047 (488519785/s), 1.726 (579374275/s), 1.924 (519750519/s), 2.085 (479616306/s), 2.076 (481695568/s), 1.748 (572082379/s), 1.757 (569151963/s), 2.064 (484496124/s), 1.742 (574052812/s), 1.725 (579710144/s)'
    rows = []
    for algo, timings in zip(['Bolt', 'Binary Embedding'],
                             [bolt_times, popcnt_times]):
        for nbytes, profile in list(timings.items()):
            for trial, thruput in enumerate(_extract_thruput(profile)):
                rows.append({'algo': algo, 'nbytes': nbytes, 'length': LENGTH,
                             'trial': trial, 'y': thruput})
    return pd.DataFrame.from_records(rows)
def encode_results():
    """Collect data- and query-encoding throughputs for Bolt/PQ/OPQ across all
    (D, nbytes) result files, as a tidy DataFrame."""
    records = []
    algo_pairs = list(zip(['bolt', 'pq', 'opq'], ['Bolt', 'PQ', 'OPQ']))
    for D in [64, 128, 256, 512, 1024]:
        for nbytes in [8, 16, 32]:
            res = McqResults(D=D, nbytes=nbytes)
            for abbrev, name in algo_pairs:
                # results for encoding data
                data_key = abbrev + ' encode (10x5)'
                for i, t in enumerate(_extract_thruput(res.stats[data_key])):
                    records.append({'task': 'encode_x', 'D': D, 'nbytes': nbytes,
                                    'algo': name, 'trial': i, 'y': t})
                # results for encoding query (bolt stores its LUT timings
                # under a slightly different key)
                if abbrev == 'bolt':
                    lut_key = abbrev + ' encode lut (10x5)'
                else:
                    lut_key = abbrev + ' encode lut float dist (10x5)'
                for i, t in enumerate(_extract_thruput(res.stats[lut_key])):
                    records.append({'task': 'encode_q', 'D': D, 'nbytes': nbytes,
                                    'algo': name, 'trial': i, 'y': t})
    return pd.DataFrame.from_records(records)
def matmul_results(which='square'):
    """Load matmul timing results from disk as a tidy DataFrame.

    Parameters
    which - 'square' or 'tall'; selects the results file and matrix sizes

    Returns
    a DataFrame with one row per (algo, size, enc, trial)

    Raises
    ValueError - if `which` is not 'square' or 'tall'
    """
    if which == 'square':
        SIZES = [64, 128, 256, 512, 1024, 4096, 8192]
        data_fname = 'square_matmul_results.txt'
    elif which == 'tall':
        SIZES = [32, 64, 128, 256, 512, 1024]
        data_fname = 'tall_matmul_results.txt'
    else:
        # BUG FIX: previously fell through with SIZES/data_fname undefined,
        # producing a confusing NameError below
        raise ValueError("Unrecognized results type: '{}'".format(which))
    with open(MATMUL_RESULTS_DIR + data_fname) as f:
        lines = f.readlines()
    stats = {line.split(':')[0].strip(): line.split(':')[1].strip()
             for line in lines if ':' in line}
    dicts = []
    # add in results from bolt
    for nbytes in [8, 16, 32]:
        prefix = 'bolt<{}>'.format(nbytes)
        algo = 'Bolt {}B'.format(nbytes)
        for sz in SIZES:
            for enc in (0, 1):  # don't vs do encode X at start
                key = '{} encode={} matmul {} (10x5)'.format(prefix, enc, sz)
                times = _extract_times(stats[key])
                dicts += [{'algo': algo, 'size': sz, 'enc': enc, 'nbytes': nbytes,
                           'trial': i, 'y': t}
                          for i, t in enumerate(times)]
                # also add in "encode" version of bolt
                if enc:
                    enc_algo_name = algo + ' + Encode'
                    dicts += [{'algo': enc_algo_name, 'size': sz, 'enc': enc,
                               'nbytes': nbytes, 'trial': i, 'y': t}
                              for i, t in enumerate(times)]
    # add in dense float matmul baseline
    for sz in SIZES:
        key = 'matmul {} (10x5)'.format(sz)
        times = _extract_times(stats[key])
        dicts += [{'algo': 'Floats', 'size': sz, 'enc': -1, 'trial': i, 'y': t}
                  for i, t in enumerate(times)]
    return pd.DataFrame.from_records(dicts)
def encode_data_results_256():
    """Hard-coded D=256 data-encoding timing strings for Bolt/PQ/OPQ,
    returned as a tidy DataFrame with one row per (algo, nbytes, trial)."""
    LENGTH = 256
    pq_times = {}
    pq_times[8] = 'pq encode (10x5): 6.696 (149342/s), 6.688 (149521/s), 6.639 (150625/s), 6.648 (150421/s), 6.711 (149009/s), 6.67 (149925/s), 6.634 (150738/s), 6.684 (149611/s), 6.663 (150082/s), 6.67 (149925/s),'
    pq_times[16] = 'pq encode (10x5): 7.181 (139256/s), 7.194 (139004/s), 7.179 (139295/s), 7.146 (139938/s), 7.123 (140390/s), 7.123 (140390/s), 7.162 (139625/s), 7.148 (139899/s), 7.116 (140528/s), 7.193 (139024/s),'
    pq_times[32] = 'pq encode (10x5): 8.089 (123624/s), 8.175 (122324/s), 8.117 (123198/s), 8.096 (123517/s), 8.48 (117924/s), 8.071 (123900/s), 8.126 (123061/s), 8.123 (123107/s), 8.069 (123931/s), 8.21 (121802/s),'
    opq_times = {}
    opq_times[8] = 'opq encode (10x5): 8.441 (118469/s), 8.385 (119260/s), 8.368 (119502/s), 8.39 (119189/s), 8.355 (119688/s), 8.388 (119217/s), 8.383 (119289/s), 8.412 (118877/s), 8.401 (119033/s), 8.391 (119175/s),'
    opq_times[16] = 'opq encode (10x5): 8.88 (112612/s), 8.786 (113817/s), 8.874 (112688/s), 8.834 (113199/s), 8.874 (112688/s), 8.902 (112334/s), 8.899 (112372/s), 8.925 (112044/s), 8.867 (112777/s), 8.907 (112271/s),'
    opq_times[32] = 'opq encode (10x5): 9.761 (102448/s), 9.718 (102901/s), 9.717 (102912/s), 9.726 (102817/s), 9.908 (100928/s), 9.796 (102082/s), 10.164 (98386/s), 9.792 (102124/s), 9.735 (102722/s), 9.729 (102785/s),'
    bolt_times = {}
    bolt_times[8] = 'bolt encode (10x5): 3.43 (2915451/s), 3.586 (2788622/s), 3.421 (2923121/s), 3.408 (2934272/s), 3.409 (2933411/s), 3.406 (2935995/s), 3.407 (2935133/s), 3.412 (2930832/s), 3.411 (2931691/s), 3.409 (2933411/s),'
    bolt_times[16] = 'bolt encode (10x5): 3.93 (2544529/s), 3.687 (2712232/s), 3.826 (2613695/s), 4.007 (2495632/s), 3.705 (2699055/s), 3.976 (2515090/s), 3.709 (2696144/s), 3.681 (2716653/s), 3.693 (2707825/s), 3.802 (2630194/s),'
    bolt_times[32] = 'bolt encode (10x5): 5.039 (1984520/s), 4.591 (2178174/s), 5.081 (1968116/s), 4.697 (2129018/s), 4.591 (2178174/s), 4.763 (2099517/s), 4.832 (2069536/s), 4.805 (2081165/s), 4.961 (2015722/s), 4.665 (2143622/s),'
    rows = []
    for algo, timings in zip(['Bolt', 'PQ', 'OPQ'],
                             [bolt_times, pq_times, opq_times]):
        for nbytes, profile in list(timings.items()):
            for trial, thruput in enumerate(_extract_thruput(profile)):
                rows.append({'algo': algo, 'nbytes': nbytes, 'length': LENGTH,
                             'trial': trial, 'y': thruput})
    return pd.DataFrame.from_records(rows)
def encode_lut_results():
    """Parse hard-coded LUT-encoding benchmark output into a long-format
    DataFrame with columns (algo, nbytes, y), y = throughput in elems/sec."""
    pq_times = {}
    pq_times[8] = 'pq encode lut float dist (10x5): 64.986 (153879/s), 65.014 (153813/s), 65.155 (153480/s), 64.808 (154301/s), 66.593 (150165/s), 67.68 (147754/s), 69.399 (144094/s), 66.702 (149920/s), 66.234 (150979/s), 66.286 (150861/s),'
    pq_times[16] = 'pq encode lut float dist (10x5): 67.893 (147290/s), 67.484 (148183/s), 69.608 (143661/s), 68.083 (146879/s), 70.958 (140928/s), 69.423 (144044/s), 72.129 (138640/s), 74.984 (133361/s), 70.837 (141169/s), 74.967 (133392/s),'
    pq_times[32] = 'pq encode lut float dist (10x5): 78.809 (126889/s), 79.34 (126039/s), 78.565 (127283/s), 79.171 (126308/s), 78.372 (127596/s), 78.689 (127082/s), 78.094 (128050/s), 80.031 (124951/s), 93.367 (107104/s), 81.896 (122106/s),'
    opq_times = {}
    opq_times[8] = 'opq encode lut float dist (10x5): 155.68 (64234/s), 159.49 (62698/s), 160.64 (62249/s), 158.21 (63205/s), 159.37 (62747/s), 159.29 (62778/s), 160.81 (62186/s), 158.5 (63090/s), 155.22 (64423/s), 158.98 (62901/s),'
    opq_times[16] = 'opq encode lut float dist (10x5): 170.42 (58677/s), 168.41 (59380/s), 169.12 (59129/s), 171.53 (58298/s), 167.32 (59766/s), 168.96 (59185/s), 170.43 (58676/s), 170.7 (58581/s), 169.86 (58870/s), 160.43 (62333/s),'
    opq_times[32] = 'opq encode lut float dist (10x5): 170.86 (58527/s), 175.79 (56885/s), 169.86 (58870/s), 180.3 (55464/s), 172.46 (57983/s), 171.66 (58254/s), 167.23 (59799/s), 168.19 (59457/s), 164.47 (60801/s), 168.31 (59413/s),'
    bolt_times = {}
    bolt_times[8] = 'bolt encode lut (10x5): 2.907 (3439972/s), 2.911 (3435245/s), 2.902 (3445899/s), 2.899 (3449465/s), 2.907 (3439972/s), 2.908 (3438789/s), 2.908 (3438789/s), 2.906 (3441156/s), 2.906 (3441156/s), 2.908 (3438789/s),'
    bolt_times[16] = 'bolt encode lut (10x5): 2.957 (3381805/s), 2.953 (3386386/s), 2.957 (3381805/s), 2.943 (3397893/s), 2.949 (3390979/s), 2.95 (3389830/s), 2.946 (3394433/s), 3.103 (3222687/s), 2.944 (3396739/s), 3.029 (3301419/s),'
    bolt_times[32] = 'bolt encode lut (10x5): 2.511 (3982477/s), 2.51 (3984063/s), 2.587 (3865481/s), 2.508 (3987240/s), 2.847 (3512469/s), 2.508 (3987240/s), 2.508 (3987240/s), 2.769 (3611412/s), 2.729 (3664345/s), 2.556 (3912363/s),'
    # one output row per (algo, nbytes, trial)
    out_dicts = []
    named_tables = (('Bolt', bolt_times), ('PQ', pq_times), ('OPQ', opq_times))
    for algo, times_by_nbytes in named_tables:
        for nbytes, s in times_by_nbytes.items():
            for t in _extract_thruput(s):
                out_dicts.append({'algo': algo, 'nbytes': nbytes, 'y': t})
    return pd.DataFrame.from_records(out_dicts)
def query_speed_results():
    """Parse hard-coded query-speed benchmark output into a long-format
    DataFrame with columns (algo, nbytes, y), y = distances/sec.

    The non-matmul timing strings report time to query 100k points, so their
    extracted throughputs are scaled by 1e5 to become distances/sec; matmul
    throughputs are already in the right units.
    """
    # NOTE: all thruputs in this function (except matmul ones) need be
    # multiplied by 100,000 because we're reporting distances/sec, not time
    # to query 100k points
    bolt_times = {}
    bolt_times[8] = '4.385 (22805/s), 4.385 (22805/s), 4.408 (22686/s), 4.385 (22805/s), 5.117 (19542/s), 4.378 (22841/s), 4.392 (22768/s), 4.393 (22763/s), 4.381 (22825/s), 4.383 (22815/s)'
    bolt_times[16] = '8.268 (12094/s), 9.807 (10196/s), 8.389 (11920/s), 8.681 (11519/s), 8.711 (11479/s), 8.293 (12058/s), 9.797 (10207/s), 8.32 (12019/s), 9.767 (10238/s), 9.499 (10527/s)'
    bolt_times[32] = '19.385 (5158/s), 17.215 (5808/s), 18.612 (5372/s), 18.117 (5519/s), 17.323 (5772/s), 18.436 (5424/s), 18.979 (5268/s), 16.274 (6144/s), 19.696 (5077/s), 17.026 (5873/s)'
    popcnt_times = {}
    popcnt_times[8] = '2.456 (1302931596/s), 2.344 (1365187713/s), 2.125 (1505882352/s), 2.829 (1131141746/s), 2.148 (1489757914/s), 2.167 (1476695892/s), 2.327 (1375161151/s), 2.145 (1491841491/s), 2.12 (1509433962/s), 2.112 (1515151515/s)'
    popcnt_times[16] = '4.368 (732600732/s), 4.121 (776510555/s), 3.926 (815078960/s), 4.105 (779537149/s), 4.176 (766283524/s), 4.119 (776887594/s), 4.464 (716845878/s), 4.153 (770527329/s), 4.364 (733272227/s), 4.198 (762267746/s)'
    popcnt_times[32] = '7.612 (420388859/s), 7.347 (435551925/s), 7.694 (415908500/s), 9.122 (350800263/s), 7.343 (435789186/s), 9.344 (342465753/s), 8.148 (392734413/s), 9.046 (353747512/s), 8.455 (378474275/s), 7.685 (416395575/s)'
    pq_times = {}
    pq_times[8] = '36.499 (2739/s), 35.729 (2798/s), 36.521 (2738/s), 37.924 (2636/s), 37.079 (2696/s), 36.444 (2743/s), 36.115 (2768/s), 36.955 (2705/s), 35.913 (2784/s), 40.354 (2478/s)'
    pq_times[16] = '79.482 (1258/s), 82.546 (1211/s), 84.992 (1176/s), 84.996 (1176/s), 86.218 (1159/s), 84.495 (1183/s), 90.637 (1103/s), 82.164 (1217/s), 85.954 (1163/s), 82.255 (1215/s)'
    pq_times[32] = '214.85 (465/s), 217.41 (459/s), 212.49 (470/s), 210.75 (474/s), 211.12 (473/s), 212.54 (470/s), 209.91 (476/s), 219.95 (454/s), 212.97 (469/s), 213.44 (468/s)'
    opq_times = {}
    opq_times[8] = '38.653 (2587/s), 36.958 (2705/s), 37.684 (2653/s), 35.902 (2785/s), 38.032 (2629/s), 39.511 (2530/s), 42.321 (2362/s), 38.94 (2568/s), 39.224 (2549/s), 39.06 (2560/s)'
    opq_times[16] = '82.636 (1210/s), 82.401 (1213/s), 88.424 (1130/s), 86.649 (1154/s), 83.329 (1200/s), 82.719 (1208/s), 82.281 (1215/s), 80.581 (1240/s), 80.777 (1237/s), 81.107 (1232/s)'
    opq_times[32] = '221.61 (451/s), 230.01 (434/s), 241.68 (413/s), 222.39 (449/s), 215.13 (464/s), 215.49 (464/s), 212.27 (471/s), 213.95 (467/s), 213.96 (467/s), 217.79 (459/s)'
    # 1, 16 -> rowmajor times; 64, 256, 1024 -> colmajor times; (ie, use times from best layout)
    matmul1_times = '12.063 (8289811/s), 11.231 (8903926/s), 10.283 (9724788/s), 10.864 (9204712/s), 10.492 (9531071/s), 10.877 (9193711/s), 10.79 (9267840/s), 10.85 (9216589/s), 11.041 (9057150/s), 10.647 (9392317/s)'
    matmul16_times = '21.707 (73708941/s), 21.38 (74836295/s), 21.71 (73698756/s), 21.54 (74280408/s), 21.454 (74578167/s), 21.989 (72763654/s), 22.486 (71155385/s), 22.048 (72568940/s), 23.18 (69025021/s), 21.771 (73492260/s)'
    matmul64_times = '56.496 (113282356/s), 55.488 (115340253/s), 54.853 (116675478/s), 56.689 (112896681/s), 56.482 (113310435/s), 55.644 (115016893/s), 54.623 (117166761/s), 55.773 (114750865/s), 54.726 (116946241/s), 54.918 (116537383/s)'
    matmul256_times = '164.72 (155414306/s), 168.41 (152014488/s), 169.93 (150652927/s), 164.99 (155157157/s), 166.66 (153609831/s), 163.04 (157012830/s), 167.45 (152880544/s), 161.06 (158949936/s), 171.13 (149594750/s), 168.49 (151940505/s)'
    matmul1024_times = '653.63 (156664035/s), 677.26 (151197248/s), 692.88 (147788938/s), 664.79 (154032909/s), 702.61 (145742096/s), 651.74 (157116904/s), 656.4 (156003388/s), 664.69 (154056314/s), 665.34 (153906736/s), 651.88 (157083643/s)'
    out_dicts = []
    algos = ['Bolt', 'PQ', 'OPQ', 'Binary Embedding']
    dicts = [bolt_times, pq_times, opq_times, popcnt_times]
    for algo, d in zip(algos, dicts):
        for nbytes, s in list(d.items()):
            thruputs = _extract_thruput(s) * 1e5
            if algo == 'Binary Embedding':
                thruputs /= 1e5  # these are already dists/sec, not qps
            out_dicts += [{'algo': algo, 'nbytes': nbytes, 'y': t} for t in thruputs]
    # matmul has no nbytes dimension, so its numbers are duplicated once per
    # nbytes value so they show up in every per-nbytes plot
    matmul_strs = [matmul1_times, matmul16_times, matmul64_times, matmul256_times, matmul1024_times]
    batch_sizes = [1, 16, 64, 256, 1024]
    nbytes_list = [8, 16, 32]  # replicate results in each plot
    for s, sz in zip(matmul_strs, batch_sizes):
        algo = 'Matmul {}'.format(sz)
        for nbytes in nbytes_list:
            thruputs = _extract_thruput(s)
            out_dicts += [{'algo': algo, 'nbytes': nbytes, 'y': t} for t in thruputs]
    return pd.DataFrame.from_records(out_dicts)
def main():
    """No-op entry point; the commented calls below are handy spot checks
    for the parsing helpers in this module."""
    pass
    # print _extract_thruput('foo (10x5): 2.456 (1302931596/s), 2.344 (1365187713/s), 2.125 (1505882352/s), 2.829 (1131141746/s), 2.148 (1489757914/s), 2.167 (1476695892/s), 2.327 (1375161151/s), 2.145 (1491841491/s), 2.12 (1509433962/s), 2.112 (1515151515/s)')
    # print McqResults('../results/tmp.txt')
    # print McqResults('../results/mcq/mcq_D=256_M=8.txt')
    # res = query_speed_results()
    # print res.loc[res['algo'] == 'Matmul 1']
    # print res.loc[res['algo'] == 'Matmul 256']


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
import functools
import numpy as np
import pprint
import scipy
import time
from . import amm
from . import matmul_datasets as md
from . import pyience as pyn
from . import compress
from . import amm_methods as methods
from joblib import Memory
_memory = Memory('.', verbose=0)  # joblib disk cache rooted at the cwd

# NUM_TRIALS = 1
NUM_TRIALS = 10  # trials per (method, hparams) combo for stochastic methods
# @_memory.cache
def _estimator_for_method_id(method_id, **method_hparams):
    """Instantiate the estimator class registered for `method_id`,
    forwarding any hyperparameters to its constructor."""
    estimator_cls = methods.METHOD_TO_ESTIMATOR[method_id]
    return estimator_cls(**method_hparams)
def _hparams_for_method(method_id):
    """Return the list of hyperparameter dicts to sweep for `method_id`.

    Sketching methods sweep the sketch size `d` (plus `alpha` for SparsePCA);
    vector-quantization methods sweep the number of codebooks (plus the LUT
    work constant for Mithral); exact / scalar-quantize methods take none.
    Raises ValueError for an unknown method id.
    """
    if method_id in methods.SKETCH_METHODS:
        dvals = [1, 2, 4, 8, 16, 32, 64]
        if method_id == methods.METHOD_SPARSE_PCA:
            # first alpha gets it to not return all zeros on caltech
            alpha_vals = (1. / 16384, .03125, .0625, .125, .25, .5, 1, 2, 4, 8)
            return [{'d': d, 'alpha': alpha}
                    for d in dvals for alpha in alpha_vals]
        return [{'d': dval} for dval in dvals]
    if method_id in methods.VQ_METHODS:
        mvals = [2, 4, 8, 16, 32, 64]
        if method_id == methods.METHOD_MITHRAL:
            lut_work_consts = (2, 4, -1)
            return [{'ncodebooks': m, 'lut_work_const': const}
                    for m in mvals for const in lut_work_consts]
        return [{'ncodebooks': m} for m in mvals]
    if method_id in (methods.METHOD_EXACT, methods.METHOD_SCALAR_QUANTIZE):
        return [{}]  # nothing to sweep
    raise ValueError(f"Unrecognized method: '{method_id}'")
def _ntrials_for_method(method_id, ntasks):
    """Number of trials to run: multiple only for randomized sketching
    methods evaluated on a single task."""
    if ntasks > 1:  # no need to avg over trials if avging over multiple tasks
        return 1
    if method_id in methods.RANDOM_SKETCHING_METHODS:
        return NUM_TRIALS
    return 1
# ================================================================ metrics
def _compute_compression_metrics(ar):
    """Return byte counts for `ar`: raw size vs. sprintz-compressed size."""
    sprintz_nbytes = compress.sprintz_packed_size(ar)
    return {'nbytes_raw': ar.nbytes,
            'nbytes_sprintz': sprintz_nbytes}
def _cossim(Y, Y_hat):
ynorm = np.linalg.norm(Y) + 1e-20
yhat_norm = np.linalg.norm(Y_hat) + 1e-20
return ((Y / ynorm) * (Y_hat / yhat_norm)).sum()
def _compute_metrics(task, Y_hat, compression_metrics=True, **sink):
    """Score the approximate product Y_hat against the exact task.Y_test.

    Returns a dict with regression-quality metrics (mse, cosine similarity,
    moments), optional compressibility metrics of the quantized outputs and
    errors, and — when task.info names a downstream problem ('softmax',
    '1nn', 'rbf', 'sobel', 'dog*') — downstream accuracy for both the
    approximate and exact products. Extra kwargs are ignored via **sink.
    """
    Y = task.Y_test
    diffs = Y - Y_hat
    raw_mse = np.mean(diffs * diffs)
    normalized_mse = raw_mse / np.var(Y)  # 1.0 == as bad as predicting mean(Y)
    metrics = {'raw_mse': raw_mse, 'normalized_mse': normalized_mse,
               'corr': _cossim(Y - Y.mean(), Y_hat - Y_hat.mean()),
               'cossim': _cossim(Y, Y_hat),  # 'bias': diffs.mean(),
               'y_mean': Y.mean(), 'y_std': Y.std(),
               'yhat_std': Y_hat.std(), 'yhat_mean': Y_hat.mean()}
    if compression_metrics:
        # quantize to 12 bits and measure how compressible the outputs are
        # vs. the residuals
        Y_q = compress.quantize(Y, nbits=12)
        Y_hat_q = compress.quantize(Y_hat, nbits=12)
        diffs_q = Y_q - Y_hat_q
        assert Y_q.dtype == np.int16
        assert diffs_q.dtype == np.int16
        metrics_raw = _compute_compression_metrics(Y_q)
        metrics.update({k + '_orig': v for k, v in metrics_raw.items()})
        metrics_raw = _compute_compression_metrics(diffs_q)
        metrics.update({k + '_diffs': v for k, v in metrics_raw.items()})
    if task.info:
        problem = task.info['problem']
        metrics['problem'] = problem
        if problem == 'softmax':
            # compare argmax class predictions from approximate vs exact logits
            lbls = task.info['lbls_test'].astype(np.int32)
            b = task.info['biases']
            logits_amm = Y_hat + b
            logits_orig = Y + b
            lbls_amm = np.argmax(logits_amm, axis=1).astype(np.int32)
            lbls_orig = np.argmax(logits_orig, axis=1).astype(np.int32)
            metrics['acc_amm'] = np.mean(lbls_amm == lbls)
            metrics['acc_orig'] = np.mean(lbls_orig == lbls)
        elif problem in ('1nn', 'rbf'):
            lbls = task.info['lbls_test'].astype(np.int32)
            lbls_centroids = task.info['lbls_centroids']
            lbls_hat_1nn = []
            rbf_lbls_hat = []
            W = task.W_test
            # recover squared distances from inner products:
            # ||x - w||^2 = ||x||^2 - 2 x.w + ||w||^2
            centroid_norms_sq = (W * W).sum(axis=0)
            sample_norms_sq = (task.X_test * task.X_test).sum(
                axis=1, keepdims=True)
            k = W.shape[1]
            nclasses = np.max(lbls_centroids) + 1
            # one-hot map from centroid index to its class label
            affinities = np.zeros((k, nclasses), dtype=np.float32)
            for kk in range(k):
                affinities[kk, lbls_centroids[kk]] = 1
            # first pass uses the approximate product, second the exact one
            for prods in [Y_hat, Y]:
                dists_sq_hat = (-2 * prods) + centroid_norms_sq + sample_norms_sq
                # 1nn classification
                centroid_idx = np.argmin(dists_sq_hat, axis=1)
                lbls_hat_1nn.append(lbls_centroids[centroid_idx])
                # rbf kernel classification (bandwidth=1)
                # gamma = 1. / np.sqrt(W.shape[0])
                # gamma = 1. / W.shape[0]
                gamma = 1
                # NOTE(review): a bare `import scipy` does not reliably expose
                # scipy.special — confirm scipy.special is imported somewhere
                similarities = scipy.special.softmax(-dists_sq_hat * gamma, axis=1)
                class_probs = similarities @ affinities
                rbf_lbls_hat.append(np.argmax(class_probs, axis=1))
            lbls_amm_1nn, lbls_orig_1nn = lbls_hat_1nn
            rbf_lbls_amm, rbf_lbls_orig = rbf_lbls_hat
            metrics['acc_amm_1nn'] = np.mean(lbls_amm_1nn == lbls)
            metrics['acc_orig_1nn'] = np.mean(lbls_orig_1nn == lbls)
            metrics['acc_amm_rbf'] = np.mean(rbf_lbls_amm == lbls)
            metrics['acc_orig_rbf'] = np.mean(rbf_lbls_orig == lbls)
            if problem == '1nn':
                # NOTE(review): both branches assign the *rbf* labels; for
                # '1nn' this looks like it should be lbls_amm_1nn /
                # lbls_orig_1nn — confirm whether this is intentional
                lbls_amm, lbls_orig = rbf_lbls_amm, rbf_lbls_orig
            elif problem == 'rbf':
                lbls_amm, lbls_orig = rbf_lbls_amm, rbf_lbls_orig
            orig_acc_key = 'acc-1nn-raw'
            if orig_acc_key in task.info:
                metrics[orig_acc_key] = task.info[orig_acc_key]
            metrics['acc_amm'] = np.mean(lbls_amm == lbls)
            metrics['acc_orig'] = np.mean(lbls_orig == lbls)
        elif problem == 'sobel':
            # gradient magnitude from the two sobel filter responses
            assert Y.shape[1] == 2
            grad_mags_true = np.sqrt((Y * Y).sum(axis=1))
            grad_mags_hat = np.sqrt((Y_hat * Y_hat).sum(axis=1))
            diffs = grad_mags_true - grad_mags_hat
            metrics['grad_mags_nmse'] = (
                (diffs * diffs).mean() / grad_mags_true.var())
        elif problem.lower().startswith('dog'):
            # difference of gaussians
            assert Y.shape[1] == 2
            Z = Y[:, 0] - Y[:, 1]
            Z_hat = Y_hat[:, 0] - Y_hat[:, 1]
            diffs = Z - Z_hat
            metrics['dog_nmse'] = (diffs * diffs).mean() / Z.var()
    return metrics
# ================================================================ driver funcs
def _eval_amm(task, est, fixedB=True, **metrics_kwargs):
    """Run one timed prediction of `est` on the task's test split.

    With fixedB (the default), est.set_B(task.W_test) is called up front so
    per-B preprocessing is excluded from the timed region. Returns the
    metrics dict from _compute_metrics, plus 'secs' (wall-clock predict
    time) and the estimator's speed metrics.
    """
    est.reset_for_new_task()
    if fixedB:
        est.set_B(task.W_test)
    t_start = time.perf_counter()
    Y_hat = est.predict(task.X_test, task.W_test)
    elapsed = time.perf_counter() - t_start
    metrics = _compute_metrics(task, Y_hat, **metrics_kwargs)
    metrics['secs'] = elapsed
    speed_metrics = est.get_speed_metrics(
        task.X_test, task.W_test, fixedB=fixedB)
    metrics.update(speed_metrics)
    return metrics
def _get_all_independent_vars():
    """Union of the identifying columns with every hyperparameter name any
    method's estimator exposes; used to dedup saved result rows."""
    independent_vars = {'task_id', 'method', 'trial'}
    for method_id in methods.ALL_METHODS:
        first_hparams = _hparams_for_method(method_id)[0]
        est = _estimator_for_method_id(method_id, **first_hparams)
        independent_vars |= set(est.get_params().keys())
    return independent_vars
# @functools.lru_cache(maxsize=None)
# @_memory.cache
def _fitted_est_for_hparams(method_id, hparams_dict, X_train, W_train,
                            Y_train, **kwargs):
    """Construct the estimator for `method_id` with `hparams_dict` and fit
    it on the training matrices (Y_train is the precomputed target product;
    see ApproxMatmul.fit)."""
    est = _estimator_for_method_id(method_id, **hparams_dict)
    est.fit(X_train, W_train, Y=Y_train, **kwargs)
    return est
def _main(tasks_func, methods=None, saveas=None, ntasks=None,
          verbose=1, limit_ntasks=-1, compression_metrics=False,
          tasks_all_same_shape=False):
    """Sweep every (method, hparams) combination over the tasks yielded by
    tasks_func() and append the resulting metrics to results/amm/<saveas>.

    Parameters
    ----------
    tasks_func: zero-arg callable yielding task objects; re-invoked for each
        hyperparameter setting, so it must be restartable
    methods: None (= default method list), a single method name, or a list
    saveas: base name for the saved results DataFrame
    ntasks: total task count (used for logging and the trial-count heuristic)
    verbose: 0-4; higher prints more
    limit_ntasks: stop after this many tasks; None or < 1 means no limit
    compression_metrics: also compute output-compressibility metrics
    tasks_all_same_shape: if hparams are invalid for one task, skip the rest
        of the tasks too (they would fail identically)
    """
    if methods is None:
        # the `methods` parameter shadows the amm_methods module (imported
        # at the top of this file as `methods`), so re-import it locally;
        # the previous `methods.DEFAULT_METHODS` on the None parameter
        # raised AttributeError
        from . import amm_methods as _amm_methods
        methods = _amm_methods.DEFAULT_METHODS
    if isinstance(methods, str):
        methods = [methods]
    if limit_ntasks is None or limit_ntasks < 1:
        limit_ntasks = np.inf
    independent_vars = _get_all_independent_vars()

    for method_id in methods:
        if verbose > 0:
            print("running method: ", method_id)
        ntrials = _ntrials_for_method(method_id=method_id, ntasks=ntasks)
        for hparams_dict in _hparams_for_method(method_id):
            if verbose > 3:
                print("got hparams: ")
                pprint.pprint(hparams_dict)
            metrics_dicts = []
            try:
                prev_X_shape, prev_Y_shape = None, None
                prev_X_std, prev_Y_std = None, None
                est = None
                for i, task in enumerate(tasks_func()):
                    if i + 1 > limit_ntasks:
                        # jump straight to saving what we have so far
                        raise StopIteration()
                    if verbose > 1:
                        print("-------- running task: {} ({}/{})".format(
                            task.name, i + 1, ntasks))
                    task.validate_shapes()  # fail fast if task is ill-formed
                    # refitting dominates runtime, so reuse the estimator
                    # when the training data looks identical to last task's
                    can_reuse_est = (
                        (i != 0) and (est is not None)
                        and (prev_X_shape is not None)
                        and (prev_Y_shape is not None)
                        and (prev_X_std is not None)
                        and (prev_Y_std is not None)
                        and (task.X_train.shape == prev_X_shape)
                        and (task.Y_train.shape == prev_Y_shape)
                        and (task.X_train.std() == prev_X_std)
                        and (task.Y_train.std() == prev_Y_std))
                    if not can_reuse_est:
                        try:
                            est = _fitted_est_for_hparams(
                                method_id, hparams_dict,
                                task.X_train, task.W_train, task.Y_train)
                        except amm.InvalidParametersException as e:
                            # hparams don't make sense for task (eg, D < d)
                            if verbose > 2:
                                print(f"hparams apparently invalid: {e}")
                            est = None
                            if tasks_all_same_shape:
                                raise StopIteration()
                            else:
                                continue
                    prev_X_shape = task.X_train.shape
                    prev_Y_shape = task.Y_train.shape
                    prev_X_std = task.X_train.std()
                    prev_Y_std = task.Y_train.std()
                    try:
                        for trial in range(ntrials):
                            metrics = _eval_amm(
                                task, est, compression_metrics=compression_metrics)
                            metrics['N'] = task.X_test.shape[0]
                            metrics['D'] = task.X_test.shape[1]
                            metrics['M'] = task.W_test.shape[1]
                            metrics['trial'] = trial
                            metrics['method'] = method_id
                            metrics['task_id'] = task.name
                            metrics.update(est.get_params())
                            print("got metrics: ")
                            pprint.pprint(metrics)
                            metrics_dicts.append(metrics)
                    except amm.InvalidParametersException as e:
                        if verbose > 2:
                            print(f"hparams apparently invalid: {e}")
                        if tasks_all_same_shape:
                            raise StopIteration()
                        else:
                            continue
            except StopIteration:  # no more tasks for these hparams
                pass
            if len(metrics_dicts):
                pyn.save_dicts_as_data_frame(
                    metrics_dicts, save_dir='results/amm', name=saveas,
                    dedup_cols=independent_vars)
# def main_ecg(methods=None, saveas='ecg', limit_nhours=1):
# tasks = md.load_ecg_tasks(limit_nhours=limit_nhours)
# return _main(tasks=tasks, methods=methods, saveas=saveas, ntasks=139,
# # limit_ntasks=10, compression_metrics=False)
# limit_ntasks=5, compression_metrics=True)
def main_caltech(methods=methods.USE_METHODS, saveas='caltech',
                 limit_ntasks=-1, limit_ntrain=-1, filt='sobel'):
    """Benchmark AMM methods on the Caltech image-filtering tasks for the
    given filter (e.g. 'sobel', 'dog5x5'); results saved as
    '<saveas>_<filt>'."""
    saveas = '{}_{}'.format(saveas, filt)
    caltech_tasks = functools.partial(
        md.load_caltech_tasks, filt=filt, limit_ntrain=limit_ntrain)
    return _main(tasks_func=caltech_tasks, methods=methods,
                 saveas=saveas, ntasks=510, limit_ntasks=limit_ntasks,
                 tasks_all_same_shape=True)
def main_ucr(methods=methods.USE_METHODS, saveas='ucr',
             k=128, limit_ntasks=None, problem='rbf'):
    """Benchmark AMM methods on the UCR time-series tasks; results saved as
    '<saveas>_k=<k>_problem=<problem>'."""
    ucr_tasks = functools.partial(
        md.load_ucr_tasks, limit_ntasks=limit_ntasks, k=k, problem=problem)
    saveas = '{}_k={}_problem={}'.format(saveas, k, problem)
    return _main(tasks_func=ucr_tasks, methods=methods, saveas=saveas,
                 ntasks=76, limit_ntasks=limit_ntasks,
                 tasks_all_same_shape=False)
def main_cifar10(methods=methods.USE_METHODS, saveas='cifar10'):
    """Benchmark AMM methods on the (single) CIFAR-10 task."""
    return _main(tasks_func=md.load_cifar10_tasks, methods=methods,
                 saveas=saveas, ntasks=1)
def main_cifar100(methods=methods.USE_METHODS, saveas='cifar100'):
    """Benchmark AMM methods on the (single) CIFAR-100 task."""
    return _main(tasks_func=md.load_cifar100_tasks, methods=methods,
                 saveas=saveas, ntasks=1)
def main_all(methods=methods.USE_METHODS):
    """Run the benchmarks on every dataset with the given methods."""
    main_cifar10(methods=methods)
    main_cifar100(methods=methods)
    # main_ecg(methods=methods)
    main_caltech(methods=methods)
def main():
    """Current experiment entry point: run the scalar-quantize baseline on
    both Caltech filter variants. Swap in other main_* calls to rerun
    different method/dataset combinations."""
    main_caltech(methods='ScalarQuantize', filt='sobel')
    main_caltech(methods='ScalarQuantize', filt='dog5x5')


if __name__ == '__main__':
    np.set_printoptions(formatter={'float': lambda f: "{:.2f}".format(f)},
                        linewidth=100)
    main()
|
#!/usr/bin/env python
import abc
import numpy as np
# from sklearn.decomposition import PCA, SparsePCA
from sklearn import decomposition
from sklearn.decomposition import PCA, SparsePCA, MiniBatchSparsePCA
from sklearn.utils.extmath import randomized_svd
import numba # conda install numba
# import ffht # https://github.com/FALCONN-LIB/FFHT; python setup.py install
import scipy
from joblib import Memory
_memory = Memory('.', verbose=1, compress=9)  # joblib disk cache (compressed)

KEY_NMULTIPLIES = 'muls'  # metrics-dict key for multiply counts

# number of nonzeros per sketch column for OSNAP (see OsnapSketch)
OSNAP_DEFAULT_S = 4
# OSNAP_DEFAULT_S = 2
# ================================================================ utils
def _nmultiplies_matmul(A, B):
return A.shape[0] * A.shape[1] * B.shape[1]
def _nmultiplies_matmul_with_sizes(N, D, M):
return N * D * M
def _nmultiplies_svd(N, D):
return min(N * N * D, N * D * D)
def _nmultiplies_qr(N, D):
return min(N * N * D, N * D * D)
# ================================================================ types
class InvalidParametersException(Exception):
    """Raised when hyperparameters are incompatible with a task's shapes
    (e.g., a sketch size d that is not smaller than the inner dimension)."""
    pass
class ApproxMatmul(abc.ABC):
    """Abstract base class for approximate matrix-multiplication methods.

    Subclasses must implement __call__(A, B) -> approximate A @ B and
    get_speed_metrics(); fit/set_A/set_B/reset_for_new_task are optional
    hooks that default to no-ops.
    """

    def __init__(self, *args_unused, **kwargs_unused):
        # was `def __init__(*args_unused, ...)` with self swallowed by
        # *args; explicit self is equivalent but idiomatic. Extra args are
        # accepted and ignored so subclasses can share construction sites.
        pass

    def fit(self, A, B, Y=None):  # Y = A @ B if not specified
        """Optional training hook; default is no training."""
        pass

    def set_A(self, A):
        """Optional hook to preprocess the left operand ahead of time."""
        pass

    def set_B(self, B):
        """Optional hook to preprocess the right operand ahead of time."""
        pass

    def reset_for_new_task(self):
        """Optional hook to clear per-task state."""
        pass

    @abc.abstractmethod
    def __call__(self, A, B):
        """Return an approximation of A @ B."""
        pass

    def predict(self, A, B):
        """Alias for __call__, for an sklearn-like interface."""
        return self(A, B)

    def get_params(self):
        """Hyperparameters identifying this configuration (for results)."""
        return {}

    @abc.abstractmethod
    def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
        """Return cost metrics (e.g. multiply counts) for approximating
        A @ B; fixedA/fixedB exclude one-time preprocessing of that
        operand."""
        pass
class ExactMatMul(ApproxMatmul):
    """Baseline 'approximation': the exact product A @ B."""

    def __call__(self, A, B):
        product = A @ B
        return product

    def get_speed_metrics(self, A, B, **sink):
        nmuls = _nmultiplies_matmul(A, B)
        return {KEY_NMULTIPLIES: nmuls}
def _scalar_quantize(A, axis=1, signed=False, nbits=8):
unsigned_maxval = float(1 << int(nbits)) - 1
# # TODO rm
# # return np.zeros((A.shape[0], 1)), np.ones((A.shape[0], 1)), A
# # offsets = np.zeros((A.shape[0], 1))
# offsets = A.min(axis=1, keepdims=True)
# # scales = maxval / np.ones((A.shape[0], 1))
# scales = maxval / A.max(axis=1, keepdims=True)
# Aq = (A - offsets) * scales
# return offsets, scales, Aq
# maxval = float(1 << int(nbits)) - 1
mins = A.min(axis=axis, keepdims=True)
# A_offset = A - offsets
ranges = (A - mins).max(axis=axis, keepdims=True) + 1e-20
scales = unsigned_maxval / ranges
# Aq = (A_offset * (maxval / scales)).astype(np.int)
# Aq = (A_offset * scales).astype(np.int)
if signed:
# sign_offset = 1 << (nbits - 1) # 8 bits -> 128
# A_offset -= sign_offset
offsets = mins + (ranges * (128. / 255))
minval = -(1 << (nbits - 1))
maxval = -minval - 1
else:
offsets = mins
minval = 0
maxval = (1 << nbits) - 1
Aq = (A - offsets) * scales
# print("min, max A:", Aq.min(), Aq.max()) # looks good
Aq = np.clip(Aq, minval, maxval).astype(np.int)
return offsets, scales, Aq
class QuantizedMatmul(ApproxMatmul):
    """Approximate matmul via nbits scalar quantization of both operands:
    A is quantized unsigned per row, B signed per column, and the offset
    cross-terms are added back exactly."""
    __slots__ = 'nbits a_offsets a_scales b_offsets b_scales A B'.split()

    def __init__(self, nbits=8):
        self.nbits = nbits
        # slots start unset, so without this a __call__ before
        # reset_for_new_task() raised AttributeError instead of triggering
        # the lazy set_A/set_B below
        self.A = None
        self.B = None

    def __call__(self, A, B):
        """Return approximate A @ B, lazily quantizing unset operands."""
        assert A.shape[1] == B.shape[0]  # dims need to match
        N, D = A.shape
        D, M = B.shape
        if self.A is None:
            self.set_A(A)
        if self.B is None:
            self.set_B(B)
        # ignoring scales: (A - a_off) @ (B - b_off)
        #   = A @ B - (a_off @ B) - (A @ b_off) + a_off @ b_off
        # so A @ B is recovered by adding the offset cross-terms back in
        ret = (self.A @ self.B).astype(np.float32)
        ret *= 1. / self.a_scales
        ret *= 1. / self.b_scales
        A_off = np.tile(self.a_offsets, (1, D))
        B_off = np.tile(self.b_offsets, (D, 1))
        return ret + (A_off @ B) + (A @ B_off) - (A_off @ B_off)

    def set_A(self, A):
        # unsigned quantization; we *could* learn the offsets and scales
        # on the training set, but since this is a baseline, we're giving it
        # the advantage of using the "true" offsets/scales
        self.a_offsets, self.a_scales, self.A = _scalar_quantize(
            A, axis=1, signed=False, nbits=self.nbits)

    def set_B(self, B):
        # signed quantization (for maddubs instruction)
        self.b_offsets, self.b_scales, self.B = _scalar_quantize(
            B, axis=0, signed=True, nbits=self.nbits)

    def reset_for_new_task(self):
        # drop quantized operands so the next call re-quantizes
        self.A = None
        self.B = None

    def get_speed_metrics(self, A, B, **sink):
        # neglect packing, postprocessing, etc.
        return {KEY_NMULTIPLIES: _nmultiplies_matmul(A, B)}
class SketchedMatmul(ApproxMatmul, abc.ABC):
    """Base class for AMM methods that sketch A and B down to a shared
    inner dimension d and multiply the sketches."""
    __slots__ = 'd'

    def __init__(self, d):
        self.d = int(d)

    def get_params(self):
        return {'d': self.d}

    def sketch(self, A, B):
        """Return (A_hat, B_hat) with inner dimension at most self.d;
        subclasses override this."""
        pass

    def call(self, A, B):
        A_hat, B_hat = self.sketch(A, B)
        assert A_hat.shape[0] == A.shape[0]
        assert B_hat.shape[1] == B.shape[1]
        assert A_hat.shape[1] <= self.d  # verify sketch size not cheating
        return A_hat @ B_hat

    def __call__(self, A, B):
        assert A.shape[1] == B.shape[0]  # dims need to match
        D = A.shape[1]
        if D <= self.d:
            # messages previously printed '{} < {}', contradicting the
            # <= test on the boundary case
            raise InvalidParametersException(
                'D <= d: {} <= {}'.format(D, self.d))
        if B.shape[1] <= self.d:
            raise InvalidParametersException(
                'M <= d: {} <= {}'.format(B.shape[1], self.d))
        return self.call(np.copy(A), np.copy(B))  # guarantee A, B unchanged

    def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
        assert not (fixedA and fixedB)  # this would be stupid, so fail fast
        sketch_nmuls = self._get_nmuls(A.shape[0], A.shape[1], B.shape[1],
                                       self.d, fixedA=fixedA, fixedB=fixedB)
        N, D = A.shape
        D, M = B.shape
        sketched_matmul_nmuls = N * self.d * M
        return {KEY_NMULTIPLIES: sketch_nmuls + sketched_matmul_nmuls}

    def _get_nmuls(self, N, D, M, d, fixedA=False, fixedB=False):
        # default cost model: sketching each free operand with a dense
        # D x d matrix; subclasses with cheaper sketches override this
        nmuls = 0
        if not fixedA:
            nmuls += N * D * d
        if not fixedB:
            nmuls += M * D * d
        return nmuls
class RandGaussSketch(SketchedMatmul):
    """Sketch with a random Gaussian matrix whose columns are normalized
    to unit length."""

    def sketch(self, A, B):
        D = A.shape[1]
        proj = np.random.randn(D, self.d).astype(np.float32)
        # dividing by the expected norm (sqrt(D)) would be closer to the
        # theory papers, but no reason that should actually be better AFAIK
        proj /= np.linalg.norm(proj, axis=0)
        return A @ proj, proj.T @ B
class RandOrthoGaussSketch(SketchedMatmul):
    """Sketch with an orthonormalized (via QR) random Gaussian matrix."""

    def sketch(self, A, B):
        D = A.shape[1]
        proj = np.random.randn(D, self.d).astype(np.float32)
        proj, _ = np.linalg.qr(proj)
        return A @ proj, proj.T @ B
class RandRademacherSketch(SketchedMatmul):
    """Sketch with a random +/-1 (Rademacher) matrix scaled by 1/sqrt(D)."""

    def sketch(self, A, B):
        D = A.shape[1]
        signs = np.random.randint(2, size=(D, self.d)).astype(np.float32) * 2 - 1
        signs /= np.sqrt(D)
        return A @ signs, signs.T @ B
class HadamardSketch(SketchedMatmul):
    """Deterministic sketch: the first d columns of a Hadamard matrix
    (padded up to the next power of 2), columns rescaled to unit norm."""

    def sketch(self, A, B):
        in_dim = A.shape[1]
        pad_dim = 1 << int(np.ceil(np.log2(in_dim)))  # next power of two
        H = scipy.linalg.hadamard(pad_dim)[:in_dim, :self.d]
        H = H.astype(np.float32)
        H /= np.linalg.norm(H, axis=0)
        return A @ H, H.T @ B
class SketchSqSample(SketchedMatmul):
    # AMM via importance sampling of inner dims, with probabilities
    # proportional to ||A[:, i]|| * ||B[i, :]||; see sketch_sq_sample().
    def sketch(self, A, B):
        return sketch_sq_sample(A, B, self.d)

    def _get_nmuls(self, N, D, M, d, **sink):
        return _nmultiplies_sketch_sq_sample(N, D, M, d)
class FdAmm(SketchedMatmul):
    # AMM via a Frequent Directions sketch of [A.T | B]; see
    # fd_amm_sketches().
    def sketch(self, A, B):
        return fd_amm_sketches(A, B, self.d)

    def _get_nmuls(self, N, D, M, d, **sink):
        return _nmultiplies_fd_amm_sketches(N, D, M, d)
class CooccurSketch(SketchedMatmul):
    # AMM via co-occurring directions sketching; see cooccur_sketches().
    def sketch(self, A, B):
        return cooccur_sketches(A, B, self.d)

    def _get_nmuls(self, N, D, M, d, **sink):
        return _nmultiplies_cooccur_sketches(N, D, M, d)
class FastJlSketch(SketchedMatmul):
    # AMM via a FastJL (sign flips + Hadamard + sparse projection)
    # transform; see fastjl_sketches().
    def sketch(self, A, B):
        return fastjl_sketches(A, B, self.d)

    def _get_nmuls(self, N, D, M, d, **sink):
        return _nmultiplies_fastjl_sketches(N, D, M, d)
class HashJlSketch(SketchedMatmul):
    # AMM via count-sketch-style hashing of dims; see hash_sketches().
    def sketch(self, A, B):
        return hash_sketches(A, B, self.d)

    def _get_nmuls(self, N, D, M, d, **sink):
        return _nmultiplies_hash_sketches(N, D, M, d)
class OsnapSketch(SketchedMatmul):
    # AMM via OSNAP (multiple disjoint hash subspaces); see
    # osnap_sketches(). s is fixed at OSNAP_DEFAULT_S and deliberately
    # not reported in get_params().
    def sketch(self, A, B):
        return osnap_sketches(A, B, self.d, s=OSNAP_DEFAULT_S)

    def _get_nmuls(self, N, D, M, d, **sink):
        return _nmultiplies_osnap_sketches(N, D, M, d)
class SvdSketch(SketchedMatmul):
    # AMM via truncated randomized SVD of each operand. Factorizations
    # are cached (set_A / set_B) so a fixed operand is only factorized
    # once per task; overrides call() instead of sketch().
    __slots__ = 'd niters Ua SVTa Ub SVTb'.split()

    def __init__(self, d, niters=5):
        self.d = d
        self.niters = niters  # power-iteration count for randomized_svd
        self.reset_for_new_task()

    def get_params(self):
        return {'d': self.d, 'niters': self.niters}

    def _check_mat_shape(self, M):
        # False for a missing operand; raises if the matrix can't be
        # truncated to rank d (both dims smaller than d).
        if M is None:
            return False
        # if np.min(M.shape) < self.d:
        if np.max(M.shape) < self.d:
            raise InvalidParametersException(
                'shape has entry < d: {} < {}'.format(M.shape, self.d))
        return True

    def set_A(self, A):
        if self._check_mat_shape(A):
            self.Ua, self.SVTa = svd_sketch(A, self.d)

    def set_B(self, B):
        if self._check_mat_shape(B):
            self.Ub, self.SVTb = svd_sketch(B, self.d)

    def reset_for_new_task(self):
        # cached factorizations: A ≈ Ua @ SVTa, B ≈ Ub @ SVTb
        self.Ua = None
        self.SVTa = None
        self.Ub = None
        self.SVTb = None

    def call(self, A=None, B=None):
        # lazily factorize whichever operands weren't set beforehand
        if self.Ua is None:
            self.set_A(A)
        if self.Ub is None:
            self.set_B(B)
        # NOTE(review): Ua.shape[1] is the truncated rank returned by
        # svd_sketch (normally == d), so this rejects inputs whose
        # factorization came out narrower than d — confirm intended.
        D = self.Ua.shape[1]
        if D < self.d:
            raise InvalidParametersException(
                'D < d: {} < {}'.format(D, self.d))
        # verify sketch size isn't cheating
        assert self.Ua.shape[1] <= self.d
        assert self.SVTa.shape[0] <= self.d
        assert self.SVTb.shape[0] <= self.d
        assert self.Ub.shape[1] <= self.d
        # innermost parens important so that matmuls actually use low rank
        # outer parens help if B ncols < A nrows (which is true for us)
        return self.Ua @ ((self.SVTa @ self.Ub) @ self.SVTb)

    def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
        # XXX this will break if not called right after self.call()
        total = 0
        d = self.d
        N, D = A.shape
        _, M = B.shape
        if not fixedA:
            total += _nmultiplies_svd_sketch(N, D, d, niters=self.niters)
        if not fixedB:
            total += _nmultiplies_svd_sketch(D, M, d, niters=self.niters)
        total += d * D * d  # SVTa @ Ub, d x D @ D x d
        total += d * d * M  # (above) @ SVTb, d x d @ d x M
        total += N * d * M  # Ua @ (above), N x d @ d x M
        return {KEY_NMULTIPLIES: total}
@_memory.cache
def _fitted_pca(X, n_components):
    # Disk-cached (joblib) PCA fit so repeated experiments don't refit.
    pca = PCA(n_components=n_components)
    return pca.fit(X)
class TrainedPcaSketch(ApproxMatmul):
    """AMM that projects both operands onto d PCA components learned
    (offline) from training data for A."""
    __slots__ = 'pca d A B V'.split()

    def __init__(self, d):
        self.d = d
        self.reset_for_new_task()

    def reset_for_new_task(self):
        # drop cached sketches of the operands
        self.A = None
        self.B = None

    def fit(self, A, B, Y=None):  # Y = A @ B if not specified
        D, M = B.shape
        print("called fit on TrainedPcaSketch!")
        if D < self.d:
            raise InvalidParametersException(
                'D < d: {} < {}'.format(D, self.d))
        if M < self.d:
            raise InvalidParametersException(
                'M < d: {} < {}'.format(M, self.d))
        self.pca = _fitted_pca(A, n_components=self.d)
        self.V = self.pca.components_.T  # D x d; columns are orthonormal

    def set_A(self, A):
        self.A = A @ self.V

    def set_B(self, B):
        self.B = self.V.T @ B

    def __call__(self, A, B):
        assert A.shape[1] == B.shape[0]  # dims need to match
        if B.shape[1] < self.d:
            raise InvalidParametersException(
                'M < d: {} < {}'.format(B.shape[1], self.d))
        if self.A is None:
            self.set_A(A)
        if self.B is None:
            self.set_B(B)
        return self.A @ self.B

    def get_params(self):
        return {'d': self.d}

    def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
        N, D = A.shape
        _, M = B.shape
        nmuls = N * self.d * M  # assuming matrices already sketched
        if not fixedA:
            nmuls += N * D * self.d
        if not fixedB:
            nmuls += D * M * self.d
        return {KEY_NMULTIPLIES: nmuls}
@_memory.cache
def _fitted_sparse_pca(X, d, unscaled_alpha, **kwargs):
    # Disk-cached SparsePCA fit, initialized from a randomized SVD.
    # This seems to work better than initializing with MiniBatchSparsePCA,
    # svd of cov mat, or basically anything else I tried.
    U, _, Vt = randomized_svd(X, n_components=d, random_state=123)
    U = U[:, :d]
    V = Vt.T[:d]
    # SparsePCA (and all the sklearn dictionary learning stuff)
    # internally uses sum of squared errs for each sample, and L1 norm
    # of parameter matrix; to make alpha meaningful across datasets,
    # want to scale by number of examples (so it's effectively using MSE)
    # and divide by L1 norm (which grows linearly with size of parameter
    # matrix / vector); also scale by variance of data for similar reasons
    N, D = X.shape
    alpha = unscaled_alpha * np.var(X - X.mean(axis=0)) * N / D
    verbose = 1
    pca = SparsePCA(n_components=d, alpha=alpha, normalize_components=True,
                    method='lars', U_init=U, V_init=V, max_iter=10,
                    ridge_alpha=max(1, len(X) * X.std() * 10),
                    verbose=verbose, random_state=123)
    if verbose > 0:
        print("fitting sparse pca...")
    return pca.fit(X)
class TrainedSparsePcaSketch(ApproxMatmul):
    """AMM via a *sparse* PCA projection learned from training data for
    A; sparsity (self.nnz nonzeros) makes the sketching step cheap.

    Fix: removed a block of leftover debug prints in __call__ that ran
    unconditionally because its `if N == 700:` guard had been commented
    out.
    """
    __slots__ = 'pca d alpha nnz can_optimize_transform A B'.split()

    def __init__(self, d, alpha, can_optimize_transform=False):
        self.d = d
        self.alpha = alpha
        self.can_optimize_transform = can_optimize_transform
        self.reset_for_new_task()

    def reset_for_new_task(self):
        # drop cached operand sketches
        self.A = None
        self.B = None

    def fit(self, A, B, Y=None):  # Y = A @ B if not specified
        D, M = B.shape
        if D <= self.d:
            raise InvalidParametersException(
                'D < d: {} < {}'.format(D, self.d))
        self.pca = _fitted_sparse_pca(A, d=self.d, unscaled_alpha=self.alpha)
        self.nnz = np.sum(self.pca.components_ != 0)
        sparsity = np.mean(self.pca.components_ == 0)
        # reject degenerate fits: too sparse to span d dims, or not
        # sparse at all (then dense PCA would be strictly better)
        if self.nnz < self.d:
            raise InvalidParametersException(
                "ignoring SparsePCA with nnz < d: "
                "{} < {}".format(self.nnz, self.d))
        if sparsity == 0.:
            raise InvalidParametersException(
                "ignoring SparsePCA with no zeros")

    def set_A(self, A):
        if self.can_optimize_transform:
            # uses ridge regression to get coeffs, instead of linear projection
            # disabled by default because it produces garbage on caltech and
            # is more expensive than just doing the matmul
            self.A = self.pca.transform(A)
            self.A += self.pca.mean_ @ self.pca.components_.T
        else:
            self.A = A @ self.pca.components_.T

    def set_B(self, B):
        if self.can_optimize_transform:
            self.B = self.pca.transform(B.T).T
            self.B += (self.pca.mean_ @ self.pca.components_.T).reshape(-1, 1)
        else:
            self.B = (B.T @ self.pca.components_.T).T

    def __call__(self, A, B):
        assert A.shape[1] == B.shape[0]  # dims need to match
        N, D = A.shape
        D, M = B.shape
        if D <= self.d:
            raise InvalidParametersException(
                'D < d: {} < {}'.format(D, self.d))
        fixedA = self.A is not None
        fixedB = self.B is not None
        # bail out if the sparse sketch wouldn't actually be cheaper
        nmuls_naive = N * D * M
        nmuls_ours = self.get_speed_metrics(
            A, B, fixedA=fixedA, fixedB=fixedB)[KEY_NMULTIPLIES]
        if nmuls_naive <= nmuls_ours:
            raise InvalidParametersException(
                "naive # of multiplies < sparse sketch # of multiplies: "
                "{} < {}".format(nmuls_naive, nmuls_ours))
        if not fixedA:
            self.set_A(A)
        if not fixedB:
            self.set_B(B)
        return self.A @ self.B

    def get_params(self):
        return {'d': self.d, 'alpha': self.alpha,
                'canCheat': self.can_optimize_transform}

    def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
        N, D = A.shape
        D, M = B.shape
        nmuls_sketch_X = N * self.nnz
        nmuls_sketch_W = M * self.nnz
        nmuls_make_output = N * self.d * M
        total_nmuls = nmuls_make_output
        if not fixedA:
            total_nmuls += nmuls_sketch_X
        if not fixedB:
            total_nmuls += nmuls_sketch_W
        try:  # compute degree of sparsity
            nnz = self.nnz
            sparsity = (self.pca.components_ == 0).mean()
        except AttributeError:  # model not fitted yet
            nnz = -1
            sparsity = -1
        return {KEY_NMULTIPLIES: total_nmuls,
                'nnz': nnz, 'sparsity': sparsity}
# ================================================================ drineas06
def _compute_dim_scores(A, B, A_col_norms=None, B_row_norms=None):
if A_col_norms is None:
A_col_norms = np.linalg.norm(A, axis=0)
if B_row_norms is None:
B_row_norms = np.linalg.norm(B, axis=1)
return A_col_norms * B_row_norms
def sketch_sq_sample(A, B, d):
    """Sample d inner dims with probability proportional to
    ||A[:, i]|| * ||B[i, :]||, reweighting A's kept columns so the
    sketched product is unbiased.

    Returns (A[:, idxs] / weights, B[idxs]).
    """
    scores = _compute_dim_scores(A, B)
    # (varopt sampling of the scores was tried here and didn't help)
    idxs, weights = importance_sample(scores, d)
    return A[:, idxs] / weights, B[idxs]
def _nmultiplies_sketch_sq_sample(N, D, M, d):
scores_nmuls = N * D + M * D # sum of sizes of each mat
reweight_nmuls = N * d + M * d # sum of sizes of each sampled mat
return scores_nmuls + reweight_nmuls # neglect normalization of probs, etc
def sketch_sq_deterministic(A, B, d):
    """Deterministic analogue of sketch_sq_sample(): keep the d
    highest-scoring inner dims, reweighted as if sampled uniformly.

    Fix: the old slice `np.argsort(scores)[::-d]` took roughly D/d
    arbitrary strided indices instead of the top-d scoring dims.
    """
    scores = _compute_dim_scores(A, B)
    D = A.shape[1]
    keep_idxs = np.argsort(scores)[::-1][:d]  # indices of the d largest
    weights = np.sqrt(d * (1. / D))  # uniform prob
    return A[:, keep_idxs] / weights, B[keep_idxs] / weights.reshape(-1, 1)
def test_sketch_sq_sample():
    """Sampling error should (roughly) shrink as d grows."""
    print("test_sketch_sq_sample")
    N, M, D = 100, 50, 200
    np.random.seed(1234)
    A = np.random.randn(N, D).astype(np.float32)
    B = np.random.randn(D, M).astype(np.float32)
    AB = A @ B
    orig_frob_sq = np.mean(AB * AB)
    print("true mss: ", orig_frob_sq)
    prev_normed_err = np.inf
    for d in (10, 20, 30, 40, 50):
        A_hat, B_hat = sketch_sq_sample(A, B, d)
        resid = AB - (A_hat @ B_hat)
        normed_err_sq = np.mean(resid * resid) / orig_frob_sq
        print('d = {}, err = {:.3f}'.format(d, normed_err_sq))
        assert normed_err_sq < 2.
        assert normed_err_sq < (prev_normed_err + .05)  # should usually hold
        prev_normed_err = normed_err_sq
# ================================================================ sampling
# wait, this just returns points summing to the true sample sum
# deterministically...
def importance_sample(sample_weights, m, replace=False):
    """Draw m indices with probability proportional to sample_weights.

    Returns (idxs, weights) where weights[i] = 1 / (m * p(idxs[i])), so
    that reweighted sums are unbiased estimates of the full sum.
    """
    probs = sample_weights / sample_weights.sum()
    n = len(sample_weights)
    idxs = np.random.choice(np.arange(n), p=probs, replace=replace, size=m)
    return idxs, 1. / (probs[idxs] * m)
def _invert_permutation(permutation):
return np.arange(len(permutation))[np.argsort(permutation)]
def _sum_for_tau(x, tau):
above_tau = x > tau
return x[above_tau].sum() + (x[~above_tau] / tau).sum()
def _compute_new_tau(x_sorted_desc, m, tau=0):
    # NOTE(review): this helper looks unfinished — `tau` is never
    # updated inside the loop and there is no return statement (the
    # function always yields None). It is not called anywhere in this
    # file; confirm before relying on it.
    x = x_sorted_desc
    current_sum = _sum_for_tau(x, tau)
    assert current_sum >= m
    while current_sum > m:
        x = x[:-1]  # drop the smallest remaining entry
        current_sum = _sum_for_tau(x, tau)
def sample_varopt_1d(x, m):
    # varopt sampling; original paper (see Algorithm 1 on p16):
    # https://arxiv.org/pdf/0803.0473.pdf
    # better intuition:
    # https://datasketches.github.io/docs/Sampling/VarOptSampling.html
    #
    # Batch variant: repeatedly keep the largest point with certainty
    # (weight 1) while its normalized share q[0] >= 1, renormalizing the
    # tail each time; then importance-sample the remaining m - head_sz
    # points proportional to weight.
    #
    # EDIT (original author): this sucks unless really heavy tailed, so
    # probably not a correct impl?
    x = np.asarray(x, dtype=np.float32)
    n = len(x)
    if m >= n:
        # NOTE(review): this path (and the constant/m==1 path below)
        # returns only idxs, while the main path returns
        # (idxs, weights) — callers must special-case; confirm intended.
        return np.arange(n)
    maxval = np.max(x)
    minval = np.min(x)
    assert minval >= 0  # needs nonnegative entries
    if minval == maxval or m == 1:
        return np.random.choice(np.arange(n), size=m)
    sort_idxs = np.argsort(x)[::-1]  # in descending order
    x_sorted = x[sort_idxs]
    unsort_idxs = _invert_permutation(sort_idxs)
    q = x_sorted * (m / np.sum(x_sorted))  # sums to m
    head_sz = 0
    for i in range(m):
        if q[0] >= 1.:
            # keep this point with certainty; renormalize the tail so it
            # sums to m - 1
            head_sz += 1
            q = q[1:] * ((m - 1) / q[1:].sum())
        else:
            break
    tail_sz = m - head_sz
    probs = x_sorted[head_sz:] / np.sum(x_sorted[head_sz:])
    tail_idxs = np.random.choice(
        np.arange(head_sz, n), p=probs, replace=False, size=tail_sz)
    idxs = list(tail_idxs)
    true_probs = probs[tail_idxs - head_sz] * (tail_sz / m)
    weights = list(1. / (m * true_probs))  # small err; definitely right
    if head_sz > 0:
        # "head" points kept deterministically get unit weight
        head_idxs = list(np.arange(head_sz))
        head_weights = list(np.ones(head_sz))
        idxs = head_idxs + idxs
        weights = head_weights + weights
    return unsort_idxs[idxs], np.array(weights)
# ============================================================ random sketches
# sketch both A and B jointly using the same matrix to amortize overhead and
# because it seems like this should help accuracy
# @numba.jit(nopython=True)
def fastjl_sketches(A, B, d, P=None):
    """Joint FastJL sketch of A (N x D) and B (D x M) down to inner dim d.

    Both operands share the same transform (random signs, Hadamard
    transform, then a random projection P) to amortize overhead.
    """
    N, D = A.shape
    M = B.shape[1]
    # zero-pad both operands up to the power-of-2 dim the FHT needs
    log2_D = int(np.ceil(np.log2(D)))
    D_pad = 2 ** log2_D
    A_pad = np.zeros((N, D_pad), dtype=np.float32)
    B_pad = np.zeros((D_pad, M), dtype=np.float32)
    A_pad[:, :D] = A
    B_pad[:D] = B
    # random sign per dim; folding the 1/sqrt(D_pad) scale into the
    # signs (rather than the Hadamard matrix) keeps it O(D) multiplies
    randsigns = np.random.randint(0, 2, size=D_pad) * 2 - 1
    randsigns = randsigns.astype(np.float32) * (1. / np.sqrt(D_pad))
    A_pad *= randsigns
    B_pad *= randsigns.reshape(-1, 1)
    # Hadamard transform (dense here, not an actual fast FHT)
    H = scipy.linalg.hadamard(D_pad, dtype=np.float32)
    A_pad = A_pad @ H
    B_pad = H @ B_pad
    # dimensionality reduction
    if P is None:
        keep_prob = log2_D * log2_D / D_pad
        # NOTE(review): with `> keep_prob`, entries are nonzero with
        # probability 1 - keep_prob, i.e. P is *dense* when keep_prob is
        # small; this looks inverted relative to the sparse-JL
        # construction, but matches the original author's comments —
        # confirm against the intended reference before changing.
        P = (np.random.uniform(size=(D_pad, d)) > keep_prob).astype(np.float32)
        P *= np.random.randn(*P.shape)
        # columns need exactly unit norm; scaling by the expected norm
        # totally fails
        P *= (1. / np.linalg.norm(P, axis=0))
    return A_pad @ P, P.T @ B_pad
def _nmultiplies_fastjl_sketches(N, D, M, d): # avg, not exact, since P sparse
# technically adds or subs, but you'd do fma ops regardless for floats
log2_D = int(np.ceil(np.log2(D)))
D_pad = 2 ** log2_D
fht_nmuls = D_pad * np.log2(D_pad)
sign_nmuls = D_pad
# trickier part; expected number of madds (or similar ops) to mul by P
construct_P_nmuls = D_pad * d # assuming only 1 mul for rng + threshold
keep_prob = log2_D * log2_D / D_pad
nnz_p = min(1, keep_prob) * D_pad # expected nnz per row of P
p_nmuls = N * nnz_p * d + d * nnz_p * M
return fht_nmuls + sign_nmuls + construct_P_nmuls + p_nmuls
@numba.jit(nopython=True)
def hash_sketches(A, B, d, scale=1., share_projections=True):
    # Count-sketch-style hashing: each of the D inner dims is mapped to
    # one of d output dims with a random sign. Written as plain loops to
    # stay numba-nopython-compatible.
    N, D = A.shape
    D, M = B.shape
    A_hat = np.zeros((N, d), dtype=A.dtype)
    B_hat = np.zeros((d, M), dtype=B.dtype)
    for j in range(D):
        idx = np.random.randint(d)
        sign = (np.random.randint(0, 2) * 2) - 1
        # 1/sqrt(2) scaling chosen empirically over several alternatives
        # (1, sqrt(d/D), d/D, ...) tried by the original author
        coeff = sign * scale / np.sqrt(2)  # actually pretty decent
        A_hat[:, idx] += A[:, j] * coeff
        if share_projections:
            B_hat[idx] += B[j] * coeff
            continue
        # use a different (independent) projection for B
        idx = np.random.randint(d)
        sign = (np.random.randint(0, 2) * 2) - 1
        B_hat[idx] += B[j] * sign
    # using unscaled signs preserves norms really well, at least for
    # random matrices
    return A_hat, B_hat
def osnap_sketches(A, B, d, s=OSNAP_DEFAULT_S):
    """OSNAP sketch: split the d output dims into s disjoint subspaces
    and hash all D input dims into each subspace independently.

    A is N x D, B is D x M; returns (A_hat (N x d), B_hat (d x M)).

    Fix: the subspace end index was clamped with min(D, ...) — the input
    dim — instead of min(d, ...), the sketch dim; whenever s didn't
    divide d (with D > d) the destination slice was narrower than the
    hash_sketches() output and the assignment raised ValueError.
    """
    N, D = A.shape
    D, M = B.shape
    s = max(1, min(d // 2, s))  # handle s too large relative to d
    A_hat = np.zeros((N, d), dtype=A.dtype)
    B_hat = np.zeros((d, M), dtype=B.dtype)
    scale = 1. / np.sqrt(s)
    subspace_len = (d + s - 1) // s  # round up
    for ss in range(s):
        start_idx = ss * subspace_len
        end_idx = min(d, start_idx + subspace_len)
        if start_idx >= end_idx:
            break  # d already fully covered
        sub_d = end_idx - start_idx  # last subspace may be narrower
        A_hat[:, start_idx:end_idx], B_hat[start_idx:end_idx] = \
            hash_sketches(A, B, sub_d, scale=scale)
    return A_hat, B_hat
def _nmultiplies_hash_sketches(N, D, M, d):
# technically adds or subs, but you'd do fma ops regardless for floats
return N * D + D * M
def _nmultiplies_osnap_sketches(N, D, M, d, s=4):
    """Multiply count for osnap_sketches(): one hash-sketch pass per
    subspace, s subspaces total.

    Fix: the multiplier was hard-coded to 4, ignoring the s parameter.
    """
    return s * _nmultiplies_hash_sketches(N, D, M, d)
def test_rand_sketches():
    """Smoke test for the random (JL-style) sketches; prints the
    normalized squared Frobenius error for several sketch sizes.

    Fix: the banner previously printed "test_svd_sketches" (copy-paste
    from the SVD test). No assertions: error for these sketches isn't
    reliably monotonic in d.
    """
    print("test_rand_sketches")
    N, M, D = 100, 80, 50
    np.random.seed(1234)
    A = np.random.randint(5, size=(N, D)).astype(np.float32)
    B = np.random.randint(5, size=(D, M)).astype(np.float32)
    A -= np.mean(A)
    B -= np.mean(B)
    AB = A @ B
    orig_frob_sq = np.sum(AB * AB)
    for d in (1, 2, 4, 8, 16, 32):
        # hash_sketches / osnap_sketches can be swapped in here to compare
        A_hat, B_hat = fastjl_sketches(A, B, d)
        AB_hat = A_hat @ B_hat
        diffs = AB - AB_hat
        err_frob_sq = np.sum(diffs * diffs)
        normed_err_sq = err_frob_sq / orig_frob_sq
        print('d = {}, err = {:.5f}'.format(d, normed_err_sq))
# ================================================================ Rand SVD
def svd_sketch(A, d, niters=5, **kwargs):
    """Rank-d factorization A ≈ U @ SVt via randomized SVD.

    Returns (U, SVt) where SVt already has the singular values folded in.
    """
    assert np.max(A.shape) >= d  # can't truncate to larger size
    U, S, Vt = randomized_svd(A, n_components=d, n_iter=niters, **kwargs)
    SVt = np.diag(S) @ Vt
    return (U, SVt)
def _nmultiplies_svd_sketch(N, D, d, niters):
# # "In contrast, randomized schemes can produce an approximate SVD using
# # only O(mn log(k) + (m + n)k2) flops" -Halko et al. 2010
# # https://arxiv.org/pdf/0909.4061.pdf
# iter_cost = N * D * int(np.ceil(np.log2(d)))
# iter_cost += (N + D) * d * d
# return iter_cost * niters
# # assumes algorithm 4.4 in above; sklearn randomized_svd source
# # code says it implements algorithm 4.3, but paper says 4.3 should actually
# # be implemented as 4.4 in practice. Also 4x4's complexity is much easier
# # to understand and counting multiplies is at best a rough estimate
# # regardless.
# #
# # shapes:
# # A: N x D
# # A*: D x N
# # Omega: D x d
# # Y0 = A @ Omega: N x d
# # Q0: N x d
# # R0: d x d
# # Y_tilde_j:
# # gauss_mat_cost = D * d
# # Y0_cost = N * D * d
# Y0_cost = N * D * int(np.ceil(np.log2(d))) # subsampled FFT; see text
# Y0_cost += _nmultiplies_qr(N, d)
# Yj_tilde_cost = D * N * d + _nmultiplies_qr(N, d)
# Yj_cost =
# okay, sklearn says it uses algorithm 4.3 in Halko et al. 2010 [1],
# so we're going to go with that
# [1] https://arxiv.org/pdf/0909.4061.pdf
# shapes:
# A: N x D
# A.T: D x N
# G (Omega): D x d
# A @ G: N x d
# A.T @ (AG) D x d
# A @ (A.T@A@G) N x d
# Q0: N x d
# R0: d x d
Omega_cost = D * d
A_Omega_cost = N * D * d
# each iter: premul by A.T, then A; assumes no LU or QR for stability
iter_cost = D * N * d + N * D * d
return Omega_cost + A_Omega_cost + iter_cost * niters
def svd_sketches(A, B, d, **kwargs):
    """Rank-d randomized-SVD factorizations of both operands."""
    sketch_a = svd_sketch(A, d, **kwargs)
    sketch_b = svd_sketch(B, d, **kwargs)
    return sketch_a, sketch_b
def test_svd_sketches():
    """SVD-sketch AMM error should shrink monotonically as d grows."""
    print("test_svd_sketches")
    N, M, D = 100, 80, 50
    np.random.seed(1234)
    A = np.random.randint(5, size=(N, D)).astype(np.float32)
    B = np.random.randint(5, size=(D, M)).astype(np.float32)
    A -= np.mean(A)
    B -= np.mean(B)
    AB = A @ B
    orig_frob_sq = np.sum(AB * AB)
    prev_normed_err = np.inf
    for d in (1, 2, 4, 8, 16, 32):
        (Ua, SVTa), (Ub, SVTb) = svd_sketches(A, B, d)
        AB_hat = Ua @ (SVTa @ Ub) @ SVTb
        resid = AB - AB_hat
        normed_err_sq = np.sum(resid * resid) / orig_frob_sq
        print('d = {}, err = {:.5f}'.format(d, normed_err_sq))
        assert normed_err_sq < 1.
        assert normed_err_sq < prev_normed_err
        prev_normed_err = normed_err_sq
# ================================================================ FD methods
# TODO impl fast-FD, which zeros out half the entries
def frequent_directions(A, d, variant=None):
    """Frequent Directions sketch of A (N x D): returns H (d x D) with
    H.T @ H approximating A.T @ A (Liberty 2013; Ghashami et al. 2016).

    One row of A is inserted into the (zeroed) last row of H per
    iteration, then H's singular values are shrunk so the smallest
    becomes zero.

    Fix: the shrink step used sqrt((S - S[-1])**2) == |S - S[-1]|, which
    over-shrinks the large singular values; the published FD shrink is
    S_i <- sqrt(max(S_i^2 - S_d^2, 0)).
    """
    N, D = A.shape
    H = np.zeros((d, D))
    assert N >= d
    assert D >= d
    H[:d - 1] = A[:d - 1]  # seed the buffer with the first d-1 rows
    for i in range(d - 1, N):
        H[-1] = A[i]  # last row is zero after the previous shrink
        try:
            U, S, Vt = np.linalg.svd(H, full_matrices=False)  # d x d, d, d x D
        except np.linalg.LinAlgError as e:
            print("SVD failed at iter ", i - (d - 1))
            print("H shape: ", H.shape)
            print("A shape: ", A.shape)
            print("d: ", d)
            raise e
        if variant == 'robust':
            raise NotImplementedError()
        else:
            # canonical FD shrink in squared space; S[-1] is the dth
            # (smallest) singular value, which becomes exactly zero
            S = np.sqrt(np.maximum(S * S - S[-1] * S[-1], 0))
        H = Vt * S.reshape(-1, 1)  # d x D; equivalent to np.diag(S) @ Vt
    return H
def fast_frequent_directions(A, d, variant=None, alpha=.5):
    """Fast Frequent Directions: zeroes the smallest ~alpha*d singular
    values per shrink so that many rows of A can be batch-inserted per
    SVD, cutting the number of SVDs to ~N/(alpha*d).

    Fixes: (1) the shrink now operates in squared space,
    S_i <- sqrt(max(S_i^2 - cutoff^2, 0)), matching the FD guarantee;
    (2) removed a leftover second shrink line (a dead copy-paste from
    frequent_directions) that re-subtracted S[-1].
    """
    N, D = A.shape
    H = np.copy(A[:d])
    assert N >= d
    assert D >= d
    cutoff_idx = int(d * (1 - alpha))
    cutoff_idx = min(d - 1, cutoff_idx)  # always zero at least last element
    ntrailing_zeros = d - cutoff_idx
    i = d
    while i < N:
        try:
            U, S, Vt = np.linalg.svd(H, full_matrices=False)  # d x d, d, d x D
        except np.linalg.LinAlgError as e:
            print("SVD failed at iter ", i - (d - 1))
            print("H shape: ", H.shape)
            print("A shape: ", A.shape)
            print("d: ", d)
            raise e
        cutoff = S[cutoff_idx]
        if variant == 'parametrized':
            raise NotImplementedError()
        else:
            # entries at or below the cutoff become exactly zero, so the
            # bottom ntrailing_zeros rows of H are zeroed
            S = np.sqrt(np.maximum(S * S - cutoff * cutoff, 0))
        H = Vt * S.reshape(-1, 1)  # d x D; equivalent to np.diag(S) @ Vt
        # replace zeroed-out rows of H with the next rows of A
        end_dim = min(N, i + ntrailing_zeros)
        nrows_to_copy = end_dim - i
        end_row = cutoff_idx + nrows_to_copy
        assert nrows_to_copy <= ntrailing_zeros
        assert end_row <= d
        H[-nrows_to_copy:] = A[i:end_dim]
        i = end_dim
    return H
def parametrized_fd_sketches(A, B, d):
    # from "Improved Practical Matrix Sketching with Guarantees"
    # NOTE(review): fast_frequent_directions raises NotImplementedError
    # for variant='parametrized', so calling this currently always
    # fails; the variant needs implementing before this is usable.
    A_hat = fast_frequent_directions(A.T, d, variant='parametrized', alpha=.2)
    B_hat = fast_frequent_directions(B.T, d, variant='parametrized', alpha=.2)
    return A_hat.T, B_hat.T
def fd_amm_sketches(A, B, d):
    """Sketch A (N x D) and B (D x M) jointly: run Frequent Directions
    on the stacked matrix [A.T | B] (D x (N + M)) and split the result
    back into the two factors."""
    stacked = np.hstack((A.T, B))  # D x (N + M)
    H = frequent_directions(stacked, d)
    n_rows_a = A.shape[0]
    assert H.shape == (d, n_rows_a + B.shape[1])
    return H[:, :n_rows_a].T, H[:, n_rows_a:]  # (N x d), (d x M)
def fast_fd_amm_sketches(A, B, d):
    """Like fd_amm_sketches(), but using the batched fast-FD variant."""
    stacked = np.hstack((A.T, B))  # D x (N + M)
    H = fast_frequent_directions(stacked, d)
    n_rows_a = A.shape[0]
    assert H.shape == (d, n_rows_a + B.shape[1])
    return H[:, :n_rows_a].T, H[:, n_rows_a:]  # (N x d), (d x M)
def _nmultiplies_frequent_directions(N, D, d):
    """Multiply count for frequent_directions(): one (d x D) SVD plus a
    reweight of H per inserted row, N - d + 1 insertions total."""
    per_iter = _nmultiplies_svd(d, D) + d * D
    return (N - d + 1) * per_iter
def _nmultiplies_fast_frequent_directions(N, D, d):
    """Multiply count for fast_frequent_directions(): same per-iteration
    cost as plain FD, but only ~N/d iterations."""
    per_iter = _nmultiplies_svd(d, D) + d * D
    return int(np.ceil(N / d)) * per_iter
def _nmultiplies_fd_amm_sketches(N, D, M, d):
    """Multiply count for fd_amm_sketches(): FD runs on the concatenated
    D x (N + M) matrix, so the roles of the dims swap."""
    return _nmultiplies_frequent_directions(D, N + M, d)
def test_fd_amm_sketches():
    """FD-based AMM error should shrink monotonically as d grows."""
    print("test_fd_amm_sketches")
    N, M, D = 100, 80, 50
    np.random.seed(1234)
    A = np.random.randint(5, size=(N, D)).astype(np.float32)
    B = np.random.randint(5, size=(D, M)).astype(np.float32)
    AB = A @ B
    orig_frob_sq = np.sum(AB * AB)
    prev_normed_err = np.inf
    for d in (1, 2, 4, 8, 16, 32):
        A_hat, B_hat = fd_amm_sketches(A, B, d)
        resid = AB - (A_hat @ B_hat)
        normed_err_sq = np.sum(resid * resid) / orig_frob_sq
        print('d = {}, err = {:.5f}'.format(d, normed_err_sq))
        assert normed_err_sq < 1.05
        assert normed_err_sq < prev_normed_err
        prev_normed_err = normed_err_sq
# ================================================================ Co-occurring
def cooccur_sketches(A, B, d):
    # Co-occurring directions sketch: maintains rank-d factors
    # X (N x d) and Y (M x d) such that X @ Y.T approximates A @ B,
    # streaming over the shared inner dim D, d/2 columns at a time.
    N, D = A.shape
    B = B.T  # work with B transposed so both operands have D columns
    M, _ = B.shape
    assert B.shape[1] == D
    # pad A or B with zero rows so R from the QR factorization below is
    # at least d x d
    if N < d:
        A_new = np.zeros((d, D), dtype=A.dtype)
        A_new[:N] = A
        A = A_new
    if M < d:
        B_new = np.zeros((d, D), dtype=B.dtype)
        B_new[:M] = B
        B = B_new
    X = np.copy(A[:, :d])  # N x d
    Y = np.copy(B[:, :d])  # M x d
    # zero out the bottom half of the spectrum each iteration
    # (mid_idx = d - 2 was tried for large d and didn't help)
    mid_idx = d // 2
    ntrailing_zeros = d - mid_idx
    i = d
    while i < D:
        Qx, Rx = np.linalg.qr(X)  # N x d, d x d
        Qy, Ry = np.linalg.qr(Y)  # M x d, d x d
        prod = Rx @ Ry.T  # d x d
        U, S, Vt = np.linalg.svd(prod, full_matrices=False)  # d x d, d, d x d
        cutoff = S[mid_idx]
        # shrink so singular values at/below the cutoff become zero
        S = np.sqrt(np.maximum(S - cutoff, 0))
        X = Qx @ (U * S)  # equivalent to U @ np.diag(S)
        Y = Qy @ (Vt.T * S)  # equivalent to Vt.T @ np.diag(S)
        # replace zeroed-out cols of X and Y with new cols of A and B
        end_dim = min(D, i + ntrailing_zeros)
        ncols_to_copy = end_dim - i
        end_col = mid_idx + ncols_to_copy
        assert ncols_to_copy <= ntrailing_zeros
        assert end_col <= d
        X[:, mid_idx:end_col] = A[:, i:end_dim]
        Y[:, mid_idx:end_col] = B[:, i:end_dim]
        i = end_dim
    return X[:N], Y[:M].T  # slicing is because we may have zero-padded
def _nmultiplies_cooccur_sketches(N, D, M, d):
    """Multiply count for cooccur_sketches(): ceil(D/d) iterations, each
    doing two QRs, the d x d Rx @ Ry.T product, a small SVD, the
    reweighting of U/Vt, and the X/Y updates."""
    per_iter = (_nmultiplies_qr(N, d) + _nmultiplies_qr(M, d)  # two QRs
                + d * d * d                                    # Rx @ Ry.T
                + _nmultiplies_svd(d, d)                       # d x d SVD
                + (N * d + M * d)                              # reweight
                + (N * d * d + M * d * d))                     # X, Y updates
    return int(np.ceil(D / d)) * per_iter
def test_cooccur_sketches():
    """Co-occurring sketches should beat the trivial error bound.

    Accuracy is NOT asserted to be monotonic in d: when d is a large
    fraction of D there are few iterations and many singular vectors get
    zeroed per iteration.
    """
    print("test_cooccur_sketches")
    N, M, D = 100, 80, 50
    np.random.seed(1234)
    A = np.random.randint(5, size=(N, D)).astype(np.float32)
    B = np.random.randint(5, size=(D, M)).astype(np.float32)
    A -= np.mean(A)
    B -= np.mean(B)
    AB = A @ B
    orig_frob_sq = np.sum(AB * AB)
    for d in (2, 4, 8, 16, 32):
        A_hat, B_hat = cooccur_sketches(A, B, d)
        resid = AB - (A_hat @ B_hat)
        normed_err_sq = np.sum(resid * resid) / orig_frob_sq
        print('d = {}, err = {:.5f}'.format(d, normed_err_sq))
        assert normed_err_sq < 1.
# ================================================================ main
# def main():
# pass
if __name__ == '__main__':
    # compact float formatting for the smoke-test output
    np.set_printoptions(formatter={'float': lambda f: "{:.3}".format(f)})
    # other smoke tests, run manually as needed:
    # test_sketch_sq_sample()
    # test_svd_sketches()
    # test_fd_amm_sketches()
    # test_cooccur_sketches()
    test_rand_sketches()
    # (a scratch experiment comparing importance_sample /
    # sample_varopt_1d reweighted sums against the true sum lived here,
    # commented out)
# ---- (stray file-concatenation artifact) ----
#!/bin/env/python
import numpy as np
def energy(A):
    """Return the sum of squared deviations of A's rows from the column means.

    Degenerate inputs (not 2D, or fewer than two rows) have zero energy.
    """
    if A.ndim < 2 or len(A) < 2:
        return 0
    centered = A - A.mean(axis=0)
    return np.sum(centered * centered)
def run_trial(N=100, D=3, seed=None):
    """Run one random experiment comparing energy gains from a split.

    Draws N points in D dims plus two random hyperplane normals w0 and w.
    gain1 is the energy reduction from splitting all of X on w; gain2 is
    the reduction from splitting each half of an existing partition
    (by w0) on the same w. Asserts gain1 >= gain2; on violation, dumps
    diagnostics (and a scatter plot when D == 2).

    Parameters:
        N - number of random points
        D - dimensionality of points and hyperplane normals
        seed - optional RNG seed for reproducibility
    """
    if seed is not None:
        np.random.seed(seed)
    w0, w = np.random.randn(2, D)
    X = np.random.randn(N, D)
    # split all of X on sign of projection onto w
    X1 = X[(X @ w) > 0]
    X2 = X[(X @ w) <= 0]
    # split X on w0 first...
    U = X[(X @ w0) > 0]
    V = X[(X @ w0) <= 0]
    # ...then split each of those halves on w
    U1 = U[(U @ w) > 0]
    U2 = U[(U @ w) <= 0]
    V1 = V[(V @ w) > 0]
    V2 = V[(V @ w) <= 0]
    energy_0 = energy(X)
    energy_w = energy(X1) + energy(X2)
    energy_w0 = energy(U) + energy(V)
    energy_w0_w = energy(U1) + energy(U2) + energy(V1) + energy(V2)
    gain1 = energy_0 - energy_w      # gain from splitting everything on w
    gain2 = energy_w0 - energy_w0_w  # gain from splitting within partitions
    if gain1 < gain2:
        # counterexample found; dump everything before the assert below fires
        print("N, D, seed = ", N, D, seed)
        print("energy_0:", energy_0)
        print("energy_w:", energy_w)
        print("energy_w0:", energy_w0)
        print("energy_w0_w:", energy_w0_w)
        print("gain1:", gain1)
        print("gain2:", gain2)
        print("w0:\n", w0)
        print("w: \n", w)
        # print("X\t({:.3f}):\n{}".format(energy(X), X))
        # print("X1\t({:.3f}):\n{}".format(energy(X1), X1))
        # print("X2\t({:.3f}):\n{}".format(energy(X2), X2))
        # print("U\t({:.3f}):\n{}".format(energy(U), U))
        # print("U1\t({:.3f}):\n{}".format(energy(U1), U1))
        # print("U2\t({:.3f}):\n{}".format(energy(U2), U2))
        # print("V\t({:.3f}):\n{}".format(energy(V), V))
        # print("V1\t({:.3f}):\n{}".format(energy(V1), V1))
        # print("V2\t({:.3f}):\n{}".format(energy(V2), V2))
        print("X energy: \t{:.3f}".format(energy(X)))
        print("X1 energy: \t{:.3f}".format(energy(X1)))
        print("X2 energy: \t{:.3f}".format(energy(X2)))
        print("U energy: \t{:.3f}".format(energy(U)))
        print("U1 energy: \t{:.3f}".format(energy(U1)))
        print("U2 energy: \t{:.3f}".format(energy(U2)))
        print("V energy: \t{:.3f}".format(energy(V)))
        print("V1 energy: \t{:.3f}".format(energy(V1)))
        print("V2 energy: \t{:.3f}".format(energy(V2)))
        if D == 2:
            # only 2D data can be scatter-plotted directly
            import matplotlib.pyplot as plt
            _, axes = plt.subplots(2, 2, figsize=(7.5, 7))
            # plt.scatter(X[:, 0], X[:, 1])
            for ax in axes.ravel():
                ax.set_xlim([-2.5, 2.5])
                ax.set_ylim([-2.5, 2.5])
                # ax.plot([0, w0[0]], [0, w0[1]])
                # ax.plot([0, w[0]], [0, w[1]])
            axes[0, 0].set_title("X")
            axes[0, 0].scatter(X[:, 0], X[:, 1])
            axes[0, 1].set_title("U and V (split on w0)")
            axes[0, 1].plot([0, w0[0]], [0, w0[1]])
            axes[0, 1].scatter(U[:, 0], U[:, 1])
            axes[0, 1].scatter(V[:, 0], V[:, 1])
            axes[1, 0].set_title("X1 and X2 (split on w)")
            axes[1, 0].plot([0, w[0]], [0, w[1]])
            axes[1, 0].scatter(X1[:, 0], X1[:, 1])
            axes[1, 0].scatter(X2[:, 0], X2[:, 1])
            axes[1, 1].set_title("U1, U2, V1, V2 (split on w0 and w)")
            axes[1, 1].plot([0, w0[0]], [0, w0[1]])
            axes[1, 1].plot([0, w[0]], [0, w[1]])
            axes[1, 1].scatter(U1[:, 0], U1[:, 1])
            axes[1, 1].scatter(U2[:, 0], U2[:, 1])
            axes[1, 1].scatter(V1[:, 0], V1[:, 1])
            axes[1, 1].scatter(V2[:, 0], V2[:, 1])
            plt.tight_layout()
            plt.show()
    assert gain1 >= gain2
def main():
    """Sweep run_trial over a grid of sizes with deterministic seeds."""
    num_trials = 100
    # for N in [4, 8, 16, 32, 64, 128, 256]:
    for num_points in (64, 128, 256):
        # for D in [1, 2, 3, 5, 10, 100]:
        for ndims in (100, 200):
            for seed in range(num_trials):
                run_trial(N=num_points, D=ndims, seed=seed)
if __name__ == '__main__':
    # limit printed float precision for readability of the energy dumps
    np.set_printoptions(precision=3)
    main()
|
#!/usr/bin/env python
import collections
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sb
import pandas as pd
import pathlib as pl
# from . import files
from . import amm_results2 as res
# from . import amm_methods as ameth
# sb.set_context('poster')
# sb.set_context('talk')
# sb.set_cmap('tab10')

# Output directory used by save_fig(); relative to the working directory.
FIGS_SAVE_DIR = pl.Path('../figs/amm')
# Font used throughout all figures in this module.
USE_FONT = 'DejaVu Sans'

mpl.rcParams['font.family'] = 'sans-serif'
mpl.rcParams['font.sans-serif'] = [USE_FONT]
# to avoid type3 fonts; 42 = truetype, which is more flexible than type1
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
def fix_ticks():
    """Re-enable the bottom/left tick marks that seaborn's white style hides."""
    plt.rcParams.update({'xtick.bottom': True, 'ytick.left': True})
# Ensure the figure output directory exists at import time so save_fig()
# never fails. exist_ok=True replaces the previous check-then-create pattern
# (os.path.exists followed by mkdir), which was racy and mixed os.path with
# the pathlib style used elsewhere in this module.
FIGS_SAVE_DIR.mkdir(parents=True, exist_ok=True)
def set_seaborn_style(stylename):
    """Apply a seaborn style, then restore the tick marks it may disable."""
    sb.set_style(stylename)
    fix_ticks()
def save_fig(name):
    """Save the current figure as a tightly-cropped PDF in FIGS_SAVE_DIR."""
    out_path = os.path.join(FIGS_SAVE_DIR, name + '.pdf')
    plt.savefig(out_path, bbox_inches='tight')
def _xlabel_for_xmetric(x_metric):
return {'d': 'Sketch Size',
'secs': 'Time (s)',
'muls': 'Number of Multiplies',
'nlookups': 'Number of Lookups',
'ops': 'Number of Operations',
'Latency': 'Latency (ms)',
'Speedup': 'Speedup Over Exact Matrix Multiply',
'NormalizedTime': 'Normalized Latency',
'Throughput': 'Throughput (elements/s)'}[x_metric]
def _ylabel_for_xmetric(y_metric):
if y_metric == 'Relative Accuracy':
return 'Normalized\nAccuracy'
if y_metric == 'Accuracy':
return 'Classification\nAccuracy'
return y_metric
def add_ylabels_on_right(axes, fmt, vals):
    """Attach a formatted label to the right side of each subplot.

    For each axis, creates a twin y-axis solely to host the label
    fmt.format(vals[i]); the twin's ticks, tick labels, and spines are all
    hidden so only the text shows.
    """
    for i, host in enumerate(axes):
        twin = host.twinx()
        twin.get_xaxis().set_visible(False)
        twin.yaxis.set_label_position('right')
        twin.set_ylabel(fmt.format(vals[i]), fontsize=14, family=USE_FONT,
                        labelpad=5)
        sb.despine(ax=twin, top=True, left=True, bottom=True, right=True)
        plt.setp(twin.get_xticklabels(), visible=False)
        plt.setp(twin.get_yticklabels(), visible=False)
        twin.set_yticks([])
        twin.tick_params(axis='y', which='y', length=0)
def scan_speed_fig(save=True):
    """Bar chart of scan ("f()") throughput per algorithm and code size.

    Pulls timings from res.scan_timings(), renames algorithms for display,
    and plots N*M elements per time as billions of dot products per second
    (times are in ms, hence the 1e6 divisor). Saves 'scan_speed.pdf' when
    `save` is True, otherwise shows the figure interactively.
    """
    # ================================ data cleaning
    df = res.scan_timings()

    name_map = collections.OrderedDict()
    # name_map['mithral scan'] = 'Mithral'
    name_map['mithral scan'] = 'MADDNESS'
    # name_map['mithral scan'] = 'Maddness'
    # name_map['bolt scan uint8'] = 'Bolt\nCheating'
    name_map['bolt scan safe uint16'] = 'Bolt'
    name_map['popcount scan'] = 'Popcount'
    name_map['pq scan'] = 'PQ / OPQ'
    df = res.rename_values_in_col(df, 'algo', name_map)
    df = res.melt_times(df)

    # alright, can't get stds to show without really screwing with stuff
    # times = np.array(df['time'])
    # times += np.random.randn(len(df['time'])) * .1  # get 1px for stds
    # # mask = df['algo'] == 'PQ / OPQ'
    # mask = df['B'] == 64
    # df['time'].loc[mask] = times[mask]

    # throughput in billions of dot products/sec (times are in ms)
    df['thruput'] = df['N'] * df['M'] / df['time']
    df['thruput'] /= 1e6  # just use units of billions; times are in ms
    # df['thruput'] *= 1e3  # times are in ms

    # ================================ fig creation
    sb.set_context("talk")
    # fig, ax = plt.subplots(1, 1, figsize=(8, 5))
    fig, ax = plt.subplots(1, 1, figsize=(8, 4))
    axes = [ax]
    sb.barplot(data=df, x='algo', y='thruput', units='timing_trial',
               hue='B', hue_order=[8, 16, 32, 64], order=name_map.values(),
               ax=ax, ci='sd')

    # ------------------------ clean up / format axes
    for ax in axes[:-1]:
        # remove x labels except for bottom axis
        plt.setp(ax.get_xticklabels(), visible=False)
        ax.get_xaxis().set_visible(False)

    handles, labels = axes[0].get_legend_handles_labels()
    labels = ['8B Codes', '16B Codes', '32B Codes', '64B Codes']
    # labels = ['8 Bytes', '16 Bytes', '32 Bytes', '64 Bytes']
    # labels = ['8B', '16B', '32B', '64B']
    plt.figlegend(handles, labels, loc='lower center', ncol=4, fontsize=14)

    for ax in axes:
        ax.set_ylabel('Billion Dot Products/s', family=USE_FONT)
        # per-axis legends are replaced by the shared figlegend above
        ax.get_legend().remove()

    # ------------------------ have bottom / top axes print title, x info
    axes[0].set_title('Speed of f() Functions for Different Encoding Sizes',
                      y=1.04, family=USE_FONT, fontsize=20)

    # # get and set them again so we can make the first one bold; can't make
    # # it bold beforehand because need a tick lbl object, not a string
    # xlabels = list(axes[-1].get_xticklabels())
    # xlabels[0].set_weight('bold')
    # # axes[-1].set_xticklabels(xlabels, rotation=60, ha='right')
    # axes[-1].set_xticklabels(xlabels)

    axes[-1].tick_params(axis='x', which='major', pad=4)
    axes[-1].set_xlabel("", labelpad=-30)
    ax.xaxis.set_ticks_position('none')

    # ------------------------ save / show plot
    plt.tight_layout()
    # plt.subplots_adjust(bottom=.21)
    plt.subplots_adjust(bottom=.23)
    if save:
        save_fig('scan_speed')
    else:
        plt.show()
def encode_speed_fig(save=True):
    """Line plots of encode ("g()") throughput vs. matrix width.

    One subplot per encoded size in use_nbytes; throughput is computed as
    N*D*elemsz bytes per second (times arrive in ms) and plotted in GB/s on
    a log y axis. Saves 'encode_speed.pdf' when `save` is True, else shows.
    """
    # ================================ data cleaning
    df = res.encode_timings()
    df = df.loc[df['algo'] != 'mithral encode i16']
    # print("df ours f32: ", df.loc[df['algo'].str.lower().str.strip() == 'mithral encode f32'])
    # print("df ours f32: ", df.loc[df['algo'].str.lower().str.strip() == 'mithral encode i8'])
    # print(df)
    # # # print(df['B'])
    # # # print(df['C'])
    # import sys; sys.exit()
    name_map = collections.OrderedDict()
    # name_map['mithral encode i8'] = r'$\bf{Mithral}$ $\bf{i8}$')
    # name_map['mithral encode i8'] = r'$\bf{Mithral}$ $\bf{i8}$')
    # name_map['mithral encode i8'] = 'Mithral i8'
    # name_map['mithral encode i16'] = 'Mithral i16'  # no i16 in plot
    # name_map['mithral encode f32'] = 'Mithral f32'
    # name_map['mithral encode i8'] = 'MADDNESS i8'
    # name_map['mithral encode f32'] = 'MADDNESS f32'
    name_map['mithral encode f32'] = 'MADDNESS'
    name_map['bolt encode'] = 'Bolt'
    name_map['pq encode'] = 'PQ'
    name_map['opq encode'] = 'OPQ'
    df = res.rename_values_in_col(df, 'algo', name_map)
    df = res.melt_times(df, ntimes=5)

    # NOTE(review): this `order` is shadowed by `order = name_map.values()`
    # inside the plotting loop below — confirm which one is intended.
    order = 'MADDNESS Bolt PQ OPQ'.split()

    # df['thruput'] = df['N'] * df['D'] / df['time']
    # df['thruput'] = df['N'] / (df['time'] * .001)  # rows/sec
    time_secs = (df['time'] * .001)  # times are in ms
    # bytes per scalar; NOTE(review): these endswith('i8'/'i16') checks run
    # after the rename above, so they only match rows whose algo name was
    # not in name_map — verify that's the intent.
    df['elemsz'] = 4
    df['elemsz'].loc[df['algo'].str.endswith('i8')] = 1
    df['elemsz'].loc[df['algo'].str.endswith('i16')] = 2
    df['thruput'] = df['N'] * df['D'] * df['elemsz'] / time_secs  # GB/sec
    df['thruput'] /= 1e9  # convert to GB

    # df['thruput'] /= 1e6  # just use units of billions; times are in ms
    # full_byte_per_codebook = df['algo'].isin(['PQ', 'OPQ'])
    # df['B'] = df['C'].values / 2
    # # cvals = df['C'].loc[full_byte_per_codebook]
    # df['B'].loc[full_byte_per_codebook] = df['C'].loc[full_byte_per_codebook]
    # df['B'] = df['B'].astype(np.int)
    # # print("df.cols: ", df.columns)
    # print(df)
    # # # print(df['B'])
    # # # print(df['C'])
    # import sys; sys.exit()

    # ================================ fig creation
    sb.set_context('talk')
    # sb.set_style('darkgrid')
    # sb.set_style('white')
    set_seaborn_style('white')
    # use_nbytes = [8, 16, 32, 64]
    use_nbytes = [8, 16, 32]

    # fig, axes = plt.subplots(len(use_nbytes), 1, figsize=(6, 8), sharey=True)
    # fig, axes = plt.subplots(len(use_nbytes), 1, figsize=(6, 6.5), sharey=True)
    # fig, axes = plt.subplots(len(use_nbytes), 1, figsize=(6, 7), sharey=True)
    fig, axes = plt.subplots(len(use_nbytes), 1, figsize=(6, 6.5), sharey=True)
    for i, nbytes in enumerate(use_nbytes):
        data = df.loc[df['B'] == nbytes]
        # print("df.cols: ", df.columns)
        # print(data)
        # # # print(df['B'])
        # # # print(df['C'])
        # import sys; sys.exit()
        order = name_map.values()
        # solid line for our method, dashed for the baselines
        dashes = {name: ([] if name.lower().startswith('maddness') else
                         mpl.rcParams['lines.dashed_pattern'])
                  for name in order}
        # dashes = None
        # sb.lineplot(data=data, x='D', y='thruput', hue='algo',
        # sb.lineplot(data=data, x='D', y='thruput', hue='algo', units='timing_trial',
        sb.lineplot(data=data, x='D', y='thruput', hue='algo',
                    # ax=axes[i], ci='sd', estimator=None, hue_order=order,
                    ax=axes[i], ci='sd', estimator='mean', hue_order=order,
                    # ax=axes[i], ci=None, estimator='mean', hue_order=order,
                    style='algo', style_order=order, dashes=dashes,
                    palette=my_colors_list)
    # import sys; sys.exit()

    # ------------------------ axis cleanup
    axes[0].set_title('Speed of g() Functions\nfor Different Encoding Sizes',
                      y=1.04, family=USE_FONT, fontsize=16)

    handles, labels = axes[0].get_legend_handles_labels()
    handles, labels = handles[1:], labels[1:]  # rm df column name
    # plt.figlegend(handles, labels, loc='lower center', ncol=3, fontsize=13)
    plt.figlegend(handles, labels, loc='lower center', ncol=4, fontsize=13)

    for ax in axes:
        # ax.semilogx()
        ax.semilogy()
        ax.set_ylim([.02, 1000])
        # ax.set_yticks([.1, 1, 10, 100, 1000])
        ax.set_yticks([.1, 10, 1000])
        ax.get_legend().remove()
        # ax.set_ylabel('Billions of\nScalars Encoded/s',
        # ax.set_ylabel('Scalars Encoded/s\n(Billions)',
        # ax.set_ylabel('Scalars Encoded\nper Second (Billions)',
        # ax.set_ylabel('Scalars Encoded\nper Second',
        # ax.set_ylabel('Scalars Encoded/s',
        # ax.set_ylabel('Rows Encoded/s',
        ax.set_ylabel('Encoding\nSpeed (GB/s)',
                      family=USE_FONT, fontsize=14)
    for ax in axes[:-1]:
        # remove x labels except for bottom axis
        ax.tick_params(axis='x', which='x', length=0)
        plt.setp(ax.get_xticklabels(), visible=False)
        ax.set_xlabel("", visible=False)
        # ax.get_xaxis().set_visible(False)
        # ax.get_xticklabels().set_visible(False)
    axes[-1].set_xlabel('Number of Columns in Matrix A',
                        family=USE_FONT, fontsize=14)

    # add byte counts on the right
    add_ylabels_on_right(axes, "{}B Encodings", use_nbytes)

    plt.tight_layout()
    # plt.subplots_adjust(bottom=.18, hspace=.15)
    # plt.subplots_adjust(bottom=.19, hspace=.15)
    plt.subplots_adjust(bottom=.17, hspace=.15)
    # plt.subplots_adjust(bottom=.21, hspace=.15)
    if save:
        save_fig('encode_speed')
    else:
        plt.show()
def lut_speed_fig(save=True):
    """Line plots of LUT-construction ("h()") throughput vs. matrix size.

    One subplot per encoded size in use_nbytes. MADDNESS rows are split into
    one series per lut constant (L = 2, 4, or ∞) via mithral_const_to_name.
    Saves 'lut_speed.pdf' when `save` is True, else shows the figure.

    Fix: the per-row loop previously tested name.startswith('Mithral') and
    the dashes dict tested lower().startswith('mithral'), but name_map has
    already renamed those rows to 'MADDNESS' — so the L-constant labels were
    never attached and the hue/style orders referenced nonexistent series.
    Both prefixes now match the renamed values.
    """
    # ================================ data cleaning
    df = res.lut_timings()

    name_map = collections.OrderedDict()
    # name_map['mithral lut dense'] = '$\bf{Mithral}$'
    # name_map['mithral lut sparse'] = '$\bf{Mithral}$'
    name_map['mithral lut dense'] = 'MADDNESS'
    name_map['mithral lut sparse'] = 'MADDNESS'
    name_map['bolt lut'] = 'Bolt'
    name_map['pq lut'] = 'PQ'
    name_map['opq lut'] = 'OPQ'
    df = res.rename_values_in_col(df, 'algo', name_map)
    # print(df[:20])
    # df['lutconst'] = df['lutconst'].str.strip().astype(np.float).astype(np.int)
    # print("df.dtypes", df.dtypes)
    # import sys; sys.exit()

    names = list(df['algo'])
    consts = np.array(df['lutconst'])
    # print("len(names)", len(names))
    # print("len(consts)", len(consts))

    # lutconst -1 encodes "no limit" on the lut constant
    mithral_const_to_name = collections.OrderedDict()
    mithral_const_to_name[-1] = 'MADDNESS, L = ∞'
    mithral_const_to_name[4] = 'MADDNESS, L = 4'
    mithral_const_to_name[2] = 'MADDNESS, L = 2'
    mithral_names = list(mithral_const_to_name.values())

    # add lut constant into the name for mithral variations
    new_names = []
    ismithral = []
    for i, name in enumerate(names):
        # rows were renamed to 'MADDNESS' above (was: startswith('Mithral'),
        # which never matched after the rename)
        if not name.startswith('MADDNESS'):
            new_names.append(name)
            ismithral.append(False)
            continue
        # const = consts[i]
        # const = "{:d}".format(int(const)) if const > 0 else "∞"
        # new_names.append(f"{name}, L = {const}")
        new_names.append(mithral_const_to_name[int(consts[i])])
        ismithral.append(True)
    # print("len(new_names)", len(new_names))
    df['algo'] = new_names
    df['ismithral'] = ismithral

    df = res.melt_times(df, ntimes=5)
    # df = res.melt_times(df, ntimes=3)  # TODO rerun with ntrials=5
    # print(df)

    df['thruput'] = df['N'] * df['D'] / df['time']
    # df['thruput'] /= 1e6  # just use units of billions; times are in ms

    # # TODO rm once we have updated results
    # mask = df['algo'].isin(('PQ', 'OPQ'))
    # df['B'] = -1  # create placeholder col
    # df['B'].loc[mask] = df['C'].loc[mask]
    # df['B'].loc[~mask] = df['C'].loc[~mask] / 2

    # ================================ fig creation
    sb.set_context('talk')
    # sb.set_style('darkgrid')
    # sb.set_style('white')
    set_seaborn_style('white')
    # use_nbytes = [8, 16, 32, 64]
    use_nbytes = [8, 16, 32]

    fig, axes = plt.subplots(len(use_nbytes), 1, figsize=(6, 8), sharey=True)
    # interleave ours with baselines so legend columns pair them up
    order = [mithral_names[2], 'Bolt',
             mithral_names[1], 'PQ',
             mithral_names[0], 'OPQ']
    dashes = {k: ('-' if k in mithral_names else '--') for k in order}
    # dashes = {k: ('solid' if k in mithral_names else 'dashed') for k in order}
    # dashes = {k: (None if k in mithral_names else [3, 3]) for k in order}
    # dashes = True
    # print(dashes)
    # import sys; sys.exit()
    for i, nbytes in enumerate(use_nbytes):
        data = df.loc[df['B'] == nbytes]
        ax = axes[i]
        # print(f"------------------------ {nbytes}B")
        # manual version
        # for algo in order:
        #     subdf = data.loc[df['algo'] == algo]
        #     print("plotting algo: ", algo)
        #     x = subdf['D'].as_matrix()
        #     y = subdf['thruput'].as_matrix()
        #     sort_idxs = np.argsort(x)
        #     x, y = x[sort_idxs], y[sort_idxs]
        #     ax.plot(x, y, dashes[algo], label=algo)
        # solid lines for our method, dashed for baselines (was: 'mithral',
        # which never matched the renamed 'MADDNESS, L = ...' series)
        dashes = {name: ([] if name.lower().startswith('maddness') else
                         mpl.rcParams['lines.dashed_pattern'])
                  for name in order}
        sb.lineplot(data=data, x='D', y='thruput', hue='algo',
                    units='timing_trial', ax=axes[i], ci='sd',
                    estimator=None, hue_order=order,
                    style='algo', style_order=order, dashes=dashes)
        # sb.lineplot(data=data, x='D', y='thruput', hue='algo', units='timing_trial',
        #             hue_order=order,
        #             # hue_order=order, style='algo', style_order=order,
        #             # dashes=True,
        #             style='ismithral', style_order=[True, False], dashes=True,
        #             ax=axes[i], ci='sd', estimator=None)

    # ------------------------ axis cleanup
    axes[0].set_title('Speed of h() Functions\nfor Different Encoding Sizes',
                      y=1.04, family=USE_FONT, fontsize=18)

    # for ax in axes:
    #     print("ax handles, labels: ")
    #     print(ax.get_legend_handles_labels())

    handles, labels = axes[-1].get_legend_handles_labels()
    handles, labels = handles[1:], labels[1:]  # rm df column name
    # handles, labels = handles[:-3], labels[:-3]  # rm ismithral
    plt.figlegend(handles, labels, loc='lower center', ncol=3, fontsize=13)

    for ax in axes:
        # ax.semilogx()
        ax.semilogy()
        ax.get_legend().remove()
        ax.set_ylabel('Scalars Encoded/s',
                      family=USE_FONT, fontsize=14)
    for ax in axes[:-1]:
        # remove x labels except for bottom axis
        ax.tick_params(axis='x', which='x', length=0)
        plt.setp(ax.get_xticklabels(), visible=False)
        ax.set_xlabel("", visible=False)
    axes[-1].set_xlabel('Number of Rows in Matrix B',
                        family=USE_FONT, fontsize=14)

    # add byte counts on the right
    add_ylabels_on_right(axes, "{}B Encodings", use_nbytes)

    plt.tight_layout()
    plt.subplots_adjust(bottom=.18, hspace=.15)
    if save:
        save_fig('lut_speed')
    else:
        plt.show()
def lotsa_colors_cmap(value):
    """Map a value in [0, 1] to a color drawn from tab20, tab20b, or tab20c.

    The unit interval is divided into thirds, each third re-scaled to [0, 1]
    and fed to one of the three 20-color qualitative maps, yielding roughly
    60 distinct colors overall.
    """
    assert 0 <= value <= 1  # if this throws, I don't understand cmaps
    scaled = 3 * value
    if value < .3333:
        return plt.get_cmap('tab20')(scaled)
    if value < .6666:
        return plt.get_cmap('tab20b')(scaled - 1)
    return plt.get_cmap('tab20c')(scaled - 2)
# def my_tab10(value):
#     assert 0 <= value <= 1
#     value = int(value * 10)
#     perm = [3, 1, 2, 4, 5, 6, 7, 8, 9]  # make red first, then orange
#     value = perm[value]
#     return plt.get_cmap('tab10')((value / 10.) + .01)

# def my_cmap(value):

# Shared palette for the figures below: Set1, most of Set3, and part of
# Dark2 concatenated into one tuple of RGB triples.
my_colors_list = (plt.get_cmap('Set1').colors
                  + plt.get_cmap('Set3').colors[:1]  # skip light yellow
                  + plt.get_cmap('Set3').colors[2:]
                  + plt.get_cmap('Dark2').colors[:6])
# my_colors_list = my_colors_list[:5] + () my_colors_list[6:]  # rm bright yellow
# new_yellow = (240./255, 230./255, 140./255)
# replace the bright yellow at index 5 with a darker, more legible yellow
new_yellow = (204. / 255, 204. / 255, 0. / 255)
# print(type(my_colors_list))
# print(my_colors_list)
my_colors_list = my_colors_list[:5] + (new_yellow,) + my_colors_list[6:]
# print(type(my_colors_list))
# print(my_colors_list)
# import sys; sys.exit()
# DEFAULT_PLOT_METHODS = ('Mithral', 'MithralPQ', 'Brute Force', 'Bolt',
# DEFAULT_PLOT_METHODS = ('MADDNESS', 'MADDNESS-PQ', 'Exact', 'Bolt',
#                         'FastJL', 'HashJL', 'OSNAP', 'PCA', 'SparsePCA',
#                         'Rademacher', 'RandGauss', 'OrthoGauss')

# Default subset of methods shown by lineplot() when the caller doesn't
# pass plot_methods explicitly.
DEFAULT_PLOT_METHODS = (
    'MADDNESS', 'MADDNESS-PQ', 'Exact', 'ScalarQuantize', 'Bolt',
    # 'MADDNESS', 'Exact', 'ScalarQuantize', 'Bolt',
    # 'FastJL', 'HashJL', 'PCA', 'RandGauss', 'SparsePCA')
    'FastJL', 'HashJL', 'PCA', 'SparsePCA')
# 'FastJL', 'HashJL', 'PCA', 'SparsePCA')
# 'MADDNESS', 'MADDNESS-PQ', 'Exact', 'Bolt',
# 'FastJL', 'HashJL', 'PCA', 'RandGauss', 'SparsePCA')
def lineplot(data, ax, x_metric, y_metric, units=None, scatter=False,
             # plot_methods=None):
             plot_methods=DEFAULT_PLOT_METHODS, first_two_same_marker=True,
             **kwargs):
    """Plot one line (or scatter series) per method on the given axis.

    Parameters:
        data - DataFrame with a 'method' column plus the two metric columns
        ax - matplotlib axis to draw on
        x_metric, y_metric - column names to plot
        units - passed to seaborn; when given, disables mean aggregation
        scatter - if True, draws a scatterplot instead of lines and returns
            early (no line styling or z-order adjustment)
        plot_methods - methods to keep and their display order; None means
            derive the order from the data (MADDNESS methods, then 'Exact',
            then the rest sorted)
        first_two_same_marker - give the first two series the same marker
            (used when MADDNESS and MADDNESS-PQ are both plotted)
        kwargs - forwarded to sb.lineplot (ci defaults to 'sd')
    """
    estimator = 'mean' if units is None else None
    if plot_methods is not None:
        data = data.loc[data['method'].isin(set(plot_methods))]
        order = plot_methods
    else:
        # derive an ordering: our methods first, then 'Exact', then others
        # order = 'Ours Bolt Exact PQ SVD FD-AMM CD'.split()
        # order = [m for m in order if m in data['Method'].unique()]
        order = list(data['method'].unique())
        # move_methods_to_front = ['Ours', 'OursPQ', 'Brute Force']
        # move_methods_to_front = ['Mithral', 'MithralPQ', 'Brute Force']
        mithral_methods = [method for method in order
                           # if method.lower().startswith('mithral')][::-1]
                           if method.lower().startswith('maddness')][::-1]
        move_methods_to_front = mithral_methods[:]
        # move_methods_to_front.append('Brute Force')
        move_methods_to_front.append('Exact')
        for elem in move_methods_to_front[:]:
            if elem in order:
                order.remove(elem)
            else:
                move_methods_to_front.remove(elem)
        order = move_methods_to_front + sorted(order)
        order = [method for method in order if method in data['method'].unique()]
    # order = plot_methods
    # order = list(data['method'].unique())

    # have to specify markers or seaborn freaks out because it doesn't
    # have enough of them
    # filled_markers = ('o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h',
    #                   'H', 'D', 'd', 'P', 'X')
    # use_markers = ('*', '*', 's') + (
    initial_markers = ('D', 'D', 's') if first_two_same_marker else ('D', 's')
    use_markers = initial_markers + (
        'o', 'v', '^', '<', '>', '8', 'p', 'h', 'd', 'P', 'X', '*', 'D')
    if scatter:
        # scatter mode draws and returns early; no z-order fixup below
        # sb.violinplot(cut=0, saturation=1, linewidth=.001, scale='width', inner='box',
        # data['Speedup'] *= 1 + (np.random.randn(len(data['Speedup'])) / 100)
        sb.scatterplot(alpha=.25,  # seems to suck the least
                       data=data, x=x_metric, y=y_metric, hue='method',
                       style='method', style_order=order, hue_order=order,
                       markers=use_markers, estimator=estimator,
                       # units=units, estimator=estimator, markers=use_markers,
                       palette=my_colors_list, ax=ax)
        # sb.boxplot(linewidth=.1, width=2, whis=999,
        # sb.stripplot(alpha=.25, orient='v', jitter=False,
        #              data=data, x=x_metric, y=y_metric, hue='method', hue_order=order,
        #              palette=my_colors_list, ax=ax)
        return
    kwargs.setdefault('ci', 'sd')
    sb.lineplot(data=data, x=x_metric, y=y_metric, hue='method',
                # style='method', style_order=order[::-1], hue_order=order[::-1],
                style='method', style_order=order, hue_order=order,
                markers=use_markers, estimator=estimator,
                # units=units, estimator=estimator, markers=use_markers,
                dashes=False, palette=my_colors_list, ax=ax, **kwargs)

    # draw earlier (higher-priority) methods on top of later ones
    lines = ax.get_lines()
    for i, line in enumerate(lines):
        line.set_zorder(10 - i)
# def cifar_fig(save=False, x_metric='Throughput', y_metric='Accuracy'):
def cifar_fig(save=False, x_metric='Speedup', y_metric='Accuracy'):
    """Two-panel figure: CIFAR-10 and CIFAR-100 softmax approximation.

    Plots y_metric vs x_metric for each method on both datasets and writes
    the figure via save_fig().

    NOTE(review): the `save` parameter is currently unused — save_fig() is
    always called at the end (compare scan_speed_fig) — confirm intended.
    """
    df10 = res.cifar10_amm()
    df100 = res.cifar100_amm()
    sb.set_context('poster')
    # fig, axes = plt.subplots(2, 1, figsize=(11, 13.5), sharex=True)
    # fig, axes = plt.subplots(2, 1, figsize=(11, 10), sharex=True)
    fig, axes = plt.subplots(2, 1, figsize=(11, 8.5), sharex=True)

    # plot_methods = ['Mithral', 'MithralPQ', 'Brute Force', 'Bolt',
    # plot_methods = ['MADDNESS', 'MADDNESS-PQ', 'Exact', 'Bolt',
    #                 'FastJL', 'HashJL', 'OSNAP', 'PCA', 'SparsePCA',
    #                 'Rademacher', 'RandGauss', 'OrthoGauss']
    # # df10 = df10.loc[df10['method'].isin(set(plot_methods))]
    # df100 = df100.loc[df100['method'].isin(set(plot_methods))]
    # df10 = df10.loc[df10['method'] != 'OrthoGauss']
    # df100 = df100.loc[df100['method'] != 'OrthoGauss']

    lineplot(df10, axes[0], x_metric=x_metric, y_metric=y_metric)
    lineplot(df100, axes[1], x_metric=x_metric, y_metric=y_metric)

    # plt.suptitle('Sketch size vs Classification Accuracy')
    xlbl = _xlabel_for_xmetric(x_metric)
    # plt.suptitle('{} vs {}'.format(xlbl, y_metric))
    plt.suptitle('Approximating Softmax Classifiers', family=USE_FONT)
    axes[0].set_title('CIFAR-10', family=USE_FONT)
    for ax in axes:
        ax.set_ylabel(_ylabel_for_xmetric(y_metric), family=USE_FONT)
    axes[0].set_xlabel(None)
    axes[1].set_xlabel(xlbl, family=USE_FONT)
    axes[1].set_title('CIFAR-100', family=USE_FONT)

    handles, labels = axes[0].get_legend_handles_labels()
    handles, labels = handles[1:], labels[1:]  # rm 'Method' title
    # axes[0].legend(handles, labels, fontsize='small')
    # axes[1].legend(handles, labels, fontsize='small')
    # plt.figlegend(handles, labels, loc='lower center', ncol=1)
    # plt.figlegend(handles, labels, loc='center right', ncol=1)
    for ax in axes.ravel():
        ax.get_legend().remove()

    # hand-tuned axis limits per metric
    if y_metric == 'Accuracy':
        axes[0].set_ylim([.09, .96])
        axes[1].set_ylim([.009, .73])
    elif y_metric == '1 - NMSE':
        axes[0].set_ylim([0, 1.02])
        axes[1].set_ylim([0, 1.02])

    # axes[1].get_legend().remove()
    # axes[1].get_legend().remove()
    plt.figlegend(handles, labels, loc='lower center', ncol=3)

    # if x_metric in ('muls', 'ops', 'nlookups', 'Latency', 'Throughput'):
    axes[0].semilogx()
    for ax in axes:
        if x_metric == 'Speedup':
            ax.set_xlim([.94, ax.get_xlim()[1]])
        elif x_metric == 'NormalizedTime':
            ax.set_xlim([ax.get_xlim()[0], 1.06])

    plt.tight_layout()
    # plt.subplots_adjust(top=.91, bottom=.24)
    plt.subplots_adjust(top=.89, bottom=.32)
    # plt.subplots_adjust(top=.95, bottom=.1)
    save_fig('cifar_{}_{}'.format(x_metric, y_metric))
    # save_fig('cifar_{}_{}_no_maddnesspq'.format(x_metric, y_metric))
def fig1(save=False, x_metric='Speedup', y_metric='Accuracy'):
    """Paper figure 1: CIFAR results restricted to Ours vs. top baselines.

    Same layout as cifar_fig but renames methods to reviewer-friendly
    labels and keeps only four of them. Always saves as 'fig1'.

    NOTE(review): the `save` parameter is currently unused — save_fig() is
    always called at the end — confirm intended.
    """
    df10 = res.cifar10_amm()
    df100 = res.cifar100_amm()
    sb.set_context('poster')
    fig, axes = plt.subplots(2, 1, figsize=(11, 10), sharex=True)

    # df10['method'] = df10['method'].str.replace('Mithral', 'HashMul')
    # replace_names_dict = {'Mithral': 'Ours',
    replace_names_dict = {'MADDNESS': 'Ours',
                          # 'SparsePCA': '2nd best (Mairal et al.)',
                          # 'HashJL': '3rd best (Dasgupta et al.)',
                          'SparsePCA': 'Mairal et al.',
                          'HashJL': 'Dasgupta et al.',
                          'Exact': 'Exact Matrix Multiply'
                          }
    # print("--- about to run the rename we care about")
    df10 = res.rename_values_in_col(df10, 'method', replace_names_dict)
    df100 = res.rename_values_in_col(df100, 'method', replace_names_dict)
    # df10['method'] = df10['method'].str.replace(replace_names_dict)
    # df100['method'] = df100['method'].str.replace(replace_names_dict)
    # print('df10 methods: ', df10['method'].unique())
    # import sys; sys.exit()

    # plot_methods = ['Ours', '2nd best', '3rd best', 'Exact Matrix Multiply']
    # plot_methods = ['Ours', 'Mairal et al.', 'Dasgupta et al.', 'Exact Matrix Multiply']
    plot_methods = ['Ours', 'Exact Matrix Multiply', 'Mairal et al.', 'Dasgupta et al.']
    # plot_methods = ['Ours', '3rd best', '2nd best', 'Exact Matrix Multiply']
    # plot_methods = ['Mithral', 'SparsePCA', 'HashJL', 'Brute Force']
    # df10 = df10.loc[df10['method'].isin(set(plot_methods))]
    # df100 = df100.loc[df100['method'].isin(set(plot_methods))]
    # df10 = df10.loc[df10['method'] != 'OrthoGauss']
    # df100 = df100.loc[df100['method'] != 'OrthoGauss']

    lineplot(df10, axes[0], x_metric=x_metric, y_metric=y_metric,
             plot_methods=plot_methods, ci=None, first_two_same_marker=False)
    lineplot(df100, axes[1], x_metric=x_metric, y_metric=y_metric,
             plot_methods=plot_methods, ci=None, first_two_same_marker=False)

    # plt.suptitle('Sketch size vs Classification Accuracy')
    xlbl = _xlabel_for_xmetric(x_metric)
    # plt.suptitle('{} vs {}'.format(xlbl, y_metric))
    plt.suptitle('Approximating Softmax Classifiers', family=USE_FONT)
    axes[0].set_title('CIFAR-10', family=USE_FONT)
    for ax in axes:
        ax.set_ylabel(_ylabel_for_xmetric(y_metric), family=USE_FONT)
    axes[0].set_xlabel(None)
    axes[1].set_xlabel(xlbl, family=USE_FONT)
    axes[1].set_title('CIFAR-100', family=USE_FONT)

    handles, labels = axes[0].get_legend_handles_labels()
    handles, labels = handles[1:], labels[1:]  # rm 'Method' title
    # axes[0].legend(handles, labels, fontsize='small')
    # axes[1].legend(handles, labels, fontsize='small')
    # plt.figlegend(handles, labels, loc='lower center', ncol=1)
    # plt.figlegend(handles, labels, loc='center right', ncol=1)
    for ax in axes.ravel():
        ax.get_legend().remove()

    # hand-tuned axis limits per metric
    if y_metric == 'Accuracy':
        axes[0].set_ylim([.09, .96])
        axes[1].set_ylim([.009, .73])
    elif y_metric == '1 - NMSE':
        axes[0].set_ylim([0, 1.02])
        axes[1].set_ylim([0, 1.02])

    # axes[1].get_legend().remove()
    # axes[1].get_legend().remove()
    plt.figlegend(handles, labels, loc='lower center', ncol=2)

    # if x_metric in ('muls', 'ops', 'nlookups', 'Latency', 'Throughput'):
    axes[0].semilogx()
    for ax in axes:
        if x_metric == 'Speedup':
            ax.set_xlim([.94, ax.get_xlim()[1]])
        elif x_metric == 'NormalizedTime':
            ax.set_xlim([ax.get_xlim()[0], 1.06])

    plt.tight_layout()
    plt.subplots_adjust(top=.89, bottom=.23)
    save_fig('fig1')
def caltech_fig(x_metric='Speedup', y_metric='1 - NMSE'):
    """Two-panel figure for the Caltech image-filter approximation task.

    Top panel: Sobel filter; bottom: 5x5 Gaussian (difference-of-gaussians)
    filter. Only the methods in keep_methods are shown; a dashed vertical
    line marks 1x speedup. Always saves the figure via save_fig().
    """
    # df = res.caltech_amm()
    # df = res.caltech_amm()
    df0 = res.caltech_amm(filt='sobel')
    df1 = res.caltech_amm(filt='dog5x5')
    # print("df cols: ", df.columns)
    sb.set_context('poster')
    # fig, ax = plt.subplots(1, 1, figsize=(11, 6))
    fig, axes = plt.subplots(2, 1, figsize=(12, 8))
    # axes = [ax]

    # is_mithral = df['method'].str.startswith('Mithral')
    # is_exact = df['method'] == 'Brute Force'
    # others_to_keep = df['method'].isin(['Brute Force', 'PCA', 'SparsePCA'])
    # others_to_keep = df['method'].isin(['PCA', 'SparsePCA'])
    # df = df.loc[is_mithral | others_to_keep]  # others suck too hard
    # df = df.loc[~(df['method'].isin(['Mithral, L = 2', 'Mithral, L = 4']))]
    # df['method'].loc[df['method'] == 'Mithral, L = ∞'] = 'Mithral'

    # print("df0 uniq methods: ", df0['method'].unique())
    # print("df1 uniq methods: ", df1['method'].unique())
    # import sys; sys.exit()

    # keep_methods = ['Mithral', 'MithralPQ', 'SparsePCA', 'PCA', 'OSNAP']
    # keep_methods = ['Mithral', 'MithralPQ', 'SparsePCA', 'PCA', 'HashJL', 'OSNAP', 'FastJL']
    # keep_methods = ['Mithral', 'MithralPQ', 'SparsePCA', 'PCA']
    # keep_methods = ['MADDNESS', 'MADDNESS-PQ', 'SparsePCA', 'PCA']

    # even scalar quantize is slower than custom exact matmul; note that
    # in the 5x5 plot, it's occluded by maddness (near perfect mse, but
    # slightly to the left of 1x speedup)
    # keep_methods = ['MADDNESS', 'MADDNESS-PQ', 'ScalarQuantize', 'SparsePCA']
    keep_methods = ['MADDNESS', 'MADDNESS-PQ', 'SparsePCA']

    df0 = df0.loc[df0['method'].isin(keep_methods)]
    df1 = df1.loc[df1['method'].isin(keep_methods)]
    # print("df0 kept methods: ", df0['method'].unique())
    # print("df1 kept methods: ", df1['method'].unique())
    # print("df1 scalar quantize numbers: ", df1.loc[df1['method'] == 'ScalarQuantize'])
    # import sys; sys.exit()

    # print("df1:\n", df1.loc[(df1['method'] == 'MithralPQ') & df1['task_id'].str.contains('509')])
    # import sys; sys.exit()

    # lineplot(df, ax, x_metric=x_metric, y_metric=y_metric, units=None)
    lineplot(df0, axes[0], x_metric=x_metric, y_metric=y_metric,
             plot_methods=keep_methods)
    lineplot(df1, axes[1], x_metric=x_metric, y_metric=y_metric,
             plot_methods=keep_methods)

    handles, labels = axes[-1].get_legend_handles_labels()
    handles, labels = handles[1:], labels[1:]  # rm 'Method' title
    # plt.figlegend(handles, labels, loc='lower center', ncol=2)
    # plt.figlegend(handles, labels, loc='lower center', ncol=4)
    plt.figlegend(handles, labels, loc='lower center', ncol=len(keep_methods))

    # plt.suptitle('Approximating an Image Filter')
    for ax in axes:
        ax.set_xlabel(_xlabel_for_xmetric(x_metric), fontsize=20)
        ax.set_ylabel(y_metric)
        ax.get_legend().remove()
        ax.set_ylim([-.01, 1.01])
        # dashed vertical line at 1x speedup for reference
        ax.plot([1, 1], ax.get_ylim(), 'k--')

    # for ax in axes[:-1]:
    #     # remove x labels except for bottom axis
    #     plt.setp(ax.get_xticklabels(), visible=False)
    #     ax.get_xaxis().set_visible(False)

    axes[0].set_title('Approximating a Sobel Filter', y=1.02, fontsize=28)
    axes[1].set_title('Approximating a Gaussian Filter', y=1.02, fontsize=28)

    # plt.subplots_adjust(top=.91, bottom=.37)
    plt.tight_layout()
    # plt.subplots_adjust(bottom=.26, hspace=.72)  # with ncol=2
    plt.subplots_adjust(bottom=.22, hspace=.7)  # with ncol=2
    # plt.subplots_adjust(top=.95, bottom=.1)
    save_fig('caltech_{}_{}'.format(x_metric, '1 - NMSE'))
    # save_fig('caltech_sobel_{}_{}'.format(x_metric, '1 - NMSE'))
    # save_fig('caltech_dog_{}_{}'.format(x_metric, '1 - NMSE'))
# def ucr_fig(x_metric='Speedup', y_metric='Accuracy'):
# def ucr_fig(x_metric='Speedup', y_metric='Change in Accuracy'):
def ucr_fig(x_metric='Speedup', y_metric='Relative Accuracy'):
    """Plot y_metric vs x_metric on the UCR datasets for k=64, 128, 256.

    Draws one subplot per value of k (stacked vertically, shared x axis),
    restricted to the MADDNESS method plus a fixed set of baselines, and
    writes the figure out via save_fig.
    """
    dfs = [res.ucr_amm(k=k) for k in (64, 128, 256)]
    sb.set_context('poster')
    fig, axes = plt.subplots(3, 1, figsize=(12, 13), sharex=True)

    def _keep_relevant_methods(df):
        # derived column, used when y_metric == 'Change in Accuracy'
        df['Change in Accuracy'] = df['Accuracy'] - df['acc-1nn-raw']
        is_ours = df['method'] == 'MADDNESS'
        is_baseline = df['method'].isin([
            'PCA', 'SparsePCA', 'Bolt', 'HashJL', 'OSNAP'])
        return df.loc[is_ours | is_baseline]

    dfs = [_keep_relevant_methods(df) for df in dfs]
    for df, ax in zip(dfs, axes):
        lineplot(df, ax, x_metric=x_metric, y_metric=y_metric, scatter=True)

    plt.suptitle('Approximating an RBF Kernel Classifier')
    axes[-1].set_xlabel(_xlabel_for_xmetric(x_metric))
    handles, labels = axes[-1].get_legend_handles_labels()
    handles, labels = handles[1:], labels[1:]  # drop the 'Method' legend title
    plt.figlegend(handles, labels, loc='lower center', ncol=3)
    for ax in axes:
        ax.set_ylabel(_ylabel_for_xmetric(y_metric))
        ax.get_legend().remove()  # single shared figlegend instead
        ax.semilogx()
        ax.set_xlim([.9, ax.get_xlim()[1]])
    plt.tight_layout()
    plt.subplots_adjust(top=.94, bottom=.25)  # leave room for legend + title
    save_fig('ucr_{}_{}'.format(x_metric, y_metric))
def ucr_fig2(x_metric='Speedup', y_metric='Relative Accuracy',
             problem='rbf'):
    """Plot, for several accuracy thresholds, the fraction of UCR datasets
    on which each method exceeds that threshold, as a function of x_metric.

    One subplot per threshold (stacked vertically, shared x axis); results
    are for k=128 on the given problem ('rbf' or 'softmax').
    """
    df = res.ucr_amm(k=128, problem=problem)
    sb.set_context('poster')
    fig, axes = plt.subplots(3, 1, figsize=(12, 12), sharex=True)

    # derived column, used when y_metric == 'Change in Accuracy'
    df['Change in Accuracy'] = df['Accuracy'] - df['acc-1nn-raw']

    y_frac_thresholds = [.5, .75, .95]
    thresh_dfs = [
        res.frac_above_thresh(df, x_metric, y_metric, 'method', 'task_id', t)
        for t in y_frac_thresholds]

    ycol = 'frac_above_thresh'
    for tdf, ax in zip(thresh_dfs, axes):
        lineplot(tdf, ax, x_metric=x_metric, y_metric=ycol, scatter=False)

    kind = 'a Softmax' if problem == 'softmax' else 'an RBF Kernel'
    plt.suptitle(f'Approximating {kind} Classifier')
    axes[-1].set_xlabel(_xlabel_for_xmetric(x_metric))
    handles, labels = axes[-1].get_legend_handles_labels()
    handles, labels = handles[1:], labels[1:]  # drop the 'Method' legend title
    plt.figlegend(handles, labels, loc='lower center', ncol=3)
    for thresh, ax in zip(y_frac_thresholds, axes):
        ax.set_ylabel(f"Fraction > {thresh}")
        ax.get_legend().remove()  # single shared figlegend instead
        ax.semilogx()
        ax.set_xlim([.9, ax.get_xlim()[1]])
        ax.set_ylim([0, 1.03])
    plt.tight_layout()
    plt.subplots_adjust(top=.94, bottom=.22)  # leave room for legend + title
    save_fig('ucr2_{}_{}_{}'.format(x_metric, y_metric, problem))
def main():
    """Generate all of the figures, in order."""
    scan_speed_fig()
    encode_speed_fig()
    lut_speed_fig()
    fig1()
    ucr_fig2()
    caltech_fig()
    cifar_fig()
# Script entry point: only runs when executed directly, not on import.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
from . import amm, vq_amm
# ------------------------------------------------------------------------
# Canonical method names. Each constant is the display / lookup string for
# one approximate-matrix-multiply estimator defined in amm.py or vq_amm.py.
# ------------------------------------------------------------------------
METHOD_EXACT = 'Exact'
METHOD_SCALAR_QUANTIZE = 'ScalarQuantize'
METHOD_SKETCH_SQ_SAMPLE = 'SketchSqSample'
METHOD_SVD = 'SVD'  # truncated SVD run on the matrix at test time
METHOD_FD_AMM = 'FD-AMM'
METHOD_COOCCUR = 'CooccurSketch'
METHOD_PCA = 'PCA'  # PCA projection, with PCA basis learned at train time
METHOD_SPARSE_PCA = 'SparsePCA'  # like above, using sklearn SparsePCA
METHOD_RANDGAUSS = 'RandGauss'
METHOD_ORTHOGAUSS = 'OrthoGauss'
METHOD_HADAMARD = 'Hadamard'
METHOD_RADEMACHER = 'Rademacher'
METHOD_FASTJL = 'FastJL'
METHOD_HASHJL = 'HashJL'
METHOD_OSNAP = 'OSNAP'
METHOD_OPQ = 'OPQ'
METHOD_BOLT = 'Bolt'
METHOD_BOLT_PERM = 'Bolt+Perm'
METHOD_BOLT_CORRPERM = 'Bolt+CorrPerm'
METHOD_BOLT_SPLITS = 'BoltSplits'
METHOD_BOLT_MULTISPLITS = 'Bolt+MultiSplits'
METHOD_BOLT_PERM_MULTISPLITS = 'Bolt+Perm+MultiSplits'
METHOD_PQ = 'PQ'
METHOD_PQ_PERM = 'PQ+Perm'
METHOD_PQ_MULTISPLITS = 'PQ+MultiSplits'
METHOD_PQ_PERM_MULTISPLITS = 'PQ+Perm+MultiSplits'
METHOD_MITHRALPQ = 'MithralPQ'
METHOD_OLD_MITHRALPQ = 'OldMithralPQ'
METHOD_MITHRAL = 'Mithral'

# these are for trying out different perm options
METHOD_BOLT_GEHT_COV_TOPK = 'Bolt_CovTopk'
METHOD_BOLT_GEHT_COV_SAMP = 'Bolt_CovSamp'
METHOD_BOLT_GEHT_COR_TOPK = 'Bolt_CorTopk'
METHOD_BOLT_GEHT_COR_SAMP = 'Bolt_CorSamp'

# Maps each method name to the estimator class that implements it.
METHOD_TO_ESTIMATOR = {
    METHOD_EXACT: amm.ExactMatMul,
    METHOD_SCALAR_QUANTIZE: amm.QuantizedMatmul,
    METHOD_SKETCH_SQ_SAMPLE: amm.SketchSqSample,
    METHOD_SVD: amm.SvdSketch,
    METHOD_FD_AMM: amm.FdAmm,
    METHOD_COOCCUR: amm.CooccurSketch,
    METHOD_PCA: amm.TrainedPcaSketch,
    METHOD_SPARSE_PCA: amm.TrainedSparsePcaSketch,
    METHOD_RANDGAUSS: amm.RandGaussSketch,
    METHOD_ORTHOGAUSS: amm.RandOrthoGaussSketch,
    METHOD_HADAMARD: amm.HadamardSketch,
    METHOD_RADEMACHER: amm.RandRademacherSketch,
    METHOD_FASTJL: amm.FastJlSketch,
    METHOD_HASHJL: amm.HashJlSketch,
    METHOD_OSNAP: amm.OsnapSketch,
    METHOD_PQ: vq_amm.PQMatmul,
    METHOD_BOLT: vq_amm.BoltMatmul,
    METHOD_OPQ: vq_amm.OPQMatmul,
    METHOD_BOLT_CORRPERM: vq_amm.GEHTBoltMatmul_CorrTopk,
    METHOD_BOLT_GEHT_COV_TOPK: vq_amm.GEHTBoltMatmul_CovTopk,
    METHOD_BOLT_GEHT_COV_SAMP: vq_amm.GEHTBoltMatmul_CovSamp,
    METHOD_BOLT_GEHT_COR_TOPK: vq_amm.GEHTBoltMatmul_CorrTopk,
    METHOD_BOLT_GEHT_COR_SAMP: vq_amm.GEHTBoltMatmul_CorrSamp,
    METHOD_BOLT_PERM: vq_amm.GEHTBoltMatmul_CovTopk,
    METHOD_BOLT_SPLITS: vq_amm.BoltSplits,
    METHOD_BOLT_MULTISPLITS: vq_amm.BoltMultiSplits,
    METHOD_BOLT_PERM_MULTISPLITS: vq_amm.BoltPermMultiSplits,
    METHOD_PQ_PERM: vq_amm.PQPerm,
    METHOD_PQ_MULTISPLITS: vq_amm.PQMultiSplits,
    METHOD_PQ_PERM_MULTISPLITS: vq_amm.PQPermMultiSplits,
    METHOD_OLD_MITHRALPQ: vq_amm.OldMithralPQ,
    METHOD_MITHRALPQ: vq_amm.MithralPQ,
    METHOD_MITHRAL: vq_amm.MithralMatmul,
}

ALL_METHODS = sorted(list(METHOD_TO_ESTIMATOR.keys()))
# NOTE: fixed a stray trailing comma after the first remove() call, which
# made that statement a discarded 1-tuple expression; behavior is unchanged.
ALL_METHODS.remove(METHOD_SKETCH_SQ_SAMPLE)  # always terrible results
ALL_METHODS.remove(METHOD_OPQ)  # takes forever to train, more muls than exact
# these were just for playing with different permuation options
ALL_METHODS.remove(METHOD_BOLT_GEHT_COV_TOPK)
ALL_METHODS.remove(METHOD_BOLT_GEHT_COV_SAMP)
ALL_METHODS.remove(METHOD_BOLT_GEHT_COR_TOPK)
ALL_METHODS.remove(METHOD_BOLT_GEHT_COR_SAMP)

# Groupings of methods, used by the experiment / plotting code to select
# which estimators to run or display.
RANDOM_SKETCHING_METHODS = (
    METHOD_FASTJL, METHOD_HASHJL, METHOD_OSNAP, METHOD_RANDGAUSS,
    METHOD_ORTHOGAUSS, METHOD_RADEMACHER)
DENSE_SKETCH_METHODS = (METHOD_PCA, METHOD_FASTJL, METHOD_RANDGAUSS,
                        METHOD_HADAMARD, METHOD_ORTHOGAUSS, METHOD_RADEMACHER)
FAST_SKETCH_METHODS = RANDOM_SKETCHING_METHODS + (
    METHOD_HADAMARD, METHOD_PCA, METHOD_SPARSE_PCA)
SLOW_SKETCH_METHODS = (METHOD_FD_AMM, METHOD_COOCCUR, METHOD_SVD)
SKETCH_METHODS = FAST_SKETCH_METHODS + SLOW_SKETCH_METHODS
BOLT_METHODS = (METHOD_BOLT, METHOD_BOLT_PERM,
                METHOD_BOLT_CORRPERM, METHOD_BOLT_SPLITS,
                METHOD_BOLT_MULTISPLITS, METHOD_BOLT_PERM_MULTISPLITS)
PQ_METHODS = (METHOD_PQ, METHOD_PQ_PERM, METHOD_PQ_MULTISPLITS,
              METHOD_PQ_PERM_MULTISPLITS)
MITHRAL_METHODS = (METHOD_MITHRALPQ, METHOD_MITHRAL, METHOD_OLD_MITHRALPQ)
VQ_METHODS = PQ_METHODS + BOLT_METHODS + MITHRAL_METHODS
# methods whose output varies across runs (randomized or training-dependent)
NONDETERMINISTIC_METHODS = (METHOD_SKETCH_SQ_SAMPLE, METHOD_SVD) + VQ_METHODS

USE_METHODS = ((METHOD_EXACT, METHOD_BOLT, METHOD_MITHRALPQ, METHOD_MITHRAL)
               + FAST_SKETCH_METHODS)
USE_CALTECH_METHODS = list(USE_METHODS)
USE_CALTECH_METHODS.remove(METHOD_BOLT)  # Bolt *can't* be faster
|
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import pprint
microbench_output = \
"""
ncodebooks = 4
amm bolt N, D, M, ncodebooks: 10000, 512, 10, 4 (5x20): 7.574 (4.225e+07/s), 7.582 (4.221e+07/s), 7.584 (4.219e+07/s), 7.587 (4.218e+07/s), 7.579 (4.222e+07/s),
amm bolt N, D, M, ncodebooks: 10000, 512, 100, 4 (5x20): 7.747 (1.652e+08/s), 7.743 (1.653e+08/s), 7.757 (1.650e+08/s), 7.758 (1.650e+08/s), 7.743 (1.653e+08/s),
amm bolt N, D, M, ncodebooks: 223590, 96, 12, 4 (5x20): 26.029 (2.749e+08/s), 26.028 (2.749e+08/s), 26.013 (2.751e+08/s), 26.010 (2.751e+08/s), 26.063 (2.745e+08/s),
amm bolt N, D, M, ncodebooks: 49284, 27, 2, 4 (5x20): 1.931 (8.167e+08/s), 1.924 (8.197e+08/s), 1.925 (8.193e+08/s), 1.925 (8.193e+08/s), 1.929 (8.176e+08/s),
ncodebooks = 8
amm bolt N, D, M, ncodebooks: 10000, 512, 10, 8 (5x20): 6.912 (4.630e+07/s), 6.919 (4.625e+07/s), 6.912 (4.630e+07/s), 6.909 (4.632e+07/s), 6.911 (4.630e+07/s),
amm bolt N, D, M, ncodebooks: 10000, 512, 100, 8 (5x20): 7.169 (1.785e+08/s), 7.207 (1.776e+08/s), 7.200 (1.778e+08/s), 7.205 (1.777e+08/s), 7.205 (1.777e+08/s),
amm bolt N, D, M, ncodebooks: 223590, 96, 12, 8 (5x20): 24.550 (2.914e+08/s), 24.514 (2.919e+08/s), 24.485 (2.922e+08/s), 24.470 (2.924e+08/s), 24.474 (2.923e+08/s),
amm bolt N, D, M, ncodebooks: 49284, 27, 2, 8 (5x20): 2.445 (6.450e+08/s), 2.454 (6.427e+08/s), 2.436 (6.474e+08/s), 2.448 (6.442e+08/s), 2.446 (6.448e+08/s),
ncodebooks = 16
amm bolt N, D, M, ncodebooks: 10000, 512, 10, 16 (5x20): 6.350 (5.039e+07/s), 6.350 (5.039e+07/s), 6.347 (5.042e+07/s), 6.356 (5.035e+07/s), 6.438 (4.970e+07/s),
amm bolt N, D, M, ncodebooks: 10000, 512, 100, 16 (5x20): 7.340 (1.744e+08/s), 7.270 (1.761e+08/s), 7.302 (1.753e+08/s), 7.277 (1.759e+08/s), 7.299 (1.754e+08/s),
amm bolt N, D, M, ncodebooks: 223590, 96, 12, 16 (5x20): 28.217 (2.536e+08/s), 28.063 (2.550e+08/s), 28.082 (2.548e+08/s), 28.086 (2.547e+08/s), 28.070 (2.549e+08/s),
amm bolt N, D, M, ncodebooks: 49284, 27, 2, 16 (5x20): 3.525 (4.474e+08/s), 3.529 (4.469e+08/s), 3.525 (4.474e+08/s), 3.530 (4.468e+08/s), 3.527 (4.471e+08/s),
ncodebooks = 32
amm bolt N, D, M, ncodebooks: 10000, 512, 10, 32 (5x20): 6.036 (5.302e+07/s), 6.070 (5.272e+07/s), 6.085 (5.259e+07/s), 6.158 (5.196e+07/s), 6.176 (5.181e+07/s),
amm bolt N, D, M, ncodebooks: 10000, 512, 100, 32 (5x20): 7.473 (1.713e+08/s), 7.478 (1.712e+08/s), 7.571 (1.691e+08/s), 7.567 (1.692e+08/s), 7.571 (1.691e+08/s),
amm bolt N, D, M, ncodebooks: 223590, 96, 12, 32 (5x20): 36.693 (1.950e+08/s), 36.721 (1.948e+08/s), 36.753 (1.947e+08/s), 36.805 (1.944e+08/s), 37.216 (1.923e+08/s),
ncodebooks = 64
amm bolt N, D, M, ncodebooks: 10000, 512, 10, 64 (5x20): 6.962 (4.596e+07/s), 6.959 (4.598e+07/s), 6.954 (4.602e+07/s), 6.959 (4.598e+07/s), 6.964 (4.595e+07/s),
amm bolt N, D, M, ncodebooks: 10000, 512, 100, 64 (5x20): 8.539 (1.499e+08/s), 8.598 (1.489e+08/s), 8.484 (1.509e+08/s), 8.572 (1.493e+08/s), 8.527 (1.501e+08/s),
amm bolt N, D, M, ncodebooks: 223590, 96, 12, 64 (5x20): 64.087 (1.116e+08/s), 64.096 (1.116e+08/s), 64.638 (1.107e+08/s), 64.099 (1.116e+08/s), 64.079 (1.117e+08/s),
ncodebooks = 4
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 10, 4 (5x20): 0.021 (4.770e+09/s), 0.021 (4.770e+09/s), 0.021 (4.770e+09/s), 0.021 (4.770e+09/s), 0.021 (4.770e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 10, 4 (5x20): 0.016 (1.252e+09/s), 0.016 (1.252e+09/s), 0.016 (1.252e+09/s), 0.016 (1.252e+09/s), 0.016 (1.252e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 10, 4 (5x20): 0.000 (inf/s), 0.000 (inf/s), 0.000 (inf/s), 0.000 (inf/s), 0.000 (inf/s),
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 100, 4 (5x20): 0.077 (1.301e+10/s), 0.077 (1.301e+10/s), 0.076 (1.318e+10/s), 0.080 (1.252e+10/s), 0.077 (1.301e+10/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 100, 4 (5x20): 0.016 (1.252e+09/s), 0.016 (1.252e+09/s), 0.016 (1.252e+09/s), 0.016 (1.252e+09/s), 0.017 (1.178e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 100, 4 (5x20): 0.000 (inf/s), 0.000 (inf/s), 0.000 (inf/s), 0.000 (inf/s), 0.000 (inf/s),
----
f32 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 4 (5x20): 0.999 (2.686e+09/s), 0.974 (2.755e+09/s), 1.001 (2.681e+09/s), 1.000 (2.683e+09/s), 0.999 (2.686e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 4 (5x20): 0.614 (7.284e+08/s), 0.611 (7.320e+08/s), 0.598 (7.479e+08/s), 0.613 (7.296e+08/s), 0.601 (7.441e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 4 (5x20): 0.024 (1.863e+10/s), 0.024 (1.863e+10/s), 0.024 (1.863e+10/s), 0.024 (1.863e+10/s), 0.024 (1.863e+10/s),
----
i16 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 4 (5x20): 0.604 (4.443e+09/s), 0.603 (4.450e+09/s), 0.579 (4.635e+09/s), 0.604 (4.443e+09/s), 0.605 (4.435e+09/s),
i16 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 4 (5x20): 0.257 (1.740e+09/s), 0.280 (1.597e+09/s), 0.265 (1.688e+09/s), 0.254 (1.761e+09/s), 0.254 (1.761e+09/s),
i16 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 4 (5x20): 0.024 (1.863e+10/s), 0.024 (1.863e+10/s), 0.024 (1.863e+10/s), 0.024 (1.863e+10/s), 0.024 (1.863e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 4 (5x20): 0.083 (1.188e+09/s), 0.083 (1.188e+09/s), 0.085 (1.160e+09/s), 0.084 (1.174e+09/s), 0.084 (1.174e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 4 (5x20): 0.077 (1.281e+09/s), 0.077 (1.281e+09/s), 0.076 (1.298e+09/s), 0.076 (1.298e+09/s), 0.076 (1.298e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 4 (5x20): 0.004 (2.466e+10/s), 0.004 (2.466e+10/s), 0.004 (2.466e+10/s), 0.004 (2.466e+10/s), 0.004 (2.466e+10/s),
----
i8 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 4 (5x20): 0.034 (2.901e+09/s), 0.029 (3.401e+09/s), 0.029 (3.401e+09/s), 0.030 (3.287e+09/s), 0.030 (3.287e+09/s),
i8 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 4 (5x20): 0.023 (4.288e+09/s), 0.023 (4.288e+09/s), 0.023 (4.288e+09/s), 0.023 (4.288e+09/s), 0.023 (4.288e+09/s),
i8 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 4 (5x20): 0.004 (2.466e+10/s), 0.004 (2.466e+10/s), 0.004 (2.466e+10/s), 0.004 (2.466e+10/s), 0.004 (2.466e+10/s),
ncodebooks = 8
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 10, 8 (5x20): 0.043 (2.329e+09/s), 0.043 (2.329e+09/s), 0.043 (2.329e+09/s), 0.043 (2.329e+09/s), 0.043 (2.329e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 10, 8 (5x20): 0.031 (1.292e+09/s), 0.032 (1.252e+09/s), 0.033 (1.214e+09/s), 0.034 (1.178e+09/s), 0.034 (1.178e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 10, 8 (5x20): 0.001 (4.006e+10/s), 0.001 (4.006e+10/s), 0.001 (4.006e+10/s), 0.001 (4.006e+10/s), 0.001 (4.006e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 100, 8 (5x20): 0.154 (6.504e+09/s), 0.162 (6.183e+09/s), 0.155 (6.462e+09/s), 0.155 (6.462e+09/s), 0.162 (6.183e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 100, 8 (5x20): 0.035 (1.145e+09/s), 0.033 (1.214e+09/s), 0.032 (1.252e+09/s), 0.034 (1.178e+09/s), 0.034 (1.178e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 100, 8 (5x20): 0.001 (4.006e+10/s), 0.001 (4.006e+10/s), 0.001 (4.006e+10/s), 0.001 (4.006e+10/s), 0.001 (4.006e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 8 (5x20): 1.810 (1.483e+09/s), 1.790 (1.499e+09/s), 1.797 (1.493e+09/s), 1.809 (1.483e+09/s), 1.810 (1.483e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 8 (5x20): 1.395 (6.412e+08/s), 1.371 (6.524e+08/s), 1.394 (6.417e+08/s), 1.394 (6.417e+08/s), 1.393 (6.421e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 8 (5x20): 0.041 (2.182e+10/s), 0.041 (2.182e+10/s), 0.041 (2.182e+10/s), 0.041 (2.182e+10/s), 0.041 (2.182e+10/s),
----
i16 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 8 (5x20): 1.102 (2.435e+09/s), 1.106 (2.426e+09/s), 1.091 (2.460e+09/s), 1.101 (2.437e+09/s), 1.129 (2.377e+09/s),
i16 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 8 (5x20): 0.681 (1.313e+09/s), 0.653 (1.370e+09/s), 0.654 (1.368e+09/s), 0.653 (1.370e+09/s), 0.653 (1.370e+09/s),
i16 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 8 (5x20): 0.041 (2.182e+10/s), 0.041 (2.182e+10/s), 0.041 (2.182e+10/s), 0.043 (2.080e+10/s), 0.043 (2.080e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 8 (5x20): 0.173 (5.701e+08/s), 0.172 (5.734e+08/s), 0.173 (5.701e+08/s), 0.185 (5.331e+08/s), 0.173 (5.701e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 8 (5x20): 0.160 (1.233e+09/s), 0.176 (1.121e+09/s), 0.185 (1.066e+09/s), 0.165 (1.195e+09/s), 0.161 (1.225e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 8 (5x20): 0.008 (2.466e+10/s), 0.008 (2.466e+10/s), 0.008 (2.466e+10/s), 0.008 (2.466e+10/s), 0.008 (2.466e+10/s),
----
i8 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 8 (5x20): 0.059 (1.672e+09/s), 0.059 (1.672e+09/s), 0.059 (1.672e+09/s), 0.059 (1.672e+09/s), 0.059 (1.672e+09/s),
i8 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 8 (5x20): 0.049 (4.025e+09/s), 0.050 (3.945e+09/s), 0.049 (4.025e+09/s), 0.048 (4.109e+09/s), 0.048 (4.109e+09/s),
i8 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 8 (5x20): 0.008 (2.466e+10/s), 0.008 (2.466e+10/s), 0.008 (2.466e+10/s), 0.008 (2.466e+10/s), 0.008 (2.466e+10/s),
ncodebooks = 16
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 10, 16 (5x20): 0.094 (1.066e+09/s), 0.093 (1.077e+09/s), 0.100 (1.002e+09/s), 0.100 (1.002e+09/s), 0.097 (1.033e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 10, 16 (5x20): 0.065 (1.233e+09/s), 0.066 (1.214e+09/s), 0.066 (1.214e+09/s), 0.065 (1.233e+09/s), 0.066 (1.214e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 10, 16 (5x20): 0.003 (2.671e+10/s), 0.003 (2.671e+10/s), 0.003 (2.671e+10/s), 0.003 (2.671e+10/s), 0.003 (2.671e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 100, 16 (5x20): 0.367 (2.729e+09/s), 0.372 (2.692e+09/s), 0.374 (2.678e+09/s), 0.377 (2.657e+09/s), 0.374 (2.678e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 100, 16 (5x20): 0.067 (1.196e+09/s), 0.064 (1.252e+09/s), 0.064 (1.252e+09/s), 0.064 (1.252e+09/s), 0.064 (1.252e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 100, 16 (5x20): 0.003 (2.671e+10/s), 0.003 (2.671e+10/s), 0.003 (2.671e+10/s), 0.003 (2.671e+10/s), 0.003 (2.671e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 16 (5x20): 3.597 (7.460e+08/s), 3.607 (7.439e+08/s), 3.599 (7.456e+08/s), 3.610 (7.433e+08/s), 3.614 (7.425e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 16 (5x20): 2.761 (6.479e+08/s), 2.761 (6.479e+08/s), 2.760 (6.482e+08/s), 2.751 (6.503e+08/s), 2.763 (6.475e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 16 (5x20): 0.103 (1.737e+10/s), 0.105 (1.704e+10/s), 0.123 (1.454e+10/s), 0.128 (1.398e+10/s), 0.123 (1.454e+10/s),
----
i16 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 16 (5x20): 2.233 (1.202e+09/s), 2.261 (1.187e+09/s), 2.207 (1.216e+09/s), 2.207 (1.216e+09/s), 2.261 (1.187e+09/s),
i16 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 16 (5x20): 1.417 (1.262e+09/s), 1.563 (1.145e+09/s), 1.514 (1.182e+09/s), 1.464 (1.222e+09/s), 1.483 (1.206e+09/s),
i16 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 16 (5x20): 0.136 (1.315e+10/s), 0.130 (1.376e+10/s), 0.147 (1.217e+10/s), 0.133 (1.345e+10/s), 0.134 (1.335e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 16 (5x20): 0.397 (2.484e+08/s), 0.407 (2.423e+08/s), 0.395 (2.497e+08/s), 0.388 (2.542e+08/s), 0.385 (2.562e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 16 (5x20): 0.369 (1.069e+09/s), 0.368 (1.072e+09/s), 0.377 (1.046e+09/s), 0.375 (1.052e+09/s), 0.408 (9.669e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 16 (5x20): 0.019 (2.076e+10/s), 0.019 (2.076e+10/s), 0.019 (2.076e+10/s), 0.019 (2.076e+10/s), 0.019 (2.076e+10/s),
----
i8 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 16 (5x20): 0.131 (7.529e+08/s), 0.131 (7.529e+08/s), 0.131 (7.529e+08/s), 0.131 (7.529e+08/s), 0.131 (7.529e+08/s),
i8 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 16 (5x20): 0.103 (3.830e+09/s), 0.103 (3.830e+09/s), 0.103 (3.830e+09/s), 0.103 (3.830e+09/s), 0.104 (3.793e+09/s),
i8 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 16 (5x20): 0.019 (2.076e+10/s), 0.019 (2.076e+10/s), 0.019 (2.076e+10/s), 0.019 (2.076e+10/s), 0.019 (2.076e+10/s),
ncodebooks = 32
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 10, 32 (5x20): 0.201 (4.983e+08/s), 0.194 (5.163e+08/s), 0.205 (4.886e+08/s), 0.201 (4.983e+08/s), 0.200 (5.008e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 10, 32 (5x20): 0.142 (1.129e+09/s), 0.143 (1.121e+09/s), 0.144 (1.113e+09/s), 0.142 (1.129e+09/s), 0.161 (9.954e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 10, 32 (5x20): 0.007 (2.289e+10/s), 0.007 (2.289e+10/s), 0.007 (2.289e+10/s), 0.007 (2.289e+10/s), 0.007 (2.289e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 100, 32 (5x20): 0.762 (1.314e+09/s), 0.781 (1.282e+09/s), 0.756 (1.325e+09/s), 0.753 (1.330e+09/s), 0.798 (1.255e+09/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 100, 32 (5x20): 0.183 (8.757e+08/s), 0.149 (1.076e+09/s), 0.154 (1.041e+09/s), 0.150 (1.068e+09/s), 0.147 (1.090e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 100, 32 (5x20): 0.007 (2.289e+10/s), 0.007 (2.289e+10/s), 0.007 (2.289e+10/s), 0.007 (2.289e+10/s), 0.007 (2.289e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 32 (5x20): 7.958 (3.372e+08/s), 7.142 (3.757e+08/s), 7.148 (3.754e+08/s), 7.114 (3.772e+08/s), 7.135 (3.761e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 32 (5x20): 5.589 (6.402e+08/s), 5.642 (6.341e+08/s), 5.563 (6.432e+08/s), 5.592 (6.398e+08/s), 5.579 (6.413e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 32 (5x20): 0.341 (1.049e+10/s), 0.330 (1.084e+10/s), 0.327 (1.094e+10/s), 0.327 (1.094e+10/s), 0.328 (1.091e+10/s),
----
i16 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 32 (5x20): 4.369 (6.142e+08/s), 4.357 (6.159e+08/s), 4.537 (5.914e+08/s), 4.361 (6.153e+08/s), 4.406 (6.090e+08/s),
i16 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 32 (5x20): 2.888 (1.239e+09/s), 2.889 (1.238e+09/s), 2.898 (1.235e+09/s), 2.898 (1.235e+09/s), 2.909 (1.230e+09/s),
i16 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 32 (5x20): 0.329 (1.087e+10/s), 0.326 (1.098e+10/s), 0.331 (1.081e+10/s), 0.328 (1.091e+10/s), 0.345 (1.037e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 32 (5x20): 0.781 (1.263e+08/s), 0.785 (1.256e+08/s), 0.793 (1.244e+08/s), 0.788 (1.252e+08/s), 0.787 (1.253e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 32 (5x20): 0.814 (9.693e+08/s), 0.828 (9.529e+08/s), 0.755 (1.045e+09/s), 0.766 (1.030e+09/s), 0.768 (1.027e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 32 (5x20): 0.045 (1.753e+10/s), 0.041 (1.924e+10/s), 0.041 (1.924e+10/s), 0.046 (1.715e+10/s), 0.041 (1.924e+10/s),
----
i8 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 32 (5x20): 0.320 (3.082e+08/s), 0.303 (3.255e+08/s), 0.301 (3.277e+08/s), 0.321 (3.072e+08/s), 0.301 (3.277e+08/s),
i8 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 32 (5x20): 0.279 (2.828e+09/s), 0.260 (3.035e+09/s), 0.263 (3.000e+09/s), 0.221 (3.570e+09/s), 0.242 (3.260e+09/s),
i8 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 32 (5x20): 0.061 (1.293e+10/s), 0.044 (1.793e+10/s), 0.041 (1.924e+10/s), 0.041 (1.924e+10/s), 0.040 (1.972e+10/s),
ncodebooks = 64
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 10, 64 (5x20): 0.454 (2.206e+08/s), 0.497 (2.015e+08/s), 0.489 (2.048e+08/s), 0.486 (2.061e+08/s), 0.457 (2.192e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 10, 64 (5x20): 0.349 (9.184e+08/s), 0.344 (9.317e+08/s), 0.385 (8.325e+08/s), 0.377 (8.502e+08/s), 0.344 (9.317e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 10, 64 (5x20): 0.019 (1.687e+10/s), 0.019 (1.687e+10/s), 0.019 (1.687e+10/s), 0.019 (1.687e+10/s), 0.020 (1.603e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 10000, 512, 100, 64 (5x20): 1.586 (6.315e+08/s), 1.530 (6.546e+08/s), 1.531 (6.542e+08/s), 1.529 (6.551e+08/s), 1.539 (6.508e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 10000, 512, 100, 64 (5x20): 0.405 (7.914e+08/s), 0.408 (7.856e+08/s), 0.449 (7.138e+08/s), 0.403 (7.953e+08/s), 0.411 (7.798e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 10000, 512, 100, 64 (5x20): 0.020 (1.603e+10/s), 0.020 (1.603e+10/s), 0.019 (1.687e+10/s), 0.019 (1.687e+10/s), 0.019 (1.687e+10/s),
----
f32 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 64 (5x20): 14.943 (1.796e+08/s), 15.205 (1.765e+08/s), 14.912 (1.799e+08/s), 14.951 (1.795e+08/s), 14.981 (1.791e+08/s),
f32 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 64 (5x20): 11.376 (6.290e+08/s), 11.305 (6.330e+08/s), 11.313 (6.325e+08/s), 11.315 (6.324e+08/s), 11.312 (6.326e+08/s),
f32 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 64 (5x20): 0.877 (8.159e+09/s), 0.822 (8.705e+09/s), 0.845 (8.468e+09/s), 0.849 (8.428e+09/s), 0.836 (8.559e+09/s),
----
i16 amm mithral N, D, M, ncodebooks: 223590, 96, 12, 64 (5x20): 9.459 (2.837e+08/s), 9.458 (2.837e+08/s), 9.420 (2.849e+08/s), 9.457 (2.837e+08/s), 9.452 (2.839e+08/s),
i16 amm mithral enc N, D, M, ncodebooks: 223590, 96, 12, 64 (5x20): 5.819 (1.230e+09/s), 5.820 (1.230e+09/s), 5.824 (1.229e+09/s), 5.845 (1.224e+09/s), 5.901 (1.213e+09/s),
i16 amm mithral zipb N, D, M, ncodebooks: 223590, 96, 12, 64 (5x20): 0.818 (8.748e+09/s), 0.823 (8.695e+09/s), 0.803 (8.911e+09/s), 0.818 (8.748e+09/s), 0.851 (8.409e+09/s),
----
f32 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 64 (5x20): 1.571 (6.278e+07/s), 1.571 (6.278e+07/s), 1.573 (6.270e+07/s), 1.574 (6.266e+07/s), 1.571 (6.278e+07/s),
f32 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 64 (5x20): 1.479 (1.067e+09/s), 1.473 (1.071e+09/s), 1.475 (1.070e+09/s), 1.476 (1.069e+09/s), 1.473 (1.071e+09/s),
f32 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 64 (5x20): 0.114 (1.384e+10/s), 0.115 (1.372e+10/s), 0.115 (1.372e+10/s), 0.110 (1.435e+10/s), 0.115 (1.372e+10/s),
----
i8 amm mithral N, D, M, ncodebooks: 49284, 27, 2, 64 (5x20): 0.561 (1.758e+08/s), 0.560 (1.761e+08/s), 0.561 (1.758e+08/s), 0.560 (1.761e+08/s), 0.560 (1.761e+08/s),
i8 amm mithral enc N, D, M, ncodebooks: 49284, 27, 2, 64 (5x20): 0.453 (3.483e+09/s), 0.492 (3.207e+09/s), 0.470 (3.357e+09/s), 0.464 (3.401e+09/s), 0.494 (3.194e+09/s),
i8 amm mithral zipb N, D, M, ncodebooks: 49284, 27, 2, 64 (5x20): 0.114 (1.384e+10/s), 0.120 (1.315e+10/s), 0.116 (1.360e+10/s), 0.114 (1.384e+10/s), 0.114 (1.384e+10/s),
blas sketch matmul N, D, M, d: 10000, 512, 10, 2 (5x20): 3.827 (2.613e+07/s), 3.815 (2.621e+07/s), 3.830 (2.611e+07/s), 3.858 (2.592e+07/s), 3.832 (2.610e+07/s),
our sketch matmul N, D, M, d: 10000, 512, 10, 2 (5x20): 1.080 (9.259e+07/s), 1.041 (9.606e+07/s), 1.049 (9.533e+07/s), 1.049 (9.533e+07/s), 1.045 (9.569e+07/s),
blas sketch matmul N, D, M, d: 10000, 512, 10, 4 (5x20): 3.505 (2.853e+07/s), 3.568 (2.803e+07/s), 3.541 (2.824e+07/s), 3.431 (2.915e+07/s), 3.234 (3.092e+07/s),
our sketch matmul N, D, M, d: 10000, 512, 10, 4 (5x20): 2.081 (4.805e+07/s), 2.135 (4.684e+07/s), 2.083 (4.801e+07/s), 2.077 (4.815e+07/s), 2.079 (4.810e+07/s),
blas sketch matmul N, D, M, d: 10000, 512, 10, 8 (5x20): 3.772 (2.651e+07/s), 3.641 (2.746e+07/s), 3.617 (2.765e+07/s), 3.616 (2.765e+07/s), 4.002 (2.499e+07/s),
our sketch matmul N, D, M, d: 10000, 512, 10, 8 (5x20): 2.864 (3.492e+07/s), 2.861 (3.495e+07/s), 2.901 (3.447e+07/s), 3.017 (3.315e+07/s), 2.880 (3.472e+07/s),
blas sketch matmul N, D, M, d: 10000, 512, 10, 16 (5x20): 4.535 (2.205e+07/s), 4.565 (2.191e+07/s), 4.475 (2.235e+07/s), 4.476 (2.234e+07/s), 4.480 (2.232e+07/s),
our sketch matmul N, D, M, d: 10000, 512, 10, 16 (5x20): 5.217 (1.917e+07/s), 5.185 (1.929e+07/s), 5.243 (1.907e+07/s), 5.256 (1.903e+07/s), 5.184 (1.929e+07/s),
blas sketch matmul N, D, M, d: 10000, 512, 10, 32 (5x20): 6.537 (1.530e+07/s), 6.527 (1.532e+07/s), 6.517 (1.534e+07/s), 6.507 (1.537e+07/s), 6.512 (1.536e+07/s),
our sketch matmul N, D, M, d: 10000, 512, 10, 32 (5x20): 9.143 (1.094e+07/s), 9.119 (1.097e+07/s), 9.137 (1.094e+07/s), 9.110 (1.098e+07/s), 9.128 (1.096e+07/s),
blas sketch matmul N, D, M, d: 10000, 512, 10, 64 (5x20): 10.156 (9.846e+06/s), 10.136 (9.866e+06/s), 10.143 (9.859e+06/s), 10.146 (9.856e+06/s), 10.147 (9.855e+06/s),
our sketch matmul N, D, M, d: 10000, 512, 10, 64 (5x20): 17.739 (5.637e+06/s), 17.767 (5.628e+06/s), 17.641 (5.669e+06/s), 17.647 (5.667e+06/s), 17.640 (5.669e+06/s),
blas sketch matmul N, D, M, d: 10000, 512, 10, 128 (5x20): 17.149 (5.831e+06/s), 17.183 (5.820e+06/s), 17.144 (5.833e+06/s), 17.109 (5.845e+06/s), 17.182 (5.820e+06/s),
our sketch matmul N, D, M, d: 10000, 512, 10, 128 (5x20): 35.289 (2.834e+06/s), 35.025 (2.855e+06/s), 35.294 (2.833e+06/s), 35.022 (2.855e+06/s), 35.071 (2.851e+06/s),
blas matmul N, D, M: 10000, 512, 10 (5x20): 4.174 (2.396e+07/s), 4.136 (2.418e+07/s), 4.164 (2.402e+07/s), 4.198 (2.382e+07/s), 4.188 (2.388e+07/s),
our matmul N, D, M: 10000, 512, 10 (5x20): 3.546 (2.820e+07/s), 3.546 (2.820e+07/s), 3.553 (2.815e+07/s), 3.555 (2.813e+07/s), 3.560 (2.809e+07/s),
blas sketch matmul N, D, M, d: 10000, 512, 100, 2 (5x20): 4.085 (2.448e+08/s), 4.091 (2.444e+08/s), 4.055 (2.466e+08/s), 4.045 (2.472e+08/s), 4.057 (2.465e+08/s),
our sketch matmul N, D, M, d: 10000, 512, 100, 2 (5x20): 1.322 (7.564e+08/s), 1.337 (7.479e+08/s), 1.336 (7.485e+08/s), 1.323 (7.559e+08/s), 1.322 (7.564e+08/s),
blas sketch matmul N, D, M, d: 10000, 512, 100, 4 (5x20): 3.631 (2.754e+08/s), 3.843 (2.602e+08/s), 3.798 (2.633e+08/s), 3.848 (2.599e+08/s), 3.847 (2.599e+08/s),
our sketch matmul N, D, M, d: 10000, 512, 100, 4 (5x20): 2.626 (3.808e+08/s), 2.491 (4.014e+08/s), 2.510 (3.984e+08/s), 2.589 (3.862e+08/s), 2.480 (4.032e+08/s),
blas sketch matmul N, D, M, d: 10000, 512, 100, 8 (5x20): 4.275 (2.339e+08/s), 4.313 (2.319e+08/s), 4.333 (2.308e+08/s), 4.289 (2.332e+08/s), 4.130 (2.421e+08/s),
our sketch matmul N, D, M, d: 10000, 512, 100, 8 (5x20): 3.405 (2.937e+08/s), 3.571 (2.800e+08/s), 3.405 (2.937e+08/s), 3.423 (2.921e+08/s), 3.405 (2.937e+08/s),
blas sketch matmul N, D, M, d: 10000, 512, 100, 16 (5x20): 5.392 (1.855e+08/s), 5.316 (1.881e+08/s), 5.283 (1.893e+08/s), 5.281 (1.894e+08/s), 5.184 (1.929e+08/s),
our sketch matmul N, D, M, d: 10000, 512, 100, 16 (5x20): 6.046 (1.654e+08/s), 6.047 (1.654e+08/s), 6.076 (1.646e+08/s), 6.071 (1.647e+08/s), 6.044 (1.655e+08/s),
blas sketch matmul N, D, M, d: 10000, 512, 100, 32 (5x20): 7.291 (1.372e+08/s), 7.293 (1.371e+08/s), 7.308 (1.368e+08/s), 7.296 (1.371e+08/s), 7.294 (1.371e+08/s),
our sketch matmul N, D, M, d: 10000, 512, 100, 32 (5x20): 10.697 (9.348e+07/s), 10.584 (9.448e+07/s), 10.599 (9.435e+07/s), 10.611 (9.424e+07/s), 10.594 (9.439e+07/s),
blas sketch matmul N, D, M, d: 10000, 512, 100, 64 (5x20): 11.586 (8.631e+07/s), 11.528 (8.675e+07/s), 11.528 (8.675e+07/s), 11.535 (8.669e+07/s), 11.530 (8.673e+07/s),
our sketch matmul N, D, M, d: 10000, 512, 100, 64 (5x20): 20.459 (4.888e+07/s), 20.514 (4.875e+07/s), 20.542 (4.868e+07/s), 20.429 (4.895e+07/s), 20.532 (4.870e+07/s),
blas matmul N, D, M: 10000, 512, 100 (5x20): 13.506 (7.404e+07/s), 13.432 (7.445e+07/s), 13.467 (7.426e+07/s), 13.464 (7.427e+07/s), 13.484 (7.416e+07/s),
our matmul N, D, M: 10000, 512, 100 (5x20): 27.160 (3.682e+07/s), 27.135 (3.685e+07/s), 27.260 (3.668e+07/s), 27.213 (3.675e+07/s), 27.268 (3.667e+07/s),
blas sketch matmul N, D, M, d: 223590, 96, 12, 2 (5x20): 17.987 (1.492e+08/s), 17.601 (1.524e+08/s), 18.118 (1.481e+08/s), 17.847 (1.503e+08/s), 17.977 (1.493e+08/s),
our sketch matmul N, D, M, d: 223590, 96, 12, 2 (5x20): 5.117 (5.243e+08/s), 5.115 (5.246e+08/s), 5.102 (5.259e+08/s), 5.088 (5.273e+08/s), 5.111 (5.250e+08/s),
blas sketch matmul N, D, M, d: 223590, 96, 12, 4 (5x20): 11.524 (2.328e+08/s), 12.362 (2.170e+08/s), 11.828 (2.268e+08/s), 11.793 (2.275e+08/s), 11.785 (2.277e+08/s),
our sketch matmul N, D, M, d: 223590, 96, 12, 4 (5x20): 9.979 (2.689e+08/s), 10.007 (2.681e+08/s), 10.010 (2.680e+08/s), 10.010 (2.680e+08/s), 9.973 (2.690e+08/s),
blas sketch matmul N, D, M, d: 223590, 96, 12, 8 (5x20): 19.261 (1.393e+08/s), 19.116 (1.404e+08/s), 19.205 (1.397e+08/s), 19.342 (1.387e+08/s), 19.189 (1.398e+08/s),
our sketch matmul N, D, M, d: 223590, 96, 12, 8 (5x20): 14.543 (1.845e+08/s), 14.510 (1.849e+08/s), 14.570 (1.842e+08/s), 14.556 (1.843e+08/s), 14.509 (1.849e+08/s),
blas matmul N, D, M: 223590, 96, 12 (5x20): 19.189 (1.398e+08/s), 19.231 (1.395e+08/s), 19.378 (1.385e+08/s), 19.348 (1.387e+08/s), 19.390 (1.384e+08/s),
our matmul N, D, M: 223590, 96, 12 (5x20): 16.242 (1.652e+08/s), 16.194 (1.657e+08/s), 16.197 (1.657e+08/s), 16.230 (1.653e+08/s), 16.238 (1.652e+08/s),
blas sketch matmul N, D, M, d: 49284, 27, 2, 2 (5x20): 0.375 (2.628e+08/s), 0.373 (2.643e+08/s), 0.380 (2.594e+08/s), 0.380 (2.594e+08/s), 0.378 (2.608e+08/s),
our sketch matmul N, D, M, d: 49284, 27, 2, 2 (5x20): 0.219 (4.501e+08/s), 0.220 (4.480e+08/s), 0.219 (4.501e+08/s), 0.216 (4.563e+08/s), 0.203 (4.856e+08/s),
blas matmul N, D, M: 49284, 27, 2 (5x20): 0.327 (3.014e+08/s), 0.318 (3.100e+08/s), 0.319 (3.090e+08/s), 0.328 (3.005e+08/s), 0.328 (3.005e+08/s),
our matmul N, D, M: 49284, 27, 2 (5x20): 0.186 (5.299e+08/s), 0.181 (5.446e+08/s), 0.183 (5.386e+08/s), 0.174 (5.665e+08/s), 0.173 (5.698e+08/s),
"""
def _load_matmul_times_for_n_d_m(startswith):
    """Parse benchmark lines beginning with `startswith` out of the
    module-level `microbench_output` string.

    Each matching line looks like:
        <name> N, D, M: <N>, <D>, <M> (5x20): <time> (<thruput>/s), ...

    Returns a pair of dicts: (N, D, M) -> list of latencies and
    (N, D, M) -> list of throughputs.
    """
    shape_to_times = {}
    shape_to_thruputs = {}
    for line in microbench_output.split('\n'):
        if not line.startswith(startswith):
            continue
        shape_str = line[line.find(':') + 1:line.find('(')]
        N, D, M = (int(s) for s in shape_str.split(',')[:3])
        times = []
        thruputs = []
        for entry in line[line.find('):') + 2:].split(',')[:5]:
            entry = entry.strip()
            if not entry:
                continue  # handle trailing comma on line
            time_str, thruput_str = entry.split()
            times.append(float(time_str))
            thruputs.append(float(thruput_str.strip('()s/')))
        shape_to_times[(N, D, M)] = times
        shape_to_thruputs[(N, D, M)] = thruputs
    return shape_to_times, shape_to_thruputs
def _load_sketch_times_for_n_d_m(startswith):
    """Parse sketch benchmark lines (which carry a 4th size field d) from the
    module-level `microbench_output` string.

    Returns a pair of dicts keyed by (N, D, M, d): latencies and throughputs.
    """
    shape_to_times = {}
    shape_to_thruputs = {}
    for line in microbench_output.split('\n'):
        if not line.startswith(startswith):
            continue
        shape_str = line[line.find(':') + 1:line.find('(')]
        N, D, M, d = (int(s) for s in shape_str.split(',')[:4])
        times = []
        thruputs = []
        for entry in line[line.find('):') + 2:].split(',')[:5]:
            entry = entry.strip()
            if not entry:
                continue  # handle trailing comma on line
            time_str, thruput_str = entry.split()
            times.append(float(time_str))
            thruputs.append(float(thruput_str.strip('()s/')))
        shape_to_times[(N, D, M, d)] = times
        shape_to_thruputs[(N, D, M, d)] = thruputs
    return shape_to_times, shape_to_thruputs
def load_matmul_times_for_n_d_m(key1='blas matmul', key2='our matmul',
                                sketches=False):
    """Load timings for two competing implementations (eigen blas vs ours)
    and keep, per shape, whichever was faster on average.

    Returns (shape -> latency list, shape -> throughput list).
    """
    loader = (_load_sketch_times_for_n_d_m if sketches
              else _load_matmul_times_for_n_d_m)
    shape2lat0, shape2thruput0 = loader(key1)
    shape2lat1, shape2thruput1 = loader(key2)

    # take the implementation with the lower mean latency
    shape2lat = {}
    for k in shape2lat0:
        mean0 = np.mean(shape2lat0.get(k, [1e20]))
        mean1 = np.mean(shape2lat1.get(k, [1e20]))
        shape2lat[k] = shape2lat0[k] if mean0 < mean1 else shape2lat1[k]

    # and the one with the higher mean throughput
    shape2thruput = {}
    for k in shape2thruput0:
        mean0 = np.mean(shape2thruput0.get(k, [-1e20]))
        mean1 = np.mean(shape2thruput1.get(k, [-1e20]))
        shape2thruput[k] = (shape2thruput0[k] if mean0 > mean1
                            else shape2thruput1[k])
    return shape2lat, shape2thruput
def _load_vq_times_for_n_d_m(startswith):
    """Parse vector-quantization benchmark lines (sizes N, D, M plus the
    number of codebooks C) from the module-level `microbench_output`.

    Returns a pair of dicts keyed by (N, D, M, C): latencies and throughputs.
    """
    shape_to_times = {}
    shape_to_thruputs = {}
    for line in microbench_output.split('\n'):
        if not line.startswith(startswith):
            continue
        shape_str = line[line.find(':') + 1:line.find('(')]
        N, D, M, C = (int(s) for s in shape_str.split(',')[:4])
        times = []
        thruputs = []
        for entry in line[line.find('):') + 2:].split(',')[:5]:
            entry = entry.strip()
            if not entry:
                continue  # handle trailing comma on line
            time_str, thruput_str = entry.split()
            times.append(float(time_str))
            thruputs.append(float(thruput_str.strip('()s/')))
        shape_to_times[(N, D, M, C)] = times
        shape_to_thruputs[(N, D, M, C)] = thruputs
    return shape_to_times, shape_to_thruputs
# def load_multisplit_times_for_n_d_m():
# return _load_vq_times_for_n_d_m('famm mithral')
def load_bolt_times_for_n_d_m():
    """Latency/throughput dicts for the 'amm bolt' benchmark lines."""
    return _load_vq_times_for_n_d_m('amm bolt')
def load_mithral_f32_times_for_n_d_m():
    """Latency/throughput dicts for the f32 mithral AMM lines."""
    # two spaces so it doesn't try to read enc and zip times
    return _load_vq_times_for_n_d_m('f32 amm mithral ')
def load_mithral_i16_times_for_n_d_m():
    """Latency/throughput dicts for the i16 mithral AMM lines."""
    return _load_vq_times_for_n_d_m('i16 amm mithral ')
def load_mithral_i8_times_for_n_d_m():
    """Latency/throughput dicts for the i8 mithral AMM lines."""
    return _load_vq_times_for_n_d_m('i8 amm mithral ')
def load_mithral_times_for_n_d_m():
    """Default mithral timings; same prefix as the f32 loader."""
    return _load_vq_times_for_n_d_m('f32 amm mithral ')
def load_sketch_times_for_n_d_m():
    """Best-of(blas, ours) sketch-matmul timings, keyed by (N, D, M, d)."""
    return load_matmul_times_for_n_d_m(
        'blas sketch matmul', 'our sketch matmul', sketches=True)
def main():
    """Print the parsed exact-matmul latency/throughput tables."""
    latencies_and_thruputs = load_matmul_times_for_n_d_m()
    print("matmul latencies, thruputs")
    pprint.pprint(latencies_and_thruputs)
# emit the timing report when invoked as a script
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
import itertools
import numpy as np
from sklearn import cluster
from scipy import signal
# import types
import kmc2 # state-of-the-art kmeans initialization (as of NIPS 2016)
from joblib import Memory
_memory = Memory('.', verbose=0)
# ================================================================ misc
def is_dict(x):
    """Return True iff `x` is a dict (or dict subclass)."""
    return isinstance(x, dict)
def is_list_or_tuple(x):
    """Return True iff `x` is a list or a tuple (or subclass of either)."""
    return isinstance(x, (list, tuple))
def as_list_or_tuple(x):
    """Wrap `x` in a one-element list unless it is already a list/tuple."""
    if isinstance(x, (list, tuple)):
        return x
    return [x]
def is_scalar_seq(x):
    """True iff `x` is iterable and every element coerces to float."""
    try:
        for element in x:
            float(element)
        return True
    except TypeError:
        return False
def as_scalar_seq(x):
    """Return `x` if it's already a sequence of scalars; wrap a lone scalar
    in a list; raise TypeError for anything else."""
    try:
        for element in x:
            float(element)
        return x
    except TypeError:
        pass
    try:
        _ = float(x)
        return [x]
    except TypeError:
        raise TypeError("Couldn't convert value '{}' to sequence "
                        "of scalars".format(x))
def is_string(x):
    """Return True iff `x` is a str."""
    return isinstance(x, str)
def flatten_list_of_lists(l):
    """Concatenate the sublists of `l` into a single flat list."""
    return [item for sublist in l for item in sublist]
def element_size_bytes(x):
    """Size in bytes of one element of array-like `x` (via its dtype)."""
    return np.dtype(x.dtype).itemsize
def invert_permutation(permutation):
    """Return the inverse permutation: out[permutation[i]] == i.

    arange(n)[argsort(p)] is just argsort(p), which is the inverse of p.
    """
    return np.argsort(permutation)
# ================================================================ image
def conv2d(img, filt, pad='same'):
    """2-D cross-correlation of `img` with `filt` (note: correlate2d, so the
    kernel is not flipped despite the name).

    For a 3-D image, each channel is correlated separately (with the matching
    filter channel when `filt` is 3-D) and the per-channel results are summed.
    `pad` is passed through as scipy's `mode` argument.
    """
    if img.ndim == 2:
        return signal.correlate2d(img, filt, mode=pad)

    # img has channels; correlate each one and accumulate
    assert img.ndim == 3
    out = np.zeros(img.shape[:2], dtype=np.float32)
    filt_is_3d = filt.ndim == 3
    for c in range(img.shape[2]):
        chan_filt = filt[:, :, c] if filt_is_3d else filt
        out += signal.correlate2d(img[:, :, c], chan_filt, mode=pad)
    return out
# def filter_img(img, filt):
# out = conv2d(img, filt)
# return out / np.max(out)
# ================================================================ distance
def dists_sq(X, q):
    """Squared Euclidean distance from each row of X to q (last axis)."""
    deltas = X - q
    return (deltas * deltas).sum(axis=-1)
def dists_l1(X, q):
    """Manhattan (L1) distance from each row of X to q (last axis)."""
    return np.abs(X - q).sum(axis=-1)
def sq_dists_to_vectors(X, queries, rowNorms=None, queryNorms=None):
    """Squared Euclidean distances between every row of X and every row of
    queries, via ||x - q||^2 = ||x||^2 - 2<x, q> + ||q||^2.

    rowNorms (column vector of ||x||^2) and queryNorms (vector of ||q||^2)
    may be passed to avoid recomputation. Returns a len(X) x len(queries)
    array.
    """
    Q = queries.shape[0]
    mat_size = X.shape[0] * Q
    # Warn when the output matrix would exceed ~1GB. Bug fix: the original
    # compared the per-element itemsize (a handful of bytes) against 1e9,
    # so the warning could never fire; also the message lacked a space.
    mat_size_bytes = mat_size * (X[0] + queries[0]).dtype.itemsize
    if mat_size_bytes > int(1e9):
        print("WARNING: sq_dists_to_vectors: attempting to create a matrix "
              "of size {} ({}B)".format(mat_size, mat_size_bytes))

    if rowNorms is None:
        rowNorms = np.sum(X * X, axis=1, keepdims=True)
    if queryNorms is None:
        queryNorms = np.sum(queries * queries, axis=1)

    dotProds = np.dot(X, queries.T)
    return (-2 * dotProds) + rowNorms + queryNorms  # len(X) x len(queries)
def all_eq(x, y):
    """True iff x and y have equal length and elementwise |x - y| < .001."""
    if len(x) != len(y):
        return False
    if not len(x):
        return True  # two empty sequences are equal
    return np.max(np.abs(x - y)) < .001
def top_k_idxs(elements, k, smaller_better=True, axis=-1):
    """Indices of the k smallest (or largest) elements, best first.

    Passing a sequence as `kth` makes argpartition place each of those
    positions at its sorted location, so the returned k indices are ordered.
    """
    if smaller_better:
        kth = np.arange(k)  # pin positions 0..k-1 to their sorted spots
        return np.argpartition(elements, kth=kth, axis=axis)[:k]
    kth = len(elements) - 1 - np.arange(k)  # pin the top-k positions
    return np.argpartition(elements, kth=kth, axis=axis)[-k:][::-1]
def compute_true_knn(X, Q, k=1000, print_every=5, block_sz=128):
    """Exact k-nearest-neighbor indices into X for every row of Q.

    If Q fits in one block (<= block_sz rows), builds the full squared-
    distance matrix and takes the top k per query; otherwise splits Q into
    blocks and recurses on each. Returns an int32 array (len(Q), k).
    """
    nqueries = Q.shape[0]
    nblocks = int(np.ceil(nqueries / float(block_sz)))
    # -999 sentinel lets the final assert verify every slot was filled
    truth = np.full((nqueries, k), -999, dtype=np.int32)

    if nqueries <= block_sz:
        dists = sq_dists_to_vectors(Q, X)
        assert dists.shape == (Q.shape[0], X.shape[0])
        for i in range(nqueries):
            truth[i, :] = top_k_idxs(dists[i, :], k)
            # truth[i, :] = top_k_idxs(dists[:, i], k)
        return truth

    for b in range(nblocks):
        # recurse to fill in knn for each block
        start = b * block_sz
        end = min(start + block_sz, nqueries)
        rows = Q[start:end, :]
        truth[start:end, :] = compute_true_knn(X, rows, k=k, block_sz=block_sz)
        if b % print_every == 0:
            print("computing top k for query block "
                  "{} (queries {}-{})...".format(b, start, end))

    # for i in range(nqueries):
    #     if i % print_every == 0:
    #         print "computing top k for query {}...".format(i)
    #     truth[i, :] = top_k_idxs(dists[i, :], k)
    print("done")
    assert np.all(truth != -999)
    return truth
def knn(X, q, k, dist_func=dists_sq):
    """k nearest rows of X to query q under dist_func.

    Returns (indices, distances-at-those-indices), nearest first.
    """
    all_dists = dist_func(X, q)
    best_idxs = top_k_idxs(all_dists, k)
    return best_idxs, all_dists[best_idxs]
@_memory.cache
def kmeans(X, k, max_iter=16, init='kmc2', return_sse=False):
    """Cluster the rows of X into k centroids with MiniBatchKMeans.

    init: 'kmc2' seeds via the kmc2 algorithm; 'subspaces' (only honored when
    k >= 16) seeds with the cartesian product of centroids learned on the two
    halves of the dimensions. Returns (centroids, labels), plus the SSE
    (inertia) when return_sse is True.
    """
    X = X.astype(np.float32)

    # handle fewer nonzero rows than centroids (mostly just don't choke
    # if X all zeros, which happens when run in PQ with tiny subspaces)
    rowsums = X.sum(axis=1)
    nonzero_mask = rowsums != 0
    nnz_rows = np.sum(nonzero_mask)
    if nnz_rows < k:
        print("X.shape: ", X.shape)
        print("k: ", k)
        print("nnz_rows: ", nnz_rows)
        centroids = np.zeros((k, X.shape[1]), dtype=X.dtype)
        # all rows default to centroid index nnz_rows (an all-zero centroid);
        # dtype=int (np.int was removed in numpy >= 1.24)
        labels = np.full(X.shape[0], nnz_rows, dtype=int)
        if nnz_rows > 0:  # special case, because can't have slice of size 0
            # make a centroid out of each nonzero row, and assign only those
            # rows to that centroid; all other rows get assigned to next
            # centroid after those, which is all zeros.
            # Bug fix: was centroids[nnz_rows] = ..., which assigns a
            # (nnz_rows, D) array to a single row instead of the first
            # nnz_rows rows.
            centroids[:nnz_rows] = X[nonzero_mask]
            labels[nonzero_mask] = np.arange(nnz_rows)
        if return_sse:
            return centroids, labels, 0
        return centroids, labels

    # if k is huge, initialize centers with cartesian product of centroids
    # in two subspaces
    sqrt_k = int(np.ceil(np.sqrt(k)))
    if k >= 16 and init == 'subspaces':
        print("kmeans: clustering in subspaces first; k, sqrt(k) ="
              " {}, {}".format(k, sqrt_k))
        _, D = X.shape
        half = D // 2  # bug fix: D / 2 is a float in py3 and can't slice
        centroids0, _ = kmeans(X[:, :half], sqrt_k, max_iter=1)
        centroids1, _ = kmeans(X[:, half:], sqrt_k, max_iter=1)
        seeds = np.empty((sqrt_k * sqrt_k, D), dtype=np.float32)
        for i in range(sqrt_k):
            for j in range(sqrt_k):
                row = i * sqrt_k + j
                seeds[row, :half] = centroids0[i]
                seeds[row, half:] = centroids1[j]
        seeds = seeds[:k]  # rounded up sqrt(k), so probably has extra rows
    elif init == 'kmc2':
        try:
            seeds = kmc2.kmc2(X, k).astype(np.float32)
        except ValueError:  # can happen if dist of 0 to centroid
            print("WARNING: couldn't use kmc2 initialization")
            seeds = 'k-means++' if k < max_iter else 'random'
    else:
        raise ValueError("init parameter must be one of {'kmc2', 'subspaces'}")

    est = cluster.MiniBatchKMeans(
        k, init=seeds, max_iter=max_iter, n_init=1).fit(X)
    if return_sse:
        return est.cluster_centers_, est.labels_, est.inertia_
    return est.cluster_centers_, est.labels_
def orthonormalize_rows(A):
    """Return a matrix whose rows are an orthonormal basis for A's row space
    (via a QR decomposition of A.T)."""
    q, _ = np.linalg.qr(A.T)
    return q.T
def random_rotation(D):
    """Sample a random DxD orthonormal matrix by orthonormalizing the rows
    of a Gaussian matrix (QR of its transpose)."""
    gaussian = np.random.randn(D, D)
    q, _ = np.linalg.qr(gaussian.T)
    return q.T
def hamming_dist(v1, v2):
    """Number of positions where v1 and v2 differ."""
    mismatches = v1 != v2
    return np.count_nonzero(mismatches)
def hamming_dists(X, q):
    """Hamming distance from each row of X to q, as an array."""
    return np.array([np.count_nonzero(row != q) for row in X])
if __name__ == '__main__':
    # quick sanity check: top_k_idxs with smaller_better=False should agree
    # with the first 3 entries of a descending argsort
    a = np.random.randn(10)
    sort_idxs = np.argsort(a)[::-1]
    print(a)
    print(top_k_idxs(a, 3, smaller_better=False))
    print(sort_idxs[:3])
|
#!/usr/bin/env python
import numpy as np
import numba
import zstandard as zstd # pip install zstandard
# ================================================================ Funcs
def nbits_cost(diffs, signed=True):
    """
    Number of bits needed to encode each value in `diffs`. In signed mode,
    the count includes a sign bit (values are costed via their zigzag-style
    magnitude); in unsigned mode it's just the position of the highest set
    bit. Zero always costs 0 bits.

    >>> [nbits_cost(i) for i in [0, 1, 2, 3, 4, 5, 7, 8, 9]]
    [0, 2, 3, 3, 4, 4, 4, 5, 5]
    >>> [nbits_cost(i) for i in [-1, -2, -3, -4, -5, -7, -8, -9]]
    [1, 2, 3, 3, 4, 4, 4, 5]
    >>> nbits_cost([])
    array([], dtype=int32)
    >>> nbits_cost([0, 2, 1, 0])
    array([0, 3, 2, 0], dtype=int32)
    >>> nbits_cost([0, 2, 1, 3, 4, 0], signed=False)
    array([0, 2, 1, 2, 3, 0], dtype=int32)
    """
    if diffs is None:
        return None
    diffs = np.asarray(diffs, dtype=np.int32)
    if diffs.size == 0:
        return np.array([], dtype=np.int32)

    if not signed:
        assert np.all(diffs >= 0)
        positive = diffs > 0
        costs = np.zeros(diffs.shape, dtype=np.int32)
        costs[positive] = np.floor(np.log2(diffs[positive])) + 1
        costs[~positive] = 0
        return costs

    orig_shape = diffs.shape
    flat = diffs.ravel()
    # map x -> |x| (+1 when x >= 0) so the log2 below also counts a sign bit
    equiv_magnitudes = np.abs(flat) + (flat >= 0).astype(np.int32)
    costs = np.ceil(np.log2(equiv_magnitudes)) + 1
    costs = np.asarray(costs, dtype=np.int32)  # indexing below needs an array
    costs[flat == 0] = 0
    assert np.all(costs >= 0)
    # unpack back to a scalar when the input was scalar
    return costs.reshape(orig_shape) if costs.size > 1 else costs[0]
@numba.njit(fastmath=True)
def zigzag_encode(x):
    """
    Map signed ints to nonnegative codes, 0,1,-1,2,-2,... -> 0,1,2,3,4,...,
    via (|x| << 1) - (1 if x > 0 else 0), so small magnitudes get small codes.

    >>> [zigzag_encode(i) for i in [0,1,-1,2,-2,3,-3]]
    [0, 1, 2, 3, 4, 5, 6]
    >>> zigzag_encode([0,1,-1,2,-2,3,-3])
    array([0, 1, 2, 3, 4, 5, 6], dtype=int32)
    """
    x = np.asarray(x, dtype=np.int32)
    return (np.abs(x) << 1) - (x > 0).astype(np.int32)
@numba.njit(fastmath=True)
def zigzag_decode(x):
    """Inverse of zigzag_encode: (x >> 1) ^ -(x & 1) recovers the signed int."""
    return np.bitwise_xor(x >> 1, -np.bitwise_and(x, 1))
def quantize(X, nbits=16, minval=None, maxval=None):
    """Linearly quantize X onto a signed integer grid with `nbits` levels.

    Values are shifted by minval, scaled, clipped to [0, 2^nbits - 1], then
    recentered around zero so the result fits the signed output dtype.

    NOTE(review): the scale is computed from maxval alone rather than
    (maxval - minval), so for nonzero minval the top of the range saturates
    at unsigned_max before clipping -- confirm this is intended.
    """
    if minval is None:
        minval = np.min(X)
    if maxval is None:
        maxval = np.max(X)

    unsigned_max = (1 << nbits) - 1
    dtype_min = 1 << (nbits - 1)
    scale = float(unsigned_max) / maxval

    X = np.maximum(0, X - minval)
    X = np.minimum(unsigned_max, X * scale)
    X -= dtype_min  # center at 0
    dtype = {16: np.int16, 12: np.int16, 8: np.int8}[nbits]
    return X.astype(dtype)
# ================================================================
def zstd_compress(buff, comp=None):
    """Compress `buff` with zstandard; str input is utf-8 encoded first.

    A preconstructed ZstdCompressor may be passed to amortize setup cost.
    """
    if comp is None:
        comp = zstd.ZstdCompressor()
    if isinstance(buff, str):
        buff = bytes(buff, encoding='utf8')
    return comp.compress(buff)
def zstd_decompress(buff, decomp=None):
    """Decompress zstandard-compressed bytes `buff`.

    Bug fix: previously passed the decompressor object itself to
    decompress() instead of the buffer, which could never work.
    """
    if decomp is None:
        decomp = zstd.ZstdDecompressor()
    return decomp.decompress(buff)
# ============================================================== sprintz
# except without the predictive coding part because we do that manually;
# we also omit the run-length encoding because the author says that's a
# huge pain to code and won't change the results much for our fast-changing
# time series; also we don't do the grouping thing since it only
# affects the decoding speed (it could affect the ratio slightly if the
# number of variables were really low and not a multiple of 8, but neither
# is the case for us)
# def bitpack_vec(x, nbits_per_element):
# n = len(x)
# total_nbits = n * nbits_per_element
# bitvec = np.zeros(total_nbits, dtype=np.bool)
# for i, val in enumerate(x):
# start_idx = i * nbits_per_element
# for b in range(nbits_per_element):
# bit = (val >> b) & 1
# bitvec[start_idx + b] = bit
# return np.packbits(bitvec)
# def bitunpack(X, nbits_per_element):
# was_1d = X.ndim == 1
# X = np.atleast_2d(X)
# N, D = X.shape
# ret = np.unpackbits(X, axis=1)
# if was_1d:
# ret = ret.squeeze()
# return ret
# @numba.njit(fastmath=True)
def bitpack(X, nbits_per_element):
    """Pack the low `nbits_per_element` bits of each element of X into bytes.

    X must be uint8 or uint16 (asserted below); returns a uint8 array of
    packed bits, squeezed for 1-D input. Bit order within each element is
    little-endian.
    NOTE(review): the uint8 branch packs with axis=1 while the uint16 branch
    calls np.packbits without an axis (flattening first) -- callers in this
    file only consume .nbytes, where the layout difference doesn't matter;
    confirm before relying on the packed layout itself.
    """
    was_1d = X.ndim == 1
    X = np.atleast_2d(X)
    N, D = X.shape
    # orig_elemsz = X.dtype.itemsize
    orig_elemsz_bits = 8 * X.dtype.itemsize
    assert X.dtype in (np.uint8, np.uint16)
    assert X.dtype in (np.uint8, np.uint16)  # NOTE(review): duplicate assert
    if nbits_per_element == orig_elemsz_bits:
        # nothing to shave off; pass through unchanged
        ret = X
    elif X.dtype == np.uint8:
        # print("N, D, nbits: ", N, D, nbits_per_element)
        # shape = X.shape
        X = X.ravel()
        # unpacked = np.unpackbits(X, count=nbits_per_element, bitorder='little', axis=-1)
        unpacked = np.unpackbits(X, bitorder='little', axis=-1)
        # print("unpacked initial shape: ", unpacked.shape)
        # keep only the low nbits_per_element bits of each original byte
        unpacked = unpacked.reshape(N * D, 8)[:, :nbits_per_element]
        # print("unpacked new shape: ", unpacked.shape)
        ret = np.packbits(unpacked.reshape(N, -1), axis=1)
        # ret = ret.reshape(N, -1)
        # print("ret.shape: ", ret.shape)
    else:
        # uint16: reinterpret each element as 2 little-endian bytes first
        # X_low = (X & 0xff)[:, :, np.newaxis]
        # X_high = ((X & 0xff00) >> 8)[:, :, np.newaxis]
        # X_combined = np.concatenate([X_low, X_high], axis=-1)
        # X = X[:, :, np.newaxis]
        # X = np.concatenate([X, X], axis=-1)
        # X[:, :, 0] = X[:, :, 0] & 0xff
        # X[:, :, 1] = (X[:, :, 1] & 0xff00) >> 8
        # X = X.reshape(N, 2 * D).astype(np.uint8)
        X = np.ascontiguousarray(X).view(np.uint8).reshape(N, 2 * D)
        # print("X shape: ", X.shape)
        unpacked = np.unpackbits(X, axis=1, bitorder='little')
        unpacked = unpacked.reshape(N, orig_elemsz_bits, D)
        # unpacked = unpacked[:, ::-1, :] # low bits in low idxs
        unpacked = np.ascontiguousarray(unpacked[:, :nbits_per_element])
        ret = np.packbits(unpacked.reshape(N, -1))
        # nbits_per_row = D * nbits_per_element
        # bitmat = np.zeros((N, nbits_per_row), dtype=np.uint8)
        # for j in range(D):
        #     col = X[:, j]
        #     start_idx = j * nbits_per_element
        #     for b in range(nbits_per_element):
        #         bit = (col >> b) & 1
        #         bitmat[:, start_idx + b] = bit
        # ret = np.packbits(bitmat, axis=1)
    if was_1d:
        ret = ret.squeeze()
    return ret
@numba.njit(fastmath=True)
def _sprintz_header_sz(headers, header_elem_nbits):
    """Size in bytes of the sprintz headers, with runs of all-zero header
    rows collapsed to a single byte each (run-length coded).

    Non-zero rows each cost ceil(D * header_elem_nbits / 8) bytes.
    """
    _, D = headers.shape
    header_row_sz = int(np.ceil(D * header_elem_nbits / 8))
    rows_total_nbits = headers.sum(axis=1)
    # zero_rows = rows_total_nbits == 0
    # header_sz = np.sum(nzero_rows)  # one byte for run length
    # pair_sums = zero_rows +

    header_sz = 0
    prev_was_zero = False
    for row in rows_total_nbits:
        is_zero = row == 0
        if is_zero:
            if prev_was_zero:
                continue  # continuation of a zero run; already paid for
            else:
                header_sz += 1  # start of run
        else:
            header_sz += header_row_sz
        prev_was_zero = is_zero
    return header_sz
# def sprintz_packed_size(X, nbits=None, just_return_sz=False, postproc='zstd'):
def sprintz_packed_size(X, nbits=None, just_return_sz=True, postproc=None):
    """Size in bytes of X under (simplified) Sprintz compression.

    Rows are processed in windows of 8; each window stores, per column, the
    max bit-cost of its values (the header) plus that many bits per value
    (the payload). With just_return_sz=True only the size is computed; with
    postproc='zstd' the actual packed streams are zstd-compressed first.
    """
    if nbits is None:
        nbits = {1: 8, 2: 16}.get(X.dtype.itemsize, 16)
    unsigned_dtype = {8: np.uint8, 16: np.uint16}[nbits]

    window_len = 8
    # Pad X with zero rows up to a multiple of window_len. Bug fix: the
    # original appended N % window_len rows, which doesn't make N divisible
    # by 8 (e.g. N=10 -> 12) and breaks the reshape below.
    remainder = X.shape[0] % window_len
    if remainder != 0:
        pad_rows = np.zeros((window_len - remainder, X.shape[1]),
                            dtype=X.dtype)
        X = np.vstack([X, pad_rows])
    N, D = X.shape

    if X.dtype.itemsize > 2:  # basically just catching floats
        X = quantize(X, nbits=nbits)
    if np.min(X) < 0:
        # zigzag so the bit-cost below only has to handle unsigned values
        X = zigzag_encode(X).astype(unsigned_dtype)

    header_elem_nbits = {8: 3, 16: 4}[nbits]
    X_nbits = nbits_cost(X, signed=False)
    X_nbits = np.asfarray(X_nbits).reshape(N // window_len, window_len, -1)
    block_nbits = X_nbits.max(axis=1).astype(np.uint8)
    # header can't express nbits-1, so round those up to the full width
    block_nbits[block_nbits == (nbits - 1)] = nbits  # 7->8, 15->16
    headers = block_nbits

    if just_return_sz:
        payload_sz = int(block_nbits.sum() * window_len / 8)
        header_sz = _sprintz_header_sz(headers, header_elem_nbits)
        return header_sz + payload_sz

    # actually bit-pack every window column at its header-specified width
    nwindows = N // window_len
    payloads = []
    for i in range(nwindows):
        start_idx = i * window_len
        window = X[start_idx:start_idx + window_len]
        for j in range(D):
            payloads.append(bitpack(window[:, j], headers[i, j]))

    headers = bitpack(headers, header_elem_nbits)
    payloads = np.hstack(payloads)
    if postproc is None:
        return headers.nbytes + payloads.nbytes
    elif postproc == 'zstd':
        return len(zstd_compress(headers)) + len(zstd_compress(payloads))
if __name__ == '__main__':
    # run the embedded doctests (nbits_cost, zigzag_encode)
    import doctest
    doctest.testmod()
|
#!/usr/bin/env python
from __future__ import print_function
import os
import numpy as np
import pandas as pd
from io import StringIO
from . import amm_methods as methods
from joblib import Memory
_memory = Memory('.', verbose=1)
pd.options.mode.chained_assignment = None # suppress stupid warning
RESULTS_DIR = os.path.join('results', 'amm')
TIMING_RESULTS_DIR = os.path.join(RESULTS_DIR, 'timing')
# we log these, but don't need them for the plots
AMM_DROP_COLS = ['__pyience_timestamp__', 'y_mean', 'y_std', 'bias',
'raw_mse', 'r', 'alpha', 'ncentroids']
def _read_csv_with_garbage(path, **kwargs):
with open(path, 'r') as f:
# print("\n".join(f.readlines()))
keep_lines = [line.strip() for line in f.readlines() if
(',' in line and not line.startswith('-'))]
contents = '\n'.join(keep_lines)
# print("contents\n", contents)
return pd.read_csv(StringIO(contents), **kwargs)
def rename_values_in_col(df, col, name_map, drop_others=True):
    """Map the values of df[col] through name_map, matching keys
    case-insensitively and ignoring surrounding whitespace.

    Unmapped values become '' and, when drop_others is True, their rows are
    removed. Returns a new DataFrame; the input is not modified.
    """
    canonical_map = {k.strip().lower(): v for k, v in name_map.items()}
    mapped = [canonical_map.get(name.strip().lower(), "") for name in df[col]]
    recognized = set(name_map.values())
    keep_mask = np.array([val in recognized for val in mapped])

    df = df.copy()
    df[col] = mapped
    if drop_others:
        df = df.loc[keep_mask]
    return df
def melt_observation_cols(df, cols, var_name=None, value_name='time'):
    """Like pd.melt, but assumes only 1 observation var instead of 1 id var.

    All columns not in `cols` are treated as id_vars. Bug fix: `value_name`
    was previously accepted but ignored (the value column was always named
    'time'); it is now honored, with 'time' kept as the default so existing
    callers see identical output.
    """
    id_vars = [col for col in df.columns if col not in set(cols)]
    return pd.melt(df, id_vars=id_vars, value_vars=cols,
                   var_name=var_name, value_name=value_name)
def melt_times(df, ntimes=5):
    """Melt the first `ntimes` of the t0..t4 timing columns into tidy
    (timing_trial, time) rows."""
    trial_cols = 't0 t1 t2 t3 t4'.split()[:ntimes]
    return melt_observation_cols(
        df, trial_cols, var_name='timing_trial', value_name='time')
def drop_cols_inplace(df, cols):
    """Drop each column in `cols` from df in place, ignoring missing ones.

    Returns df for convenience. Bug fix: previously iterated the module-level
    AMM_DROP_COLS list instead of the `cols` argument, so the parameter was
    silently ignored.
    """
    for col in cols:
        try:
            df.drop([col], axis=1, inplace=True)
        except KeyError:
            pass  # column absent; nothing to drop
    return df
def frac_above_thresh(df, xvar, yvar, methodvar, unitvar, ythresh):
    """
    (method, xvar) -> [0, 1]

    For each (methodvar, xvar) group, the fraction of rows whose yvar exceeds
    ythresh. Assumes a tidy dataframe where method, xvar, yvar, and unit are
    each a col. `unitvar` is accepted for interface stability but not used
    in the grouping. Returns a dataframe with columns
    [methodvar, xvar, 'frac_above_thresh'].
    """
    df = df.copy()
    # np.float was removed in numpy >= 1.24; builtin float is equivalent here
    df['frac_above_thresh'] = (df[yvar] > ythresh).astype(float)
    independent_vars = [methodvar, xvar]
    grouped = df.groupby(independent_vars)['frac_above_thresh'].mean()
    # turn the multi-index levels back into regular columns, broadcasting the
    # index values onto their rows
    return grouped.reset_index(level=independent_vars)
def encode_timings():
    """Load the encoding-speed benchmark CSV as a dataframe, keeping only
    the named columns (underscored columns are filler tokens)."""
    path = os.path.join(TIMING_RESULTS_DIR, 'encode-timing.csv')
    raw_cols = 'algo __ N D C B ___ t0 _0 t1 _1 t2 _2 t3 _3 t4 _4'.split()
    keep_cols = 'algo N D C B t0 t1 t2 t3 t4'.split()
    df = _read_csv_with_garbage(path, names=raw_cols, header=None)
    return df[keep_cols]
def lut_timings():
    """Load the LUT-construction benchmark CSV, keeping the named columns."""
    path = os.path.join(TIMING_RESULTS_DIR, 'lut-timing.csv')
    raw_cols = ('algo __ N D C B lutconst ___ '
                't0 _0 t1 _1 t2 _2 t3 _3 t4 _4').split()
    keep_cols = 'algo N D C B lutconst t0 t1 t2 t3 t4'.split()
    df = _read_csv_with_garbage(path, names=raw_cols, header=None)
    return df[keep_cols]
def scan_timings():
    """Load the scan benchmark CSV (first row skipped), keeping the named
    columns."""
    path = os.path.join(TIMING_RESULTS_DIR, 'scan-timing.csv')
    raw_cols = 'algo __ N C B M ___ t0 _0 t1 _1 t2 _2 t3 _3 t4 _4'.split()
    keep_cols = 'algo N C B M t0 t1 t2 t3 t4'.split()
    df = _read_csv_with_garbage(path, names=raw_cols, header=None,
                                skiprows=1)
    return df[keep_cols]
def mithral_amm_timings():
    """Load the mithral AMM benchmark CSV, keeping the named columns."""
    path = os.path.join(TIMING_RESULTS_DIR, 'amm-mithral-timing.csv')
    raw_cols = ('dset dtype algo __ N D M C lutconst ___ '
                't0 _0 t1 _1 t2 _2 t3 _3 t4 _4').split()
    keep_cols = 'dset dtype algo N D M C lutconst t0 t1 t2 t3 t4'.split()
    df = _read_csv_with_garbage(path, names=raw_cols, header=None)
    return df[keep_cols]
def bolt_amm_timings():
    """Load Bolt AMM timings; keep only the fixed-B ('noenc') runs."""
    path = os.path.join(TIMING_RESULTS_DIR, 'amm-bolt-timing.csv')
    raw_cols = ('dset dtype algo __ N D M C ___ '
                't0 _0 t1 _1 t2 _2 t3 _3 t4 _4').split()
    keep_cols = 'dset dtype algo N D M C t0 t1 t2 t3 t4'.split()
    df = _read_csv_with_garbage(path, names=raw_cols, header=None)
    df = df[keep_cols]
    # runs whose algo name ends in 'noenc' treat B as already encoded
    df['fixedB'] = df['algo'].str.strip().str.endswith('noenc')
    df.drop('algo', axis=1, inplace=True)
    return df.loc[df['fixedB']]
def dense_amm_timings():
    """Load dense (brute-force and dense-sketch) AMM timing results.

    For each unique (N, D, M, d) problem size, keeps only the fastest run
    (by mean of the 5 trial times), then maps the raw algo names onto the
    canonical 'Brute Force' / 'Dense Sketch' names.
    """
    TIMINGS_PATH = os.path.join(TIMING_RESULTS_DIR, 'amm-dense-timing.csv')
    ORIG_HEADERS = ('dset algo __ N D M d ___ '
                    't0 _0 t1 _1 t2 _2 t3 _3 t4 _4').split()
    USE_HEADERS = 'dset algo N D M d t0 t1 t2 t3 t4'.split()
    df = _read_csv_with_garbage(TIMINGS_PATH, names=ORIG_HEADERS, header=None)
    df = df[USE_HEADERS]
    df['algo'] = df['algo'].str.strip()
    # mean time across the 5 trials; used to pick the fastest duplicate
    t_sums = (df['t0'] + df['t1'] + df['t2'] + df['t3'] + df['t4']).values / 5
    # bug fix: np.int was removed in NumPy 1.24; builtin int is equivalent
    sizes = np.empty((len(df), 4), dtype=int)
    sizes[:, 0] = df['N']
    sizes[:, 1] = df['D']
    sizes[:, 2] = df['M']
    sizes[:, 3] = df['d']
    as_tuples = [tuple(row) for row in sizes]
    uniq_tuples = sorted(list(set(as_tuples)))
    keep_idxs = []
    for tup in uniq_tuples:
        row = np.array(tup)
        # positions whose (N, D, M, d) matches this unique size
        idxs = np.where((sizes == row).sum(axis=1) == sizes.shape[1])[0]
        best_idx = idxs[np.argmin(t_sums[idxs])]
        keep_idxs.append(best_idx)
    df = df.iloc[keep_idxs]
    rename_dict = {}
    rename_dict['blas matmul'] = 'Brute Force'
    rename_dict['our matmul'] = 'Brute Force'
    rename_dict['blas sketch matmul'] = 'Dense Sketch'
    rename_dict['our sketch matmul'] = 'Dense Sketch'
    rename_dict['blas sketch fixedw matmul'] = 'Dense Sketch'
    rename_dict['our sketch fixedw matmul'] = 'Dense Sketch'
    df = rename_values_in_col(df, 'algo', rename_dict, drop_others=False)
    return df
def osnap_amm_timings():
    """Load OSNAP timing results; the constant algo column is dropped."""
    path = os.path.join(TIMING_RESULTS_DIR, 'amm-osnap-timing.csv')
    raw_cols = ('dset algo __ N D M d s ___ '
                't0 _0 t1 _1 t2 _2 t3 _3 t4 _4').split()
    keep_cols = 'dset algo N D M d s t0 t1 t2 t3 t4'.split()
    frame = _read_csv_with_garbage(path, names=raw_cols, header=None)
    frame = frame[keep_cols]
    frame.drop('algo', axis=1, inplace=True)
    return frame
def sparse_amm_timings():
    """Load sparse-sketch timing results; the constant algo column is dropped."""
    path = os.path.join(TIMING_RESULTS_DIR, 'amm-sparse-timing.csv')
    raw_cols = ('dset algo __ N D M d frac ___ '
                't0 _0 t1 _1 t2 _2 t3 _3 t4 _4').split()
    keep_cols = 'dset algo N D M d frac t0 t1 t2 t3 t4'.split()
    frame = _read_csv_with_garbage(path, names=raw_cols, header=None)
    frame = frame[keep_cols]
    frame.drop('algo', axis=1, inplace=True)
    return frame
def scalar_quantized_amm_timings():
    """Return hard-coded FBGEMM int8 matmul timings as a DataFrame.

    The times (total us / 1000) come from the benchmark output recorded in
    the string below; they are not read from a results file.
    """
    recorded = [
        ('Cifar10', 10000, 512, 10, 2.013),
        ('Cifar100', 10000, 512, 100, 6.472),
        ('Ucr128', 1000, 320, 128, .4808),
        ('Caltech3x3', 49284, 27, 2, .894),
        ('Caltech5x5', 48400, 75, 2, 1.409),
    ]
    cols = ('dset', 'N', 'D', 'M', 'time')
    return pd.DataFrame.from_records(
        [dict(zip(cols, vals)) for vals in recorded])


# output from ./GEMMsBenchmark after defining FBGEMM_MEASURE_TIME_BREAKDOWN
# in include/fbgemm/Fbgemm.h; recorded here since not in a results file
'''
M, N, K, Type, Packing (us), Kernel (us), Postproc (us), Total (us), GOPs
10000, 10, 512, FBGEMM_i8_acc32, 438.069, 1465.04, 43.5959, 2013.31, 50.6
10000, 10, 512, FBGEMM_i8_acc16, 512.8, 1338.1, 69.9, 2115.1, 48.3
10000, 100, 512, FBGEMM_i8_acc32, 473.7, 9203.9, 85.9, 9923.9, 103.1
10000, 100, 512, FBGEMM_i8_acc16, 569.8, 5558.7, 108.5, 6472.2, 158.1
1000, 128, 320, FBGEMM_i8_acc32, 39.5, 724.6, 5.8, 795.2, 101.8
1000, 128, 320, FBGEMM_i8_acc16, 43.5, 404.1, 3.1, 480.8, 168.4
49284, 2, 27, FBGEMM_i8_acc32, 298.5, 226.2, 139.6, 894.0, 5.9
49284, 2, 27, FBGEMM_i8_acc16, 333.6, 650.1, 162.5, 1608.7, 3.3
48400, 2, 75, FBGEMM_i8_acc32, 482.0, 546.0, 141.5, 1409.3, 10.2
48400, 2, 75, FBGEMM_i8_acc16, 438.3, 1228.7, 159.2, 2278.4, 6.4
'''
# def _extract_cols_into_list_of_tuples(df, cols):
def _extract_cols_into_list_of_tuples(df, cols):
# return [tuple(row) for row in df[cols].iterrows()]
ar = np.vstack([df[col] for col in cols]).T
# print("ar: \n", ar)
ar = np.atleast_2d(ar).astype(np.int)
# return [tuple(row) for row in ar]
return [sum([hash(-12435 * i + 1) ^ hash(1234567 * val)
for i, val in enumerate(row)]) for row in ar]
# return [int(hash(tuple(row))) for row in ar]
def _join_on_cols(df_left, left_cols, df_right, right_cols, verbose=0):
    """Left-join two frames on a synthetic key hashed from the given cols."""
    lhs = df_left.copy()
    rhs = df_right.copy()
    lhs['__index__'] = _extract_cols_into_list_of_tuples(lhs, left_cols)
    rhs['__index__'] = _extract_cols_into_list_of_tuples(rhs, right_cols)
    joined = lhs.merge(rhs, on='__index__', how='left', suffixes=('', '_rhs'))
    joined.drop(['__index__'], axis=1, inplace=True)
    return joined
def _join_with_mithral_times(df, timing_dtype='f32'):
    """Attach measured mithral timings to the mithral accuracy rows.

    Builds extra rows for the dense-lut and offline-lut variants so each
    accuracy row lines up with exactly one timing row, then joins on
    (N, D, M, ncodebooks, lut_work_const) vs (N, D, M, C, lutconst).
    """
    time_df = mithral_amm_timings()
    if timing_dtype is not None:
        time_df = time_df.loc[time_df['dtype'].str.strip() == timing_dtype]
    df = df.loc[df['method'].str.lower().str.startswith('mithral')]
    # bug fix: np.int was removed in NumPy 1.24; builtin int is equivalent
    df['ncodebooks'] = df['ncodebooks'].astype(int)
    # we also report times for subroutines within mithral; can't let the
    # join use any of these; rename_values_in_col drops them and gives
    # more intuitive debug output
    name_mithral_dense = 'mithralDense'  # only one we use; others arbitrary
    rename_dict = {'amm mithral sparselut': 'mithralSparse',
                   'amm mithral denselut': name_mithral_dense,
                   'amm mithral nolut': 'mithralOffline'}
    time_df = rename_values_in_col(time_df, 'algo', rename_dict)
    # give MithralPQ a valid lut const so the join will work (pq is
    # equivalent to a lut constant of 1)
    is_mithral_pq = df['method'].str.lower().str.startswith('mithralpq')
    df.loc[is_mithral_pq, 'lut_work_const'] = 1
    df_mpq = df.loc[is_mithral_pq].copy()
    # there shouldn't be rows that violate this, but there are (probably
    # from early runs that haven't been overwritten yet)
    df = df.loc[df['lut_work_const'].values <= df['ncodebooks'].values]
    # now add in extra rows for mithral with no lut computation (which is
    # assumed to use dense luts because no reason not to) vs mithral
    # with dense lut computation as part of the timing
    is_any_mithral = df['method'].str.lower().str.startswith('mithral')
    is_mithral = is_any_mithral & (~is_mithral_pq)
    is_dense = df['lut_work_const'] == -1
    df_mithral_dense = df.loc[is_mithral & is_dense].copy()
    dummy_lutconst = -2
    df_mithral_dense['lut_work_const'] = dummy_lutconst
    time_df['lutconst'].loc[
        time_df['algo'] == name_mithral_dense] = dummy_lutconst
    # add in version of mithralpq with offline lut computation
    df_mpq = df.loc[df['method'].str.lower().str.startswith('mithralpq')]
    df_mpq['lut_work_const'] = -1
    df = pd.concat([df, df_mithral_dense, df_mpq], axis=0)
    cols_df = 'N D M ncodebooks lut_work_const'.split()
    cols_time_df = 'N D M C lutconst'.split()
    df.sort_values(['method'] + cols_df, axis=0, inplace=True)
    time_df.sort_values(cols_time_df, axis=0, inplace=True)
    ret = _join_on_cols(df, cols_df, time_df, cols_time_df)
    # one of these fails if the join failed; check if you have redundant
    # rows in either df or missing rows in the time df
    assert np.all(ret['C'] == ret['ncodebooks'])
    assert np.all(ret['lutconst'] == ret['lut_work_const'])
    return ret
def _join_with_bolt_times(df):
    """Attach Bolt timing measurements to the Bolt accuracy rows."""
    bolt_rows = df.loc[df['method'].str.lower().str.startswith('bolt')]
    return _join_on_cols(bolt_rows, 'N D M ncodebooks'.split(),
                         bolt_amm_timings(), 'N D M C'.split())
def _join_with_osnap_times(df):
    """Attach OSNAP timing measurements to OSNAP/HashJL accuracy rows."""
    time_df = osnap_amm_timings()
    df = df.loc[df['method'].isin(
        [methods.METHOD_OSNAP, methods.METHOD_HASHJL])]
    # HashJL corresponds to s=1; real OSNAP runs used s=4
    df['s'] = 1
    # bug fix: write via frame-level .loc instead of chained
    # df['s'].loc[...], which may modify a temporary and be lost
    df.loc[df['method'] == methods.METHOD_OSNAP, 's'] = 4
    # bug fix: np.int was removed in NumPy 1.24; builtin int is equivalent
    df['d'] = df['d'].astype(int)
    # note that d < s isn't present in time_df, which makes sense
    return _join_on_cols(df, 'N D M d s'.split(),
                         time_df, 'N D M d s'.split())
def _join_with_brute_force_times(df):
    """Attach brute-force (exact matmul) timings to the 'Exact' rows."""
    exact_rows = df.loc[df['method'].str.lower().str.startswith('exact')]
    all_times = dense_amm_timings()
    brute_times = all_times.loc[
        all_times['algo'].str.lower().str.startswith('brute')]
    return _join_on_cols(exact_rows, 'N D M'.split(),
                         brute_times, 'N D M'.split())
def _join_with_dense_sketch_times(df):
    """Attach dense-sketch timings to rows for the dense sketch methods."""
    sketch_rows = df.loc[df['method'].isin(methods.DENSE_SKETCH_METHODS)]
    all_times = dense_amm_timings()
    sketch_times = all_times.loc[
        all_times['algo'].str.lower().str.startswith('dense sketch')]
    return _join_on_cols(sketch_rows, 'N D M d'.split(),
                         sketch_times, 'N D M d'.split())
def _join_with_scalar_quantize_times(df):
    """Attach hard-coded int8 quantize timings to ScalarQuantize rows."""
    quant_rows = df.loc[df['method'] == methods.METHOD_SCALAR_QUANTIZE]
    return _join_on_cols(quant_rows, 'N D M'.split(),
                         scalar_quantized_amm_timings(), 'N D M'.split())
def extract_pareto_frontier_idxs(xvals, yvals):
    """Return indices (into the original arrays) of the pareto frontier.

    Assumes lower x is better and higher y is better. A point is kept iff
    every point with smaller x has a strictly smaller y.
    """
    assert len(xvals) == len(yvals)
    sort_idxs = np.argsort(xvals)
    xvals = xvals[sort_idxs]
    yvals = yvals[sort_idxs]
    first_idx = sort_idxs[0]
    # bug fix: the threshold starts at the y of the smallest-x point, which
    # after re-sorting is yvals[0]; the old code indexed the *sorted* array
    # with the *original* position (yvals[first_idx]), yielding a wrong
    # initial threshold and letting dominated points through
    curr_thresh = yvals[0]
    keep_idxs = [first_idx]
    for i, y in enumerate(yvals[1:]):
        if y > curr_thresh:
            curr_thresh = y
            keep_idxs.append(sort_idxs[i + 1])
    return keep_idxs
def _join_with_sparse_sketch_times(df, sparse_pareto=True):
    """Attach sparse-sketch timings to the sparse methods' accuracy rows.

    For each accuracy row, picks the timing row with the largest measured
    sparsity fraction that is <= the row's target sparsity (falling back
    to the sparsest measurement). Optionally prunes each task to its
    time/quality pareto frontier.
    """
    time_df = sparse_amm_timings()
    df = df.loc[df['method'].str.lower().str.startswith('sparse')]
    # bug fix: np.int was removed in NumPy 1.24; builtin int is equivalent
    df['d'] = df['d'].astype(int)
    new_rows = []
    for _, row in df.iterrows():
        subdf = time_df
        for key in 'N D M d'.split():
            subdf = subdf.loc[subdf[key] == row[key]]
        if len(subdf) < 1:
            continue  # no timing measurements for this problem size
        sparsities = subdf['frac']
        target_frac = row['sparsity']
        small_enough_sparsities_idxs = np.where(
            sparsities.values <= target_frac)[0]
        if len(small_enough_sparsities_idxs):
            take_idx = small_enough_sparsities_idxs[-1]
        else:  # no nonzeros, or at least uselessly few of them
            take_idx = np.argmin(sparsities.values)
        time_keys = 't0 t1 t2 t3 t4'.split()
        times_row = subdf.iloc[take_idx]
        row = dict(row)
        for key in time_keys:
            row[key] = float(times_row[key])
        row['time'] = sum([float(times_row[key])
                           for key in time_keys]) / len(time_keys)
        new_rows.append(row)
    df = pd.DataFrame.from_records(new_rows)
    if not sparse_pareto:
        return df
    # keep only each task's pareto-optimal points (lower time better,
    # higher accuracy / lower nmse better)
    subdfs = []
    for tid in df['task_id'].unique():
        subdf = df.loc[df['task_id'] == tid]
        xvals = subdf['time'].values
        if 'acc_amm' in df.columns:
            yvals = subdf['acc_amm'].values
        else:
            yvals = 1. - subdf['normalized_mse'].values
        idxs = extract_pareto_frontier_idxs(xvals, yvals)
        subdfs.append(subdf.iloc[idxs])
    df = pd.concat(subdfs, axis=0)
    return df
def _clean_method_names_amm(df):
key = 'method' if 'method' in df else 'algo'
if 'lutconst' in df:
df.loc[df['lutconst'] == -2, key] = 'MADDNESS Dense'
is_lutconst_neg1 = df['lutconst'] == -1
is_mithral_pq = df['method'] == 'MithralPQ'
df.loc[is_lutconst_neg1 & is_mithral_pq, key] = 'MADDNESS-PQ'
df.loc[is_lutconst_neg1 & ~is_mithral_pq, key] = 'MADDNESS'
df.loc[df['lutconst'] == 1, key] = 'MADDNESS, L = 1'
df.loc[df['lutconst'] == 2, key] = 'MADDNESS, L = 2'
df.loc[df['lutconst'] == 4, key] = 'MADDNESS, L = 4'
# df.loc[df['lutconst'] == -2, key] = 'Mithral Dense'
# is_lutconst_neg1 = df['lutconst'] == -1
# is_mithral_pq = df['method'] == 'MithralPQ'
# df.loc[is_lutconst_neg1 & is_mithral_pq, key] = 'MithralPQ'
# df.loc[is_lutconst_neg1 & ~is_mithral_pq, key] = 'Mithral'
# df.loc[df['lutconst'] == 1, key] = 'Mithral, L = 1'
# df.loc[df['lutconst'] == 2, key] = 'Mithral, L = 2'
# df.loc[df['lutconst'] == 4, key] = 'Mithral, L = 4'
# mask = df['lutconst'] == 1
# is_mithral_pq = df[key].str.lower().str.startswith('mithralpq')
# mask &= ~is_mithral_pq
# df[key][mask] = 'Mithral, L = ∞'
# df[key].loc[df[key] == 'Exact'] = 'Brute Force'
df[key].loc[df[key] == 'Exact'] = 'Exact'
return df
def _clean_metrics_amm(df):
    """Derive plotting metrics (Throughput, Speedup, ops, 1 - NMSE, ...).

    Expects trial columns t0..t4 and/or a 'time' column, plus 'muls',
    'nlookups', 'normalized_mse', and one 'Exact' baseline row per task.
    """
    df = df.rename({'acc_amm': 'Accuracy'}, axis=1)
    # rows joined straight from timing files have t0..t4 but no 'time';
    # fill those with the mean of the five trials
    mask = df['time'].isna()
    times = (df['t0'] + df['t1'] + df['t2'] + df['t3'] + df['t4']) / 5.
    df.loc[mask, 'time'] = times.values[mask]
    # assumes time is in ms, so this is output elements/sec -- TODO confirm
    df['Throughput'] = 1e3 * df['N'] * df['M'] / df['time']
    # create ops column that sums number of multiplies + lookups
    df['muls'] = df['muls'].fillna(0)
    mask = ~df['nlookups'].isna()
    df['ops'] = df['muls']
    # NOTE(review): chained indexing; relies on the in-place += reaching
    # the underlying column rather than a temporary
    df['ops'].loc[mask] += df['nlookups'].loc[mask]
    df_exact = df.loc[df['method'] == 'Exact']
    if 'task_id' in df.columns:
        nuniq_tasks = len(df['task_id'].unique())
    else:
        nuniq_tasks = 1  # cifar{10,100}
    # one exact (brute force) row per task is required as the baseline
    assert df_exact.shape[0] == nuniq_tasks
    # NOTE(review): .loc[0, ...] selects by *label* 0, so this assumes the
    # exact row kept original index 0 -- and uses a single baseline even
    # when there are multiple tasks; verify against callers
    base_time = float(df_exact.loc[0, 'time'])
    df['NormalizedTime'] = df['time'] / base_time
    df['Speedup'] = 1. / df['NormalizedTime']
    df['1 - NMSE'] = 1. - df['normalized_mse']
    if 'Accuracy' in df.columns:
        # epsilon guards against division by zero when acc_orig is 0;
        # relative accuracy can exceed 1 if errors happen to compensate
        df['Relative Accuracy'] = df['Accuracy'] / (df['acc_orig'] + 1e-20)
    return df
def _join_with_times(df, timing_dtype='f32', sparse_pareto=True):
    """Join accuracy results with timing results for every method family
    and stack the per-family frames into one DataFrame."""
    df_quant = _join_with_scalar_quantize_times(df)
    df_bolt = _join_with_bolt_times(df)
    df_mithral = _join_with_mithral_times(df, timing_dtype=timing_dtype)
    # if this fails, the join with the mithral times probably failed
    assert np.all(df_mithral['lutconst'] == df_mithral['lut_work_const'])
    df_osnap = _join_with_osnap_times(df)
    df_brute = _join_with_brute_force_times(df)
    df_sketch = _join_with_dense_sketch_times(df)
    df_sparse = _join_with_sparse_sketch_times(df, sparse_pareto=sparse_pareto)
    frames = [df_quant, df_mithral, df_bolt, df_osnap, df_brute,
              df_sketch, df_sparse]
    return pd.concat(frames, axis=0, join='outer', sort=False)
def _clean_amm_results_df(df, timing_dtype='f32', sparse_pareto=True):
    """Join with timings, derive metrics, and clean up method names."""
    joined = _join_with_times(
        df, timing_dtype=timing_dtype, sparse_pareto=sparse_pareto)
    joined = _clean_metrics_amm(joined)
    # rows without a matching timing measurement are useless for plots
    joined = joined.loc[~joined['time'].isna()]
    return _clean_method_names_amm(joined)
@_memory.cache
def _read_amm_csv(fname, **kwargs):
    """Read an AMM results csv from RESULTS_DIR and drop unused columns."""
    frame = pd.read_csv(os.path.join(RESULTS_DIR, fname), **kwargs)
    drop_cols_inplace(frame, AMM_DROP_COLS)
    return frame
def cifar10_amm():
    """Load and clean the CIFAR-10 softmax AMM results."""
    return _clean_amm_results_df(_read_amm_csv('cifar10.csv'))
def cifar100_amm():
    """Load and clean the CIFAR-100 softmax AMM results."""
    return _clean_amm_results_df(_read_amm_csv('cifar100.csv'))
@_memory.cache
def caltech_amm(filt='sobel'):
    """filt must be one of {'sobel','dog5x5'}"""
    results = _read_amm_csv('caltech_{}.csv'.format(filt))
    # caltech timings were measured with int8 kernels
    return _clean_amm_results_df(results, timing_dtype='i8')
@_memory.cache
def ucr_amm(k=128, problem='rbf'):
    """k must be one of {64, 128, 256}"""
    results = _read_amm_csv('ucr_k={}_problem={}.csv'.format(k, problem))
    results['origN'] = results['N'].values
    # timing measurements all used a test set of 1000 rows
    results['N'] = 1000
    if problem == 'softmax':
        # use M = k so the timing vs accuracy comparison is meaningful
        results['M'] = k
    return _clean_amm_results_df(results, sparse_pareto=False)
def main():
    """Ad-hoc driver: load one cleaned results frame and inspect it."""
    # any other loader (cifar10_amm, cifar100_amm, ucr_amm, the timing
    # readers, ...) can be substituted here when debugging its output
    df = caltech_amm(filt='dog5x5')
    print(sorted(df['method'].unique()))
# run the report driver when invoked as a script
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
import abc
import numpy as np
from . import vquantizers as vq
from . import amm
# dict key under which get_speed_metrics() reports table-lookup counts
KEY_NLOOKUPS = 'nlookups'
class VQMatmul(amm.ApproxMatmul, abc.ABC):
    """Abstract base for vector-quantized approximate matmuls.

    A is encoded into codes (encode_X) and B into lookup tables
    (encode_Q); the product is then approximated via dists_enc.
    """

    def __init__(self, ncodebooks, ncentroids=None):
        # ncodebooks: number of codebooks (subspaces) used by the encoder
        # ncentroids: centroids per codebook; defaults to the subclass's
        # _get_ncentroids() value
        self.ncodebooks = ncodebooks
        self.ncentroids = (self._get_ncentroids() if ncentroids is None
                           else ncentroids)
        self.enc = self._create_encoder(ncodebooks)
        self.reset_for_new_task()

    @abc.abstractmethod
    def _create_encoder(self, ncodebooks):  # to be overriden by subclasses
        # NOTE(review): abstract yet ships a default PQEncoder body;
        # subclasses must override but may mirror this implementation
        return vq.PQEncoder(ncodebooks=ncodebooks, ncentroids=self.ncentroids,
                            **self._get_encoder_kwargs())

    # @abc.abstractmethod
    def _get_ncentroids(self):
        """Default centroid count; subclasses override (returns None here)."""
        pass

    @abc.abstractmethod
    def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
        """Return op counts (multiplies, lookups) for approximating A @ B."""
        pass

    def _get_encoder_kwargs(self):  # to be overriden by subclasses
        return {}

    def reset_for_new_task(self):
        # drop cached encodings so the next __call__ re-encodes its inputs
        self.A_enc = None
        self.luts = None

    def fit(self, A, B, Y=None):
        """Train the encoder on example matrices (Y is unused here)."""
        _, D = A.shape
        # need at least one input dimension per codebook
        if D < self.ncodebooks:
            raise amm.InvalidParametersException(
                'D < C: {} < {}'.format(D, self.ncodebooks))
        self.enc.fit(A, B.T)

    def set_A(self, A):
        """Encode and cache the left operand."""
        self.A_enc = self.enc.encode_X(A)

    def set_B(self, B):
        """Build and cache lookup tables for the right operand."""
        self.luts = self.enc.encode_Q(B.T)

    def __call__(self, A, B):
        """Approximate A @ B, reusing cached encodings when present."""
        if self.A_enc is None:
            self.set_A(A)
        if self.luts is None:
            self.set_B(B)
        return self.enc.dists_enc(self.A_enc, self.luts)

    def get_params(self):
        return {'ncodebooks': self.ncodebooks}
# ================================================================ PQ
class PQMatmul(VQMatmul):
    """Product-quantization approximate matmul (256 centroids/codebook)."""

    def _create_encoder(self, ncodebooks):  # to be overriden by subclasses
        return vq.PQEncoder(ncodebooks=ncodebooks, ncentroids=self.ncentroids,
                            **self._get_encoder_kwargs())

    def _get_ncentroids(self):
        return 256

    def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
        """Count multiplies (data + LUT encoding) and table lookups."""
        nmuls = 0
        if not fixedA:
            nmuls += A.shape[0] * A.shape[1] * self.ncentroids
        if not fixedB:
            nmuls += B.shape[0] * B.shape[1] * self.ncentroids
        nlookups = A.shape[0] * B.shape[1] * self.ncodebooks
        return {amm.KEY_NMULTIPLIES: nmuls, KEY_NLOOKUPS: nlookups}
# ================================================================ OPQ
class OPQMatmul(PQMatmul):
    """PQ with a learned rotation (OPQ) applied to the input space."""

    def _get_encoder_kwargs(self):
        return {'preproc': 'OPQ'}

    def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
        metrics = super().get_speed_metrics(A, B, fixedA=fixedA, fixedB=fixedB)
        # rotating A by the learned D x D matrix costs N * D * D multiplies
        rot_nmuls = A.shape[0] * A.shape[1] * A.shape[1]
        metrics[amm.KEY_NMULTIPLIES] += rot_nmuls
        return metrics
# ================================================================ Bolt
class BoltMatmul(PQMatmul):
    """Bolt: PQ with 16 centroids per codebook and quantized LUTs."""

    def _get_ncentroids(self):
        return 16

    def _create_encoder(self, ncodebooks):
        # TODO set quantize_lut=True after debug
        return vq.PQEncoder(ncodebooks=ncodebooks,
                            ncentroids=self.ncentroids,
                            quantize_lut=True,
                            accumulate_how='sum',
                            upcast_every=-1,
                            **self._get_encoder_kwargs())
class GEHTBoltMatmul_CovTopk(BoltMatmul):
    """Bolt + GEHT preprocessing: deterministic selection, covariance stats."""

    def _get_encoder_kwargs(self):
        return {'preproc': 'GEHT',
                'sample_how': 'deterministic',
                'stats_mat': 'cov'}
class GEHTBoltMatmul_CovSamp(BoltMatmul):
    """Bolt + GEHT preprocessing: importance sampling, covariance stats."""

    def _get_encoder_kwargs(self):
        return {'preproc': 'GEHT',
                'sample_how': 'importance',
                'stats_mat': 'cov'}
class GEHTBoltMatmul_CorrTopk(BoltMatmul):
    """Bolt + GEHT preprocessing: deterministic selection, correlation stats."""

    def _get_encoder_kwargs(self):
        return {'preproc': 'GEHT',
                'sample_how': 'deterministic',
                'stats_mat': 'corr'}
class GEHTBoltMatmul_CorrSamp(BoltMatmul):
    """Bolt + GEHT preprocessing: importance sampling, correlation stats."""

    def _get_encoder_kwargs(self):
        return {'preproc': 'GEHT',
                'sample_how': 'importance',
                'stats_mat': 'corr'}
class BoltSplits(BoltMatmul):
    """Bolt variant that encodes A via scalar splits instead of kmeans."""

    def _get_encoder_kwargs(self):
        return {'preproc': 'PQ', 'encode_algo': 'splits'}

    def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
        metrics = super().get_speed_metrics(A, B, fixedA=fixedA, fixedB=fixedB)
        # split-based encoding of A needs no multiplies; only building the
        # LUTs from B does
        nmuls = 0 if fixedB else B.shape[0] * B.shape[1] * self.ncentroids
        metrics[amm.KEY_NMULTIPLIES] = nmuls
        return metrics
class BoltMultiSplits(BoltMatmul):
    """Bolt variant using multi-split encoding."""

    def _get_encoder_kwargs(self):
        return {'encode_algo': 'multisplits'}

    def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
        metrics = super().get_speed_metrics(A, B, fixedA=fixedA, fixedB=fixedB)
        # multisplit encoding of A is multiply-free; only the LUTs cost
        nmuls = 0 if fixedB else B.shape[0] * B.shape[1] * self.ncentroids
        metrics[amm.KEY_NMULTIPLIES] = nmuls
        return metrics
class BoltPermMultiSplits(BoltMatmul):
    """Bolt variant with GEHT permutation plus multi-split encoding."""

    def _get_encoder_kwargs(self):
        return {'preproc': 'GEHT', 'encode_algo': 'multisplits'}

    def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
        metrics = super().get_speed_metrics(A, B, fixedA=fixedA, fixedB=fixedB)
        # multisplit encoding of A is multiply-free; only the LUTs cost
        nmuls = 0 if fixedB else B.shape[0] * B.shape[1] * self.ncentroids
        metrics[amm.KEY_NMULTIPLIES] = nmuls
        return metrics
class PQPerm(PQMatmul):
    """PQ with GEHT permutation preprocessing."""

    def _get_encoder_kwargs(self):
        return {'preproc': 'GEHT'}

    def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
        metrics = super().get_speed_metrics(A, B, fixedA=fixedA, fixedB=fixedB)
        # only building LUTs from B costs multiplies here
        nmuls = 0 if fixedB else B.shape[0] * B.shape[1] * self.ncentroids
        metrics[amm.KEY_NMULTIPLIES] = nmuls
        return metrics
class PQMultiSplits(PQMatmul):
    """PQ with multi-split encoding and a configurable centroid count."""

    def __init__(self, ncodebooks, ncentroids=256):
        super().__init__(ncodebooks=ncodebooks, ncentroids=ncentroids)

    def _get_encoder_kwargs(self):
        return {'encode_algo': 'multisplits'}

    def get_params(self):
        return {'ncodebooks': self.ncodebooks, 'ncentroids': self.ncentroids}

    def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
        metrics = super().get_speed_metrics(A, B, fixedA=fixedA, fixedB=fixedB)
        # multisplit encoding of A is multiply-free; only the LUTs cost
        nmuls = 0 if fixedB else B.shape[0] * B.shape[1] * self.ncentroids
        metrics[amm.KEY_NMULTIPLIES] = nmuls
        return metrics
class PQPermMultiSplits(PQMatmul):
    """PQ with GEHT permutation, multi-split encoding, configurable centroids."""

    def __init__(self, ncodebooks, ncentroids=256):
        super().__init__(ncodebooks=ncodebooks, ncentroids=ncentroids)

    def _get_encoder_kwargs(self):
        return {'preproc': 'GEHT', 'encode_algo': 'multisplits'}

    def get_params(self):
        return {'ncodebooks': self.ncodebooks, 'ncentroids': self.ncentroids}

    def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
        metrics = super().get_speed_metrics(A, B, fixedA=fixedA, fixedB=fixedB)
        # multisplit encoding of A is multiply-free; only the LUTs cost
        nmuls = 0 if fixedB else B.shape[0] * B.shape[1] * self.ncentroids
        metrics[amm.KEY_NMULTIPLIES] = nmuls
        return metrics
# ================================================================ Mithral
class OldMithralPQ(PQMatmul):
    """Earlier MithralPQ: 16-centroid PQ with multisplit encoding."""

    def __init__(self, ncodebooks):
        super().__init__(ncodebooks=ncodebooks, ncentroids=16)

    def _create_encoder(self, ncodebooks):
        return vq.PQEncoder(ncodebooks=ncodebooks,
                            ncentroids=self.ncentroids,
                            encode_algo='multisplits',
                            quantize_lut=True,
                            upcast_every=16,  # fine as long as using mean
                            accumulate_how='mean')

    def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
        """Count multiplies for encoding/LUT construction plus lookups."""
        N, D = A.shape
        D, M = B.shape
        nmuls = 0
        if not fixedA:
            nmuls += N * D  # offset + scale before quantize
        if not fixedB:
            nmuls += M * self.ncentroids * D  # LUT construction
        nlookups = N * M * self.ncodebooks
        return {amm.KEY_NMULTIPLIES: nmuls, KEY_NLOOKUPS: nlookups}
class MithralMatmul(VQMatmul):
    """Mithral approximate matmul; lut_work_const bounds LUT-building work."""

    def __init__(self, ncodebooks, lut_work_const=-1):
        self.lut_work_const = lut_work_const
        if (lut_work_const is not None) and (lut_work_const > 0) and (
                lut_work_const > ncodebooks):
            raise amm.InvalidParametersException(
                "lut_work_const > ncodebooks: {} > {}".format(
                    lut_work_const, ncodebooks))
        super().__init__(ncodebooks=ncodebooks, ncentroids=16)

    def _create_encoder(self, ncodebooks):
        return vq.MithralEncoder(
            ncodebooks=ncodebooks, lut_work_const=self.lut_work_const)

    def get_params(self):
        return {'ncodebooks': self.ncodebooks,
                'lut_work_const': self.lut_work_const}

    def get_speed_metrics(self, A, B, fixedA=False, fixedB=False):
        """Count multiplies for encoding/LUT construction plus lookups."""
        N, D = A.shape
        D, M = B.shape
        nmuls = 0
        if not fixedA:
            nmuls += N * D  # offset + scale before quantize
        # each codebook's LUT column costs ncentroids * D multiplies
        nmuls_per_output = self.ncentroids * D * self.ncodebooks
        if not fixedB:
            nmuls += nmuls_per_output * M
        nlookups = N * M * self.ncodebooks
        return {amm.KEY_NMULTIPLIES: nmuls, KEY_NLOOKUPS: nlookups}

    def set_B(self, B):
        # mithral LUTs come with a quantization offset and scale
        self.luts, self.offset, self.scale = self.enc.encode_Q(B.T)

    def __call__(self, A, B):
        if self.A_enc is None:
            self.set_A(A)
        if self.luts is None:
            self.set_B(B)
        return self.enc.dists_enc(self.A_enc, self.luts,
                                  offset=self.offset, scale=self.scale)
class MithralPQ(MithralMatmul):
    """Mithral constrained to lut_work_const=1 (PQ-equivalent LUT work)."""

    def __init__(self, ncodebooks):
        super().__init__(ncodebooks=ncodebooks, lut_work_const=1)
|
#!/bin/env/python
"""utility functions for running experiments"""
from __future__ import print_function, absolute_import
import datetime
import os
import itertools
import warnings
import numpy as np
import pandas as pd
import sys
import sklearn
# from sklearn.model_selection import StratifiedKFold
from python.files import ensure_dir_exists
# use joblib's disk-backed memoization when available; otherwise fall back
# to a no-op decorator so @cache-decorated functions still work
try:
    from joblib import Memory
    memory = Memory('.', verbose=0)
    cache = memory.cache
except Exception:
    def cache(f):
        # fallback: return the function unchanged (no caching)
        return f
# ================================================================ Constants
# NOTE(review): appear to be sentinel dict keys used elsewhere in this
# module when expanding/updating parameter dicts -- usage not visible here
KEY_FINISHED_UPDATING = '__pyn_finished_updating__'
KEY_NEW_KEYS = '__pyn_newkeys__'
# ================================================================ Types
class UsageError(Exception):
    """Raised when this module's API is used incorrectly."""
    pass
class Options(object):
    """Wrapper for a collection to signify that each element is one possible
    parameter value"""

    def __init__(self, *args):
        if args is None or len(args) < 1:
            raise ValueError("No options given!")
        # bug fix: test whether the *single argument* is a collection;
        # the old check hasattr(args, '__len__') was always True because
        # args is a tuple, so Options(5) stored the bare 5 and len() broke
        if len(args) == 1 and hasattr(args[0], '__len__'):
            self.values = args[0]  # given a list
        else:
            self.values = args  # given individual objects

    def __len__(self):
        return len(self.values)

    # deliberately don't act like a collection so that we fail fast if
    # code doesn't know that this is supposed to represent Options, rather
    # than a collection of values. This is mostly to ensure that Options
    # are always expanded out when generating sets of parameters.
    def __getitem__(self, idx):
        self._raise()

    def __setitem__(self, idx, item):
        self._raise()

    def _raise(self):
        raise TypeError("Options object is not a collection; use options.values"
                        " to access the collection of individual options")
# ================================================================ Funcs
# ------------------------------------------------ misc utils
def make_immutable(x):
    """
    >>> make_immutable(5) == 5
    True
    >>> make_immutable('a') == 'a'
    True
    >>> make_immutable((1, 2)) == (1, 2)
    True
    >>> make_immutable([1, 2]) == [1, 2]
    False
    >>> make_immutable([1, 2]) == (1, 2)
    True
    """
    # hashable objects can already be used as dict keys, so pass them through
    try:
        hash(x)
        return x
    except TypeError:
        pass
    # unhashable: if it's a sized collection, freeze it into a tuple;
    # anything else gets stringified as a last resort
    try:
        len(x)
    except TypeError:
        return repr(x)
    return tuple(x)
def as_key(x):
    """Canonicalize `x` into a hashable form usable as a dict key."""
    return make_immutable(x)
# ------------------------------------------------ IO / saving results
def now_as_string():
    """Current local time as a filename-safe 'YYYY-MM-DDTHH_MM_SS' string."""
    fmt = "%Y-%m-%dT%H_%M_%S"
    return datetime.datetime.now().strftime(fmt)
def save_data_frame(df, save_dir='results', name="", timestamp='copy',
                    cols_in_filename=None, col_kv_fmt="_{}={}",
                    store_index=False, append=True, dedup_cols=None,
                    add_timestamp_col=True, sort_by=None, **sink):
    """Save `df` as a CSV under `save_dir`.

    timestamp='copy' writes two copies: a timestamped one under
    save_dir/pyience-backups and a plain one in save_dir. `cols_in_filename`
    appends "key=value" pairs (for constant-valued columns) to the filename.
    With append=True, rows are appended to an existing CSV; if `dedup_cols`
    is given, the merged rows are deduplicated on those columns, keeping the
    newest. Unknown kwargs are swallowed by **sink.
    """
    if timestamp == 'copy':  # save one copy with and without timestamp
        kwargs = dict(name=name, col_kv_fmt=col_kv_fmt,
                      cols_in_filename=cols_in_filename, dedup_cols=dedup_cols,
                      store_index=store_index, append=append, sort_by=sort_by,
                      add_timestamp_col=add_timestamp_col)
        backups_dir = os.path.join(save_dir, 'pyience-backups')
        save_data_frame(df, timestamp=True, save_dir=backups_dir, **kwargs)
        save_data_frame(df, timestamp=False, save_dir=save_dir, **kwargs)
        return

    # construct filename
    name = name if name else ""
    if cols_in_filename:
        cols = list(df.columns.values)
        for key in cols_in_filename:
            if key not in cols:
                warnings.warn("Column '{}' not found in Dataframe. "
                              "Excluding it from filename".format(key))
                continue
            # get value associated with this key; ignored if col not constant
            vals = df[key]
            nuniq = len(vals.unique())
            if nuniq != 1:
                warnings.warn("Column '{}' has more than one value in "
                              "Dataframe. Excluding it from filename".format(key))
                continue
            # iloc: positional access; the index need not contain label 0
            val = vals.iloc[0]
            fmt = col_kv_fmt
            if name == "" and not col_kv_fmt.startswith("{"):
                fmt = col_kv_fmt[1:]
            name += fmt.format(key, val)

    ensure_dir_exists(save_dir)
    raw_timestamp_str = now_as_string()
    timestamp_str = ("_" + raw_timestamp_str) if timestamp else ""
    fileName = "{}{}.csv".format(name, timestamp_str).strip("_")
    save_path = os.path.join(save_dir, fileName)

    if add_timestamp_col:
        df['__pyience_timestamp__'] = [raw_timestamp_str] * df.shape[0]

    if append and os.path.exists(save_path):
        existing_df = pd.read_csv(save_path)
        df = pd.concat([existing_df, df], axis=0, sort=False, ignore_index=True)
        # guard: the original unconditionally did set(dedup_cols), which
        # raises TypeError for the default dedup_cols=None
        if dedup_cols is not None:
            subset = sorted(set(dedup_cols) & set(df.columns))
            df.drop_duplicates(subset=subset, keep='last', inplace=True)

    df = df.sort_index(axis=1)
    if sort_by is not None:
        df.sort_values(sort_by, inplace=True)
        # also move these cols to the front for legibility, since they're
        # probably something you care about
        other_cols = [col for col in df.columns.values if col not in sort_by]
        df = df[sort_by + other_cols]
    df.to_csv(save_path, index=store_index)
def save_dicts_as_data_frame(d, **kwargs):
    """Convert a dict (or iterable of dicts) to a DataFrame and save it
    via save_data_frame(); extra kwargs are forwarded."""
    if isinstance(d, dict):
        df = pd.DataFrame.from_records(d, index=[0])
    else:
        try:
            df = pd.DataFrame.from_records(d)
        except Exception:
            frames = [pd.DataFrame.from_records(dd, index=[0]) for dd in d]
            df = pd.concat(frames, axis=0, ignore_index=True)
    save_data_frame(df, **kwargs)
def generate_save_path(params, savedir, subdir_keys=None):
    """Build a save directory path, optionally appending nested
    'key__value' subdirectories for each key in `subdir_keys`."""
    if subdir_keys is None:
        return savedir
    names = ["{}__{}".format(str(k), str(params[k]))
             for k in list(subdir_keys)]
    return os.path.join(savedir, os.path.join(*names))
# ------------------------------------------------ parameter generation
def expand_params(params):
    """dict of kv pairs -> list of dicts with one option selected for
    each key whose value is an instance of Options."""
    # keys whose values are Options get cross-producted; sort for
    # reproducibility
    opt_keys = sorted(k for k in params if isinstance(params[k], Options))
    opt_vals = [params[k].values for k in opt_keys]

    # everything else is fixed and copied into every combination
    fixed = {k: v for k, v in params.items() if not isinstance(v, Options)}

    combos = []
    for choice in itertools.product(*opt_vals):
        combo = dict(zip(opt_keys, choice))
        combo.update(fixed)
        combos.append(combo)
    return combos
def update_func_from_dict(d):
    """Build an update function: when a key of `d` shows up in `new_keys`,
    merge that key's sub-dict into `params` without overwriting existing
    entries. Returns True iff anything was added."""
    def f(params, new_keys, d=d):
        changed = False
        for trigger, defaults in d.items():
            if trigger not in new_keys:
                continue
            for k, v in defaults.items():
                if k not in params:
                    changed = True
                params.setdefault(k, v)
        return changed
    return f
def generate_params_combinations(params_list, update_func={}):
    """Uses update_func to update each dict based on its values (e.g., to
    add SVM kernel params if it contains "classifier": "SVM")

    Repeatedly expands Options values and applies `update_func` (a callable
    taking (params, new_keys), or a dict convertible via
    update_func_from_dict) until no dict reports further updates.
    """
    if not isinstance(params_list, (list, set, frozenset, tuple)):
        params_list = [params_list]
    for params in params_list:
        params[KEY_NEW_KEYS] = set(params.keys())
    if isinstance(update_func, dict):
        update_func = update_func_from_dict(update_func)
    while True:
        new_list = []
        for params in params_list:
            new_list += expand_params(params)
        if not update_func:
            params_list = new_list
            break

        allFinished = True
        for params in new_list:
            # if these params aren't fully updated, update them; keep
            # track of which keys are added along the way so we can
            # pass this set to the update function next time
            if not params.get(KEY_FINISHED_UPDATING, False):
                # read which keys were added last time and which keys
                # are currently present
                new_keys = params[KEY_NEW_KEYS]
                existing_keys = frozenset(params.keys())
                params.pop(KEY_NEW_KEYS)
                unfinished = update_func(params, new_keys)
                # compute and store which keys were added this time
                new_keys = frozenset(params.keys()) - existing_keys
                params[KEY_NEW_KEYS] = new_keys
                if unfinished:
                    allFinished = False
                params[KEY_FINISHED_UPDATING] = not unfinished

        params_list = new_list
        if allFinished:
            break

    for p in params_list:
        # default=None: when update_func is falsy we break out before
        # KEY_FINISHED_UPDATING is ever set, so a bare pop() would KeyError
        p.pop(KEY_FINISHED_UPDATING, None)
        p.pop(KEY_NEW_KEYS, None)

    return params_list
# ------------------------------------------------ cross validation
def stratified_split_train_test(X, Y, train_frac=.8, random_state=123):
    """Returns X_train, X_test, y_train, y_test"""
    # stratify=Y keeps the label proportions equal across the two splits
    splitter = sklearn.model_selection.train_test_split
    return splitter(X, Y, train_size=train_frac, stratify=Y,
                    random_state=random_state)
def split_train_test(X, Y, train_frac=.8, random_state=123):
    """Returns X_train, X_test, y_train, y_test"""
    # seed the global RNG with the caller's random_state instead of the
    # previously hard-coded 123, so the parameter actually takes effect
    np.random.seed(random_state)
    return sklearn.model_selection.train_test_split(
        X, Y, train_size=train_frac, random_state=random_state)
# ------------------------------------------------ Command line
def _split_kv_arg(arg):
key, val = arg.split('=')
return key.strip('-'), val
def _is_kv_arg(arg):
return len(arg.split('=')) == 2
def _clean_flag_arg(arg):
return arg.strip('-')
def _is_flag_arg(arg):
return arg[0] == '-'
def _parse_func_call_cmd(s):
"""
>>> _parse_func_call_cmd("range(5)")
array([0, 1, 2, 3, 4])
>>> _parse_func_call_cmd("range(2, -3, -2)")
array([ 2, 0, -2])
>>> _parse_func_call_cmd("linspace( -2,-20, 3)")
array([ -2., -11., -20.])
>>> _parse_func_call_cmd("logspace(-1, 3, 3)")
array([1.e-01, 1.e+01, 1.e+03])
"""
fnames = 'randn randint range linspace logspace'.split()
nargs = [(1,), (1, 2, 3), (1, 2, 3), (2, 3), (2, 3)]
funcs = [np.random.randn, np.random.randint, np.arange,
np.linspace, np.logspace]
if not isinstance(s, str):
return None
for fname, argc, func in zip(fnames, nargs, funcs):
if not s.startswith(fname + '('):
continue
if not s.endswith(')'):
raise ValueError("You tried to call function '{}', but forgot the"
" closing parenthesis".format(fname))
in_parens = s[len(fname) + 1:-1]
maybe_args = in_parens.split(',')
if len(maybe_args) not in argc:
raise ValueError(
"You tried to call function '{}', but passed an invalid number"
" of arguments: {}. Needed to be one of: {}" .format(
fname, len(maybe_args), argc))
try:
nums = [int(arg) for arg in maybe_args]
return func(*nums)
except: # noqa
raise ValueError("Command '{}' has arguments that can't be coerced"
" into integers".format(s))
return None
def _to_appropriate_type(s):
"""convert string `s` to an int, bool, float, or integer range as
appropriate. Returns the original string if it does not appear to be
any of these types."""
if s == 'True' or s == 'T':
return True
elif s == 'False' or s == 'F':
return False
try:
return int(s)
except: # noqa
pass
try:
return float(s)
except: # noqa
pass
if len(s.split('..')) in (2, 3): # range
vals_as_strs = s.split('..')
try:
return np.arange(*[int(val) for val in vals_as_strs])
except: # noqa
pass
as_func_result = _parse_func_call_cmd(s)
if as_func_result is not None:
return as_func_result
return s
def parse_cmd_line(argv=None, positional_keys=None, allow_flags=True,
                   infer_types=True):
    """Parses the list of command line arguments into a dictionary of
    key-value pairs

    Parameters
    ----------
    argv : iterable of strings
        This should be sys.argv if supplied. Otherwise, sys.argv is read.

    positional_keys : iterable of strings, optional
        If k strings are specified, the up to the first k arguments will
        be treated as values to be paired with these keys. Arguments of the
        form foo=bar will never be treated this way.

    allow_flags : bool, optional
        If True, allows arguments of the form --myArg. When passed, this will
        add {'myArg': True} to the returned dictionary. This is equivalent to
        myArg=True

    infer_types : bool, optional
        If True, attempts to infer the type of each value in the returned
        dictionary. E.g., instead of returning {'height': '72'}, it will
        return {'height': 72}.

    Returns
    -------
    argKV : dict: string -> inferred type or string
        A dictionary whose keys and values are specified by the command line
        arguments

    >>> # ------------------------ positional args only
    >>> argv = ['pyience.py', 'fooVal', 'barVal']
    >>> d = parse_cmd_line(argv, positional_keys=['fooKey', 'barKey'])
    >>> len(d)
    2
    >>> d['fooKey']
    'fooVal'
    >>> d['barKey']
    'barVal'
    >>> # ------------------------ key-value args
    >>> argv = ['pyience.py', 'fooVal', 'bletchKey=bletchVal', 'blahKey=blahVal']
    >>> d = parse_cmd_line(argv, positional_keys=['fooKey', 'barKey'])
    >>> len(d)
    3
    >>> d['fooKey']
    'fooVal'
    >>> d.get('barKey', 'notHere')
    'notHere'
    >>> d['bletchKey']
    'bletchVal'
    >>> d['blahKey']
    'blahVal'
    >>> # ------------------------ flags
    >>> argv = ['pyience.py', 'fooVal', 'bletchKey=bletchVal', '--myFlag']
    >>> d = parse_cmd_line(argv, positional_keys=['fooKey', 'barKey'])
    >>> d['myFlag']
    True
    >>> # ------------------------ type inference
    >>> argv = ['pyience.py', '--myFlag', 'foo=1.1', 'bar=7', 'baz=T', 'r=1..5']
    >>> d = parse_cmd_line(argv, positional_keys=['fooKey', 'barKey'])
    >>> len(d)
    5
    >>> d['myFlag']
    True
    >>> d['foo']
    1.1
    >>> d['bar']
    7
    >>> d['baz']
    True
    >>> d['r']
    array([1, 2, 3, 4])
    >>> # ------------------------ no positional args
    >>> d = parse_cmd_line(argv)
    >>> len(d)
    5
    >>> d['myFlag']
    True
    >>> d['foo']
    1.1
    """
    # NOTE(review): allow_flags is currently never consulted; flag args are
    # always accepted. Kept for interface compatibility.
    if argv is None:
        argv = sys.argv

    args = argv[1:]  # ignore file name

    num_positional_keys = 0
    if positional_keys is not None and len(positional_keys):
        num_positional_keys = len(positional_keys)

    # validate input; keyword arguments must come after positional
    # arguments, and there must be no more positional arguments than
    # we have keys to associate with them
    kwargs_started = False
    flags_started = False
    for i, arg in enumerate(args):
        if _is_kv_arg(arg):  # it's a keyword argument
            kwargs_started = True
        elif _is_flag_arg(arg):
            flags_started = True
        else:  # it's not a keyword argument or flag argument
            if kwargs_started:
                # fix: original concatenation was missing the space here
                raise UsageError("key=value arguments must come after "
                                 "positional arguments!")
            if flags_started:
                raise UsageError("flag (e.g., --myFlag) arguments must come "
                                 "after positional arguments!")
            arg_num = i + 1
            if arg_num > num_positional_keys:
                raise UsageError("only expecting "
                                 "{} positional arguments!".format(
                                     num_positional_keys))

    argKV = {}
    for i, arg in enumerate(args):
        if _is_kv_arg(arg):
            key, val = _split_kv_arg(arg)
            argKV[key] = val
        elif _is_flag_arg(arg):
            key = _clean_flag_arg(arg)
            argKV[key] = 'True'  # string so that all vals are strings
        elif i < num_positional_keys:
            key = positional_keys[i]
            argKV[key] = arg
        else:
            raise UsageError("couldn't parse argument '{}'".format(arg))

    if infer_types:
        for k, v in argKV.items():
            argKV[k] = _to_appropriate_type(v)

    return argKV
# ------------------------------------------------ other stuff
def apply_funcs(funcs, data):
    """Apply `funcs` to `data` left-to-right (composition via chain())."""
    return chain(funcs)(data)
def chain(funcs):
    """Compose `funcs` left-to-right into a single callable.

    The first function receives the original *args/**kwargs; each later
    function receives the previous result. None or an empty sequence
    yields the identity function.
    """
    if funcs is None or not len(funcs):
        return lambda x: x

    def f(*args, **kwargs):
        res = funcs[0](*args, **kwargs)
        for func in funcs[1:]:
            res = func(res)
        return res  # fix: original never returned the accumulated result

    return f
def subdict(d, keys):
    """Returns a new dictionary composed of the (key, value) pairs
    from d for the keys specified in keys"""
    return dict((k, d[k]) for k in keys)
# ------------------------------------------------ sklearn interop
def set_attrs(obj, attrs_dict, require_attrs_exist=False):
    """Bulk-update obj's instance attributes from attrs_dict.

    If require_attrs_exist, raise ValueError when any key in attrs_dict
    is not already an instance attribute of obj.
    """
    if require_attrs_exist:
        # fix: the original indexed zip(...)[1], which is a TypeError in
        # Python 3 (zip objects are not subscriptable); the missing_keys
        # list already carries all the information needed
        missing_keys = [k for k in attrs_dict if k not in obj.__dict__]
        if missing_keys:
            raise ValueError("Object is missing keys {}".format(
                missing_keys))
    obj.__dict__.update(attrs_dict)
# ------------------------------------------------ cross validation
def _uniq_element_positions(iterable):
    """
    Returns a mapping of unique elements to positions at which they
    occur within the iterable
    """
    positions_by_key = {}
    for idx, obj in enumerate(iterable):
        positions_by_key.setdefault(as_key(obj), []).append(idx)
    return positions_by_key
# def _group_start_idxs_eq_split(nelements, ngroups):
# group_sz = nelements // ngroups
# return np.arange(0, nelements, group_sz, dtype=np.int)
def _group_start_end_idxs(nelements, ngroups=-1, fractions=None):
hasFracs = fractions is not None and len(fractions)
if ngroups <= 1 and not hasFracs:
return np.array([0], dtype=np.int), np.array([nelements], dtype=np.int)
if not hasFracs:
fracs = np.ones(ngroups)
fractions = np.asarray(fracs)
fractions /= np.max(fracs)
cum_fracs = np.cumsum(fractions)
end_idxs = (nelements * cum_fracs).astype(np.int)
start_idxs = np.r_[0, end_idxs[:-1]]
return start_idxs, end_idxs
def _split_into_groups(iterable, ngroups=-1, fractions=None, shuffle=True):
    """Split `iterable` into consecutive groups (optionally shuffling a
    copy first so the original order is untouched)."""
    if shuffle:
        iterable = np.copy(iterable)
        # fix: np.shuffle does not exist; shuffling lives in np.random
        np.random.shuffle(iterable)
    start_idxs, end_idxs = _group_start_end_idxs(len(iterable), ngroups,
                                                 fractions)
    return [iterable[start:end] for start, end in zip(start_idxs, end_idxs)]
def cv_partition_idxs(labels, n_folds=5, fractions=None, stratified=True):
    """Partition the indices of `labels` into n_folds groups; when
    stratified, each label's occurrences are spread across the folds."""
    if fractions is not None and len(fractions):
        if len(fractions) != n_folds:
            raise ValueError("Specified fractions of total for {} groups, but "
                             "n_folds is {}; ignoring n_fold".format(
                                 len(fractions), n_folds))
    if not stratified:
        possible_idxs = np.arange(len(labels))
        return _split_into_groups(possible_idxs, n_folds, fractions)

    all_idxs = [[] for _ in range(n_folds)]
    lbl2idxs = _uniq_element_positions(labels)
    for lbl, idxs in lbl2idxs.items():
        if len(idxs) < n_folds:
            warnings.warn(("Label {} appears only {} times, which is "
                           "less than the number of folds requested, {}"
                           .format(lbl, len(idxs), n_folds)), Warning)
        # distribute this label's positions across the folds
        for fold, group in enumerate(_split_into_groups(idxs, n_folds,
                                                        fractions)):
            all_idxs[fold] += group
    return all_idxs
def cv_split(X, y, n_folds=5, fractions=None, stratified=True):
    """Split parallel arrays X and y into n_folds groups of matching rows."""
    if len(X) != len(y):
        raise IndexError("len(X) {} != len(y) {}".format(len(X), len(y)))
    fold_idxs = cv_partition_idxs(y, n_folds=n_folds, fractions=fractions,
                                  stratified=stratified)
    return ([X[idxs] for idxs in fold_idxs],
            [y[idxs] for idxs in fold_idxs])
# ================================================================ Main
def update(params, new_keys):
    """Example update_func: fills in SVM hyperparameter Options whenever
    the relevant trigger keys were just added."""
    if 'classifier' in new_keys:
        params['kernel'] = Options('rbf', 'linear')
    if 'kernel' in new_keys:
        # setdefault so values passed in at the top level are not clobbered
        params.setdefault('C', Options(10. ** np.arange(-5, 3)))
        if params['kernel'] == 'rbf':
            params.setdefault('gamma', Options([1, 10]))
    return bool(new_keys)
def main():
    """Demo: expand an SVM hyperparameter grid and print it as a table."""
    c_values = 10. ** np.arange(-3, 3)
    base_params = {"classifier": "SVM", 'C': Options(c_values)}
    combos = generate_params_combinations(base_params, update)

    # attach a fake outcome variable to each combination
    for combo in combos:
        combo['runtime'] = np.random.rand() * 10

    # print out a dataframe so we can see that this worked
    import pandas as pd
    print(pd.DataFrame.from_records(combos))  # woot; it worked
if __name__ == '__main__':
    # run this module's doctests before the demo
    from doctest import testmod
    testmod()
    main()
|
#!/usr/bin/env python
import functools
import matplotlib.pyplot as plt
import numpy as np
import os
from scipy.stats.stats import pearsonr
import seaborn as sb
import time
from collections import namedtuple
# import datasets
import files
import product_quantize as pq
import pyience as pyn
from datasets import neighbors as dsets
from utils import kmeans, top_k_idxs
from joblib import Memory
_memory = Memory('.', verbose=0)  # disk-backed memoization for slow steps
np.set_printoptions(precision=3)  # compact float printing in logs
SAVE_DIR = '../results'  # default results directory
# ================================================================ Distances
def dists_elemwise_sq(x, q):
    """Elementwise squared differences between x and (broadcast) q."""
    return np.square(x - q)
def dists_elemwise_l1(x, q):
    """Elementwise absolute differences between x and (broadcast) q."""
    return np.absolute(x - q)
def dists_elemwise_dot(x, q):
    """Elementwise products of x and (broadcast) q, for dot-product sims."""
    return np.multiply(x, q)
# ================================================================ Clustering
def load_dataset_object(which_dataset, **load_dataset_kwargs):
    """Load a dataset via dsets.load_dataset and wrap it in a Dataset
    namedtuple with a human-readable name."""
    X_train, Q, X_test, true_nn = dsets.load_dataset(
        which_dataset, **load_dataset_kwargs)
    assert Q.shape[-1] == X_train.shape[-1]
    if isinstance(which_dataset, str):
        name = files.basename(which_dataset, noext=True)
    else:
        name = which_dataset.__name__  # assumes which_dataset is a class
    return Dataset(Q, X_train, X_test, true_nn, name)
# everything the eval code needs: queries, train/test points, ground-truth
# neighbor indices (may be None), and a display name
Dataset = namedtuple('Dataset', [
    'Q', 'X_train', 'X_test', 'true_nn', 'name'])
# ================================================================ Quantizers
# ------------------------------------------------ Product Quantization
def _learn_centroids(X, ncentroids, nsubvects, subvect_len):
    """Run kmeans independently on each disjoint column block of X.

    Returns an array of shape (ncentroids, nsubvects, subvect_len).
    """
    out = np.empty((ncentroids, nsubvects, subvect_len))
    for sub_idx in range(nsubvects):
        cols = slice(sub_idx * subvect_len, (sub_idx + 1) * subvect_len)
        centroids, _ = kmeans(X[:, cols], ncentroids)
        out[:, sub_idx, :] = centroids
    return out
def _parse_codebook_params(D, code_bits=-1, bits_per_subvect=-1, nsubvects=-1):
    # Given vector dimensionality D and any two of (code_bits,
    # bits_per_subvect, nsubvects) — negative means "unspecified" — derive
    # the missing one and return (nsubvects, ncentroids, subvect_len).
    if nsubvects < 0:
        nsubvects = code_bits // bits_per_subvect
    elif code_bits < 1:
        code_bits = bits_per_subvect * nsubvects
    elif bits_per_subvect < 1:
        bits_per_subvect = code_bits // nsubvects
    ncentroids = int(2 ** bits_per_subvect)  # centroids per codebook
    subvect_len = D // nsubvects  # dimensions handled by each codebook
    assert code_bits % bits_per_subvect == 0
    if D % subvect_len:
        print("D, nsubvects, subvect_len = ", D, nsubvects, subvect_len)
        assert D % subvect_len == 0  # TODO rm this constraint
    return nsubvects, ncentroids, subvect_len
def _fit_pq_lut(q, centroids, elemwise_dist_func):
_, nsubvects, subvect_len = centroids.shape
assert len(q) == nsubvects * subvect_len
q = q.reshape((1, nsubvects, subvect_len))
q_dists_ = elemwise_dist_func(centroids, q)
q_dists_ = np.sum(q_dists_, axis=-1)
return np.asfortranarray(q_dists_) # ncentroids, nsubvects, col-major
class PQEncoder(object):
    """Vanilla Product Quantization encoder: splits vectors into disjoint
    subvectors and quantizes each against its own kmeans codebook."""

    def __init__(self, dataset, code_bits=-1, bits_per_subvect=-1,
                 nsubvects=-1, elemwise_dist_func=dists_elemwise_sq):
        X = dataset.X_train
        self.elemwise_dist_func = elemwise_dist_func
        tmp = _parse_codebook_params(X.shape[1], code_bits=code_bits,
                                     bits_per_subvect=bits_per_subvect,
                                     nsubvects=nsubvects)
        self.nsubvects, self.ncentroids, self.subvect_len = tmp
        self.code_bits = int(np.log2(self.ncentroids))

        # for fast lookups via indexing into flattened array
        # (np.int was removed in numpy >= 1.24; plain int is equivalent)
        self.offsets = np.arange(self.nsubvects, dtype=int) * self.ncentroids
        self.centroids = _learn_centroids(X, self.ncentroids, self.nsubvects,
                                          self.subvect_len)

    def name(self):
        """Human-readable identifier used in logs / result tables."""
        return "PQ_{}x{}b".format(self.nsubvects, self.code_bits)

    def params(self):
        """Dict of experiment parameters for results logging."""
        return {'_algo': 'PQ', '_ncodebooks': self.nsubvects,
                '_code_bits': self.code_bits}

    def encode_X(self, X, **sink):
        idxs = pq._encode_X_pq(X, codebooks=self.centroids)
        return idxs + self.offsets  # offsets let us index into raveled dists

    def encode_q(self, q, **sink):
        return None  # we use fit_query() instead, so fail fast

    def dists_true(self, X, q):
        """Exact (un-quantized) distances between rows of X and query q."""
        return np.sum(self.elemwise_dist_func(X, q), axis=-1)

    def fit_query(self, q, **sink):
        # precompute the query's distance to every centroid (the LUT)
        self.q_dists_ = _fit_pq_lut(q, centroids=self.centroids,
                                    elemwise_dist_func=self.elemwise_dist_func)

    def dists_enc(self, X_enc, q_unused=None):
        # this line has each element of X_enc index into the flattened
        # version of q's distances to the centroids; we had to add
        # offsets to each col of X_enc above for this to work
        centroid_dists = self.q_dists_.T.ravel()[X_enc.ravel()]
        return np.sum(centroid_dists.reshape(X_enc.shape), axis=-1)
def _learn_best_quantization(luts): # luts can be a bunch of vstacked luts
best_loss = np.inf
best_alpha = None
best_floors = None
best_scale_by = None
for alpha in [.001, .002, .005, .01, .02, .05, .1]:
alpha_pct = int(100 * alpha)
# compute quantized luts this alpha would yield
floors = np.percentile(luts, alpha_pct, axis=0)
luts_offset = np.maximum(0, luts - floors)
ceil = np.percentile(luts_offset, 100 - alpha_pct)
scale_by = 255. / ceil
luts_quantized = np.floor(luts_offset * scale_by).astype(np.int)
luts_quantized = np.minimum(255, luts_quantized)
# compute err
luts_ideal = (luts - luts_offset) * scale_by
diffs = luts_ideal - luts_quantized
loss = np.sum(diffs * diffs)
if loss <= best_loss:
best_loss = loss
best_alpha = alpha
best_floors = floors
best_scale_by = scale_by
return best_floors, best_scale_by, best_alpha
class OPQEncoder(PQEncoder):
    """PQ encoder with a learned rotation (algo='OPQ') or block-diagonal
    rotations (algo='Bolt'), plus optional 8-bit quantization of the
    query lookup tables."""

    def __init__(self, dataset, code_bits=-1, bits_per_subvect=-1,
                 nsubvects=-1, elemwise_dist_func=dists_elemwise_sq,
                 opq_iters=20, quantize_lut=False, algo='OPQ', **opq_kwargs):
        X = dataset.X_train
        self.elemwise_dist_func = elemwise_dist_func
        self.quantize_lut = quantize_lut
        self.opq_iters = opq_iters
        self.algo = algo

        tmp = _parse_codebook_params(X.shape[1], code_bits=code_bits,
                                     bits_per_subvect=bits_per_subvect,
                                     nsubvects=nsubvects)
        self.nsubvects, self.ncentroids, self.subvect_len = tmp
        self.code_bits = int(np.log2(self.ncentroids))

        # for fast lookups via indexing into flattened array
        # (np.int was removed in numpy >= 1.24; plain int is equivalent)
        self.offsets = np.arange(self.nsubvects, dtype=int) * self.ncentroids

        if self.algo == 'Bolt':
            # Note: we always pass in 0 iters in the reported experiments,
            # so it never rotates anything
            self.centroids, _, self.rotations = pq.learn_bopq(
                X, ncodebooks=nsubvects, codebook_bits=bits_per_subvect,
                niters=opq_iters, **opq_kwargs)
        elif self.algo == 'OPQ':
            self.centroids, _, self.R = pq.learn_opq(
                X, ncodebooks=nsubvects, codebook_bits=bits_per_subvect,
                niters=opq_iters, **opq_kwargs)
        else:
            raise ValueError("argument algo must be one of {OPQ, Bolt}")

        # learn appropriate offsets and shared scale factor for quantization
        self.lut_offsets = np.zeros(self.nsubvects)
        self.order_idxs = np.arange(self.nsubvects, dtype=int)

        if self.quantize_lut:  # TODO put this logic in separate function
            print("learning quantization...")
            num_rows = min(10*1000, len(X) // 2)
            _, queries = dsets.extract_random_rows(
                X[num_rows:], how_many=1000, remove_from_X=False)
            X = X[:num_rows]  # limit to first 10k rows of X

            # compute luts for all the queries
            luts = [self._fit_query(q, quantize=False) for q in queries]
            luts = np.vstack(luts)
            assert luts.shape == (self.ncentroids * len(queries), self.nsubvects)

            self.lut_offsets, self.scale_by, _ = _learn_best_quantization(luts)

    def name(self):
        """Human-readable identifier used in logs / result tables."""
        return "{}_{}x{}b_iters={}_quantize={}".format(
            self.algo, self.nsubvects, self.code_bits, self.opq_iters,
            int(self.quantize_lut))

    def params(self):
        """Dict of experiment parameters for results logging."""
        return {'_algo': self.algo, '_ncodebooks': self.nsubvects,
                '_code_bits': self.code_bits, 'opq_iters': self.opq_iters,
                '_quantize': self.quantize_lut}

    def _fit_query(self, q, quantize=False):
        # rotate the query into the learned space before building the LUT
        if self.algo == 'OPQ':
            qR = pq.opq_rotate(q, self.R).ravel()
        elif self.algo == 'Bolt':
            qR = pq.bopq_rotate(q, self.rotations).ravel()
        lut = _fit_pq_lut(qR, centroids=self.centroids,
                          elemwise_dist_func=self.elemwise_dist_func)

        if quantize:
            if False:  # roughly laplace distro, reaching all the way to 0
                ax = sb.distplot(lut.ravel(), hist=False, rug=True)
                ax.set_xlabel('Query dist to centroids (lut dist histogram)')
                ax.set_ylabel('Fraction of queries')
                plt.show()

            lut = np.maximum(0, lut - self.lut_offsets)
            # np.int was removed in numpy >= 1.24; plain int is equivalent
            lut = np.floor(lut * self.scale_by).astype(int)
            return np.minimum(lut, 255)

        return lut

    def encode_X(self, X, **sink):
        if self.algo == 'OPQ':
            X = pq.opq_rotate(X, self.R)
        elif self.algo == 'Bolt':
            X = pq.bopq_rotate(X, self.rotations)
        idxs = pq._encode_X_pq(X, codebooks=self.centroids)
        return idxs + self.offsets  # offsets let us index into raveled dists

    def fit_query(self, q, quantize=True, **sink):
        quantize = quantize and self.quantize_lut
        self.q_dists_ = self._fit_query(q, quantize=quantize)
        if quantize:
            # quantized LUT entries must fit in an unsigned byte
            assert np.min(self.q_dists_) >= 0
            assert np.max(self.q_dists_) <= 255

            if False:  # debug-only plots of the query LUT statistics
                _, axes = plt.subplots(3, figsize=(9, 11))
                sb.violinplot(data=self.q_dists_, inner="box", cut=0, ax=axes[0])
                axes[0].set_xlabel('Codebook')
                axes[0].set_ylabel('Distance to query')
                axes[0].set_ylim([0, np.max(self.q_dists_)])

                sb.heatmap(data=self.q_dists_, ax=axes[1], cbar=False, vmin=0)
                axes[1].set_xlabel('Codebook')
                axes[1].set_ylabel('Centroid')

                sb.distplot(self.q_dists_.ravel(), hist=False, rug=True, vertical=False, ax=axes[2])
                axes[2].set_xlabel('Centroid dist to query')
                axes[2].set_ylabel('Fraction of centroids')
                axes[2].set_xlim([0, np.max(self.q_dists_) + .5])

                # plot where the mean is
                mean_dist = np.mean(self.q_dists_)
                ylim = axes[2].get_ylim()
                axes[2].plot([mean_dist, mean_dist], ylim, 'r--')
                axes[2].set_ylim(ylim)

                plt.show()
# ================================================================ Main
def eval_encoder(dataset, encoder, dist_func_true=None, dist_func_enc=None,
                 eval_dists=True, verbosity=1, plot=False, smaller_better=True):
    """Evaluate `encoder` on `dataset`; returns (stats, detailed_stats).

    Computes recall@R and the fraction of points whose encoded distance
    is within the 10th nearest neighbor's; when eval_dists, also measures
    distance distortion (correlation, absolute and relative error).
    detailed_stats is empty unless eval_dists.
    """
    X = dataset.X_test
    queries = dataset.Q
    true_nn = dataset.true_nn

    if true_nn is not None:
        print("eval encoder(): got true_nn with shape: ", true_nn.shape)
    queries = queries[:1000]  # TODO rm for tables; fine for plots
    print("queries.shape", queries.shape)

    need_true_dists = eval_dists or plot or true_nn is None

    if len(queries.shape) == 1:
        queries = [queries]

    if dist_func_true is None:
        dist_func_true = encoder.dists_true
    if dist_func_enc is None:
        dist_func_enc = encoder.dists_enc

    t0 = time.time()

    # performance metrics
    RECALL_Rs = [1, 5, 10, 50, 100, 500, 1000]
    recall_counts = np.zeros(len(RECALL_Rs))
    fracs_below_max = []
    if eval_dists:
        all_corrs = []
        all_rel_errs = []
        all_errs = []
        total_dist = 0.

    if need_true_dists:
        X = X[:10000]  # limit to 10k points because otherwise it takes forever
        queries = queries[:256, :]

    print("encoding X...")
    X_enc = encoder.encode_X(X)
    print("trying queries...")
    for i, q in enumerate(queries):
        if i % 100 == 0:
            print("trying query {}...".format(i))
        q_enc = encoder.encode_q(q)
        encoder.fit_query(q)
        if need_true_dists:
            all_true_dists = dist_func_true(X, q)
        all_enc_dists = dist_func_enc(X_enc, q_enc)

        # ------------------------ begin analysis / reporting code

        # find true knn
        if need_true_dists:
            knn_idxs = top_k_idxs(all_true_dists, 10, smaller_better=smaller_better)
        else:
            knn_idxs = true_nn[i, :10]

        # compute fraction of points with enc dists as close as 10th nn
        knn_enc_dists = all_enc_dists[knn_idxs]
        if smaller_better:
            max_enc_dist = np.max(knn_enc_dists)
            num_below_max = np.sum(all_enc_dists <= max_enc_dist)
        else:
            max_enc_dist = np.min(knn_enc_dists)
            num_below_max = np.sum(all_enc_dists >= max_enc_dist)
        frac_below_max = float(num_below_max) / len(all_enc_dists)
        fracs_below_max.append(frac_below_max)

        # compute recall@R stats; loop var renamed from `i` so it no longer
        # clobbers the query index used by the plotting check below (the
        # original shadowing made `i < 3` always false after this loop)
        top_1000 = top_k_idxs(all_enc_dists, 1000, smaller_better=smaller_better)
        nn_idx = knn_idxs[0]
        for ri, r in enumerate(RECALL_Rs):
            recall_counts[ri] += nn_idx in top_1000[:r]

        # compute distortion in distances, quantified by corr and rel err
        if eval_dists:
            total_dist += np.sum(all_true_dists)
            corr, _ = pearsonr(all_enc_dists, all_true_dists)
            all_corrs.append(corr)
            rel_errs = (all_enc_dists - all_true_dists) / all_true_dists
            all_rel_errs.append(rel_errs)
            all_errs.append(all_enc_dists - all_true_dists)
            assert not np.any(np.isinf(all_enc_dists))
            assert not np.any(np.isnan(all_enc_dists))
            assert not np.any(np.isinf(all_true_dists))
            assert not np.any(np.isnan(all_true_dists))

        if plot and i < 3:  # at most 3 plots
            num_nn = min(10000, len(all_true_dists) - 1)
            xlim = [0, np.partition(all_true_dists, num_nn)[num_nn]]
            ylim = [0, np.partition(all_enc_dists, num_nn)[num_nn]]

            grid = sb.jointplot(x=all_true_dists, y=all_enc_dists,
                                xlim=xlim, ylim=ylim, joint_kws=dict(s=10))

            # hack to bully the sb JointGrid into plotting a vert line
            cutoff = all_true_dists[knn_idxs[-1]]
            grid.x = [cutoff, cutoff]
            grid.y = ylim
            grid.plot_joint(plt.plot, color='r', linestyle='--')

            # also make it plot cutoff in terms of quantized dist
            grid.x = xlim
            grid.y = [max_enc_dist, max_enc_dist]
            grid.plot_joint(plt.plot, color='k', linestyle='--')

    if plot:
        plt.show()

    t = time.time() - t0

    # log a lot of performance metrics / experimental params
    detailed_stats = []  # list of dicts
    stats = {}
    stats['X_rows'] = X.shape[0]
    stats['X_cols'] = X.shape[1]
    stats['nqueries'] = len(queries)
    stats['eval_time_secs'] = t
    stats['fracs_below_max_mean'] = np.mean(fracs_below_max)
    stats['fracs_below_max_std'] = np.std(fracs_below_max)
    stats['fracs_below_max_50th'] = np.median(fracs_below_max)
    stats['fracs_below_max_90th'] = np.percentile(fracs_below_max, q=90)
    for ri, r in enumerate(RECALL_Rs):
        key = 'recall@{}'.format(r)
        val = float(recall_counts[ri]) / len(queries)
        stats[key] = val

    if eval_dists:
        corrs = np.hstack(all_corrs)
        rel_errs = np.hstack(all_rel_errs)
        rel_errs = rel_errs[~(np.isnan(rel_errs) + np.isinf(rel_errs))]
        errs = np.hstack(all_errs)
        stats['corr_mean'] = np.mean(all_corrs)
        stats['corr_std'] = np.std(all_corrs)
        stats['mse_mean'] = np.mean(errs * errs)
        stats['mse_std'] = np.std(errs * errs)
        stats['rel_err_mean'] = np.mean(rel_errs)
        stats['rel_err_std'] = np.std(rel_errs)
        stats['rel_err_sq_mean'] = np.mean(rel_errs * rel_errs)
        stats['rel_err_sq_std'] = np.std(rel_errs * rel_errs)

        # sample some relative errs cuz all we need them for is plotting
        # confidence intervals
        np.random.shuffle(rel_errs)
        np.random.shuffle(errs)

        detailed_stats = [{'corr': all_corrs[j], 'rel_err': rel_errs[j],
                           'err': errs[j]} for j in range(len(corrs))]
        for d in detailed_stats:
            d.update(encoder_params(encoder))

    if verbosity > 0:
        print("------------------------ {}".format(name_for_encoder(encoder)))
        keys = sorted(stats.keys())
        lines = ["{}: {}".format(k, stats[k]) for k in keys if isinstance(stats[k], str)]
        lines += ["{}: {:.4g}".format(k, stats[k]) for k in keys if not isinstance(stats[k], str)]
        print("\n".join(lines))

    stats.update(encoder_params(encoder))
    return stats, detailed_stats  # detailed_stats empty unless `eval_dists`
def name_for_encoder(encoder):
    """Human-readable name for *encoder*: its ``name()`` if it defines one,
    else the string form of its type."""
    try:
        result = encoder.name()
    except AttributeError:
        result = str(type(encoder))
    return result
def encoder_params(encoder):
    """Parameter dict for *encoder*: its ``params()`` if defined, else a
    dict holding just the algorithm name."""
    try:
        result = encoder.params()
    except AttributeError:
        result = {'algo': name_for_encoder(encoder)}
    return result
# @_memory.cache
def _experiment_one_dataset(which_dataset, eval_dists=False, dotprods=False,
                            save_dir=None):
    """Benchmark Bolt, PQ, and OPQ encoders on one dataset.

    Trains each encoder at 8, 16, and 32 bytes per vector, evaluates it
    with eval_encoder(), tags the resulting stat dicts with the dataset
    name and preprocessing flags, and saves summary (and, if `eval_dists`,
    per-query detail) data frames under `save_dir`.

    Returns (dicts, detailed_dicts); detailed_dicts is empty unless
    `eval_dists`.
    """
    SAVE_DIR = save_dir if save_dir else '../results/acc/'
    elemwise_dist_func = dists_elemwise_dot if dotprods else dists_elemwise_sq
    smaller_better = not dotprods  # for dot products, bigger is better

    N, D = -1, -1
    num_queries = -1  # no effect for "real" datasets
    if isinstance(which_dataset, str):
        print("WARNING: sampling queries from data file")
        num_queries = 128  # if just loading one file, need to sample queries

    norm_len = False  # set to true for cosine similarity
    norm_mean = True
    max_ncodebooks = 64  # 32B bolt has 64 codebooks
    dataset_func = functools.partial(load_dataset_object, N=N, D=D,
                                     num_queries=num_queries,
                                     norm_len=norm_len, norm_mean=norm_mean,
                                     D_multiple_of=max_ncodebooks)
    dataset = dataset_func(which_dataset)
    print("=== Using Dataset: {} ({}x{})".format(dataset.name, N, D))

    dicts = []
    detailed_dicts = []
    nbytes_list = [8, 16, 32]
    max_opq_iters = 20

    # ------------------------------------------------ Bolt
    # Note: we considered having learned rotations like OPQ but constrained
    # to be block diagonal; this is why you'll see mentions of rotations
    # in some of the Bolt code. However, it ended up not helping at all
    # and also slows down Bolt considerably. All the reported results are
    # without any rotation.
    rotation_sizes = [32]
    for nbytes in nbytes_list:
        for opq_iters in [0]:  # 0 opq iters -> no rotations
            rot_sizes = rotation_sizes if opq_iters > 0 else [16]
            for rot_sz in rot_sizes:
                nsubvects = nbytes * 2
                encoder = OPQEncoder(dataset, nsubvects=nsubvects,
                                     bits_per_subvect=4,
                                     opq_iters=opq_iters,
                                     R_sz=rot_sz,
                                     elemwise_dist_func=elemwise_dist_func,
                                     algo='Bolt', quantize_lut=True)
                stats, detailed_stats = eval_encoder(
                    dataset, encoder, eval_dists=eval_dists,
                    smaller_better=smaller_better)
                stats['rot_sz'] = rot_sz
                # BUG FIX: tag the rows from *this* run; the old code
                # iterated over the accumulated `detailed_dicts`, repeatedly
                # re-tagging earlier runs and never tagging the new rows.
                for d in detailed_stats:
                    d['rot_sz'] = rot_sz
                dicts.append(stats)
                detailed_dicts += detailed_stats

    # ------------------------------------------------ PQ
    for codebook_bits in [8]:
        for nbytes in nbytes_list:
            nsubvects = nbytes * (8 // codebook_bits)
            encoder = PQEncoder(dataset, nsubvects=nsubvects,
                                bits_per_subvect=codebook_bits,
                                elemwise_dist_func=elemwise_dist_func)
            stats, detailed_stats = eval_encoder(
                dataset, encoder, eval_dists=eval_dists,
                smaller_better=smaller_better)
            dicts.append(stats)
            detailed_dicts += detailed_stats

    # ------------------------------------------------ OPQ
    init = 'identity'
    opq_iters = max_opq_iters
    for codebook_bits in [8]:
        for nbytes in nbytes_list:
            nsubvects = nbytes * (8 // codebook_bits)
            encoder = OPQEncoder(dataset, nsubvects=nsubvects,
                                 bits_per_subvect=codebook_bits,
                                 opq_iters=opq_iters, init=init,
                                 elemwise_dist_func=elemwise_dist_func)
            stats, detailed_stats = eval_encoder(
                dataset, encoder, eval_dists=eval_dists,
                smaller_better=smaller_better)
            dicts.append(stats)
            detailed_dicts += detailed_stats

    # tag every result row with the dataset + preprocessing used
    for d in dicts:
        d['dataset'] = dataset.name
        d['norm_mean'] = norm_mean
    for d in detailed_dicts:
        d['dataset'] = dataset.name
        d['norm_mean'] = norm_mean

    savedir = os.path.join(SAVE_DIR, dataset.name)
    pyn.save_dicts_as_data_frame(dicts, savedir, name='summary')
    # also just save versions with timestamps to recover from clobbering
    pyn.save_dicts_as_data_frame(dicts, savedir, name='summary',
                                 timestamp=True)
    if eval_dists:
        pyn.save_dicts_as_data_frame(detailed_dicts, savedir,
                                     name='all_results')
        pyn.save_dicts_as_data_frame(detailed_dicts, savedir,
                                     name='all_results', timestamp=True)

    return dicts, detailed_dicts
def experiment(eval_dists=False, dotprods=False):
    """Run _experiment_one_dataset() over the standard benchmark datasets,
    choosing the results directory from the evaluation mode."""
    datasets = [dsets.Mnist, dsets.Sift1M,
                dsets.LabelMe, dsets.Convnet1M]
    if not eval_dists:
        save_dir = '../results/recall_at_r/'
    elif dotprods:
        save_dir = '../results/acc_dotprods/'
    else:
        save_dir = '../results/acc_l2'
    for ds in datasets:
        _experiment_one_dataset(ds, eval_dists=eval_dists,
                                dotprods=dotprods, save_dir=save_dir)
def main():
    """Command-line entry point: run doctests, then dispatch whichever
    experiment modes were requested on the command line."""
    import doctest
    doctest.testmod()
    np.set_printoptions(precision=3)

    opts = pyn.parse_cmd_line()
    for key in ('eval_l2_dists', 'eval_dotprods', 'eval_recall@R'):
        opts.setdefault(key, False)

    if opts['eval_l2_dists']:
        print(">>>>>>>> evaluating l2 dists")
        experiment(eval_dists=True, dotprods=False)
    if opts['eval_dotprods']:
        print(">>>>>>>> evaluating dot prods")
        experiment(eval_dists=True, dotprods=True)
    if opts['eval_recall@R']:
        print(">>>>>>>> evaluating recall@R")
        experiment(eval_dists=False, dotprods=False)
# Run the experiment driver when invoked as a script.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
import copy
import numpy as np
from functools import reduce
import numba
from sklearn.decomposition import PCA
from sklearn import linear_model
from . import subspaces as subs
from joblib import Memory
_memory = Memory('.', verbose=0)
# def bucket_id_to_new_bucket_ids(old_id):
# i = 2 * old_id
# return i, i + 1
class Bucket(object):
    """A set of points summarized by count, per-dim sum, and sum of squares.

    Stores just enough statistics (N, sumX, sumX2) to compute per-dimension
    means/variances and the bucket's SSE loss without touching the data.
    `point_ids` records which rows of the (external) data matrix belong
    here; it is a set when `support_add_and_remove`, else an ndarray.
    """
    __slots__ = 'N D id sumX sumX2 point_ids support_add_and_remove'.split()

    def __init__(self, D=None, N=0, sumX=None, sumX2=None, point_ids=None,
                 bucket_id=0, support_add_and_remove=False):
        if point_ids is None:
            assert N == 0
            # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in
            # 1.24; use the concrete np.int64 instead
            point_ids = (set() if support_add_and_remove
                         else np.array([], dtype=np.int64))

        self.N = len(point_ids)
        self.id = bucket_id

        # this is just so that we can store the point ids as array instead of
        # set, while still retaining option to run our old code that needs
        # them to be a set for efficient inserts and deletes
        self.support_add_and_remove = support_add_and_remove
        if support_add_and_remove:
            self.point_ids = set(point_ids)
        else:
            self.point_ids = np.asarray(point_ids)

        # figure out D from the stats arrays if it wasn't given
        if (D is None or D < 1) and (sumX is not None):
            D = len(sumX)
        elif (D is None or D < 1) and (sumX2 is not None):
            D = len(sumX2)
        assert D is not None
        self.D = D

        # figure out + sanity check stats arrays
        self.sumX = np.zeros(D, dtype=np.float32) if (sumX is None) else sumX
        self.sumX2 = np.zeros(D, dtype=np.float32) if (sumX2 is None) else sumX2  # noqa
        assert len(self.sumX) == D
        assert len(self.sumX2) == D
        self.sumX = np.asarray(self.sumX).astype(np.float32)
        self.sumX2 = np.asarray(self.sumX2).astype(np.float32)

    def add_point(self, point, point_id=None):
        """Fold one point into the running stats (add/remove mode only)."""
        assert self.support_add_and_remove
        # TODO replace with more numerically stable updates if necessary
        self.N += 1
        self.sumX += point
        self.sumX2 += point * point
        if point_id is not None:
            self.point_ids.add(point_id)

    def remove_point(self, point, point_id=None):
        """Remove one point from the running stats (add/remove mode only)."""
        assert self.support_add_and_remove
        self.N -= 1
        self.sumX -= point
        self.sumX2 -= point * point
        if point_id is not None:
            self.point_ids.remove(point_id)

    def deepcopy(self, bucket_id=None):  # deep copy
        """Deep copy of this bucket, optionally with a new id.

        BUG FIX: propagate support_add_and_remove so copies of set-backed
        buckets stay set-backed (previously the copy silently reverted to
        array mode, wrapping the set in a useless 0-d object array).
        """
        bucket_id = self.id if bucket_id is None else bucket_id
        return Bucket(
            sumX=np.copy(self.sumX), sumX2=np.copy(self.sumX2),
            point_ids=copy.deepcopy(self.point_ids), bucket_id=bucket_id,
            support_add_and_remove=self.support_add_and_remove)

    def split(self, X=None, dim=None, val=None, X_orig=None):
        """Split into two children on X_orig[:, dim] < val.

        Child ids are 2*id and 2*id + 1 (binary-tree numbering). With no
        data (or <2 points), returns a copy of this bucket plus an empty
        sibling.
        """
        id0 = 2 * self.id
        id1 = id0 + 1
        if X is None or self.N < 2:  # copy of this bucket + an empty bucket
            return (self.deepcopy(bucket_id=id0),
                    Bucket(D=self.D, bucket_id=id1))
        assert dim is not None
        assert val is not None
        assert self.point_ids is not None
        my_idxs = np.asarray(self.point_ids)

        # threshold on X_orig but accumulate stats on X (X may be residuals)
        X = X[my_idxs]
        X_orig = X if X_orig is None else X_orig[my_idxs]
        mask = X_orig[:, dim] < val
        not_mask = ~mask
        X0 = X[mask]
        X1 = X[not_mask]
        ids0 = my_idxs[mask]
        ids1 = my_idxs[not_mask]

        def create_bucket(points, ids, bucket_id):
            # empty child: let __init__ zero-fill the stats
            sumX = points.sum(axis=0) if len(ids) else None
            sumX2 = (points * points).sum(axis=0) if len(ids) else None
            return Bucket(D=self.D, point_ids=ids, sumX=sumX, sumX2=sumX2,
                          bucket_id=bucket_id)

        return create_bucket(X0, ids0, id0), create_bucket(X1, ids1, id1)

    def optimal_split_val(self, X, dim, possible_vals=None, X_orig=None,
                          return_possible_vals_losses=False):
        """Best threshold (and loss) for splitting this bucket's rows on
        `dim`; delegates to the module-level optimal_split_val()."""
        if self.N < 2 or self.point_ids is None:
            # nothing to split; zero loss for every candidate
            if return_possible_vals_losses:
                return 0, 0, np.zeros(len(possible_vals), dtype=X.dtype)
            return 0, 0
        my_idxs = np.asarray(self.point_ids)
        if X_orig is not None:
            X_orig = X_orig[my_idxs]
        return optimal_split_val(
            X[my_idxs], dim, possible_vals=possible_vals, X_orig=X_orig,
            return_possible_vals_losses=return_possible_vals_losses)

    def col_means(self):
        """Per-dimension mean (0 for an empty bucket)."""
        return self.sumX.astype(np.float64) / max(1, self.N)

    def col_variances(self, safe=False):
        """Per-dimension variance; `safe` clamps negatives from roundoff."""
        if self.N < 1:
            return np.zeros(self.D, dtype=np.float32)
        E_X2 = self.sumX2 / self.N
        E_X = self.sumX / self.N
        ret = E_X2 - (E_X * E_X)
        return np.maximum(0, ret) if safe else ret

    def col_sum_sqs(self):
        """Per-dimension sum of squared deviations from the mean."""
        return self.col_variances() * self.N

    @property
    def loss(self):
        """Total SSE of the bucket about its mean, clamped at 0."""
        # more stable than sum(sumX2 - sumX*(sumX/N)); also clamps variance
        return max(0, np.sum(self.col_sum_sqs()))
# @numba.jit(nopython=True) # numpy cumsum in insanely slow
# def _cumsum_cols(X):
# X = np.copy(X)
# for i in range(1, X.shape[0]):
# X[i] += X[i - 1]
# return X
# numpy cumsum in insanely slow; also, having the nested loops is twice
# as fast as assigning rows (ie, X[i] += X[i-1])
@numba.njit(fastmath=True)
def _cumsum_cols(X):
    # Column-wise cumulative sum. Hand-rolled because np.cumsum is slow
    # here, and scalar inner loops JIT faster than whole-row assignments.
    nrows, ncols = X.shape
    out = np.empty(X.shape, X.dtype)
    for c in range(ncols):
        out[0, c] = X[0, c]
    for r in range(1, nrows):
        for c in range(ncols):
            out[r, c] = out[r - 1, c] + X[r, c]
    return out
@numba.njit(fastmath=True, cache=True)  # njit = no python, cache binary
def _cumsse_cols(X):
    """Running per-column SSE: out[i, j] is the sum of squared deviations
    of X[:i+1, j] about the mean of that prefix."""
    nrows, ncols = X.shape
    out = np.empty((nrows, ncols), X.dtype)
    sums = np.empty(ncols, X.dtype)
    sums_sq = np.empty(ncols, X.dtype)
    for j in range(ncols):
        sums[j] = X[0, j]
        sums_sq[j] = X[0, j] * X[0, j]
        out[0, j] = 0  # no err in bucket with 1 element
    for i in range(1, nrows):
        inv_count = 1. / (i + 1)
        for j in range(ncols):
            sums[j] += X[i, j]
            sums_sq[j] += X[i, j] * X[i, j]
            mean = sums[j] * inv_count
            out[i, j] = sums_sq[j] - (sums[j] * mean)
    return out
# def optimal_split_val(X, dim, possible_vals=None, return_val_idx=False):
# @_memory.cache
def optimal_split_val(X, dim, possible_vals=None, X_orig=None,
                      return_possible_vals_losses=False, force_val=None,
                      shrink_towards_median=False):
    """Find the threshold on column `dim` that splits the rows of X into
    two groups with minimum total SSE about each group's mean.

    X_orig, if given, supplies the column actually thresholded, while the
    SSE is computed on X (e.g., X may hold residuals). If `possible_vals`
    is given, the threshold must be one of those values; otherwise any
    midpoint between consecutive sorted values is allowed. `force_val`
    ('mean' or 'median') skips the search entirely.

    Returns (best_val, loss), plus the loss for every candidate in
    `possible_vals` when `return_possible_vals_losses` (which therefore
    requires `possible_vals`).
    """
    X_orig = X if X_orig is None else X_orig
    if X_orig.shape != X.shape:
        print("X orig shape: ", X_orig.shape)
        print("X shape: ", X.shape)
        assert X_orig.shape == X.shape

    if force_val in ('mean', 'median'):
        assert not return_possible_vals_losses
        x = X_orig[:, dim]
        val = np.median(x) if force_val == 'median' else np.mean(x)
        # BUG FIX: mask on the selected column, not the whole matrix; the
        # old `X_orig < val` built a 2-D elementwise mask that flattened X
        # and computed a nonsense loss whenever X had more than one column
        mask = x < val
        X0 = X[mask]
        errs0 = X0 - X0.mean(axis=0)
        loss0 = np.sum(errs0 * errs0)
        X1 = X[~mask]
        errs = X1 - X1.mean(axis=0)
        loss1 = np.sum(errs * errs)
        return val, loss0 + loss1

    N, D = X.shape
    # order rows by the (original-space) column we're thresholding on
    sort_idxs = np.argsort(X_orig[:, dim])
    X_sort = X[sort_idxs]

    # cumulative SSEs of every prefix and every suffix of the sorted rows
    use_jit = True
    if use_jit:
        sses_head = _cumsse_cols(X_sort)
        sses_tail = _cumsse_cols(X_sort[::-1])[::-1]
    else:
        X_sort_sq = X_sort * X_sort
        cumX_head = _cumsum_cols(X_sort)
        cumX2_head = _cumsum_cols(X_sort_sq)
        cumX_tail = _cumsum_cols(X_sort[::-1])[::-1]
        cumX2_tail = _cumsum_cols(X_sort_sq[::-1])[::-1]

        all_counts = np.arange(1, N + 1).reshape(-1, 1)
        EX_head = cumX_head / all_counts        # E[X], starting from 0
        EX_tail = cumX_tail / all_counts[::-1]  # E[X], starting from N-1
        # sse = (E[X^2] - E[X]^2) * n, which reduces to this
        sses_head = cumX2_head - (cumX_head * EX_head)
        sses_tail = cumX2_tail - (cumX_tail * EX_tail)

    sses = sses_head
    sses[:-1] += sses_tail[1:]  # sse of X_sort[:i] + sse of X_sort[i:]
    sses = sses.sum(axis=1)

    if shrink_towards_median:
        # NOTE(review): this penalty grows linearly with sorted position,
        # not with distance from the median, despite the flag's name
        minsse, maxsse = np.min(sses), np.max(sses)
        scale = maxsse - minsse
        coeffs = np.abs(np.arange(N, dtype=np.float32))
        penalties = coeffs * (scale / np.max(coeffs))
        sses += penalties

    if possible_vals is None or not len(possible_vals):  # can split anywhere
        best_idx = np.argmin(sses)
        next_idx = min(N - 1, best_idx + 1)
        col = X_orig[:, dim]
        best_val = (col[sort_idxs[best_idx]] + col[sort_idxs[next_idx]]) / 2
    else:  # have to choose one of the values in possible_vals
        sorted_col = X_orig[:, dim][sort_idxs]
        idxs = np.searchsorted(sorted_col, possible_vals)
        idxs = np.maximum(0, idxs - 1)  # searchsorted returns first idx larger
        sses_for_idxs = sses[idxs]
        which_idx_idx = np.argmin(sses_for_idxs)
        best_idx = idxs[which_idx_idx]
        best_val = possible_vals[which_idx_idx]

    ret = best_val, sses[best_idx]
    return ret + (sses_for_idxs,) if return_possible_vals_losses else ret
def evenly_spaced_quantiles(x, nquantiles, dedup=True):
    """Return `nquantiles` values suitable for binning the distribution of x.

    Normally these are the interior quantiles at fractions 1/(n+1), ...,
    n/(n+1). Degenerate inputs are special-cased so each returned value is
    distinct and useful as a bin edge:
      - exactly `nquantiles` distinct values -> returned as-is
      - a single distinct value v            -> linspace(-1, 3, n) * v
      - fewer than 2*nquantiles distinct     -> linspace(min, max, n)

    `dedup` controls whether duplicates are removed first (FIX: this
    parameter was previously accepted but ignored; the default True
    preserves the original always-dedup behavior).
    """
    x = np.unique(x) if dedup else np.asarray(x)

    # handle x with fewer unique elements than nquantiles (or same number,
    # or not that many more)
    if len(x) == nquantiles:
        return x
    elif len(x) == 1:
        return np.linspace(-1, 3, num=nquantiles) * x[0]
    elif len(x) < 2 * nquantiles:
        return np.linspace(np.min(x), np.max(x), num=nquantiles)

    n = nquantiles + 1
    fracs = np.arange(1, n) / float(n)
    return np.array([np.quantile(x, frac) for frac in fracs])
class PointInfo(object):
    """Pairs a data point with the id of the bucket it belongs to."""
    __slots__ = ('data', 'bucket_id')

    def __init__(self, data, bucket_id):
        self.data = data
        self.bucket_id = bucket_id
class Split(object):
    """A single (dimension, threshold) split, with optional loss delta."""
    __slots__ = ('dim', 'val', 'loss_change')

    def __init__(self, dim, val, loss_change=None):
        self.dim = dim
        self.val = val
        self.loss_change = loss_change
def _sort_and_append_orig_idx(x, ascending=True):
sort_idxs = np.argsort(x)
if not ascending:
sort_idxs = sort_idxs[::-1]
x_sort = x[sort_idxs]
orig_idxs = np.arange(len(x))[sort_idxs]
return list(zip(x_sort, orig_idxs))
def _split_existing_buckets(buckets):
return [buck.split() for buck in buckets]
# new_buckets = []
# # D = len(buckets[0].sumX)
# for buck in buckets:
# # buck0 = copy.deepcopy(bucket)
# # buck0 = Bucket(N=buck.N, D=D, point_ids=copy.deepcopy(buck.point_ids),
# # sumX=np.copy(buck.sumX), sumX2=np.copy(buck.sumX2))
# # buck0 = buck.copy()
# # buck1 = Bucket(D=buckets[0].D)
# new_buckets.append((buck0, buck1))
# return new_buckets
class MultiSplit(object):
    """One split dimension shared across many buckets: each bucket gets its
    own threshold in `vals`, with optional affine quantization params."""
    __slots__ = ('dim', 'vals', 'scaleby', 'offset')

    def __init__(self, dim, vals, scaleby=None, offset=None):
        self.dim = dim
        self.vals = np.asarray(vals)
        self.scaleby = scaleby
        self.offset = offset

    def preprocess_x(self, x):
        """Apply the (optional) offset then scale so x is comparable to the
        stored, possibly quantized, thresholds."""
        out = x
        if self.offset is not None:
            out = out - self.offset
        if self.scaleby is not None:
            out = out * self.scaleby
        return out
def learn_multisplits_orig(X, nsplits, log2_max_vals_per_split=4,
                           try_nquantiles=16, return_centroids=True,
                           # learn_quantize_params=False,
                           learn_quantize_params='int16',
                           # learn_quantize_params=True,
                           # verbose=1):
                           verbose=2):
    # verbose=3):
    """Greedily learn `nsplits` MultiSplits partitioning the rows of X.

    Each round picks one dimension and a per-bucket (or per-group, once
    there are more buckets than 2**log2_max_vals_per_split split values)
    threshold, then splits every bucket, doubling the bucket count. With
    `learn_quantize_params`, thresholds are affinely mapped into [0, 255]
    ('int16' additionally rounds the scale to a power of two).

    Returns (splits, loss) or (splits, loss, centroids) when
    `return_centroids`, where loss is the final total within-bucket SSE.
    """
    X = X.astype(np.float32)
    N, D = X.shape
    max_vals_per_split = 1 << log2_max_vals_per_split
    X_hat = np.zeros_like(X)  # scratch for the 'eigenvec' heuristic
    # initially, one big bucket with everything
    buckets = [Bucket(sumX=X.sum(axis=0), sumX2=(X * X).sum(axis=0),
                      point_ids=np.arange(N))]
    total_loss = sum([bucket.loss for bucket in buckets])

    # values to try in each dim, once buckets no longer get to pick optimal
    # ones: points linearly spaced strictly *between* each dim's min and max
    # (endpoints excluded so every candidate actually splits something)
    possible_split_vals = np.empty((D, try_nquantiles), dtype=X.dtype)
    for dim in range(D):
        minval, maxval = np.min(X[:, dim]), np.max(X[:, dim])
        possible_split_vals[dim] = np.linspace(
            minval, maxval, num=(try_nquantiles + 2))[1:-1]

    if verbose > 0:
        print("================================")
        print("learn_multisplits(): initial loss: ", total_loss)

    splits = []
    col_losses = np.zeros(D, dtype=np.float32)  # scratch for 'variance'
    for s in range(nsplits):
        if verbose > 1:
            print("------------------------ finding split #:", s)

        for i, buck in enumerate(buckets):  # TODO rm sanity check
            assert buck.id == i
        nbuckets = len(buckets)

        # compute number of bucket groups and size of each; buckets in a
        # group must share a split value
        ngroups = min(nbuckets, max_vals_per_split)
        nbuckets_per_group = nbuckets // ngroups
        assert nbuckets_per_group * ngroups == nbuckets  # sanity check

        try_ndims = 1
        # dim_heuristic = 'eigenvec'
        # dim_heuristic = 'bucket_eigenvecs'
        dim_heuristic = 'variance'
        if dim_heuristic == 'eigenvec':
            # compute current reconstruction of X, along with errs
            for buck in buckets:
                if len(buck.point_ids):
                    centroid = buck.col_means()
                    X_hat[buck.point_ids] = centroid
            X_res = X - X_hat
            # pick dims by looking at top principal component
            v = subs.top_principal_component(X_res)
            try_dims = np.argsort(np.abs(v))[-try_ndims:]
        elif dim_heuristic == 'bucket_eigenvecs':
            # score dims by eigenvalue-weighted per-bucket top eigenvectors
            dim_scores = np.zeros(D, dtype=np.float32)
            for buck in buckets:
                if buck.N < 2:
                    continue
                X_buck = X[buck.point_ids]
                v, lamda = subs.top_principal_component(
                    X_buck, return_eigenval=True)
                v *= lamda
                dim_scores += np.abs(v)
            try_dims = np.argsort(dim_scores)[-try_ndims:]
        elif dim_heuristic == 'variance':
            # pick out dims with the most within-bucket variance to split on
            col_losses[:] = 0
            for buck in buckets:
                col_losses += buck.col_sum_sqs()
            try_dims = np.argsort(col_losses)[-try_ndims:]

        losses = np.zeros(len(try_dims), dtype=X.dtype)
        losses_for_vals = np.zeros(try_nquantiles, dtype=X.dtype)
        all_split_vals = []  # vals chosen by each bucket/group for each dim
        # determine for this dim what the best split vals are for each
        # group and what the loss is when using these split vals
        for d, dim in enumerate(try_dims):
            if verbose > 2:
                print("======== dim = {}, ({:.5f}, {:.5f})".format(
                    dim, np.min(X[:, dim]), np.max(X[:, dim])))
            # just let each bucket pick its optimal split val for this dim;
            # special case of below where each "group" is one bucket, and
            # instead of having to pick val from fixed set, can be anything
            if nbuckets_per_group == 1:
                split_vals = []  # each bucket contributes one split val
                for buck in buckets:
                    val, loss = buck.optimal_split_val(X, dim)
                    losses[d] += loss
                    split_vals.append(val)
                all_split_vals.append(split_vals)
            # buckets have to pick from fixed set of possible values; each
            # group of buckets (defined by common prefix) have to agree on
            # one val, so we sum loss for each possible value across all
            # buckets in the group, and then take val with lowest sum
            else:
                split_vals = []  # each group contributes one split val
                for g in range(ngroups):
                    start_idx = g * nbuckets_per_group
                    end_idx = start_idx + nbuckets_per_group
                    group_buckets = buckets[start_idx:end_idx]
                    # compute loss for each possible split value, summed
                    # across all buckets in this group; then choose best
                    possible_vals = possible_split_vals[dim]
                    losses_for_vals[:] = 0
                    for b, buck in enumerate(group_buckets):
                        _, _, val_losses = buck.optimal_split_val(
                            X, dim, possible_vals=possible_vals,
                            return_possible_vals_losses=True)
                        losses_for_vals += val_losses
                    best_val_idx = np.argmin(losses_for_vals)
                    best_val = possible_vals[best_val_idx]
                    best_loss = losses_for_vals[best_val_idx]
                    losses[d] += best_loss
                    split_vals.append(best_val)
                all_split_vals.append(split_vals)

        # determine best dim to split on, and pull out associated split
        # vals for all buckets
        best_tried_dim_idx = np.argmin(losses)
        best_dim = try_dims[best_tried_dim_idx]
        use_split_vals = all_split_vals[best_tried_dim_idx]
        split = MultiSplit(dim=best_dim, vals=use_split_vals)
        if learn_quantize_params:
            # simple version, which also handles 1 bucket: just set min
            # value to be avg of min splitval and xval, and max value to
            # be avg of max splitval and xval
            x = X[:, best_dim]
            offset = (np.min(x) + np.min(use_split_vals)) / 2
            upper_val = (np.max(x) + np.max(use_split_vals)) / 2 - offset
            scale = 254. / upper_val
            if learn_quantize_params == 'int16':
                # power-of-two scale so an int16 implementation can shift
                scale = 2. ** int(np.log2(scale))
            split.offset = offset
            split.scaleby = scale
            split.vals = (split.vals - split.offset) * split.scaleby
            split.vals = np.clip(split.vals, 0, 255).astype(np.int32)
        splits.append(split)

        # apply this split to get next round of buckets
        new_buckets = []
        for i, buck in enumerate(buckets):
            group_idx = i // nbuckets_per_group
            val = use_split_vals[group_idx]
            new_buckets += list(buck.split(X, dim=best_dim, val=val))
        buckets = new_buckets

        if verbose > 1:
            print("bucket counts: ", [buck.N for buck in buckets])
            print("loss from buckets: ",
                  sum([bucket.loss for bucket in buckets]))
            print("dim losses: ", losses)
            if verbose > 2:
                print("loss from sse computation: ",
                      losses[best_tried_dim_idx])
                print("using dim, split_vals:", best_dim, use_split_vals)

    # maybe return centroids in addition to set of MultiSplits and loss
    loss = sum([bucket.loss for bucket in buckets])
    if verbose > 0:
        print("learn_multisplits(): returning loss: ", loss)
    if return_centroids:
        centroids = np.vstack([buck.col_means() for buck in buckets])
        assert centroids.shape == (len(buckets), X.shape[1])
        return splits, loss, centroids
    return splits, loss
@_memory.cache
def learn_multisplits(
        X, nsplits=4, return_centroids=True, return_buckets=False,
        # learn_quantize_params=False,
        # learn_quantize_params='int16', X_orig=None, try_ndims=1,
        # learn_quantize_params='int16', X_orig=None, try_ndims=2,
        learn_quantize_params='int16', X_orig=None, try_ndims=4,
        # learn_quantize_params='int16', X_orig=None, try_ndims=8,
        # learn_quantize_params='int16', X_orig=None, try_ndims=16,
        # learn_quantize_params=True,
        # verbose=3):
        # verbose=2):
        verbose=1):
    """Learn up to 4 MultiSplits that recursively halve the rows of X.

    Each round considers the `try_ndims` columns with the highest
    within-bucket SSE, gives every bucket its own optimal threshold, and
    splits on the best column, doubling the bucket count. Thresholds are
    compared against X_orig (defaults to X) while losses are computed on X.
    With `learn_quantize_params`, thresholds are affinely mapped into
    [0, 255] ('int16' rounds the scale to a power of two).

    Returns (splits, loss[, centroids][, buckets]) as a tuple, depending on
    the return_* flags. Results are memoized on disk via `_memory.cache`.
    """
    assert nsplits <= 4  # >4 splits means >16 split_vals for this func's impl
    X = X.astype(np.float32)
    N, D = X.shape
    X_orig = X if X_orig is None else X_orig
    X_hat = np.zeros_like(X)  # scratch for reconstruction-based heuristics
    # initially, one big bucket with everything
    buckets = [Bucket(sumX=X.sum(axis=0), sumX2=(X * X).sum(axis=0),
                      point_ids=np.arange(N))]
    total_loss = sum([bucket.loss for bucket in buckets])
    if verbose > 0:
        print("================================")
        print("learn_multisplits(): initial loss: ", total_loss)

    splits = []
    col_losses = np.zeros(D, dtype=np.float32)  # scratch: per-column SSE
    for s in range(nsplits):
        if verbose > 1:
            print("------------------------ finding split #:", s)
        # dim_heuristic = 'eigenvec'
        # dim_heuristic = 'bucket_eigenvecs'
        dim_heuristic = 'bucket_sse'
        # dim_heuristic = 'kurtosis'
        if dim_heuristic == 'eigenvec':
            # compute current reconstruction of X, along with errs
            if s > 0:
                for buck in buckets:
                    if len(buck.point_ids):
                        centroid = buck.col_means()
                        X_hat[buck.point_ids] = centroid
                X_res = X - X_hat
            else:
                X_res = X
            # pick dims by looking at top principal component
            v = subs.top_principal_component(X_res)
            try_dims = np.argsort(np.abs(v))[-try_ndims:]
        elif dim_heuristic == 'bucket_eigenvecs':
            # score dims by eigenvalue-weighted per-bucket top eigenvectors
            dim_scores = np.zeros(D, dtype=np.float32)
            for buck in buckets:
                if buck.N < 2:
                    continue
                X_buck = X[buck.point_ids]
                v, lamda = subs.top_principal_component(
                    X_buck, return_eigenval=True)
                v *= lamda
                dim_scores += np.abs(v)
            try_dims = np.argsort(dim_scores)[-try_ndims:]
        elif dim_heuristic == 'bucket_sse':
            # consider the columns with the most within-bucket variance
            col_losses[:] = 0
            for buck in buckets:
                col_losses += buck.col_sum_sqs()
            try_dims = np.argsort(col_losses)[-try_ndims:]
        elif dim_heuristic == 'kurtosis':
            # compute X_res
            if s > 0:
                for buck in buckets:
                    if len(buck.point_ids):
                        centroid = buck.col_means()
                        X_hat[buck.point_ids] = centroid
                X_res = X - X_hat
            else:
                X_res = X
            col_losses[:] = 0
            for buck in buckets:
                col_losses += buck.col_sum_sqs()
            # NOTE(review): this assignment is dead; overwritten below
            try_dims = np.argsort(col_losses)[-try_ndims:]
            from scipy import stats
            col_losses *= col_losses  # just 4th central moment
            col_losses *= stats.kurtosis(X_res, axis=0)
            try_dims = np.argsort(col_losses)[-try_ndims:]

        losses = np.zeros(len(try_dims), dtype=X.dtype)
        all_split_vals = []  # vals chosen by each bucket/group for each dim
        # determine for this dim what the best split vals are for each
        # group and what the loss is when using these split vals
        for d, dim in enumerate(try_dims):
            if verbose > 2:
                print("======== dim = {}, ({:.5f}, {:.5f})".format(
                    dim, np.min(X[:, dim]), np.max(X[:, dim])))
            split_vals = []  # each bucket contributes one split val
            for b, buck in enumerate(buckets):
                val, loss = buck.optimal_split_val(X, dim, X_orig=X_orig)
                losses[d] += loss
                if d > 0 and losses[d] >= np.min(losses[:d]):
                    if verbose > 2:
                        print("early abandoning after bucket {}!".format(b))
                    break  # this dim already can't be the best
                split_vals.append(val)
            all_split_vals.append(split_vals)

        # determine best dim to split on, and pull out associated split
        # vals for all buckets
        best_tried_dim_idx = np.argmin(losses)
        best_dim = try_dims[best_tried_dim_idx]
        use_split_vals = all_split_vals[best_tried_dim_idx]
        split = MultiSplit(dim=best_dim, vals=use_split_vals)
        if learn_quantize_params:
            # simple version, which also handles 1 bucket: just set min
            # value to be avg of min splitval and xval, and max value to
            # be avg of max splitval and xval
            x = X[:, best_dim]
            offset = (np.min(x) + np.min(use_split_vals)) / 2
            upper_val = (np.max(x) + np.max(use_split_vals)) / 2 - offset
            scale = 254. / upper_val
            if learn_quantize_params == 'int16':
                # power-of-two scale so an int16 implementation can shift
                scale = 2. ** int(np.log2(scale))
            split.offset = offset
            split.scaleby = scale
            split.vals = (split.vals - split.offset) * split.scaleby
            split.vals = np.clip(split.vals, 0, 255).astype(np.int32)
        splits.append(split)

        # apply this split to get next round of buckets
        new_buckets = []
        for i, buck in enumerate(buckets):
            group_idx = i
            val = use_split_vals[group_idx]
            new_buckets += list(buck.split(X, dim=best_dim, val=val,
                                           X_orig=X_orig))
        buckets = new_buckets
        if verbose > 1:
            print("bucket counts: ", [buck.N for buck in buckets])
            print("dim losses: ", losses)
            if verbose > 2:
                print("loss from sse computation: ",
                      losses[best_tried_dim_idx])
                print("using dim, split_vals:", best_dim, use_split_vals)

    # maybe return centroids in addition to set of MultiSplits and loss
    loss = sum([bucket.loss for bucket in buckets])
    if verbose > 0:
        print("learn_multisplits(): returning loss: ", loss)
    ret = [splits, loss]
    if return_centroids:
        centroids = np.vstack([buck.col_means() for buck in buckets])
        assert centroids.shape == (len(buckets), X.shape[1])
        ret.append(centroids)
    if return_buckets:
        ret.append(buckets)
    return tuple(ret)
@numba.njit(fastmath=True, cache=True)
def _XtX_encoded(X_enc, K=16):
    """Symmetric code co-occurrence counts: the Gram matrix of the one-hot
    expansion of the (N, C) code matrix X_enc, as int32."""
    nrows, ncodebooks = X_enc.shape
    ncentroids = ncodebooks * K  # total number of centroids, not orig D
    out = np.zeros((ncentroids, ncentroids), np.int32)
    for i in range(nrows):
        for c in range(ncodebooks):
            left = (K * c) + X_enc[i, c]
            out[left, left] += 1
            for c2 in range(c + 1, ncodebooks):
                right = (K * c2) + X_enc[i, c2]
                out[left, right] += 1
    # mirror the upper triangle into the lower triangle
    for r in range(ncentroids):
        for r2 in range(r + 1, ncentroids):
            out[r2, r] = out[r, r2]
    return out
@numba.njit(fastmath=True, cache=True)
def _XtY_encoded(X_enc, Y, K=16):
    """Accumulate rows of Y into the output rows selected by the codes:
    out[K*c + X_enc[n, c]] += Y[n] for every sample n and codebook c."""
    nrows, ncodebooks = X_enc.shape
    nrows, nout = Y.shape  # Y must have one row per encoded sample
    ncentroids = int(ncodebooks * K)  # total centroids, not original D
    out = np.zeros((ncentroids, nout), Y.dtype)
    for i in range(nrows):
        for c in range(ncodebooks):
            row_idx = (K * c) + X_enc[i, c]
            for m in range(nout):
                out[row_idx, m] += Y[i, m]
    return out
@numba.njit(fastmath=True, cache=True)
def _XW_encoded(X_enc, W, K=16):
    """Sparse product of one-hot codes with W: each output row is the sum
    of the W rows indexed by that sample's codes."""
    nrows, ncodebooks = X_enc.shape
    nlutrows, nout = W.shape
    out = np.zeros((nrows, nout), W.dtype)
    for i in range(nrows):
        for c in range(ncodebooks):
            w_row = (K * c) + X_enc[i, c]
            for m in range(nout):
                out[i, m] += W[w_row, m]
    return out
@numba.njit(fastmath=True, cache=True)
def _densify_X_enc(X_enc, K=16):
    """One-hot expand codes: out is (N, C*K) int8 with out[n, K*c +
    X_enc[n, c]] = 1 and all other entries 0."""
    nrows, ncodebooks = X_enc.shape
    out = np.zeros((nrows, ncodebooks * K), np.int8)
    for i in range(nrows):
        for c in range(ncodebooks):
            out[i, (K * c) + X_enc[i, c]] = 1
    return out
def _fit_ridge_enc(X_enc=None, Y=None, K=16, lamda=1, X_bin=None):
    """Ridge-regress Y onto the one-hot expansion of X_enc.

    If X_bin (the dense one-hot matrix) is not given, it is built from
    X_enc via _densify_X_enc. No intercept is fit. Returns coefficients
    of shape (C*K, M).
    """
    if X_bin is None:
        X_bin = _densify_X_enc(X_enc, K=K)
    ridge = linear_model.Ridge(fit_intercept=False, alpha=lamda)
    ridge.fit(X_bin, Y)
    return ridge.coef_.T
def encoded_lstsq(X_enc=None, X_bin=None, Y=None, K=16, XtX=None, XtY=None,
                  precondition=True, stable_ridge=True):
    """Least-squares fit of Y onto the one-hot encoding of X_enc.

    With stable_ridge=True this delegates to sklearn's Ridge solver on the
    dense one-hot matrix. Otherwise it solves the (ridge-regularized)
    normal equations, building XtX and XtY from X_enc when they are not
    supplied. Returns W of shape (C*K, M).
    """
    if stable_ridge:
        return _fit_ridge_enc(X_enc=X_enc, Y=Y, X_bin=X_bin, K=K, lamda=1)

    if XtX is None:
        XtX = _XtX_encoded(X_enc, K=K).astype(np.float32)
        lamda = 1  # TODO cross-validate to get lamda
        lamda = max(1, lamda)
        print("using lamda = ", lamda)
        XtX += np.diag(np.ones(XtX.shape[0]) * lamda).astype(np.float32)  # ridge
    if XtY is None:
        XtY = _XtY_encoded(X_enc, Y, K=K)

    XtX = XtX.astype(np.float64)
    XtY = XtY.astype(np.float64)

    if precondition:
        # rescale both sides to avoid numerical issues (seemingly
        # unnecessary, but might as well do it); doesn't change the argmin
        scale = 1. / np.linalg.norm(XtX, axis=0).max()
        XtX = XtX * scale
        XtY = XtY * scale

    W, _, _, _ = np.linalg.lstsq(XtX, XtY, rcond=None)
    return W
def _sparse_encoded_lstsq_gomp(X_enc, Y, nnz_blocks, K=16):
    """Group OMP: encoded lstsq with at most nnz_blocks codebooks (groups
    of K coefficients) active per output column.

    For each output col, greedily adds one codebook at a time (seeded with
    that col's "natural" PQ codebook), exactly refitting the kept
    coefficients after every addition.

    Returns (W, keep_codebook_idxs): W is (C*K) x M; keep_codebook_idxs
    is M x nnz_blocks.
    """
    assert nnz_blocks >= 1
    ncodebooks = X_enc.shape[1]
    M = Y.shape[1]

    # precompute XtX and XtY and create initial dense W
    XtX = _XtX_encoded(X_enc, K=K).astype(np.float32)
    XtX += np.diag(np.ones(XtX.shape[0])).astype(np.float32)  # ridge
    XtY = _XtY_encoded(X_enc, Y, K=K)
    # NOTE(review): encoded_lstsq's 2nd positional arg is X_bin, not Y;
    # pass by keyword so Y actually reaches the solver
    W = encoded_lstsq(X_enc=X_enc, Y=Y, XtX=XtX, XtY=XtY)

    # np.asfarray was removed in numpy 2.0; asarray+float64 is equivalent
    XtX = np.asarray(XtX, dtype=np.float64)  # since we'll be slicing columns
    # np.int alias was removed from numpy; the builtin works as a dtype
    keep_codebook_idxs = np.empty((M, nnz_blocks), dtype=int)

    # per-codebook orthonormal basis of XtX's columns, used to score how
    # well each codebook can explain the current residual
    XtX_G = np.empty((ncodebooks, K * ncodebooks, K), dtype=np.float32)
    for c in range(ncodebooks):
        start_idx = c * K
        end_idx = start_idx + K
        use_XtX = XtX[:, start_idx:end_idx]
        XtX_G[c], _ = np.linalg.qr(use_XtX)  # KC x K

    codebook_scores = np.zeros(ncodebooks)
    for m in range(M):  # fully solve one output col at a time
        targets = np.copy(XtY[:, m])
        residuals = targets
        keep_codebooks = set()
        w = np.copy(W[:, m])
        # codebook that "owns" this output col under a plain PQ layout
        pq_codebook_idx = int(m / float(M) * ncodebooks)
        for b in range(nnz_blocks):
            # score each codebook to pick new one to add
            if b > 0:
                for c in range(ncodebooks):
                    if c in keep_codebooks:
                        codebook_scores[c] = -np.inf
                        continue
                    X_G = XtX_G[c]
                    codebook_scores[c] = np.linalg.norm(X_G.T @ residuals)
                keep_codebooks.add(np.argmax(codebook_scores))
            else:
                keep_codebooks.add(pq_codebook_idx)  # seed with pq idx

            # refit model using all the groups selected so far
            keep_idxs = [np.arange(i * K, (i + 1) * K)
                         for i in sorted(list(keep_codebooks))]
            keep_idxs = np.hstack(keep_idxs)
            XtX_subs = XtX[keep_idxs][:, keep_idxs]
            targets_subs = targets[keep_idxs]
            w_subs = np.linalg.solve(XtX_subs, targets_subs)
            w[:] = 0
            w[keep_idxs] = w_subs
            residuals = targets - (XtX[:, keep_idxs] @ w_subs)

        # update return arrays
        keep_codebook_idxs[m] = np.array(list(keep_codebooks))
        W[:, m] = w

    return W, keep_codebook_idxs
# each codebook has const number of nonzero idxs
def _sparse_encoded_lstsq_elim_v2(X_enc, Y, nnz_per_centroid, K=16,
                                  # uniform_sparsity=False):  # never better
                                  uniform_sparsity=True, pq_perm_algo='start',
                                  stable_ridge=True):
    """Sparse encoded lstsq: allow only nnz_per_centroid nonzero output
    cols per codebook.

    Starts from the dense solution, scores each (codebook, output col)
    pair by the energy that codebook contributes to that col, keeps the
    best cols per codebook (seeded with the PQ-natural ones), then refits
    every output col over its surviving coefficients.

    Returns (W_sparse, ret_idxs): W_sparse is (C*K) x M; ret_idxs holds
    the kept col idxs per codebook (array if uniform_sparsity else list).
    """
    ncodebooks = X_enc.shape[1]
    M = Y.shape[1]
    nnz_per_centroid = min(M, int(nnz_per_centroid))
    nnz_per_centroid = max(1, nnz_per_centroid)
    assert nnz_per_centroid >= int(np.ceil(M / ncodebooks))
    assert nnz_per_centroid <= M

    X_bin = _densify_X_enc(X_enc, K=K)

    if not stable_ridge:
        # precompute XtX and XtY and create initial dense W
        XtX = _XtX_encoded(X_enc, K=K).astype(np.float32)
        lamda = 1
        XtX += np.diag(np.ones(XtX.shape[0]) * lamda).astype(np.float32)  # ridge
        XtY = _XtY_encoded(X_enc, Y, K=K)

        scale = 1. / np.linalg.norm(XtX, axis=0).max()
        XtX = XtX * scale
        XtY = XtY * scale

        W = encoded_lstsq(X_bin=X_bin, Y=Y, XtX=XtX, XtY=XtY, precondition=False,
                          stable_ridge=stable_ridge)  # KC x M

        # np.asfarray was removed in numpy 2.0
        XtX = np.asarray(XtX, dtype=np.float64)  # since we'll be slicing columns
    else:  # stable_ridge is True
        W = encoded_lstsq(X_bin=X_bin, Y=Y, stable_ridge=stable_ridge)

    # score all blocks of W (np.float alias removed from numpy)
    all_scores = np.empty((ncodebooks, M), dtype=np.float64)  # C x M
    for c in range(ncodebooks):
        Xc = X_enc[:, c].reshape(-1, 1)
        start_idx = c * K
        end_idx = start_idx + K
        Wc = W[start_idx:end_idx]
        Yc = _XtY_encoded(Xc, Wc, K=K)  # N x M
        all_scores[c] = np.linalg.norm(Yc, axis=0)

    pq_idxs = _pq_codebook_start_end_idxs(Y, ncodebooks, algo=pq_perm_algo)

    # now pick which cols to keep in each codebook
    # (np.bool alias removed from numpy; use the builtin)
    keep_mask = np.zeros((ncodebooks, M), dtype=bool)
    for c in range(ncodebooks):
        # initialize with PQ
        start_idx, end_idx = pq_idxs[c]
        keep_mask[c, start_idx:end_idx] = 1

        subvec_len = end_idx - start_idx
        assert subvec_len >= 1
        keep_nidxs_extra = nnz_per_centroid - subvec_len
        scores = all_scores[c]
        scores[start_idx:end_idx] = 0

        if uniform_sparsity and keep_nidxs_extra > 0:
            # take as many other (best) nonzero idxs as we we're allowed to
            assert len(scores) >= keep_nidxs_extra
            best_idxs = np.argsort(scores)[-keep_nidxs_extra:]
            if len(best_idxs) != keep_nidxs_extra:
                print("len(best_idxs)", len(best_idxs))
                print("keep_nidxs_extra", keep_nidxs_extra)
                assert len(best_idxs) == keep_nidxs_extra
            keep_mask[c, best_idxs] = True

    if not uniform_sparsity:
        scores = all_scores.ravel()
        nkept_idxs = M  # number of nonzeros used already
        keep_nidxs_total = nnz_per_centroid * ncodebooks
        keep_nidxs_extra = keep_nidxs_total - nkept_idxs
        keep_idxs = np.argsort(scores)[-keep_nidxs_extra:]
        flat_mask = keep_mask.ravel()
        flat_mask[keep_idxs] = 1
        keep_mask = flat_mask.reshape(keep_mask.shape)

    # at this point, we have the mask for which cols of each centroid to keep;
    # now we just need to go from a mask to a set of indices and a sparse
    # matrix of centroids
    W_sparse = np.empty((ncodebooks * K, M), dtype=np.float32)
    if uniform_sparsity:
        ret_idxs = np.empty((ncodebooks, nnz_per_centroid), dtype=int)
    else:
        ret_idxs = []
    for c in range(ncodebooks):
        idxs = np.where(keep_mask[c] != 0)[0]
        if uniform_sparsity:
            if len(idxs) != nnz_per_centroid:
                print("c: ", c)
                print("len(idxs): ", len(idxs))
                print("nnz_per_centroid: ", nnz_per_centroid)
                print("keep_mask counts:", keep_mask.sum(axis=1))
                assert len(idxs) == nnz_per_centroid
            ret_idxs[c] = idxs
        else:
            ret_idxs.append(idxs)

        zero_idxs = np.where(keep_mask[c] == 0)[0]
        start_idx = c * K
        end_idx = start_idx + K
        Wc = W[start_idx:end_idx]
        Wc[:, zero_idxs] = 0
        W_sparse[start_idx:end_idx] = Wc

    # now refit W_sparse to each output col; right now it's just the original
    # W with a bunch of entries zeroed
    for m in range(M):
        w = W_sparse[:, m]
        keep_idxs = np.where(w != 0)[0]

        if stable_ridge:
            X_bin_subs = X_bin[:, keep_idxs]
            w_subs = _fit_ridge_enc(X_bin=X_bin_subs, Y=Y[:, m])
        else:
            xty = XtY[:, m]
            use_XtX = XtX[keep_idxs][:, keep_idxs]
            use_xty = xty[keep_idxs]
            w_subs = np.linalg.solve(use_XtX, use_xty)
        w[:] = 0
        w[keep_idxs] = w_subs
        W_sparse[:, m] = w

    return W_sparse, ret_idxs
def _sparse_encoded_lstsq_backward_elim(X_enc, Y, nnz_blocks, K=16):
    """Backward elimination: per output col, start from the dense solution
    and repeatedly drop the codebook whose K coefficients contribute least,
    refitting exactly after each removal, until nnz_blocks codebooks remain.

    Returns (W, keep_codebook_idxs): (C*K) x M and M x nnz_blocks.
    """
    ncodebooks = X_enc.shape[1]
    eliminate_nblocks = ncodebooks - nnz_blocks
    M = Y.shape[1]

    # precompute XtX and XtY and create initial dense W
    XtX = _XtX_encoded(X_enc, K=K).astype(np.float32)
    XtX += np.diag(np.ones(XtX.shape[0])).astype(np.float32)  # ridge
    XtY = _XtY_encoded(X_enc, Y, K=K)
    # NOTE(review): encoded_lstsq's 2nd positional arg is X_bin, not Y;
    # pass by keyword so Y actually reaches the solver
    W = encoded_lstsq(X_enc=X_enc, Y=Y, XtX=XtX, XtY=XtY)

    # np.asfarray was removed in numpy 2.0; np.int alias likewise removed
    XtX = np.asarray(XtX, dtype=np.float64)  # since we'll be slicing columns
    keep_codebook_idxs = np.empty((M, nnz_blocks), dtype=int)

    codebook_scores = np.zeros(ncodebooks)
    for m in range(M):  # fully solve one output col at a time
        xty = np.ascontiguousarray(XtY[:, m])
        rm_codebook_idxs = set()
        w = np.copy(W[:, m])
        for b in range(eliminate_nblocks):
            # evaluate contribution of each codebook
            for c in range(ncodebooks):
                if c in rm_codebook_idxs:
                    codebook_scores[c] = np.inf
                    continue
                start_idx = c * K
                end_idx = start_idx + K
                XtX_subs = XtX[start_idx:end_idx][:, start_idx:end_idx]
                w_subs = w[start_idx:end_idx]  # K
                xtyhat_subs = XtX_subs @ w_subs  # K x 1
                codebook_scores[c] = np.linalg.norm(xtyhat_subs)

            # rm least helpful codebook and refit the least squares
            rm_codebook_idxs.add(np.argmin(codebook_scores))

            keep_codebooks = [i for i in range(ncodebooks)
                              if i not in rm_codebook_idxs]
            keep_idxs = [np.arange(i * K, (i + 1) * K)
                         for i in keep_codebooks]
            keep_idxs = np.hstack(keep_idxs)
            use_XtX = XtX[keep_idxs][:, keep_idxs]
            use_xty = xty[keep_idxs]
            w_subs = np.linalg.solve(use_XtX, use_xty)
            w[:] = 0
            w[keep_idxs] = w_subs

        # update return arrays (removed an unused recomputation of the
        # kept idxs here; keep_codebooks already holds them)
        keep_codebook_idxs[m] = np.array(keep_codebooks)
        W[:, m] = w

    return W, keep_codebook_idxs  # CK x M, M x nnz
def sparse_encoded_lstsq(X_enc, Y, K=16, nnz_blocks=-1, **kwargs):
    """Least squares over the one-hot encoding, optionally sparsified.

    nnz_blocks < 1 returns the dense solution (every output col "kept"
    for every codebook); otherwise roughly nnz_blocks * M / C nonzero
    output cols are allowed per centroid via _sparse_encoded_lstsq_elim_v2.

    Returns (W, keep_idxs) with W of shape (C*K) x M.
    """
    ncodebooks = X_enc.shape[1]
    if nnz_blocks < 1:
        # default to returning dense centroids
        # NOTE(review): pass by keyword -- encoded_lstsq's 2nd positional
        # arg is X_bin, not Y -- and forward K instead of hardcoding 16
        W = encoded_lstsq(X_enc=X_enc, Y=Y, K=K)
        M = Y.shape[1]
        # np.int alias was removed from numpy; the builtin works as a dtype
        keep_codebook_idxs = np.empty((ncodebooks, M), dtype=int)
        all_idxs = np.arange(M)
        for c in range(ncodebooks):
            keep_codebook_idxs[c] = all_idxs
        return W, keep_codebook_idxs
    else:
        nnz_per_centroid = int(nnz_blocks * Y.shape[1] / ncodebooks)
        return _sparse_encoded_lstsq_elim_v2(
            X_enc, Y, nnz_per_centroid=nnz_per_centroid, K=K, **kwargs)
# def _pq_codebook_start_end_idxs(D, ncodebooks):
def _pq_codebook_start_end_idxs(X, ncodebooks, algo='start'):
assert algo in ('start', 'end') # TODO do something smarter here
# D = int(D)
_, D = X.shape
ncodebooks = int(ncodebooks)
assert D >= ncodebooks
idxs = np.empty((ncodebooks, 2), dtype=np.int)
full_subvec_len = D // ncodebooks
start_idx = 0
for c in range(ncodebooks):
subvec_len = full_subvec_len
if algo == 'start': # wider codebooks at the start
if c < (D % ncodebooks):
subvec_len += 1
elif algo == 'end': # wider codebooks at the end
if (ncodebooks - c - 1) < (D % ncodebooks):
subvec_len += 1
end_idx = min(D, start_idx + subvec_len)
# print("c, start_idx, end_idx: ", c, start_idx, end_idx)
# print("start_idx, end_idx: ", c, start_idx, end_idx)
idxs[c, 0] = start_idx
idxs[c, 1] = end_idx
start_idx = end_idx
assert idxs[0, 0] == 0
assert idxs[-1, -1] == D
return idxs
@_memory.cache
def _learn_mithral_initialization(X, ncodebooks,
                                  pq_perm_algo='start', **kwargs):
    """PQ-style initialization: learn one 16-centroid codebook per subspace.

    Each codebook's split tree is learned (via learn_multisplits) on the
    current residuals restricted to that codebook's column subset, and the
    residuals are updated in place before moving to the next codebook.

    Returns (X_res, all_splits, all_centroids, all_buckets).
    """
    N, D = X.shape
    ncentroids_per_codebook = 16
    X = X.astype(np.float32)
    X_res = X.copy()  # residuals not yet explained by earlier codebooks
    X_orig = X
    all_centroids = np.zeros(
        (ncodebooks, ncentroids_per_codebook, D), dtype=np.float32)
    all_splits = []
    # contiguous [start, end) column ranges, one per codebook
    pq_idxs = _pq_codebook_start_end_idxs(X, ncodebooks, algo=pq_perm_algo)
    subvec_len = int(np.ceil(D / ncodebooks))  # for non-pq heuristics
    nonzeros_heuristic = 'pq'  # only the 'pq' branch runs with this value
    # ------------------------ 0th iteration; initialize all codebooks
    all_splits = []
    all_buckets = []
    for c in range(ncodebooks):
        if nonzeros_heuristic == 'pq':
            start_idx, end_idx = pq_idxs[c]
            idxs = np.arange(start_idx, end_idx)
        elif nonzeros_heuristic == 'pca':
            v = subs.top_principal_component(X_res)
            idxs = np.argsort(np.abs(v))[:-subvec_len]
        elif nonzeros_heuristic == 'disjoint_pca':
            use_X_res = X_res.copy()
            if c > 0:  # not the first codebook
                # NOTE(review): `idxs` here is the previous iteration's
                # subspace, deliberately zeroed so this codebook picks a
                # disjoint one
                use_X_res[:, idxs] = 0  # can't use same subspace
            v = subs.top_principal_component(use_X_res)
            idxs = np.argsort(np.abs(v))[:-subvec_len]

        use_X_res = X_res[:, idxs]
        use_X_orig = X_orig[:, idxs]

        # learn codebook to soak current residuals
        multisplits, _, buckets = learn_multisplits(
            use_X_res, X_orig=use_X_orig,
            return_centroids=False, return_buckets=True, **kwargs)
        # remap split dims from subspace-local idxs to full-D column idxs
        for split in multisplits:
            split.dim = idxs[split.dim]
        all_splits.append(multisplits)
        all_buckets.append(buckets)

        # update residuals and store centroids
        centroid = np.zeros(D, dtype=np.float32)
        for b, buck in enumerate(buckets):
            if len(buck.point_ids):
                centroid[:] = 0
                centroid[idxs] = buck.col_means()
                X_res[buck.point_ids] -= centroid
                # update centroid here in case we want to regularize it somehow
                all_centroids[c, b] = centroid

    # print("X_res mse / X mse: ",
    #       (X_res * X_res).mean() / (X_orig * X_orig).mean())
    return X_res, all_splits, all_centroids, all_buckets
@_memory.cache
def learn_mithral(X, ncodebooks, return_buckets=False,
                  lut_work_const=-1, **kwargs):
    """Learn a Mithral quantizer: per-codebook split trees plus centroids,
    refined by a (possibly sparse) least-squares fit to the residuals.

    lut_work_const: -1 fits dense centroid deltas via encoded_lstsq; 1
    skips the refinement entirely (equivalent to the PQ-style init);
    other values bound the nonzero blocks via sparse_encoded_lstsq.

    Returns (all_splits, all_centroids[, all_buckets]).
    """
    N, D = X.shape
    ncentroids_per_codebook = 16
    X_orig = X.astype(np.float32)

    X_res0, all_splits0, all_centroids0, all_buckets0 = \
        _learn_mithral_initialization(X, ncodebooks, pq_perm_algo='start')

    mse_orig = (X_orig * X_orig).mean()
    mse0 = (X_res0 * X_res0).mean()
    print("X_res mse / X mse: ", mse0 / mse_orig)

    used_perm_algo = 'start'
    if False:  # this comparison is currently disabled
        # choose between having wider codebooks at the start vs the end (if
        # there might be a meaningful difference)
        X_res1, all_splits1, all_centroids1, all_buckets1 = \
            _learn_mithral_initialization(X, ncodebooks, pq_perm_algo='end')
        mse1 = (X_res1 * X_res1).mean()

        if mse0 <= mse1:
            X_res, all_splits, all_centroids, all_buckets = (
                X_res0, all_splits0, all_centroids0, all_buckets0)
        else:
            X_res, all_splits, all_centroids, all_buckets = (
                X_res1, all_splits1, all_centroids1, all_buckets1)
            used_perm_algo = 'end'

        print("X_res1 mse / X mse: ", mse1 / mse_orig)
    else:
        X_res, all_splits, all_centroids, all_buckets = (
            X_res0, all_splits0, all_centroids0, all_buckets0)

    # optimize centroids discriminatively conditioned on assignments
    X_enc = mithral_encode(X, all_splits)

    if lut_work_const != 1:  # if it's 1, equivalent to just doing PQ
        #
        # shrink W towards 0
        #
        # if lut_work_const < 0:
        #     W = encoded_lstsq(X_enc, X)
        # else:
        #     W, nonzero_blocks = sparse_encoded_lstsq(
        #         X_enc, X, nnz_blocks=lut_work_const)

        #
        # shrink W towards initial centroids
        #
        if lut_work_const < 0:
            print("fitting dense lstsq to X_res")
            W = encoded_lstsq(X_enc=X_enc, Y=X_res)
        else:
            W, _ = sparse_encoded_lstsq(
                X_enc, X_res, nnz_blocks=lut_work_const,
                pq_perm_algo=used_perm_algo)

        # W holds deltas on top of the initialization centroids
        all_centroids_delta = W.reshape(ncodebooks, ncentroids_per_codebook, D)
        all_centroids += all_centroids_delta

        # check how much improvement we got
        X_res -= _XW_encoded(X_enc, W)  # if we fit to X_res
        mse_res = (X_res * X_res).mean()
        print("X_res mse / X mse after lstsq: ", mse_res / mse_orig)
    # print("min, median, max, std, of all centroids after lstsq:\n",
    #       all_centroids.min(), np.median(all_centroids),
    #       all_centroids.max(), all_centroids.std())

    if return_buckets:
        return all_splits, all_centroids, all_buckets
    return all_splits, all_centroids
def learn_mithral_v1(X, ncodebooks, niters=1, return_buckets=False, **kwargs):
    """Earlier Mithral variant: PQ-style init plus alternating refinement.

    After initializing one 16-centroid codebook per subspace, alternates
    between (a) coordinate-descent updates of each codebook's centroids
    given all the others and (b) re-learning each codebook's split tree,
    keeping a new tree only when it lowers the residual SSE.

    Returns (all_splits, all_centroids[, all_buckets]).
    """
    # print("called learn_mithral!"); import sys; sys.exit()
    N, D = X.shape
    ncentroids_per_codebook = 16
    X = X.astype(np.float32)
    X_res = X.copy()   # residuals not yet explained by the codebooks
    X_orig = X
    X_hat = np.zeros_like(X)  # scratch buffer of per-point reconstructions
    all_centroids = np.zeros(
        (ncodebooks, ncentroids_per_codebook, D), dtype=np.float32)
    all_splits = []
    subvec_len = int(np.ceil(D / ncodebooks))

    # use_X_res = np.zeros_like(X_res)

    # TODO multiple iters; also store assignments from each codebook, so
    # that we can undo effect of its X_hat (can't store X_hat directly for
    # large data, so recompute on the fly using assignments and centroids)

    nonzeros_heuristic = 'pq'  # only the 'pq' branch runs with this value
    # nonzeros_heuristic = 'pca'
    # nonzeros_heuristic = 'disjoint_pca'

    # TODO store assignments (or maybe just buckets directly)

    # TODO update just centroids (not assignments) at iter end

    # ------------------------ 0th iteration; initialize all codebooks
    all_splits = []
    all_buckets = []
    for c in range(ncodebooks):
        if nonzeros_heuristic == 'pq':
            start_idx = c * subvec_len
            end_idx = min(D, start_idx + subvec_len)
            idxs = np.arange(start_idx, end_idx)
        elif nonzeros_heuristic == 'pca':
            v = subs.top_principal_component(X_res)
            idxs = np.argsort(np.abs(v))[:-subvec_len]
        elif nonzeros_heuristic == 'disjoint_pca':
            use_X_res = X_res.copy()
            if c > 0:  # not the first codebook
                # NOTE(review): `idxs` is the previous iteration's subspace
                use_X_res[:, idxs] = 0  # can't use same subspace
            v = subs.top_principal_component(use_X_res)
            idxs = np.argsort(np.abs(v))[:-subvec_len]

        use_X_res = X_res[:, idxs]
        use_X_orig = X_orig[:, idxs]

        # learn codebook to soak current residuals
        multisplits, _, buckets = learn_multisplits(
            use_X_res, X_orig=use_X_orig,
            return_centroids=False, return_buckets=True, **kwargs)
        # remap split dims from subspace-local idxs to full-D column idxs
        for split in multisplits:
            split.dim = idxs[split.dim]
        all_splits.append(multisplits)
        all_buckets.append(buckets)

        # use_X_res[:, start_idx:end_idx] = 0
        # use_X_res[:] = 0

        # update residuals and store centroids
        centroid = np.zeros(D, dtype=np.float32)
        for b, buck in enumerate(buckets):
            if len(buck.point_ids):
                centroid[:] = 0
                centroid[idxs] = buck.col_means()
                # centroid /= 2  # TODO rm
                X_hat[buck.point_ids] = centroid
                # update centroid here in case we want to regularize it somehow
                all_centroids[c, b] = centroid
        X_res -= X_hat

    print("X res var / X var: ", X_res.var() / X_orig.var())

    # ------------------------ remaining iters
    for t in range(niters):
        # now update centroids given assignments and all other centroids
        # for _ in range(5):
        # for _ in range(20):
        for _ in range(10):  # fixed number of coordinate-descent passes
            for c in range(ncodebooks):
                # print("c: ", c)
                # undo effect of this codebook
                buckets = all_buckets[c]
                for b, buck in enumerate(buckets):
                    if len(buck.point_ids):
                        X_hat[buck.point_ids] = all_centroids[c, b]
                X_res += X_hat

                # update centroids based on residuals given all other codebooks
                for b, buck in enumerate(buckets):
                    if len(buck.point_ids):
                        centroid = X_res[buck.point_ids].mean(axis=0)
                        # keep_ndims = D // 2
                        # zero_idxs = np.argsort(np.abs(centroid))[:-keep_ndims]
                        # centroid[zero_idxs] = 0

                        # true_centroid = X_res[buck.point_ids].mean(axis=0)
                        # old_centroid = all_centroids[c, b]
                        # centroid = (true_centroid + old_centroid) / 2
                        X_hat[buck.point_ids] = centroid
                        all_centroids[c, b] = centroid
                X_res -= X_hat

        print("X res var / X var after centroid updates: ",
              X_res.var() / X_orig.var())

        # now update assignments
        if t == niters - 1:
            break  # end after updating centroids, not assignments

        for c in range(ncodebooks):
            # print("c: ", c)
            # undo effect of this codebook
            buckets = all_buckets[c]
            # orig_loss = sum([buck.loss for buck in buckets])
            orig_loss = np.sum(X_res * X_res)
            for b, buck in enumerate(buckets):
                if len(buck.point_ids):
                    X_hat[buck.point_ids] = all_centroids[c, b]
            X_res += X_hat

            multisplits, loss, buckets = learn_multisplits(
                X_res, X_orig=X_orig,
                return_centroids=False, return_buckets=True, **kwargs)
            print("orig loss, loss: ", orig_loss, loss)
            # keep the old tree if the new one doesn't lower the loss
            if loss > orig_loss:
                X_res -= X_hat
                continue

            all_splits[c] = multisplits
            all_buckets[c] = buckets

            # update residuals and store centroids
            # centroid = np.zeros(D, dtype=np.float32)
            for b, buck in enumerate(buckets):
                if len(buck.point_ids):
                    centroid = buck.col_means()
                    # centroid /= 2  # TODO rm
                    X_hat[buck.point_ids] = centroid
                    # update centroid here in case we want to regularize it somehow
                    all_centroids[c, b] = centroid
            X_res -= X_hat

        print("new X res var / X var: ", X_res.var() / X_orig.var())

    if return_buckets:
        return all_splits, all_centroids, all_buckets
    return all_splits, all_centroids
def mithral_encode(X, multisplits_lists):
    """Assign each row of X to one centroid per codebook.

    multisplits_lists holds one list of multisplits per codebook; codes
    come from assignments_from_multisplits. Returns an N x ncodebooks
    int array (C-contiguous).
    """
    N, D = X.shape
    ncodebooks = len(multisplits_lists)
    # np.int alias was removed from numpy; the builtin works as a dtype
    X_enc = np.empty((N, ncodebooks), dtype=int, order='f')
    for c in range(ncodebooks):
        X_enc[:, c] = assignments_from_multisplits(X, multisplits_lists[c])
    return np.ascontiguousarray(X_enc)
def mithral_lut(q, all_centroids):
    """Dot product of query q with every centroid.

    all_centroids has shape (ncodebooks, ncentroids, D); returns an
    (ncodebooks, ncentroids) lookup table of <q, centroid> values.
    """
    # broadcast q along the trailing dim, then reduce it away
    return (all_centroids * q.reshape(1, 1, -1)).sum(axis=2)
def learn_splits_greedy(X, nsplits, verbose=2):
    """Greedily learn nsplits axis-aligned splits of X minimizing the
    total within-bucket sum of squared errors (SSE).

    Each accepted split doubles the number of buckets. Candidate dims are
    the 4 cols with the largest current within-bucket SSE; candidate
    values are the observed values in that dim, evaluated incrementally by
    moving points one at a time between a bucket and its new sibling.

    Returns (splits, total_loss): a list of nsplits Split objects and the
    final SSE summed over buckets.
    """
    N, D = X.shape
    assert nsplits <= D

    # precompute sorted lists of values within each dimension,
    # along with which row they originally were so look can look
    # up the whole vector (and bucket) associated with each value
    dim2sorted = []
    for dim in range(D):
        sorted_with_idx = _sort_and_append_orig_idx(X[:, dim])
        dim2sorted.append(sorted_with_idx)

    splits = []
    buckets = [Bucket(sumX=X.sum(axis=0), sumX2=(X * X).sum(axis=0),
               point_ids=np.arange(N))]
    # np.int alias was removed from numpy; the builtin works as a dtype
    bucket_assignments = np.zeros(N, dtype=int)

    total_loss = sum([bucket.loss for bucket in buckets])
    if verbose > 0:
        print("learn_splits(): initial loss: ", total_loss)

    col_losses = np.zeros(D, dtype=np.float32)
    for s in range(nsplits):
        if verbose > 1:
            print("================================ finding split #:", s)
        best_split = Split(dim=-1, val=-np.inf, loss_change=0)
        # only consider the few dims with the largest current SSE
        col_losses[:] = 0
        for buck in buckets:
            col_losses += buck.col_sum_sqs()
        try_dims = np.argsort(col_losses)[-4:]
        if verbose > 1:
            print("trying dims: ", try_dims)
            print("with losses: ", col_losses[try_dims])
        for d in try_dims:
            vals_and_point_ids = dim2sorted[d]
            new_buckets = _split_existing_buckets(buckets)
            new_total_loss = total_loss
            if verbose > 2:
                print("---------------------- dim = ", d)
            # sweep the split value upward by moving one point at a time
            # from bucket0 to bucket1; skip last point since that just
            # puts everything in one bucket, which is the same as what
            # we're starting out with
            for val, point_id in vals_and_point_ids[:-1]:
                point = X[point_id]
                bucket_id = bucket_assignments[point_id]
                bucket0 = new_buckets[bucket_id][0]
                bucket1 = new_buckets[bucket_id][1]
                old_loss = bucket0.loss + bucket1.loss
                bucket0.remove_point(point, point_id=point_id)
                bucket1.add_point(point, point_id=point_id)
                new_loss = bucket0.loss + bucket1.loss
                new_total_loss -= old_loss  # sub old loss from these buckets
                new_total_loss += new_loss  # add new loss from these buckets
                loss_change = new_total_loss - total_loss

                if loss_change < best_split.loss_change:
                    best_split.dim = d
                    best_split.val = val
                    best_split.loss_change = loss_change

        if verbose > 2:
            print("---------------------- split on dim={}, val={:.3f} ".format(
                best_split.dim, best_split.val))

        # commit the best split: split every bucket and reassign points
        buckets = [buck.split(X, dim=best_split.dim, val=best_split.val)
                   for buck in buckets]
        buckets = reduce(lambda b1, b2: b1 + b2, buckets)  # flatten pairs
        for i, buck in enumerate(buckets):
            ids = np.asarray(list(buck.point_ids), dtype=int)
            bucket_assignments[ids] = i

        total_loss = sum([bucket.loss for bucket in buckets])
        splits.append(best_split)
        if verbose > 3:
            print('learn_splits(): new loss: {:.3f} from split at dim {}, '
                  'value {:.3f}'.format(
                      total_loss, best_split.dim, best_split.val))
        if verbose > 2:
            print('bucket losses: ')
            print([bucket.loss for bucket in buckets])
            print('bucket N, sumX, sumX2')
            print([bucket.N for bucket in buckets])
            print([list(bucket.sumX) for bucket in buckets])
            print([list(bucket.sumX2) for bucket in buckets])

    return splits, total_loss
def learn_splits_conditional(X, nsplits, dim_algo='greedy_var',
                             split_algo='mean', **sink):
    """Learn nsplits splits, each on the dim with the largest remaining
    within-bucket SSE (each dim used at most once), splitting at that
    col's global mean.

    Returns (splits, -1); this variant does not compute the loss.
    """
    N, D = X.shape
    assert nsplits <= D

    col_means = X.mean(axis=0)
    remaining = np.ones(D, dtype=np.float32)  # zeroed once a dim is used
    sse_per_col = np.zeros(D, dtype=np.float32)
    splits = []
    buckets = [Bucket(sumX=X.sum(axis=0), sumX2=(X * X).sum(axis=0),
                      point_ids=np.arange(N))]
    for s in range(nsplits):
        print("---- learning split {}/{}...".format(s + 1, nsplits))
        print("current number of buckets: ", len(buckets))
        # within-bucket SSE per column, masked to unused dims
        sse_per_col[:] = 0
        for buck in buckets:
            sse_per_col += buck.col_sum_sqs()
        sse_per_col *= remaining

        if dim_algo == 'greedy_var':
            dim = np.argmax(sse_per_col)
            remaining[dim] = 0
        if split_algo == 'mean':
            val = col_means[dim]

        # split every existing bucket at (dim, val)
        next_buckets = []
        for buck in buckets:
            next_buckets.extend(buck.split(X=X, dim=dim, val=val))
        buckets = next_buckets
        splits.append(Split(dim=dim, val=val))

    return splits, -1
def learn_splits_simple(X, nsplits, dim_algo='greedy_var', split_algo='mean',
                        **sink):
    """Pick nsplits (dim, value) splits from cheap per-column statistics.

    dim_algo: 'randunif' draws unused dims uniformly at random;
    'greedy_var' takes dims in decreasing order of variance.
    split_algo: split at the column 'mean' or 'median'.

    Returns (splits, -1); no loss is computed.
    """
    available_dims = list(np.arange(X.shape[1]))  # random.choice can't use set
    means = X.mean(axis=0)
    variances = X.var(axis=0)
    medians = np.median(X, axis=0)
    dims_by_var = np.argsort(variances)[::-1]

    splits = []
    for s in range(nsplits):
        if dim_algo == 'randunif':
            dim = np.random.choice(available_dims)
            available_dims.remove(dim)
        elif dim_algo == 'greedy_var':
            dim = dims_by_var[s]

        if split_algo == 'mean':
            val = means[dim]
        elif split_algo == 'median':
            val = medians[dim]

        splits.append(Split(dim=dim, val=val))
    return splits, -1
def learn_splits(X, nsplits, return_centroids=True, algo='multisplits',
                 **kwargs):
    """Dispatch to a split-learning strategy.

    algo='multisplits' delegates to learn_multisplits; algo='splits' runs
    the greedy single-split learner and optionally derives centroids from
    the resulting splits.
    """
    if algo == 'multisplits':
        return learn_multisplits(
            X, nsplits, return_centroids=return_centroids)

    if algo == 'splits':
        splits, loss = learn_splits_greedy(X, nsplits)
        if return_centroids:
            return splits, loss, centroids_from_splits(X, splits)
        return splits, loss
def assignments_from_splits(X, splits):
    """Map each row of X to a bucket id.

    Each split contributes one bit: X[:, split.dim] > split.val. Bits are
    combined with splits[0] as the least-significant bit. Returns an int
    array of length len(X).
    """
    nsplits = len(splits)
    # np.int alias was removed from numpy; the builtin works as a dtype
    indicators = np.empty((nsplits, len(X)), dtype=int)
    for i, split in enumerate(splits):
        indicators[i] = X[:, split.dim] > split.val

    # compute assignments by treating indicators in a row as a binary num
    scales = (1 << np.arange(nsplits)).astype(int)
    return (indicators.T * scales).sum(axis=1).astype(int)
def assignments_from_multisplits(X, splits):
    """Map each row of X to a group id using a sequence of multisplits.

    Each split compares the (preprocessed) column X[:, split.dim] against
    a per-group threshold split.vals indexed by the group ids accumulated
    so far; the resulting bit is shifted in from the right. Only the first
    log2(len(splits[-1].vals)) splits influence which threshold later
    splits select.

    Returns an int array of length N.
    """
    N, _ = X.shape
    nsplits = len(splits)
    assert len(splits) >= 1
    max_ngroups = len(splits[-1].vals)
    nsplits_affecting_group_id = int(np.log2(max_ngroups))
    assert 1 << nsplits_affecting_group_id == max_ngroups  # power of 2

    # determine group ids for each point; this is the one that's annoying
    # because the number of bits changes after split
    # (np.int alias was removed from numpy; the builtin works as a dtype)
    group_ids = np.zeros(N, dtype=int)
    for i in range(min(nsplits, nsplits_affecting_group_id)):
        split = splits[i]
        vals = split.vals[group_ids]
        indicators = split.preprocess_x(X[:, split.dim]) > vals
        group_ids = (group_ids * 2) + indicators

    if nsplits <= nsplits_affecting_group_id:
        return group_ids

    # compute remaining bits; these no longer change which threshold is used
    assignments = np.copy(group_ids)
    for i in range(nsplits_affecting_group_id, nsplits):
        split = splits[i]
        vals = split.vals[group_ids]
        indicators = split.preprocess_x(X[:, split.dim]) > vals
        assignments = (assignments * 2) + indicators
    return assignments
def _centroids_from_assignments(X, assignments, ncentroids):
centroids = np.empty((ncentroids, X.shape[1]), dtype=X.dtype)
for c in range(ncentroids):
centroids[c] = X[assignments == c].mean(axis=0)
return centroids
def centroids_from_splits(X, splits):
    """Bucket-mean centroids induced on X by a list of binary splits."""
    ncentroids = int(1 << len(splits))
    assigs = assignments_from_splits(X, splits)
    return _centroids_from_assignments(X, assigs, ncentroids=ncentroids)
@_memory.cache
def learn_splits_in_subspaces(X, subvect_len, nsplits_per_subs,
                              return_centroids=True, algo='multisplits',
                              verbose=2):
    """Learn split-based codebooks independently in each column subspace.

    Partitions the columns of X into contiguous subspaces of `subvect_len`
    columns, learns `nsplits_per_subs` splits per subspace via
    `learn_splits`, and collects the resulting bucket centroids. Results
    are cached on disk via the `_memory` (joblib) decorator.

    Returns
    -------
    (splits_lists, centroids) when return_centroids (centroids has shape
    (2**nsplits_per_subs, nsubs, subvect_len)), else just splits_lists.
    """
    N, D = X.shape
    # N /= 100 # TODO rm after debug
    splits_lists = []
    # NOTE(review): np.ceil(D) is a no-op for integer D, so this floors
    # D / subvect_len; presumably np.ceil(D / subvect_len) was intended.
    # The two agree when D is an exact multiple of subvect_len -- confirm.
    nsubs = int(np.ceil(D) / subvect_len)

    # stuff for sse stats
    tot_sse = 0
    X_bar = X - np.mean(X, axis=0)
    col_sses = np.sum(X_bar * X_bar, axis=0) + 1e-14
    tot_sse_using_mean = np.sum(col_sses)
    if verbose > 1:
        print("original sum of sses within each col: ", tot_sse_using_mean)

    if return_centroids:
        ncentroids = int(2 ** nsplits_per_subs)
        # this order seems weird, but matches _learn_centroids, etc; helps with
        # eventual vectorized lookups
        centroids = np.empty((ncentroids, nsubs, subvect_len), dtype=X.dtype)

    for m in range(nsubs):
        start_col = m * subvect_len
        end_col = start_col + subvect_len
        X_subs = X[:, start_col:end_col]
        splits, sse, subs_centroids = learn_splits(
            X_subs, nsplits=nsplits_per_subs, verbose=(verbose - 1),
            return_centroids=True, algo=algo)
        # NOTE(review): this assignment runs unconditionally, so calling
        # with return_centroids=False would hit a NameError -- confirm.
        centroids[:, m, :] = subs_centroids
        splits_lists.append(splits)
        tot_sse += sse
        if verbose > 1:
            orig_sse_in_subs = col_sses[start_col:end_col].sum()
            print("learning splits: sse / orig sse in subs {}/{} = {:3g}".format(
                m + 1, nsubs, sse / orig_sse_in_subs))

    if verbose > 0:
        print("-- learn_splits_in_subspaces: new / orig mse: {:.3g}".format(
            tot_sse / tot_sse_using_mean))
    if return_centroids:
        return splits_lists, centroids
    return splits_lists
def encode_using_splits(X, subvect_len, splits_lists, split_type='single'):
    """Encode rows of X as per-subspace bucket ids using learned splits.

    Parameters
    ----------
    X : (N, D) array
    subvect_len : number of columns per subspace
    splits_lists : one list of splits per subspace, as produced by
        `learn_splits_in_subspaces`
    split_type : 'single' for scalar-threshold splits, 'multi' for
        multi-threshold splits

    Returns
    -------
    (N, nsubs) C-contiguous int array of bucket ids.

    Raises
    ------
    ValueError : if split_type is not 'single' or 'multi'.
    """
    N, D = X.shape
    # NOTE(review): np.ceil(D) is a no-op for integer D, so this floors;
    # presumably np.ceil(D / subvect_len) was intended -- kept as-is to
    # stay consistent with learn_splits_in_subspaces. Confirm.
    nsubs = int(np.ceil(D) / subvect_len)
    # np.int was removed in NumPy 1.24; builtin int is what it aliased
    X_enc = np.empty((X.shape[0], nsubs), dtype=int, order='f')
    for m in range(nsubs):
        start_col = m * subvect_len
        end_col = start_col + subvect_len
        X_subs = X[:, start_col:end_col]
        if split_type == 'single':
            X_enc[:, m] = assignments_from_splits(X_subs, splits_lists[m])
        elif split_type == 'multi':
            X_enc[:, m] = assignments_from_multisplits(X_subs, splits_lists[m])
        else:
            # previously fell through silently, leaving uninitialized
            # garbage from np.empty in this column
            raise ValueError("split_type must be 'single' or 'multi', "
                             "got '{}'".format(split_type))
    return np.ascontiguousarray(X_enc)
def _plot_stuff_on_trace():
    """Debug visualization of mithral encodings on the UCR 'Trace' dataset.

    Plots the raw (class-colored) series, then, for 1 and 2 codebooks, the
    learned centroids, split thresholds (mapped back to data space),
    reconstructions, and residuals from `learn_mithral`.  Purely for
    eyeballing: loads from a hardcoded relative path and opens a window.
    """
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    from joblib import Memory
    _memory = Memory('.', verbose=0)
    mpl.rcParams['lines.linewidth'] = .5

    # cache the (slow) text parse of the dataset on disk
    @_memory.cache
    def _load_trace():
        return np.loadtxt('assets/debug/Trace/Trace_TRAIN.txt')

    # try_ndims = 128
    # try_ndims = 64
    try_ndims = 4
    # limit_n = 20
    # limit_n = 50
    # limit_n = 200
    limit_n = 500
    X = _load_trace()[:limit_n]
    # first column is the class label (1-based); the rest is the series
    # (np.int was removed in NumPy 1.24; builtin int is what it aliased)
    y = (X[:, 0] - 1).astype(int)
    X = X[:, 1:]

    _, axes = plt.subplots(3, 4, figsize=(13, 9), sharey=True)
    colors = ('blue', 'red', 'green', 'black')
    axes[0, 0].set_title('Trace Dataset\n(colored by class)')
    for lbl in np.unique(y):
        X_subset = X[y == lbl]
        axes[0, 0].plot(X_subset.T, color=colors[lbl])

    # visualize output with only 1 codebook (no need for updates)
    ncodebooks = 1
    splits, centroids, buckets = learn_mithral(
        X, ncodebooks, return_buckets=True, try_ndims=try_ndims, niters=1)
    centroids = centroids[0]  # only one codebook
    axes[0, 1].set_title('centroids')
    axes[0, 1].plot(centroids.T)
    X_hat = np.zeros_like(X)
    for c, splitlist in enumerate(splits):
        for s, split in enumerate(splitlist):
            assert len(splitlist) == 4
            # undo the learned scale/offset to plot thresholds in data space
            vals = (split.vals / split.scaleby) + split.offset
            for val in vals:
                axes[0, c].scatter(split.dim, val, color=colors[s], marker='o', zorder=5)
    for b in buckets[0]:  # only one codebook, so use first list
        if b.N > 0:
            X_hat[b.point_ids] = b.col_means()
    X_res = X - X_hat
    axes[0, 2].set_title('reconstructions')
    axes[0, 2].plot(X_hat.T)
    axes[0, 3].set_title('residuals (var={:.2f})'.format(X_res.var()))
    axes[0, 3].plot(X_res.T)

    # visualize output with only 2 codebooks, no updates
    ncodebooks = 2
    splits, centroids, buckets = learn_mithral(
        X, ncodebooks, return_buckets=True, try_ndims=try_ndims, niters=1)
    axes[1, 0].set_title('centroids[0]')
    axes[1, 0].plot(centroids[0].T)
    axes[1, 1].set_title('centroids[1]')
    axes[1, 1].plot(centroids[1].T)
    X_hat = np.zeros_like(X)
    for c, splitlist in enumerate(splits):
        for s, split in enumerate(splitlist):
            assert len(splitlist) == 4
            vals = (split.vals / split.scaleby) + split.offset
            for val in vals:
                axes[1, c].scatter(split.dim, val, color=colors[s])
    for c in range(len(buckets)):  # for each codebook
        for b, buck in enumerate(buckets[c]):
            if buck.N > 0:
                X_hat[buck.point_ids] += centroids[c, b]
    X_res = X - X_hat
    axes[1, 2].set_title('reconstructions')
    axes[1, 2].plot(X_hat.T)
    axes[1, 3].set_title('residuals (var={:.2f})'.format(X_res.var()))
    axes[1, 3].plot(X_res.T)

    # visualize output with only 2 codebooks, with centroid updates
    # NOTE(review): this call is identical to the previous one (niters=1),
    # so rows 2 and 3 of the figure show the same model; presumably a
    # larger niters was intended here -- confirm.
    ncodebooks = 2
    splits, centroids, buckets = learn_mithral(
        X, ncodebooks, return_buckets=True, try_ndims=try_ndims, niters=1)
    axes[2, 0].set_title('centroids[0]')
    axes[2, 0].plot(centroids[0].T)
    axes[2, 1].set_title('centroids[1]')
    axes[2, 1].plot(centroids[1].T)
    X_hat = np.zeros_like(X)
    for c in range(len(buckets)):  # for each codebook
        for b, buck in enumerate(buckets[c]):
            if buck.N > 0:
                X_hat[buck.point_ids] += centroids[c, b]
    X_res = X - X_hat
    axes[2, 2].set_title('reconstructions')
    axes[2, 2].plot(X_hat.T)
    axes[2, 3].set_title('residuals (var={:.2f})'.format(X_res.var()))
    axes[2, 3].plot(X_res.T)

    plt.tight_layout()
    plt.show()
def test_encoded_ops():
    """Sanity-check the code-domain ops against their dense-binary forms."""
    N, C, K = 100, 8, 16
    X_enc = np.random.randint(K, size=(N, C))
    X_bin = _densify_X_enc(X_enc)
    # each row one-hot-encodes C codebook indices, so it has exactly C ones
    assert np.all(X_bin.sum(axis=1) == C)

    # gram matrix computed on codes must match the dense binary version
    assert np.all(_XtX_encoded(X_enc) == X_bin.T @ X_bin)

    M = 17
    Y = np.random.randn(N, M).astype(np.float32)
    assert np.all(_XtY_encoded(X_enc, Y) == X_bin.T @ Y)

    W = np.random.randn(C * K, M).astype(np.float32)
    assert np.all(_XW_encoded(X_enc, W) == X_bin @ W)
def main():
    """Entry point: run the encoded-ops sanity checks."""
    test_encoded_ops()
    # print(_pq_codebook_start_end_idxs(6, 3))
    # print(_pq_codebook_start_end_idxs(8, 3))
    # print(_pq_codebook_start_end_idxs(9, 3))
    # print(_pq_codebook_start_end_idxs(10, 3))
# allow running this module directly as a script
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
from __future__ import division, absolute_import
import abc
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sb
from . import product_quantize as pq
from . import subspaces as subs
from . import clusterize
from .utils import kmeans
# ================================================================ misc funcs
def dists_elemwise_sq(x, q):
    """Elementwise squared differences between x and q (broadcasts)."""
    delta = x - q
    return delta * delta
def dists_elemwise_l1(x, q):
    """Elementwise absolute differences between x and q (broadcasts)."""
    return np.absolute(x - q)
def dists_elemwise_dot(x, q):
    """Elementwise products of x and q (summing these yields dot products)."""
    return np.multiply(x, q)
def extract_random_rows(X, how_many, remove_from_X=True):
    """Pull a random contiguous chunk of `how_many` rows out of X.

    Returns (remaining_X, rows). When remove_from_X is True the first
    element is X with the chunk cut out; otherwise X is returned as-is.
    The chunk is always a copy.
    """
    start = np.random.randint(len(X) - how_many - 1)
    end = start + how_many
    chunk = np.copy(X[start:end])
    if not remove_from_X:
        return X, chunk
    return np.vstack((X[:start], X[end:])), chunk
# XXX: not clear whether this function is correct in general, but
# does always pass the asserts (which capture the invariants we want)
def _insert_zeros(X, nzeros):
    """Return a copy of X with `nzeros` all-zero columns interspersed.

    Zero columns are spread roughly evenly through the matrix (one after
    every `step` copied input columns) rather than appended at the end.
    The asserts at the bottom capture the invariants we care about: same
    row count, same number of nonzero columns, exactly `nzeros` zero
    columns added.

    NOTE(review): assumes nzeros >= 1 -- with nzeros == 0 the loop never
    runs and `out_end`/`in_end` are unbound. Callers guard this. Also
    assumes no original column sums to exactly 0, or the nonzero-column
    asserts would trip -- confirm for new callers.
    """
    N, D = X.shape
    D_new = D + nzeros
    X_new = np.zeros((N, D_new), dtype=X.dtype)
    # print("attempting to insert {} zeros into X of shape {}".format(nzeros, X.shape))
    # copy chunks of `step` input columns, leaving a one-column gap (the
    # inserted zero) after each chunk
    step = int(D / (nzeros + 1)) - 1
    step = max(1, step)
    # print("using step: ", step)

    for i in range(nzeros):
        in_start = step * i
        in_end = in_start + step
        # out_start = in_start + i + 1
        out_start = (step + 1) * i
        out_end = out_start + step
        X_new[:, out_start:out_end] = X[:, in_start:in_end]

    # copy whatever remains after the last inserted zero in one shot
    out_end += 1  # account for the last 0
    remaining_len = D - in_end
    out_remaining_len = D_new - out_end
    assert remaining_len == out_remaining_len
    assert remaining_len >= 0
    if remaining_len:
        # X_new[:, out_end:out_end+remaining_len] = X[:, in_end:D]
        X_new[:, out_end:] = X[:, in_end:]

    assert X.shape[0] == X_new.shape[0]
    cols_nonzero = X_new.sum(axis=0) != 0
    orig_cols_nonzero = X.sum(axis=0) != 0
    assert cols_nonzero.sum() == orig_cols_nonzero.sum()
    nzeros_added = (~cols_nonzero).sum() - (~orig_cols_nonzero).sum()
    assert nzeros_added == nzeros
    return X_new
# def ensure_num_cols_multiple_of(X, multiple_of, min_ncols=-1):
def ensure_num_cols_multiple_of(X, multiple_of):
    """Return X, zero-padded (via interspersed zero columns) so that its
    column count is a multiple of `multiple_of`; X itself if already so."""
    leftover = X.shape[1] % multiple_of
    if leftover:
        return _insert_zeros(X, multiple_of - leftover)
    return X
def _learn_best_quantization(luts):
assert luts.ndim == 2 # luts can be a bunch of vstacked luts, but not 3D
best_loss = np.inf
best_alpha = None
best_floors = None
best_scale_by = None
for alpha in [.001, .002, .005, .01, .02, .05, .1]:
# alpha_pct = int(100 * alpha)
alpha_pct = 100 * alpha
# compute quantized luts this alpha would yield
floors = np.percentile(luts, alpha_pct, axis=0)
luts_offset = np.maximum(0, luts - floors)
ceil = np.percentile(luts_offset, 100 - alpha_pct)
scale_by = 255. / ceil
# if only_shift:
# scale_by = 1 << int(np.log2(scale_by))
luts_quantized = np.floor(luts_offset * scale_by).astype(np.int)
luts_quantized = np.minimum(255, luts_quantized)
# compute err
luts_ideal = (luts - luts_offset) * scale_by
diffs = luts_ideal - luts_quantized
loss = np.sum(diffs * diffs)
if loss <= best_loss:
best_loss = loss
best_alpha = alpha
best_floors = floors
best_scale_by = scale_by
return best_floors, best_scale_by, best_alpha
# ================================================================ Quantizers
# ------------------------------------------------ Abstract Base Class
class MultiCodebookEncoder(abc.ABC):
    """Base class for multi-codebook (product-quantization-style) encoders.

    Holds the bookkeeping shared by all subclasses -- codebook/centroid
    counts, optional 8-bit LUT quantization parameters, and the distance
    computation between encoded rows and query LUTs (`dists_enc`).
    Subclasses provide `fit`, `encode_X`, and `encode_Q`.
    """

    def __init__(self, ncodebooks, ncentroids=256,
                 quantize_lut=False, upcast_every=-1, accumulate_how='sum'):
        """
        Parameters
        ----------
        ncodebooks : number of codebooks (subspaces)
        ncentroids : centroids per codebook; determines code_bits
        quantize_lut : whether query LUTs are quantized to uint8
        upcast_every : number of codebooks' worth of uint8 distances to
            combine before widening; values < 1 mean no upcasting (1)
        accumulate_how : 'sum' or 'mean' -- how uint8 groups are combined
        """
        self.ncodebooks = ncodebooks
        self.ncentroids = ncentroids
        self.quantize_lut = quantize_lut
        self.upcast_every = upcast_every if upcast_every >= 1 else 1
        self.upcast_every = min(self.ncodebooks, self.upcast_every)
        assert self.upcast_every in (1, 2, 4, 8, 16, 32, 64, 128, 256)
        self.accumulate_how = accumulate_how

        self.code_bits = int(np.log2(self.ncentroids))

        # for fast lookups via indexing into flattened array
        # (np.int was removed in NumPy 1.24; builtin int is what it aliased)
        self.offsets = (np.arange(self.ncodebooks, dtype=int) *
                        self.ncentroids)

    def name(self):
        """Short identifier string encoding the main hyperparameters."""
        return "{}_{}x{}b_quantize={}".format(
            self.preproc, self.ncodebooks, self.code_bits,
            int(self.quantize_lut))

    def params(self):
        """Hyperparameters as a dict (for logging / result tables)."""
        return {'ncodebooks': self.ncodebooks,
                'code_bits': self.code_bits, 'quantize': self.quantize_lut}

    def _learn_lut_quantization(self, X, Q=None):
        """Learn per-codebook offsets + scalar scale for uint8 LUT quantization.

        No-op unless quantize_lut was set. If Q is not supplied, queries
        are sampled as random rows of X.
        """
        if self.quantize_lut:  # TODO put this logic in separate function
            print("learning quantization...")
            if Q is None:
                _, Q = extract_random_rows(
                    X, how_many=1000, remove_from_X=False)
                # NOTE(review): transposing rows sampled from X looks
                # suspicious ("each row one query" should already hold);
                # kept as-is -- confirm against callers.
                Q = Q.T  # want each row to be one query, not each col

            # compute (unquantized) luts for all the queries, then learn
            # offsets/scale on the (nqueries * ncentroids, ncodebooks) view
            luts = self.encode_Q(Q, quantize=False)
            assert luts.shape == (len(Q), self.ncodebooks, self.ncentroids)
            luts = np.moveaxis(luts, 2, 1)
            assert luts.shape == (len(Q), self.ncentroids, self.ncodebooks)
            luts = luts.reshape(len(Q) * self.ncentroids, self.ncodebooks)
            self.lut_offsets, self.scale_by, _ = _learn_best_quantization(luts)
            assert self.lut_offsets.shape == (self.ncodebooks,)
            self.total_lut_offset = np.sum(self.lut_offsets)

    def dists_enc(self, X_enc, Q_luts, unquantize=True,
                  offset=None, scale=None):
        """Distances between every encoded row and every query LUT.

        Parameters
        ----------
        X_enc : (N, ncodebooks) int codes, already offset for flat indexing
        Q_luts : iterable of per-query LUTs, each raveling to
            ncodebooks * ncentroids entries
        unquantize : undo LUT quantization using offset/scale (defaults to
            the learned total_lut_offset / scale_by)

        Returns
        -------
        (N, nqueries) float32 array of distances.
        """
        X_enc = np.ascontiguousarray(X_enc)

        if unquantize:
            offset = self.total_lut_offset if offset is None else offset
            scale = self.scale_by if scale is None else scale

        all_dists = np.empty((len(Q_luts), len(X_enc)), dtype=np.float32)
        for i, lut in enumerate(Q_luts):
            centroid_dists = lut.ravel()[X_enc.ravel()]
            dists = centroid_dists.reshape(X_enc.shape)
            if self.upcast_every < 2 or not self.quantize_lut:
                dists = dists.sum(axis=-1)
            else:
                dists = dists.reshape(dists.shape[0], -1, self.upcast_every)
                if self.accumulate_how == 'sum':
                    # sum upcast_every vals, then clip to mirror saturating
                    # unsigned addition, then sum without saturation (like u16)
                    dists = dists.sum(2)
                    dists = np.clip(dists, 0, 255).sum(axis=-1)
                elif self.accumulate_how == 'mean':
                    # mirror hierarchical avg_epu8: pairwise rounded averages
                    while dists.shape[-1] > 2:
                        dists = (dists[:, :, ::2] + dists[:, :, 1::2] + 1) // 2
                    dists = (dists[:, :, 0] + dists[:, :, 1] + 1) // 2
                    dists = dists.sum(axis=-1)  # clipping not needed

                    dists *= self.upcast_every  # convert mean to sum

                    # correct the upward bias of the rounded averaging
                    # ({0,1} low bits round up ~half the time, compounding
                    # across levels). Empirical formula:
                    # I honestly don't know why this is the formula, but wow
                    # does it work well
                    bias = self.ncodebooks / 4 * np.log2(self.upcast_every)
                    dists -= int(bias)
                else:
                    raise ValueError("accumulate_how must be 'sum' or 'mean'")

            if self.quantize_lut and unquantize:
                dists = (dists / scale) + offset
            all_dists[i] = dists
        return all_dists.T
# ------------------------------------------------ Product Quantization
def _learn_centroids(X, ncentroids, ncodebooks, subvect_len):
    """Run kmeans independently in each disjoint column subspace of X.

    Codebook i is learned on columns [i*subvect_len, (i+1)*subvect_len).
    Prints per-subspace and overall sse statistics as it goes.

    Returns an array of shape (ncentroids, ncodebooks, subvect_len).
    """
    out = np.empty((ncentroids, ncodebooks, subvect_len))
    tot_sse = 0
    centered = X - np.mean(X, axis=0)
    col_sses = np.sum(centered * centered, axis=0) + 1e-14
    tot_sse_using_mean = np.sum(col_sses)

    for i in range(ncodebooks):
        print("running kmeans in subspace {}/{}...".format(
            i + 1, ncodebooks), end=" ")
        start_col = i * subvect_len
        end_col = start_col + subvect_len
        centroids, labels, sse = kmeans(
            X[:, start_col:end_col], ncentroids, return_sse=True)
        subspace_sse = np.sum(col_sses[start_col:end_col])
        print("mse / {{var(X_subs), var(X)}}: {:.3g}, {:.3g}".format(
            sse / subspace_sse, sse * ncodebooks / tot_sse_using_mean))
        tot_sse += sse
        out[:, i, :] = centroids

    print("--- total mse / var(X): {:.3g}".format(tot_sse / tot_sse_using_mean))
    return out
def _parse_codebook_params(D, code_bits=-1, bits_per_subvect=-1, ncodebooks=-1):
if ncodebooks < 0:
ncodebooks = code_bits // bits_per_subvect
elif code_bits < 1:
code_bits = bits_per_subvect * ncodebooks
elif bits_per_subvect < 1:
bits_per_subvect = code_bits // ncodebooks
ncentroids = int(2 ** bits_per_subvect)
subvect_len = D // ncodebooks
assert code_bits % bits_per_subvect == 0
if D % subvect_len:
print("D, ncodebooks, subvect_len = ", D, ncodebooks, subvect_len)
assert D % subvect_len == 0 # TODO rm this constraint
return ncodebooks, ncentroids, subvect_len
def _fit_pq_lut(q, centroids, elemwise_dist_func):
_, ncodebooks, subvect_len = centroids.shape
q = q.reshape((1, ncodebooks, subvect_len))
q_dists = np.sum(centroids * q, axis=-1)
return q_dists # ncentroids, ncodebooks, row-major
class PQEncoder(MultiCodebookEncoder):
    """Product-quantization encoder with optional OPQ/BOPQ/GEHT preprocessing.

    Learns one codebook per disjoint column subspace; rows are encoded as
    per-subspace centroid indices and queries as per-centroid lookup
    tables (see `encode_X` / `encode_Q`).
    """

    def __init__(self, ncodebooks, ncentroids=256,
                 elemwise_dist_func=dists_elemwise_dot,
                 preproc='PQ', encode_algo=None, quantize_lut=False,
                 upcast_every=-1, accumulate_how='sum',
                 **preproc_kwargs):
        super().__init__(
            ncodebooks=ncodebooks, ncentroids=ncentroids,
            quantize_lut=quantize_lut, upcast_every=upcast_every,
            accumulate_how=accumulate_how)
        self.elemwise_dist_func = elemwise_dist_func
        self.preproc = preproc          # 'PQ', 'OPQ', 'BOPQ', or 'GEHT'
        self.encode_algo = encode_algo  # None, 'splits', or 'multisplits'
        self.preproc_kwargs = preproc_kwargs

    def _pad_ncols(self, X):
        # zero-pad so the column count divides evenly into the subspaces
        return ensure_num_cols_multiple_of(X, self.ncodebooks)

    def fit(self, X, Q=None):
        """Learn the preprocessing transform (if any), the centroids, and
        optionally the LUT quantization parameters from X."""
        self.subvect_len = int(np.ceil(X.shape[1] / self.ncodebooks))
        X = self._pad_ncols(X)

        self.centroids = None
        if self.preproc == 'BOPQ':
            self.centroids, _, self.rotations = pq.learn_bopq(
                X, ncodebooks=self.ncodebooks, codebook_bits=self.code_bits,
                **self.preproc_kwargs)
        elif self.preproc == 'OPQ':
            self.centroids, _, self.R = pq.learn_opq(
                X, ncodebooks=self.ncodebooks, codebook_bits=self.code_bits,
                **self.preproc_kwargs)
        elif self.preproc == 'GEHT':
            self.perm = subs.greedy_eigenvector_threshold(
                X, subspace_len=self.subvect_len, **self.preproc_kwargs)
            assert X.shape[1] == len(set(self.perm))  # perm must be valid
            X = X[:, self.perm]

        if self.centroids is None:
            if self.encode_algo in ('splits', 'multisplits'):
                assert self.encode_algo != 'splits'  # TODO rm
                self.splits_lists, self.centroids = \
                    clusterize.learn_splits_in_subspaces(
                        X, subvect_len=self.subvect_len,
                        nsplits_per_subs=self.code_bits,
                        algo=self.encode_algo)
            else:
                self.centroids = _learn_centroids(
                    X, self.ncentroids, self.ncodebooks, self.subvect_len)

        self._learn_lut_quantization(X, Q)

    def name(self):
        return "{}_{}".format(self.preproc, super().name())

    def params(self):
        d = super().params()
        d['_preproc'] = self.preproc
        return d

    def encode_Q(self, Q, quantize=True):
        """Build one (ncodebooks, ncentroids) LUT per query row of Q.

        The quantize param enables quantization if set in init; it is
        separate since quantization learning needs to call this func, but
        vars like lut_offsets aren't set when this function calls it.
        """
        Q = np.atleast_2d(Q)
        Q = self._pad_ncols(Q)
        if self.preproc == 'OPQ':
            Q = pq.opq_rotate(Q, self.R)
        elif self.preproc == 'BOPQ':
            Q = pq.bopq_rotate(Q, self.rotations)
        elif self.preproc == 'GEHT':
            Q = Q[:, self.perm]

        luts = np.zeros((Q.shape[0], self.ncodebooks, self.ncentroids))
        for i, q in enumerate(Q):
            lut = _fit_pq_lut(q, centroids=self.centroids,
                              elemwise_dist_func=self.elemwise_dist_func)
            if self.quantize_lut and quantize:
                lut = np.maximum(0, lut - self.lut_offsets)
                # np.int was removed in NumPy 1.24; int is what it aliased
                lut = np.floor(lut * self.scale_by).astype(int)
                lut = np.minimum(lut, 255)
            luts[i] = lut.T
        return luts

    def encode_X(self, X, **sink):
        """Encode rows of X as flat per-subspace centroid indices."""
        X = self._pad_ncols(X)
        if self.preproc == 'OPQ':
            X = pq.opq_rotate(X, self.R)
        elif self.preproc == 'BOPQ':
            X = pq.bopq_rotate(X, self.rotations)
        elif self.preproc == 'GEHT':
            X = X[:, self.perm]

        if self.encode_algo in ('splits', 'multisplits'):
            split_type = ('multi' if self.encode_algo == 'multisplits'
                          else 'single')
            idxs = clusterize.encode_using_splits(
                X, self.subvect_len, self.splits_lists,
                split_type=split_type)
        else:
            idxs = pq._encode_X_pq(X, codebooks=self.centroids)

        return idxs + self.offsets  # offsets let us index into raveled dists
# ------------------------------------------------ Mithral
# def _mithral_quantize_luts(luts, lut_work_const, force_power_of_2=False):
def _mithral_quantize_luts(luts, lut_work_const, force_power_of_2=True):
nqueries, ncodebooks, ncentroids = luts.shape
# if lut_work_const < 0: # not time constrained
# assert luts.shape == (nqueries, ncodebooks, ncentroids)
# luts2d = np.moveaxis(luts, 2, 1)
# assert luts2d.shape == (nqueries, ncentroids, ncodebooks)
# luts2d = luts2d.reshape(nqueries * ncentroids, ncodebooks)
# # if True:
# if False:
# # ax = sb.distplot(luts.ravel(), hist=False, rug=True)
# _, ax = plt.subplots(1, figsize=(13, 5))
# # sb.violinplot(data=luts2d, inner='point', ax=ax)
# # sb.boxenplot(data=luts2d, ax=ax)
# means = luts2d.mean(axis=0)
# # # rm largest and smallest entry in each col
# # argmaxs = np.argmax(luts2d, axis=0)
# # argmins = np.argmax(luts2d, axis=0)
# # for c in range(luts.shape[1]):
# # luts2d[argmins[c], c] = means[c]
# # luts2d[argmaxs[c], c] = means[c]
# maxs = luts2d.max(axis=0)
# mins = luts2d.min(axis=0)
# gaps = maxs - mins
# max_idx = np.argmax(gaps)
# print(f"biggest gap = {np.max(gaps)} at idx {max_idx}")
# gaps[max_idx] = 0
# max_idx = np.argmax(gaps)
# print(f"2nd biggest gap = {np.max(gaps)} at idx {max_idx}")
# gaps[max_idx] = 0
# max_idx = np.argmax(gaps)
# print(f"3rd biggest gap = {np.max(gaps)} at idx {max_idx}")
# gaps[max_idx] = 0
# max_idx = np.argmax(gaps)
# print(f"4th biggest gap = {np.max(gaps)} at idx {max_idx}")
# gaps[max_idx] = 0
# max_idx = np.argmax(gaps)
# print(f"5th biggest gap = {np.max(gaps)} at idx {max_idx}")
# # for i in range(len(luts2d)):
# # row = luts2d[i]
# # luts2d[i, row == mins] = means
# # luts2d[i, row == maxs] = means
# luts2d -= mins
# # luts2d -= means
# # luts2d *= 255 / (maxs - mins).max()
# luts2d *= 255 / gaps.max()
# luts2d = np.minimum(luts2d, 255)
# sb.stripplot(data=luts2d, ax=ax, size=4)
# ax.set_xlabel('Query dist to centroids (lut dist histogram)')
# ax.set_ylabel('Fraction of queries')
# plt.show()
# import sys; sys.exit()
# offsets, scale, _ = _learn_best_quantization(luts2d)
# offsets = offsets[np.newaxis, :, np.newaxis]
# luts = np.maximum(0, luts - offsets) * scale
# luts = np.floor(luts).astype(np.int)
# luts = np.minimum(255, luts)
# return luts, offsets.sum(), scale
# luts = np.zeros((Q.shape[0], self.ncodebooks, self.ncentroids))
mins = luts.min(axis=(0, 2))
maxs = luts.max(axis=(0, 2))
gaps = maxs - mins
# gaps[np.argmax(gaps)] = 0 # use 2nd highest
gap = np.max(gaps)
if force_power_of_2:
exponent = np.ceil(np.log2(gap))
scale = 2 ** int(-exponent) # scale is a power of 2, so can just shift
scale *= (255.5 - 1e-10) # so max val is at most 255
else:
scale = (255.5 - 1e-10) / gap
offsets = mins[np.newaxis, :, np.newaxis]
luts_quantized = (luts - offsets) * scale
luts_quantized = (luts_quantized + .5).astype(np.int)
# luts_quantized = np.minimum(luts_quantized, 255)
assert np.min(luts_quantized) >= 0
assert np.max(luts_quantized) <= 255.
# print("total offset: ", mins.sum())
return luts_quantized, offsets.sum(), scale
# # compute offset taking into account stuff getting rounded down
# luts_hat = (luts / scale) + offsets
# diffs = luts - luts_hat
# print("mean of diffs: ", diffs.mean())
# offset = diffs.mean() + offsets.sum()
# return luts_quantized, offset, scale
class MithralEncoder(MultiCodebookEncoder):
    """Encoder using Mithral's 16-centroid split-based codebooks.

    LUTs are always quantized, and distances are accumulated via
    hierarchical averaging ('mean') over groups of 16 codebooks,
    mirroring the saturating SIMD implementation.
    """

    def __init__(self, ncodebooks, lut_work_const=-1):
        super().__init__(
            ncodebooks=ncodebooks, ncentroids=16,
            quantize_lut=True, upcast_every=16,
            accumulate_how='mean')
        self.lut_work_const = lut_work_const

    def name(self):
        return "{}_{}".format('mithral', super().name())

    def params(self):
        return {'ncodebooks': self.ncodebooks,
                'lut_work_const': self.lut_work_const}

    def fit(self, X, Q=None):
        """Learn the splits (encoding) and centroids (LUT contents) from X."""
        self.splits_lists, self.centroids = clusterize.learn_mithral(
            X, self.ncodebooks, lut_work_const=self.lut_work_const)

    def encode_X(self, X):
        # offsets turn per-codebook indices into flat LUT indices
        return clusterize.mithral_encode(X, self.splits_lists) + self.offsets

    def encode_Q(self, Q, quantize=True):
        """Return (luts, offset, scale) covering each query row of Q."""
        Q = np.atleast_2d(Q)
        luts = np.zeros((Q.shape[0], self.ncodebooks, self.ncentroids))
        for i, q in enumerate(Q):
            luts[i] = clusterize.mithral_lut(q, self.centroids)
        if not self.quantize_lut:
            return luts, 0, 1
        luts, offset, scale = _mithral_quantize_luts(
            luts, self.lut_work_const)
        return luts, offset, scale
def main():
    """Smoke test: intersperse zero columns into an all-ones int matrix."""
    # np.int was removed in NumPy 1.24; builtin int is what it aliased
    X = np.ones((3, 75), dtype=int)
    _insert_zeros(X, 53)
# allow running this module directly as a script
if __name__ == '__main__':
    main()
|
#!/bin/env/python
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
import pandas as pd
import pathlib as pl
# from . import files
from . import amm_results as res
from . import amm_methods as ameth
sb.set_context('poster')
# sb.set_context('talk')
# sb.set_cmap('tab10')

RESULTS_DIR = pl.Path('results/amm')    # where the amm result csvs live
FIGS_SAVE_DIR = pl.Path('../figs/amm')  # where rendered figures go

# exist_ok replaces the old check-then-create (`if not os.path.exists`)
# pattern, which was racy and redundant with pathlib
FIGS_SAVE_DIR.mkdir(parents=True, exist_ok=True)
def save_fig(name):
    """Write the current matplotlib figure to FIGS_SAVE_DIR as a 300-dpi png."""
    out_path = os.path.join(FIGS_SAVE_DIR, name + '.png')
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
def _xlabel_for_xmetric(x_metric):
return {'d': 'Sketch Size',
'secs': 'Time (s)',
'muls': 'Number of Multiplies',
'nlookups': 'Number of Lookups',
'ops': 'Number of Operations',
'Latency': 'Latency (ms)',
'Throughput': 'Throughput (elements/s)'}[x_metric]
# if x_metric == 'd':
# return 'Log2(Sketch Size)'
# elif x_metric == 'secs':
# return 'Time (s)'
# elif x_metric == 'muls':
# # return 'Log10(# of Multiplies)'
# return 'Number of Multiplies'
# elif x_metric == 'nlookups':
# # return 'Log10(# of Table Lookups)'
# return 'Number of Table Lookups'
# elif x_metric == 'ops':
# # return 'Log10(# of Operations)'
# return 'Number of Operations'
# elif x_metric == 'Latency':
# return 'Latency (ms)'
def _clean_results_df(df, default_D=None):
    """Normalize a raw amm results dataframe for plotting.

    Renames columns, maps method names to display names, derives the
    'ops', 'Latency', 'Throughput', and 'Log10(MSE)' columns (joining with
    the cpp timing results), and sorts rows by method.

    Parameters
    ----------
    df : raw results dataframe (modified in place and also returned)
    default_D : fill-in for missing 'd' values, so Exact rows get d = D

    Returns
    -------
    the cleaned dataframe, sorted by 'Method'.
    """
    # for Exact, set d = D
    if default_D is not None and ('d' in df):
        mask = df['d'].isna()
        df.loc[mask, 'd'] = default_D

    # clean up column names + other strings
    for old, new in [('method', 'Method'), ('acc_amm', 'Accuracy'),
                     ('r_sq', 'R-Squared'), ('nmultiplies', 'muls')]:
        try:
            df.rename({old: new}, axis=1, inplace=True)
        except KeyError:
            pass

    replace_dict = {'Mithral': 'Ours',
                    'MithralPQ': 'OursPQ',
                    'Exact': 'Brute Force',
                    'CooccurSketch': 'CD'}
    df['Method'] = df['Method'].apply(lambda s: replace_dict.get(s, s))

    # create ops column that sums number of multiplies + lookups.
    # Use a single .loc indexer: the old chained `df['ops'].loc[mask] += ...`
    # writes through a potential intermediate copy and is unreliable
    # (SettingWithCopy) on modern pandas.
    df['muls'] = df['muls'].fillna(0)
    mask = ~df['nlookups'].isna()
    df['ops'] = df['muls']
    df.loc[mask, 'ops'] += df.loc[mask, 'nlookups']

    # join with cpp timing results
    matmul_latencies, matmul_thruputs = res.load_matmul_times_for_n_d_m()
    sketch_latencies, sketch_thruputs = res.load_sketch_times_for_n_d_m()
    mithral_latencies, mithral_thruputs = res.load_mithral_times_for_n_d_m()
    bolt_latencies, bolt_thruputs = res.load_bolt_times_for_n_d_m()

    all_latencies = []
    all_thruputs = []
    fast_sketch_methods = set([m.lower() for m in ameth.FAST_SKETCH_METHODS])
    slow_sketch_methods = set([m.lower() for m in ameth.SLOW_SKETCH_METHODS])
    for _, row in df.iterrows():
        N, D, M = [int(row[k]) for k in ('N', 'D', 'M')]
        method = row['Method'].lower()
        if method in ('bolt', 'ours', 'ourspq'):
            # TODO check if in vq methods, instead of hardcoding
            ncodebooks = int(row['ncodebooks'])
            key = (N, D, M, ncodebooks)
            if method in ('ours', 'ourspq'):
                latencies = mithral_latencies[key]
                thruputs = mithral_thruputs[key]
            elif method == 'bolt':
                latencies = bolt_latencies[key]
                thruputs = bolt_thruputs[key]
        elif method == 'brute force':
            key = (N, D, M)
            latencies = matmul_latencies[key]
            thruputs = matmul_thruputs[key]
        elif method in fast_sketch_methods:
            d = int(row['d'])
            key = (N, D, M, d)
            latencies = sketch_latencies[key]
            thruputs = sketch_thruputs[key]
        else:
            # slow sketch-based methods: no cpp timings, so fall back to
            # the measured python runtime
            secs = row['secs']
            lat = secs * 1000
            thruput = N * M / secs
            latencies = [lat]
            thruputs = [thruput]
        all_latencies.append(np.mean(latencies))
        all_thruputs.append(np.mean(thruputs))

    df['Latency'] = all_latencies
    df['Throughput'] = all_thruputs
    print("cleaned df:\n", df)

    # 1e-10 guards against log10(0) when R-Squared == 1
    df['Log10(MSE)'] = np.log10(1. - df['R-Squared'] + 1e-10)
    df = df.sort_values('Method', axis=0)
    return df
def make_cifar_fig(x_metric='d', y_metric='Accuracy'):
    """Plot `y_metric` vs `x_metric` for the CIFAR-10 and CIFAR-100
    softmax-approximation experiments, one subplot per dataset.

    Reads `cifar10.csv` / `cifar100.csv` from RESULTS_DIR, cleans them with
    `_clean_results_df`, draws seaborn lineplots, and saves the figure via
    `save_fig('cifar_<x_metric>_<y_metric>')`.
    """
    # fig, axes = plt.subplots(2, 1, figsize=(6, 9), sharex=True)
    fig, axes = plt.subplots(2, 1, figsize=(11, 13.5), sharex=True)
    df10 = pd.read_csv(RESULTS_DIR / 'cifar10.csv')
    df100 = pd.read_csv(RESULTS_DIR / 'cifar100.csv')
    # dfs = (df10, df100)
    # for df in dfs:
    # drop runs with very few codebooks (kept only ncodebooks >= 4)
    df10 = df10.loc[~(df10['ncodebooks'] < 4)]
    df100 = df100.loc[~(df100['ncodebooks'] < 4)]
    # if x_metric in ('Latency', 'Throughput'):
    #     # TODO get results for PQ + Bolt
    #     # df10 = df10.loc[~df10['method'].isin(['PQ', 'Bolt'])]
    #     # include_methods = ('Bolt+MultiSplits', 'Bolt', 'Exact')
    #     include_methods = ['Bolt+MultiSplits', 'Bolt', 'Exact']
    #     include_methods += 'PQ SVD FD-AMM CooccurSketch'.split()  # TODO rm
    #     # print("uniq methods: ", df10['method'].unique())
    #     # df10 = df10.loc[~df10['method'].isin(['PQ'])]
    #     df10 = df10.loc[df10['method'].isin(include_methods)]
    #     # df100 = df100.loc[~df100['method'].isin(['PQ', 'Bolt'])]
    #     # df100 = df100.loc[~df100['method'].isin(['PQ'])]
    #     df100 = df100.loc[df100['method'].isin(include_methods)]
    df10 = _clean_results_df(df10, default_D=512)
    df100 = _clean_results_df(df100, default_D=512)

    def lineplot(data, ax):
        # order = 'Ours Bolt Exact PQ SVD FD-AMM CD'.split()
        # order = [m for m in order if m in data['Method'].unique()]
        # put our methods (and brute force) first in the hue/style ordering
        # so they get consistent colors/markers across figures
        order = list(data['Method'].unique())
        move_methods_to_front = ['Ours', 'OursPQ', 'Brute Force']
        for elem in move_methods_to_front[:]:
            if elem in order:
                order.remove(elem)
            else:
                move_methods_to_front.remove(elem)
        order = move_methods_to_front + order
        # order = None
        # print("uniq methods:\n", data['Method'].unique())
        # print("using order:\n", order)
        # cmap = plt.get_cmap('tab10')
        # palette = {'Ours': 'red', 'Bolt': cmap(0), 'Exact': cmap(1),
        #            'PQ': cmap(2), 'SVD': cmap(4), 'FD-AMM': cmap(5),
        #            'CD': cmap(6)}
        palette = None
        # have to specify markers or seaborn freaks out because it doesn't
        # have enough of them
        filled_markers = ('o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h',
                          'H', 'D', 'd', 'P', 'X')
        sb.lineplot(data=data, x=x_metric, y=y_metric, hue='Method',
                    style='Method', style_order=order, hue_order=order,
                    # markers=True, dashes=False, ax=ax, palette=palette)
                    markers=filled_markers, dashes=False, ax=ax, palette=palette)
        # palette='tab10')

    lineplot(df10, axes[0])
    lineplot(df100, axes[1])
    # plt.suptitle('Sketch size vs Classification Accuracy')
    xlbl = _xlabel_for_xmetric(x_metric)
    # plt.suptitle('{} vs {}'.format(xlbl, y_metric))
    plt.suptitle('Approximating Softmax Layers')
    axes[0].set_title('CIFAR-10')
    for ax in axes:
        ax.set_ylabel(y_metric)
    axes[0].set_xlabel(None)
    axes[1].set_xlabel(xlbl)
    axes[1].set_title('CIFAR-100')
    # show a single legend (on the top axes), dropping seaborn's
    # 'Method' legend-title entry
    handles, labels = axes[0].get_legend_handles_labels()
    handles, labels = handles[1:], labels[1:]  # rm 'Method' title
    axes[0].legend(handles, labels, fontsize='small')
    # axes[1].legend(handles, labels, fontsize='small')
    # plt.figlegend(handles, labels, loc='lower center', ncol=1)
    # plt.figlegend(handles, labels, loc='center right', ncol=1)
    axes[1].get_legend().remove()
    # axes[1].get_legend().remove()
    if x_metric in ('muls', 'ops', 'nlookups', 'Latency', 'Throughput'):
        axes[0].semilogx()
    plt.tight_layout()
    # plt.subplots_adjust(top=.92, bottom=.2)
    plt.subplots_adjust(top=.92, bottom=.22)
    save_fig('cifar_{}_{}'.format(x_metric, y_metric))
# def make_ecg_fig(y_metric='R-Squared'):
def make_ecg_fig(x_metric='d'):
    """Plot ECG results: `x_metric` vs R-Squared (top) and vs compression
    ratio (bottom), reading `ecg.csv` from RESULTS_DIR and saving via
    `save_fig('ecg_<x_metric>')`.
    """
    fig, axes = plt.subplots(2, 1, figsize=(6, 9))
    df = pd.read_csv(RESULTS_DIR / 'ecg.csv')
    df = _clean_results_df(df, default_D=24)
    # D = 24
    # if 'd' in df:
    #     mask = df['d'].isna()
    #     df.loc[mask, 'd'] = D
    #     df['d'] = np.log2(df['d'])
    # df.rename({'method': 'Method', 'acc_amm': 'Accuracy',
    #            'r_sq': 'R-Squared', 'nmultiplies': 'muls'},
    #           axis=1, inplace=True)
    # df['Log10(MSE)'] = np.log10(1. - df['R-Squared'] + 1e-10)  # avoid log10(0)
    # df['muls'] = df['muls'].fillna(0)
    # df['nlookups'] = df['nlookups'].fillna(0)
    # # mask = ~df['nlookups'].isna()
    # # print("mask: ", mask)
    # # print('muls, nlookups')
    # # print(df[['muls', 'nlookups']])
    # # add_to_muls = df['nlookups'].loc[mask]
    # equivalent_muls = df['muls'].add(df['nlookups'])
    # # df['muls'] = equivalent_muls
    # df['muls'] = equivalent_muls
    # # import sys; sys.exit()
    # df['muls'] = np.log10(df['muls'])
    # compression achieved on the approximated signal
    df['Compression Ratio'] = df['nbytes_orig'] / df['nbytes_blosc_byteshuf']

    def lineplot(data, ycol, ax):
        sb.lineplot(data=data, hue='Method', x=x_metric, y=ycol,
                    style='Method', markers=True, dashes=False, ax=ax)

    lineplot(df, ycol='R-Squared', ax=axes[0])
    lineplot(df, ycol='Compression Ratio', ax=axes[1])
    xlbl = _xlabel_for_xmetric(x_metric)
    axes[0].set_title('ECG: {} vs R-Squared'.format(xlbl))
    axes[1].set_title('ECG: {} vs Compression Ratio'.format(xlbl))
    axes[0].set_ylim([0, 1])
    axes[0].set_ylabel('R-Squared')
    axes[1].set_ylabel('Compression Ratio')
    axes[1].set_xlabel(xlbl)
    if x_metric in ('muls', 'ops', 'nlookups'):
        axes[0].semilogx()
    # axes[0].semilogx()
    plt.tight_layout()
    plt.subplots_adjust(top=.92, bottom=.2)
    save_fig('ecg_{}'.format(x_metric))
def make_caltech_fig(x_metric='d'):
    """x_metric should be in {'d', 'secs', 'muls'}"""
    fig, ax = plt.subplots(1, 1, figsize=(6, 6))
    results = pd.read_csv(RESULTS_DIR / 'caltech.csv')
    results = _clean_results_df(results, default_D=27)
    sb.lineplot(data=results, hue='Method', x=x_metric, y='Log10(MSE)',
                style='Method', markers=True, dashes=False, ax=ax)
    ax.set_ylabel('Log10(MSE + 1e-10)')
    # title and x-axis label both depend on which metric is plotted
    labels_for_metric = {
        'd': ('Caltech: Sketch Size vs Log Squared Error',
              'Log2(Sketch Size)'),
        'secs': ('Caltech: Computation Time vs Log Squared Error',
                 'Time (s)'),
        'muls': ('Caltech: # of Multiplies vs Log Squared Error',
                 'Log10(# of Multiplies)'),
    }
    if x_metric in labels_for_metric:
        title, xlabel = labels_for_metric[x_metric]
        ax.set_title(title)
        ax.set_xlabel(xlabel)
    plt.tight_layout()
    plt.subplots_adjust(top=.92, bottom=.2)
    save_fig('caltech_{}'.format(x_metric))
def main():
    """Regenerate the CIFAR figures; the other figures' calls are kept
    below, commented out, for manual use."""
    # for x_metric in 'd secs muls'.split():
    # # for x_metric in ['muls']:
    #     for y_metric in ('Accuracy', 'R-Squared'):
    #         make_cifar_fig(x_metric, y_metric)
    # make_cifar_fig('d', 'Accuracy')
    # make_cifar_fig('muls', 'Accuracy')
    make_cifar_fig('ops', 'Accuracy')
    make_cifar_fig('Latency', 'Accuracy')
    make_cifar_fig('Throughput', 'Accuracy')
    # make_cifar_fig('Accuracy')
    # make_cifar_fig('Accuracy')
    # make_cifar_fig('R-Squared')
    # make_ecg_fig(x_metric='d')
    # make_ecg_fig(x_metric='secs')
    # make_ecg_fig(x_metric='muls')
    # make_caltech_fig(x_metric='d')
    # make_caltech_fig(x_metric='secs')
    # make_caltech_fig(x_metric='muls')
    print("done")


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
import time
import numpy as np
from .utils import kmeans, orthonormalize_rows, random_rotation
from joblib import Memory
_memory = Memory('.', verbose=0)
# ================================================================ PQ
@_memory.cache
def learn_pq(X, ncentroids, nsubvects, subvect_len, max_kmeans_iters=16):
    """Learn a product-quantization codebook for X.

    Runs k-means independently on each of `nsubvects` disjoint blocks of
    `subvect_len` consecutive columns of X.

    Returns:
        codebooks: [ncentroids x nsubvects x subvect_len] centroids
        assignments: [N x nsubvects] index of each row's centroid in
            each subspace
    """
    codebooks = np.empty((ncentroids, nsubvects, subvect_len))
    # fix: np.int was deprecated in numpy 1.20 and removed in 1.24;
    # the builtin int is the documented equivalent
    assignments = np.empty((X.shape[0], nsubvects), dtype=int)

    for i in range(nsubvects):
        start_col = i * subvect_len
        end_col = start_col + subvect_len
        X_in = X[:, start_col:end_col]
        centroids, labels = kmeans(X_in, ncentroids, max_iter=max_kmeans_iters)
        codebooks[:, i, :] = centroids
        assignments[:, i] = labels

    return codebooks, assignments  # [2**nbits x M x D/M], [N x M]
def reconstruct_X_pq(assignments, codebooks):
    """assignments: N x M ints; codebooks: 2**nbits x M x D/M floats"""
    nrows, M = assignments.shape
    assert M == codebooks.shape[1]
    subvect_len = codebooks.shape[2]
    recon = np.zeros((nrows, M * subvect_len), dtype=np.float32)
    for m in range(M):
        lo = subvect_len * m
        # gather each row's chosen centroid for subspace m
        recon[:, lo:lo + subvect_len] = codebooks[assignments[:, m], m, :]
    return recon
def _dists_elemwise_sq(x, q):
diffs = x - q
return diffs * diffs
def _dists_elemwise_l1(x, q):
return np.abs(x - q)
def _encode_X_pq(X, codebooks, elemwise_dist_func=_dists_elemwise_sq):
    """Assign each row of X to its nearest centroid in every subspace.

    Args:
        X: [N x D] data, with D == nsubvects * subvect_len.
        codebooks: [ncentroids x nsubvects x subvect_len] centroids.
        elemwise_dist_func: broadcasting distance kernel; summed over the
            last axis to get per-centroid distances.

    Returns:
        [N x nsubvects] int array of centroid indices.
    """
    ncentroids, nsubvects, subvect_len = codebooks.shape

    assert X.shape[1] == (nsubvects * subvect_len)
    # fix: np.int was deprecated in numpy 1.20 and removed in 1.24
    idxs = np.empty((X.shape[0], nsubvects), dtype=int)
    X = X.reshape((X.shape[0], nsubvects, subvect_len))
    for i, row in enumerate(X):
        row = row.reshape((1, nsubvects, subvect_len))
        dists = elemwise_dist_func(codebooks, row)
        dists = np.sum(dists, axis=2)
        idxs[i, :] = np.argmin(dists, axis=0)

    return idxs  # [N x nsubvects]
def compute_reconstruction_error(X, X_hat, subvect_len=-1):
    """Return the mean squared reconstruction error of X_hat, normalized
    by the per-column variance of X (0 = perfect, ~1 = as bad as always
    predicting the column means).

    If subvect_len > 0, also prints the MSE within each block of
    `subvect_len` consecutive columns.
    """
    diffs_sq = np.square(X - X_hat)
    if subvect_len > 0:
        block_errs = [np.mean(diffs_sq[:, lo:lo + subvect_len])
                      for lo in range(0, diffs_sq.shape[1], subvect_len)]
        print("  errors in each block: {} ({})".format(
            np.array(block_errs), np.sum(block_errs)))

    # denominator: average per-entry SSE if we predicted the column means
    X_bar = X - np.mean(X, axis=0)
    col_sses = np.sum(X_bar * X_bar, axis=0) + 1e-14
    tot_sse_using_mean = np.sum(col_sses)

    errors = np.mean(diffs_sq, axis=1)
    # variances = np.var(X, axis=1)
    # return np.mean(errors) / np.mean(variances)
    return np.mean(errors) / (tot_sse_using_mean / X_bar.size)
# ================================================================ Gaussian OPQ
# https://github.com/yahoo/lopq/blob/master/python/lopq/model.py; see
# https://github.com/yahoo/lopq/blob/master/LICENSE. For this function only:
#
# Copyright 2015, Yahoo Inc.
# Licensed under the terms of the Apache License, Version 2.0.
# See the LICENSE file associated with the project for terms.
#
@_memory.cache
def eigenvalue_allocation(num_buckets, eigenvalues, shuffle=False):
    """
    Compute a permutation of eigenvalues to balance variance across buckets
    of dimensions.
    Described in section 3.2.4 in http://research.microsoft.com/pubs/187499/cvpr13opq.pdf
    Note, the following slides indicate this function will break when fed eigenvalues < 1
    without the scaling trick implemented below:
    https://www.robots.ox.ac.uk/~vgg/rg/slides/ge__cvpr2013__optimizedpq.pdf
    :param int num_buckets:
        the number of dimension buckets over which to allocate eigenvalues
    :param ndarray eigenvalues:
        a vector of eigenvalues
    :param bool shuffle:
        whether to randomly shuffle the order of resulting buckets
    :returns ndarray:
        a vector of indices by which to permute the eigenvectors
    """
    D = len(eigenvalues)
    dims_per_bucket = D // num_buckets
    eigenvalue_product = np.zeros(num_buckets, dtype=float)
    bucket_size = np.zeros(num_buckets, dtype=int)
    permutation = np.zeros((num_buckets, dims_per_bucket), dtype=int)

    # We first must scale the eigenvalues by dividing by their
    # smallest non-zero value to avoid problems with the algorithm
    # when eigenvalues are less than 1.
    min_non_zero_eigenvalue = np.min(np.abs(eigenvalues[np.nonzero(eigenvalues)]))
    eigenvalues = eigenvalues / min_non_zero_eigenvalue

    # this is not actually a requirement, but I'm curious about whether this
    # condition is ever violated
    if not np.all(eigenvalues > 0):
        print("WARNING: some eigenvalues were nonpositive")

    # Iterate eigenvalues in descending order
    sorted_inds = np.argsort(eigenvalues)[::-1]
    log_eigs = np.log2(abs(eigenvalues))
    for ind in sorted_inds:
        # Find eligible (not full) buckets
        eligible = (bucket_size < dims_per_bucket).nonzero()
        # Find eligible bucket with least eigenvalue product
        i = eigenvalue_product[eligible].argmin(0)
        bucket = eligible[0][i]
        # Update eigenvalue product for this bucket
        eigenvalue_product[bucket] = eigenvalue_product[bucket] + log_eigs[ind]
        # Store bucket assignment and update size
        permutation[bucket, bucket_size[bucket]] = ind
        bucket_size[bucket] += 1

    if shuffle:
        # fix: np.int was deprecated in numpy 1.20 and removed in 1.24
        shuffle_idxs = np.arange(num_buckets, dtype=int)
        np.random.shuffle(shuffle_idxs)
        permutation = permutation[shuffle_idxs]

    # wow, these are within <1% of each other
    # print "opq eigenvalue log prods: ", eigenvalue_product
    return np.reshape(permutation, D)
def learn_opq_gaussian_rotation(X_train, ncodebooks, shuffle=False):
    """Learn an OPQ rotation via eigenvalue allocation (closed-form,
    Gaussian-data variant); returns a [D x D] matrix whose ROWS are the
    reordered eigenvector projections.

    NOTE(review): `cov` below is X^T X - outer(mu, mu), which matches the
    sample covariance's eigenvectors only for zero-mean data (a true
    scatter matrix would subtract N * outer(mu, mu)) -- confirm intended.
    """
    means = np.mean(X_train, axis=0)
    cov = np.dot(X_train.T, X_train) - np.outer(means, means)
    eigenvals, eigenvects = np.linalg.eigh(cov)
    # permute eigenvectors so each codebook's subspace gets balanced variance
    order_idxs = eigenvalue_allocation(ncodebooks, eigenvals, shuffle=shuffle)
    assert len(order_idxs) == X_train.shape[1]
    return eigenvects[:, order_idxs].T  # rows are projections
# ================================================================ OPQ
def _update_centroids_opq(X, assignments, ncentroids): # [N x D], [N x M]
nsubvects = assignments.shape[1]
subvect_len = X.shape[1] // nsubvects
assert X.shape[0] == assignments.shape[0]
assert X.shape[1] % nsubvects == 0
codebooks = np.zeros((ncentroids, nsubvects, subvect_len), dtype=np.float32)
for i, row in enumerate(X):
for m in range(nsubvects):
start_col = m * subvect_len
end_col = start_col + subvect_len
codebooks[assignments[i, m], m, :] += row[start_col:end_col]
for m in range(nsubvects):
code_counts = np.bincount(assignments[:, m], minlength=ncentroids)
codebooks[:, m] /= np.maximum(code_counts, 1).reshape((-1, 1)) # no div by 0
return codebooks
class NumericalException(Exception):
    """Raised when a matrix fails a numerical sanity check (e.g. a
    supposedly-orthonormal rotation with a bad determinant/trace)."""
    pass
def _debug_rotation(R):
D = np.max(R.shape)
identity = np.identity(D, dtype=np.float32)
RtR = np.dot(R.T, R)
R_det = np.linalg.det(RtR)
print("determinant of R*R: ", R_det)
R_trace = np.trace(RtR)
print("trace of R*R, trace divided by D: {}, {}".format(R_trace, R_trace / D))
off_diagonal_abs_mean = np.mean(np.abs(RtR - identity))
print("mean(abs(off diagonals of R*R)): ", off_diagonal_abs_mean)
if R_det < .999 or R_det > 1.001:
raise NumericalException("Bad determinant")
if R_trace < .999 * D or R_trace > 1.001 * D:
raise NumericalException("Bad trace")
if off_diagonal_abs_mean > .001:
raise NumericalException("Bad off-diagonals")
def opq_rotate(X, R):  # so other code need not know what to transpose
    """Apply OPQ rotation R to row-vector data X; always returns 2D."""
    return np.atleast_2d(X) @ R.T
def opq_undo_rotate(X, R):  # so other code need not know what to transpose
    """Invert `opq_rotate` (valid because R is orthonormal, so R^-1 = R^T)."""
    return np.atleast_2d(X) @ R
# @_memory.cache
def opq_initialize(X_train, ncodebooks, init='gauss'):
    """Initialize the OPQ rotation and return (X_rotated, R).

    Args:
        X_train: [N x D] training data.
        ncodebooks: number of PQ subspaces.
        init: one of 'gauss', 'gauss_flat', 'gauss_shuffle', 'identity',
            'random'.

    Raises:
        ValueError: if `init` is not one of the recognized methods.
    """
    X = X_train
    _, D = X.shape

    if init in ('gauss', 'gauss_flat', 'gauss_shuffle'):
        permute = (init == 'gauss_shuffle')
        R = learn_opq_gaussian_rotation(X_train, ncodebooks, shuffle=permute)
        R = R.astype(np.float32)

        if init == 'gauss_flat':
            # compose R with a block-diagonal random rotation that mixes
            # dims within each codebook's subspace
            # assert R.shape[0] == R.shape[1]
            D = R.shape[1]
            d = D // ncodebooks
            assert d * ncodebooks == D  # same # of dims in each subspace
            local_r = random_rotation(int(d))
            tiled = np.zeros((D, D))
            for c in range(ncodebooks):
                start = c * d
                end = start + d
                tiled[start:end, start:end] = local_r

            R = np.dot(R, tiled)

        X_rotated = opq_rotate(X, R)
    elif init == 'identity':
        R = np.identity(D, dtype=np.float32)  # D x D
        X_rotated = X
    elif init == 'random':
        R = np.random.randn(D, D).astype(np.float32)
        R = orthonormalize_rows(R)
        X_rotated = opq_rotate(X, R)
    else:
        # bug fix: the original message had no '{}' placeholder, so the
        # offending init value was silently dropped from the error text
        raise ValueError("Unrecognized initialization method: {}".format(init))

    return X_rotated, R
# loosely based on:
# https://github.com/arbabenko/Quantizations/blob/master/opqCoding.py
@_memory.cache
def learn_opq(X_train, ncodebooks, codebook_bits=8, niters=10,
              initial_kmeans_iters=1, init='gauss', debug=False):
    """Learn Optimized Product Quantization (OPQ) for X_train.

    init in {'gauss', 'identity', 'random'}. Alternates between updating
    the rotation R (via SVD of X_hat^T X) and re-fitting the PQ codebooks
    on the rotated data. Setting niters=0 reduces to plain PQ on the
    initial rotation.

    Returns (codebooks, assignments, R).
    """
    print("OPQ: Using init '{}'".format(init))
    t0 = time.time()
    X = X_train.astype(np.float32)
    N, D = X.shape
    ncentroids = int(2**codebook_bits)
    subvect_len = D // ncodebooks
    assert D % subvect_len == 0  # equal number of dims for each codebook

    X_rotated, R = opq_initialize(X_train, ncodebooks=ncodebooks, init=init)

    # initialize codebooks by running kmeans on each rotated dim; this way,
    # setting niters=0 corresponds to normal PQ
    codebooks, assignments = learn_pq(X_rotated, ncentroids=ncentroids,
                                      nsubvects=ncodebooks,
                                      subvect_len=subvect_len,
                                      max_kmeans_iters=1)

    for it in np.arange(niters):
        # compute reconstruction errors
        X_hat = reconstruct_X_pq(assignments, codebooks)
        # err = compute_reconstruction_error(X_rotated, X_hat, subvect_len=subvect_len)
        err = compute_reconstruction_error(X_rotated, X_hat)
        print("---- OPQ {}x{}b iter {}: mse / variance = {:.5f}".format(
            ncodebooks, codebook_bits, it, err))

        # update rotation matrix based on reconstruction errors
        # (procrustes-style solution: R = UV from SVD of X_hat^T X)
        U, s, V = np.linalg.svd(np.dot(X_hat.T, X), full_matrices=False)
        R = np.dot(U, V)

        # update centroids using new rotation matrix
        X_rotated = opq_rotate(X, R)
        assignments = _encode_X_pq(X_rotated, codebooks)
        codebooks = _update_centroids_opq(X_rotated, assignments, ncentroids)

    X_hat = reconstruct_X_pq(assignments, codebooks)
    err = compute_reconstruction_error(X_rotated, X_hat)
    t = time.time() - t0
    print("---- OPQ {}x{}b final mse / variance = {:.5f} ({:.3f}s)".format(
        ncodebooks, codebook_bits, err, t))

    return codebooks, assignments, R
# ================================================================ Block OPQ
def bopq_rotate(X, rotations):
    """Apply a block-diagonal rotation to X: rotations[i] (each
    [R_sz x R_sz]) rotates the i-th block of R_sz consecutive columns."""
    X = np.atleast_2d(X)
    _, D = X.shape
    block_sz = len(rotations[0])
    nblocks = int(D / block_sz)
    assert nblocks == len(rotations)

    out = np.copy(X)
    for i, R in enumerate(rotations):
        lo = block_sz * i
        hi = lo + block_sz
        out[:, lo:hi] = np.dot(X[:, lo:hi], R.T)
    return out
@_memory.cache  # opq with block diagonal rotations
def learn_bopq(X_train, ncodebooks, codebook_bits=4, niters=20,
               initial_kmeans_iters=1, R_sz=16, **sink):
    """Learn OPQ restricted to block-diagonal rotations (blocks of size
    R_sz), which are much cheaper to apply than a dense D x D rotation.

    Returns (codebooks, assignments, rotations), where `rotations` is a
    list of D/R_sz matrices of shape [R_sz x R_sz].
    """
    t0 = time.time()
    X = X_train.astype(np.float32)
    N, D = X.shape
    ncentroids = int(2**codebook_bits)
    subvect_len = D // ncodebooks
    assert D % subvect_len == 0  # equal number of dims for each codebook

    # compute number of rotations and subspaces associated with each
    nrots = int(D / R_sz)
    rot_starts = R_sz * np.arange(nrots)
    rot_ends = rot_starts + R_sz

    # X_rotated, R = opq_initialize(X_train, ncodebooks=ncodebooks, init=init)
    X_rotated = X  # hardcode identity init  # TODO allow others
    # NOTE(review): X_rotated aliases X, so the in-place writes below also
    # mutate X; each iteration's SVD therefore sees already-rotated data
    # and the new rotations compose on top of the old ones, unlike
    # learn_opq, which always rotates the original X -- confirm intended.
    rotations = [np.eye(R_sz) for i in range(nrots)]

    # initialize codebooks by running kmeans on each rotated dim; this way,
    # setting niters=0 corresponds to normal PQ
    codebooks, assignments = learn_pq(X_rotated, ncentroids=ncentroids,
                                      nsubvects=ncodebooks,
                                      subvect_len=subvect_len,
                                      max_kmeans_iters=1)

    for it in np.arange(niters):
        # compute reconstruction errors
        X_hat = reconstruct_X_pq(assignments, codebooks)
        # err = compute_reconstruction_error(X_rotated, X_hat, subvect_len=subvect_len)
        err = compute_reconstruction_error(X_rotated, X_hat)
        print("---- BOPQ {} {}x{}b iter {}: mse / variance = {:.5f}".format(
            R_sz, ncodebooks, codebook_bits, it, err))

        # solve one procrustes problem per block of R_sz columns
        rotations = []
        for i in range(nrots):
            start, end = rot_starts[i], rot_ends[i]
            X_sub = X[:, start:end]
            X_hat_sub = X_hat[:, start:end]

            # update rotation matrix based on reconstruction errors
            U, s, V = np.linalg.svd(np.dot(X_hat_sub.T, X_sub), full_matrices=False)
            R = np.dot(U, V)
            rotations.append(R)
            X_rotated[:, start:end] = np.dot(X_sub, R.T)

        # update assignments and codebooks based on new rotations
        assignments = _encode_X_pq(X_rotated, codebooks)
        codebooks = _update_centroids_opq(X_rotated, assignments, ncentroids)

    X_hat = reconstruct_X_pq(assignments, codebooks)
    err = compute_reconstruction_error(X_rotated, X_hat)
    t = time.time() - t0
    print("---- BOPQ {} {}x{}b final mse / variance = {:.5f} ({:.3f}s)".format(
        R_sz, ncodebooks, codebook_bits, err, t))

    return codebooks, assignments, rotations
|
#!/bin/env/python
import os
import shutil
def ls(dir='.'):
    """Return the entries of `dir` (defaults to the current directory)."""
    return os.listdir(path=dir)
def is_hidden(path):
    """True iff the final component of `path` starts with a dot."""
    name = os.path.basename(path)
    return name.startswith('.')
def is_visible(path):
    """True iff `path` is not hidden (see is_hidden)."""
    hidden = is_hidden(path)
    return not hidden
def join_paths(dir, contents):
    """Prefix every name in `contents` with directory `dir`."""
    join = os.path.join
    return [join(dir, name) for name in contents]
def files_matching(dir, prefix=None, suffix=None, abs_paths=False,
                   only_files=False, only_dirs=False, recursive=False,
                   only_visible=False, only_hidden=False):
    """List (sorted) entries of `dir` filtered by prefix/suffix and kind.

    When `recursive`, entries from subdirectories are included as paths
    relative to `dir` (or absolute if `abs_paths`).

    NOTE(review): `abs_dir` is just `dir`, not os.path.abspath(dir), so
    recursion works with whatever form the caller passed -- confirm the
    name isn't implying an intended abspath call. Also, the recursive call
    does not forward only_visible/only_hidden; those filters are applied
    only at the top level, on the (relative) result paths.
    """
    files = os.listdir(dir)
    if recursive:
        abs_dir = dir
        paths = join_paths(abs_dir, files)
        for path in paths:
            if not os.path.isdir(path):
                continue
            matches = files_matching(
                path, prefix=prefix, suffix=suffix,
                abs_paths=abs_paths, only_files=only_files,
                only_dirs=only_dirs, recursive=True)
            # re-express the subdirectory's matches relative to `dir`
            matches = join_paths(path, matches)
            matches = [os.path.relpath(m, start=dir) for m in matches]
            files += matches
    if prefix:
        files = [f for f in files if f.startswith(prefix)]
    if suffix:
        files = [f for f in files if f.endswith(suffix)]
    if only_files or only_dirs:
        paths = join_paths(dir, files)
        if only_files:
            files = [f for f, p in zip(files, paths) if os.path.isfile(p)]
        if only_dirs:
            files = [f for f, p in zip(files, paths) if os.path.isdir(p)]
    if abs_paths:
        files = join_paths(os.path.abspath(dir), files)
    if only_visible:
        files = [f for f in files if is_visible(f)]
    if only_hidden:
        files = [f for f in files if is_hidden(f)]
    return sorted(files)
def list_subdirs(dir, startswith=None, endswith=None, abs_paths=False,
                 recursive=False, only_visible=False):
    """Convenience wrapper around files_matching that returns only dirs."""
    return files_matching(dir, prefix=startswith, suffix=endswith,
                          abs_paths=abs_paths, only_dirs=True,
                          recursive=recursive, only_visible=only_visible)
def list_files(dir, startswith=None, endswith=None, abs_paths=False,
               recursive=False, only_visible=False):
    """Convenience wrapper around files_matching that returns only files."""
    return files_matching(dir, prefix=startswith, suffix=endswith,
                          abs_paths=abs_paths, only_files=True,
                          recursive=recursive, only_visible=only_visible)
def remove(path):
    """Delete `path` if it exists, whether it is a file or a directory
    tree; silently does nothing for a nonexistent path."""
    if not os.path.exists(path):
        return
    try:
        os.remove(path)
    except OSError:  # path is a directory
        shutil.rmtree(path)
def force_create_dir(dir):
    """Create `dir`, first deleting anything already at that path."""
    remove(dir)  # no-op when nothing exists there
    os.makedirs(dir)
def ensure_dir_exists(dir_or_file):
    """Create the directory for `dir_or_file` if it doesn't exist.

    A path whose basename contains a '.' is treated as a file, and its
    parent directory is created instead (heuristic).

    Fixes two defects in the original: a bare filename in the cwd yielded
    dirname '' and os.makedirs('') raised FileNotFoundError; and the
    exists-then-makedirs pair raced with concurrent creators (now handled
    by exist_ok=True).
    """
    if '.' in os.path.basename(dir_or_file):  # this looks like a file
        dirname = os.path.dirname(dir_or_file)
    else:
        dirname = dir_or_file
    if dirname:  # '' means the cwd, which always exists
        os.makedirs(dirname, exist_ok=True)
def basename(f, noext=False):
    """Return the final path component of `f`; with noext, drop everything
    from the first '.' onward (so 'a.tar.gz' -> 'a')."""
    name = os.path.basename(f)
    return name.partition('.')[0] if noext else name
|
#!/bin/env python
from __future__ import absolute_import, division, print_function
import numpy as np
import os
import PIL
from PIL import Image
from PIL import ImageOps # can't just do PIL.ImageOps for some reason
from . import files
# ================================ TODO rm duplicate code from imagenet.py
# adapted from https://github.com/keras-team/keras-preprocessing/blob/master/
# keras_preprocessing/image/utils.py under MIT license
def img_to_array(img, layout='nhwc', dtype='float32', mode='RGB'):
    """Converts a PIL Image instance to a Numpy array.

    # Arguments
        img: PIL Image instance (converted to `mode` if needed).
        layout: Image data format, either "nchw" or "nhwc".
        dtype: Dtype to use for the returned array.

    # Returns
        A 3D Numpy array.

    # Raises
        ValueError: if invalid `img` or `layout` is passed.
    """
    if img.mode != mode:
        img = img.convert(mode=mode)
    if layout not in ('nchw', 'nhwc'):
        raise ValueError('Unknown layout: %s' % layout)

    # PIL images are (width, height[, channel]); numpy gives us
    # (height, width[, channel])
    arr = np.asarray(img, dtype=dtype)
    if arr.ndim == 3:
        return arr.transpose(2, 0, 1) if layout == 'nchw' else arr
    if arr.ndim == 2:
        # single-channel image: insert an explicit channel axis
        if layout == 'nchw':
            return arr.reshape((1, arr.shape[0], arr.shape[1]))
        return arr.reshape((arr.shape[0], arr.shape[1], 1))
    raise ValueError('Unsupported image shape: %s' % (arr.shape,))
def resize_img(img, ratio_or_size):
    """Resize a PIL image by a scalar ratio or to an explicit size.

    `ratio_or_size` may be None / all-negative (no-op), an indexable
    (nrows, ncols) pair (-1 keeps that dimension), or a scalar ratio.

    NOTE(review): the pair is interpreted as (rows, cols) but is passed
    straight to img.resize(), whose size argument is (width, height) --
    for non-square targets the dimensions would be swapped. Current
    callers only pass square sizes; confirm before using non-square ones.
    """
    if ratio_or_size is None or np.max(ratio_or_size) < 0:
        return img
    try:
        # treat it as an (nrows, ncols) pair
        nrows = ratio_or_size[0]
        ncols = ratio_or_size[1]
        nrows = img.height if nrows < 0 else nrows
        ncols = img.width if ncols < 0 else ncols
    except AttributeError:
        # scalar ratio applied to both dimensions
        nrows = img.height * ratio_or_size
        ncols = img.width * ratio_or_size
    new_size = (nrows, ncols)
    # LANCZOS for shrinking, BICUBIC for enlarging
    is_downsampling = (nrows < img.height) or (ncols < img.width)
    interp = PIL.Image.LANCZOS if is_downsampling else PIL.Image.BICUBIC
    return img.resize(new_size, resample=interp)
def crop_img(img, crop_how=None, new_size=(224, 224), resize_shorter_to=256):
    """Crop a PIL image.

    crop_how=None is a no-op; 'center' delegates to center_crop (resize
    then crop); 'square' takes a centered crop of `new_size` (or the
    largest centered square when new_size is None).
    """
    if crop_how is None:
        return img
    assert crop_how in ('center', 'square')
    height, width = img.height, img.width
    if (height == width) and (new_size is None):
        return img  # already square and no target size requested
    if crop_how == 'center':
        # note: center_crop calls back into this function with 'square'
        return center_crop(img, new_size=new_size,
                           resize_shorter_to=resize_shorter_to)

    if new_size is None:
        # largest centered square
        new_width = min(width, height)
        new_height = new_width
    else:
        new_height, new_width = new_size
    assert new_width <= width
    assert new_height <= height
    left = (width - new_width) // 2
    top = (height - new_height) // 2
    # right = (width + new_width) // 2
    # bottom = (height + new_height) // 2
    right = left + new_width
    bottom = top + new_height
    return img.crop((left, top, right, bottom))
def center_crop(img, new_size=(224, 224), resize_shorter_to=256):
    """Imagenet-style preprocessing: scale so the shorter side equals
    `resize_shorter_to`, then take a centered crop of `new_size`."""
    shorter = min(img.height, img.width)
    scaled_h = (img.height * resize_shorter_to) // shorter
    scaled_w = (img.width * resize_shorter_to) // shorter
    img = img.resize(
        (scaled_w, scaled_h), resample=Image.BICUBIC)
    assert min(scaled_w, scaled_h) == resize_shorter_to
    return crop_img(img, crop_how='square', new_size=new_size)
def pad_img(img, pad_how='square', fill_value=0):
    """Pad a PIL image with `fill_value` until it is square; no-op when
    pad_how is None or the image is already square."""
    if pad_how is None:
        return img
    assert pad_how == 'square'  # no other kinds of cropping supported
    height, width = img.height, img.width
    if height == width:
        return img
    side = max(height, width)
    # split any odd leftover pixel between the two edges
    extra_w = side - width
    extra_h = side - height
    left = extra_w // 2
    top = extra_h // 2
    border = (left, top, extra_w - left, extra_h - top)
    return ImageOps.expand(img, border=border, fill=fill_value)
def load_jpg(path, layout='nhwc', dtype=None, resample=None,
             crop=None, pad=None):
    """Load an image file as a numpy array via pad -> crop -> resize.

    `pad`, `crop`, and `resample` are forwarded to pad_img (pad_how),
    crop_img (crop_how), and resize_img (ratio_or_size) respectively.
    """
    img = PIL.Image.open(path)
    img = pad_img(img, pad)
    img = crop_img(img, crop)
    img = resize_img(img, ratio_or_size=resample)
    return img_to_array(img, layout=layout, dtype=dtype)
# assumes one subdir for each class, with class name equal to subdir name
# @_memory.cache
def load_jpegs_from_dir(dirpath, remove_classes=None, require_suffix=None,
                        layout='nhwc', dtype=None, resample=(224, 224),
                        crop='center', pad=None, verbose=1,
                        limit_per_class=None, only_return_path=False):
    """Load a directory of class-subdirectories of images.

    Assumes one subdir per class, with the class name equal to the subdir
    name; labels are assigned by the sorted order of subdir names.

    Returns ((X, y), label_to_classname), where X is a single 4D array if
    all images end up the same size, a list of 3D arrays otherwise, or a
    list of paths when only_return_path=True.
    """
    subdirs = sorted(files.list_subdirs(dirpath, only_visible=True))
    if remove_classes is not None:
        if isinstance(remove_classes, str):
            remove_classes = [remove_classes]
        for classname in remove_classes:
            subdirs.remove(classname)
    if verbose > 0:
        print("found {} classes in directory: {}".format(len(subdirs), dirpath))
    classname_to_label = {name: i for i, name in enumerate(subdirs)}  # noqa
    label_to_classname = {i: name for name, i in classname_to_label.items()}
    all_imgs = []
    all_labels = []
    for subdir in subdirs:
        subdir_path = os.path.join(dirpath, subdir)
        img_paths = files.list_files(
            subdir_path, endswith=require_suffix, abs_paths=True,
            only_visible=True)
        if limit_per_class is not None and limit_per_class > 0:
            img_paths = img_paths[:limit_per_class]
        if verbose > 1:
            print("loading {:4d} images for class '{}'".format(
                len(img_paths), subdir))
        # not certain += [...] version was working
        label = classname_to_label[subdir]
        for i in range(len(img_paths)):
            all_labels.append(label)
        # all_labels += [] * len(img_paths)
        if only_return_path:
            imgs = img_paths
        else:
            # keep a leading batch dim on each image so they can be
            # concatenated below
            imgs = [load_jpg(f, layout=layout, dtype=dtype, resample=resample,
                             crop=crop, pad=pad)[np.newaxis, :, :, :]
                    for f in img_paths]
        all_imgs += imgs
    if only_return_path:
        X = all_imgs
    else:
        try:
            # this works iff resampled/padded/cropped to same size
            X = np.concatenate(all_imgs, axis=0)
        except ValueError:
            # otherwise strip batch dim (so each image is 3D)
            X = [img.reshape(img.shape[1:]) for img in all_imgs]
    y = np.array(all_labels, dtype=np.int32)
    return (X, y), label_to_classname
|
#!/bin/env python
from __future__ import print_function
import numpy as np
import os
import warnings
import h5py
from sklearn.datasets import load_digits
import keras
from keras.preprocessing import image
# from python import imagenet, svhn, caltech
# from python.datasets import caltech
from . import imagenet
from . import svhn
from .data_utils import stratified_split_train_test
from joblib import Memory
_memory = Memory('.', verbose=1)
# DATA_DIR = os.path.expanduser('~/Desktop/datasets/nn-search')
DATA_DIR = os.path.expanduser('data')
join = os.path.join
# default keras-style data-augmentation settings; treat as read-only
# (callers should copy this dict before updating it)
DEFAULT_AUG_KWARGS = {
    'shear_range': 0.2,
    'zoom_range': 0.2,
    'horizontal_flip': True
}
class LabeledDataset(object):
    """In-memory dataset with train/test splits.

    `train_nsamples` / `test_nsamples` are optional sample-count metadata
    (-1 means unknown), mirroring HugeLabeledDataset; accepting them also
    fixes callers (e.g. load_imagenet_64, load_imagenet_one_of_each) that
    already pass train_nsamples and previously got a TypeError.
    """
    __slots__ = ('name X_train y_train X_test y_test '
                 'train_nsamples test_nsamples _collection').split()

    def __init__(self, name, X_train, y_train, X_test=None, y_test=None,
                 train_nsamples=None, test_nsamples=None):
        self.name = name
        self.X_train = X_train
        self.y_train = y_train
        self.X_test = X_test
        self.y_test = y_test
        # metadata only; -1 means "unknown"
        self.train_nsamples = int(train_nsamples or -1)
        self.test_nsamples = int(test_nsamples or -1)

    def generators(self, batch_size, augment=True,
                   preprocessing_function=None, aug_kwargs=None):
        """Return (train_generator, test_generator) keras data generators,
        one-hot encoding the labels and optionally augmenting the train
        split."""
        # bug fix: copy instead of aliasing, so aug_kwargs.update() can no
        # longer mutate the module-level DEFAULT_AUG_KWARGS
        _aug_kwargs = dict(DEFAULT_AUG_KWARGS)
        if aug_kwargs is not None:
            _aug_kwargs.update(aug_kwargs)
        if not augment:
            _aug_kwargs = {}

        nclasses = len(np.unique(self.y_train))
        y_train = keras.utils.to_categorical(self.y_train, num_classes=nclasses)
        y_test = keras.utils.to_categorical(self.y_test, num_classes=nclasses)

        train_datagen = image.ImageDataGenerator(
            preprocessing_function=preprocessing_function, **_aug_kwargs)
        train_generator = train_datagen.flow(
            self.X_train, y_train, batch_size=batch_size)

        test_datagen = image.ImageDataGenerator(
            preprocessing_function=preprocessing_function)
        test_generator = test_datagen.flow(
            self.X_test, y_test, batch_size=batch_size)

        return train_generator, test_generator
class HugeLabeledDataset(object):
    """Disk-backed dataset, streamed from train/test directories via
    keras `flow_from_directory` (for datasets too big to hold in RAM)."""

    def __init__(self, name, train_dir, test_dir,
                 train_nsamples=None, test_nsamples=None):
        self.name = name
        # self.train_dir = os.path.abspath(train_dir)
        # self.test_dir = os.path.abspath(test_dir)
        self.train_dir = train_dir
        self.test_dir = test_dir
        # -1 means "unknown"
        self.train_nsamples = int(train_nsamples or -1)
        self.test_nsamples = int(test_nsamples or -1)

    def generators(self, batch_size=None, augment=True,
                   preprocessing_function=None, aug_kwargs=None,
                   train_batch_size=None, test_batch_size=None,
                   **flow_kwargs):
        """Return (train_generator, test_generator); either may be None
        when the corresponding directory is unset."""
        # bug fix: copy instead of aliasing, so aug_kwargs.update() can no
        # longer mutate the module-level DEFAULT_AUG_KWARGS
        _aug_kwargs = dict(DEFAULT_AUG_KWARGS)
        if aug_kwargs is not None:
            _aug_kwargs.update(aug_kwargs)
        if not augment:
            _aug_kwargs = {}

        flow_kwargs = flow_kwargs or {}
        flow_kwargs.setdefault('target_size', (224, 224))
        flow_kwargs.setdefault('class_mode', 'categorical')

        train_generator = None
        test_generator = None
        if self.train_dir:
            train_batch_size = int(train_batch_size or batch_size)
            flow_kwargs['batch_size'] = train_batch_size
            print("HugeLabeledDataset: creating flow from train dir: ",
                  self.train_dir)
            train_datagen = image.ImageDataGenerator(
                preprocessing_function=preprocessing_function, **_aug_kwargs)
            train_generator = train_datagen.flow_from_directory(
                self.train_dir, **flow_kwargs)
        if self.test_dir:
            test_batch_size = int(test_batch_size or batch_size)
            flow_kwargs['batch_size'] = test_batch_size
            print("HugeLabeledDataset: creating flow from test dir: ",
                  self.test_dir)
            test_datagen = image.ImageDataGenerator(
                preprocessing_function=preprocessing_function)
            test_generator = test_datagen.flow_from_directory(
                self.test_dir, **flow_kwargs)

        return train_generator, test_generator
class Random:
    """Namespace of identifiers for synthetic ("random") data types."""
    UNIFORM = 'uniform'
    GAUSS = 'gauss'
    WALK = 'walk'
    BLOBS = 'blobs'
# ---- string identifiers for the supported datasets (used as dataset
# names/keys throughout this module)
DIGITS = 'Digits'
MNIST = 'MNIST'
FASHION_MNIST = 'FashionMNIST'
CIFAR10 = 'Cifar10'
CIFAR100 = 'Cifar100'
SVHN = 'SVHN'
CALTECH101 = 'Caltech101'
CALTECH256 = 'Caltech256'
CUB200 = 'CUB200'
FLOWERS102 = 'Flowers102'
INDOOR67 = 'Indoor67'
IMAGENET_TINY = 'TinyImagenet'  # 64x64, 200? classes
IMAGENET_10_CLASSES = 'ImageNet-10-Classes'  # full res, 10cls, 1k/cls
IMAGENET_100_CLASSES = 'ImageNet-100-Classes'  # full res, 100cls, 1k/cls
IMAGENET_1_EXAMPLE = 'ImageNet-1-Example'  # full res, 1k cls, 1/cls
IMAGENET_10_EXAMPLES = 'ImageNet-10-Examples'  # full res, 1k cls, 10/cls
IMAGENET_25_EXAMPLES = 'ImageNet-25-Examples'  # full res, 1k cls, 25/cls
IMAGENET_50_EXAMPLES = 'ImageNet-50-Examples'  # full res, 1k cls, 50/cls
IMAGENET_100_EXAMPLES = 'ImageNet-100-Examples'  # full res, 1k cls, 100/cls
IMAGENET_64PX = 'ImageNet64'  # 64x64, all examples
IMAGENET = 'ImageNet'
IMAGENET_ONE_OF_EACH = 'ImagenetOneOfEach'
MINIPLACES = 'Miniplaces'
# NOTE(review): the 25- and 50-example variants defined above are not
# listed here -- confirm whether that omission is intentional
ALL_IMAGENET_DATASETS = [
    IMAGENET, IMAGENET_64PX, IMAGENET_TINY, IMAGENET_ONE_OF_EACH,
    IMAGENET_10_CLASSES, IMAGENET_100_CLASSES,
    IMAGENET_1_EXAMPLE, IMAGENET_10_EXAMPLES, IMAGENET_100_EXAMPLES]
ALL_KERAS_DATASETS = [MNIST, CIFAR10, CIFAR100, FASHION_MNIST]
def _load_file(fname, *args, **kwargs):
    """Load an array stored under DATA_DIR; .txt uses loadtxt, else np.load."""
    full_path = os.path.join(DATA_DIR, fname)
    print("trying to load file at path: {}".format(full_path))
    loader = np.loadtxt if full_path.split('.')[-1] == 'txt' else np.load
    return loader(full_path, *args, **kwargs)
def _load_digits_X_y(ntrain=1000):
    """sklearn digits as a LabeledDataset, split at row index ntrain."""
    X, y = load_digits(return_X_y=True)
    return LabeledDataset('Digits', X[:ntrain], y[:ntrain],
                          X[ntrain:], y[ntrain:])
def _load_keras_dset(which_dataset):
    """Load one of the built-in keras image datasets as a LabeledDataset."""
    from keras import datasets as kd
    modules = {
        CIFAR10: kd.cifar10,
        CIFAR100: kd.cifar100,
        MNIST: kd.mnist,
        FASHION_MNIST: kd.fashion_mnist,
    }
    (X_train, y_train), (X_test, y_test) = modules[which_dataset].load_data()
    pretty_name = str(which_dataset).split('.')[-1].split("'")[0]
    return LabeledDataset(pretty_name, X_train, y_train, X_test, y_test)
def load_imagenet_64(limit_ntrain=-1):
    """Load 64x64 downsampled ImageNet, optionally capping training rows."""
    if limit_ntrain > 0:
        # only read enough chunk files to cover the requested row count;
        # loading everything requires lots of free RAM (original comment
        # says >18GB)
        nchunks = int(np.ceil(
            limit_ntrain / imagenet.IMAGENET_64_TRAIN_CHUNK_NSAMPLES))
        chunk_idxs = np.arange(1, nchunks + 1)
    else:
        chunk_idxs = None
    X_train, y_train = imagenet.load_train_data_64x64(
        which_file_idxs=chunk_idxs)
    X_test, y_test = imagenet.load_test_data_64x64()
    return LabeledDataset(IMAGENET_64PX, X_train, y_train, X_test, y_test,
                          train_nsamples=1e6)
def load_imagenet_tiny():
    """Tiny-ImageNet, loaded fully into memory as a LabeledDataset."""
    train = imagenet.load_train_data_tiny()
    test = imagenet.load_test_data_tiny()
    return LabeledDataset(IMAGENET_TINY, train[0], train[1],
                          test[0], test[1])
def load_imagenet_one_of_each():
    """One example per ImageNet class; the same arrays serve as train and test."""
    X, y = imagenet.load_data_one_of_each()
    dset = LabeledDataset(IMAGENET_ONE_OF_EACH, X, y, X, y,
                          train_nsamples=1e3)
    return dset
def load_imagenet(load_train=True, load_val=True):
    """Full ImageNet as a directory-backed (flow_from_directory) dataset."""
    train_path = None
    test_path = None
    if load_train:
        train_path = imagenet.IMAGENET_TRAIN_PATH
    if load_val:
        test_path = imagenet.IMAGENET_TEST_PATH
    return HugeLabeledDataset(
        IMAGENET, train_path, test_path,
        train_nsamples=1281167, test_nsamples=50e3)
def load_imagenet_10_classes(load_train=True, load_val=True):
    """ImageNet subset: 10 classes, ~1.3k train / 50 val images per class."""
    train_path = None
    if load_train:
        train_path = imagenet.IMAGENET_10_CLASSES_TRAIN_PATH
    test_path = imagenet.IMAGENET_10_CLASSES_TEST_PATH if load_val else None
    return HugeLabeledDataset(
        IMAGENET_10_CLASSES, train_path, test_path,
        train_nsamples=13000, test_nsamples=500)
def load_imagenet_100_classes(load_train=True, load_val=True):
    """ImageNet subset: 100 classes with their full train/val images."""
    train_path = (imagenet.IMAGENET_100_CLASSES_TRAIN_PATH
                  if load_train else None)
    test_path = (imagenet.IMAGENET_100_CLASSES_TEST_PATH
                 if load_val else None)
    return HugeLabeledDataset(IMAGENET_100_CLASSES, train_path, test_path,
                              train_nsamples=129395, test_nsamples=5000)
def load_imagenet_1_example(load_train=True, load_val=True):
    """ImageNet subset with a single training example per class.

    Parameters:
        load_train: if False, no training directory is attached.
        load_val: if False, no validation directory is attached.

    Returns a HugeLabeledDataset backed by the on-disk image folders.
    """
    train_path = imagenet.IMAGENET_1_EXAMPLE_TRAIN_PATH \
        if load_train else None
    test_path = imagenet.IMAGENET_TEST_PATH if load_val else None
    # fix: dataset was mislabeled IMAGENET_10_EXAMPLES (copy-paste from the
    # 10-examples loader); this is the 1-example-per-class dataset
    return HugeLabeledDataset(IMAGENET_1_EXAMPLE, train_path, test_path,
                              train_nsamples=1e3, test_nsamples=50e3)
def load_imagenet_10_examples(load_train=True, load_val=True):
    """ImageNet subset: 10 training images per class, standard val set."""
    train_path = None
    if load_train:
        train_path = imagenet.IMAGENET_10_EXAMPLES_TRAIN_PATH
    test_path = imagenet.IMAGENET_TEST_PATH if load_val else None
    return HugeLabeledDataset(IMAGENET_10_EXAMPLES, train_path, test_path,
                              train_nsamples=10e3, test_nsamples=50e3)
def load_imagenet_25_examples(load_train=True, load_val=True):
    """ImageNet subset: 25 training images per class, standard val set."""
    train_path = (imagenet.IMAGENET_25_EXAMPLES_TRAIN_PATH
                  if load_train else None)
    test_path = imagenet.IMAGENET_TEST_PATH if load_val else None
    return HugeLabeledDataset(IMAGENET_25_EXAMPLES, train_path, test_path,
                              train_nsamples=25e3, test_nsamples=50e3)
def load_imagenet_50_examples(load_train=True, load_val=True):
    """ImageNet subset: 50 training images per class, standard val set."""
    test_path = imagenet.IMAGENET_TEST_PATH if load_val else None
    train_path = None
    if load_train:
        train_path = imagenet.IMAGENET_50_EXAMPLES_TRAIN_PATH
    return HugeLabeledDataset(IMAGENET_50_EXAMPLES, train_path, test_path,
                              train_nsamples=50e3, test_nsamples=50e3)
def load_imagenet_100_examples(load_train=True, load_val=True):
    """ImageNet subset with 100 training examples per class.

    Parameters:
        load_train: if False, no training directory is attached.
        load_val: if False, no validation directory is attached.

    Returns a HugeLabeledDataset backed by the on-disk image folders.
    """
    train_path = imagenet.IMAGENET_100_EXAMPLES_TRAIN_PATH \
        if load_train else None
    test_path = imagenet.IMAGENET_TEST_PATH if load_val else None
    # fix: dataset was mislabeled IMAGENET_10_EXAMPLES (copy-paste from the
    # 10-examples loader); this is the 100-examples-per-class dataset
    return HugeLabeledDataset(IMAGENET_100_EXAMPLES, train_path, test_path,
                              train_nsamples=100e3, test_nsamples=50e3)
def _load_miniplaces():
    """Read the preprocessed Miniplaces arrays from a fixed hdf5 file."""
    path = '/data/ddmg/neuro/datasets/Miniplaces/miniplaces.h5'
    with h5py.File(path, 'r') as hf:
        arrays = [hf[key][()]
                  for key in ('X_train', 'Y_train', 'X_val', 'Y_val')]
    return LabeledDataset(MINIPLACES, *arrays)
def _load_svhn():
    """Street View House Numbers, via the project's svhn loader."""
    train, test = svhn.load_data()
    return LabeledDataset(SVHN, train[0], train[1], test[0], test[1])
def load_caltech101():
    """Caltech-101 as a directory-backed dataset (no predefined test split)."""
    return HugeLabeledDataset(
        CALTECH101, '../datasets/caltech/101_ObjectCategories', None)
def load_caltech256():
    """Caltech-256 as a directory-backed dataset (no predefined test split)."""
    return HugeLabeledDataset(
        CALTECH256, '../datasets/caltech/256_ObjectCategories', None)
def load_flowers102():
    """Oxford Flowers-102 with its train/test directory split."""
    root = '../datasets/flowers102'
    return HugeLabeledDataset(FLOWERS102,
                              os.path.join(root, 'train'),
                              os.path.join(root, 'test'),
                              train_nsamples=1020, test_nsamples=6149)
def load_cub200():
    """CUB-200 birds (the 2011 version) with its train/test directory split."""
    root = '../datasets/cub200'
    return HugeLabeledDataset(CUB200,
                              os.path.join(root, 'train'),
                              os.path.join(root, 'test'),
                              train_nsamples=5994, test_nsamples=5794)
def load_indoor67():
    """MIT Indoor-67 (the subset with a predefined train/test split)."""
    root = '../datasets/indoor67'
    return HugeLabeledDataset(INDOOR67,
                              os.path.join(root, 'train'),
                              os.path.join(root, 'test'),
                              train_nsamples=(67 * 80),
                              test_nsamples=(67 * 20))
# @_memory.cache
def load_dataset(which_dataset, norm_mean=False, norm_len=False,
                 flatten=False, Ntrain=-1, Ntest=-1, ensure_channels=False,
                 test_frac=None, scale_to_0_1=False):
    """Load a dataset by name and apply optional postprocessing.

    Parameters:
        which_dataset: one of the module-level dataset name constants.
        norm_mean: subtract the training set's per-feature means.
        norm_len: scale each row to unit L2 norm.
        flatten: reshape each sample to a 1D vector.
        Ntrain / Ntest: if > 0, truncate train / test sets to this many rows.
        ensure_channels: add a channel axis to 3D image arrays (e.g. MNIST),
            placed per keras's image_data_format.
        test_frac: when train and test are the same data, carve out this
            fraction as a stratified test split instead of warning.
        scale_to_0_1: shift/scale X by the global min and max.

    Returns a LabeledDataset, or a HugeLabeledDataset (directory-backed;
    returned immediately with no postprocessing).
    """
    if which_dataset == DIGITS:
        dset = _load_digits_X_y()
    elif which_dataset in ALL_KERAS_DATASETS:
        dset = _load_keras_dset(which_dataset)
    elif which_dataset == IMAGENET_64PX:
        dset = load_imagenet_64(limit_ntrain=Ntrain)
    elif which_dataset == IMAGENET_TINY:
        dset = load_imagenet_tiny()
    elif which_dataset == IMAGENET_ONE_OF_EACH:
        dset = load_imagenet_one_of_each()
    elif which_dataset == MINIPLACES:
        dset = _load_miniplaces()
    elif which_dataset == SVHN:
        dset = _load_svhn()
    # directory-backed datasets below return immediately (no postprocessing)
    elif which_dataset == CALTECH101:
        return load_caltech101()
    elif which_dataset == CALTECH256:
        return load_caltech256()
    elif which_dataset == CUB200:
        return load_cub200()
    elif which_dataset == FLOWERS102:
        return load_flowers102()
    elif which_dataset == IMAGENET:
        return load_imagenet()
    elif which_dataset == IMAGENET_10_CLASSES:
        return load_imagenet_10_classes()
    elif which_dataset == IMAGENET_100_CLASSES:
        return load_imagenet_100_classes()
    elif which_dataset == IMAGENET_1_EXAMPLE:
        return load_imagenet_1_example()
    elif which_dataset == IMAGENET_10_EXAMPLES:
        return load_imagenet_10_examples()
    elif which_dataset == IMAGENET_25_EXAMPLES:
        return load_imagenet_25_examples()
    elif which_dataset == IMAGENET_50_EXAMPLES:
        return load_imagenet_50_examples()
    elif which_dataset == IMAGENET_100_EXAMPLES:
        return load_imagenet_100_examples()
    else:
        raise ValueError("unrecognized dataset {}".format(which_dataset))

    if isinstance(dset, HugeLabeledDataset):
        # only has flow_from_directory() generators; no postprocessing
        # possible, so go ahead and return immediately
        return dset

    # detect datasets whose train and test arrays alias the same memory
    # (view relationship) or start with identical contents
    train_is_test = (dset.X_train.base is dset.X_test) or \
        (dset.X_test.base is dset.X_train)
    train_test_equal = np.array_equal(dset.X_train[:10], dset.X_test[:10])
    train_test_same = train_is_test or train_test_equal
    if train_test_same:
        if test_frac is None:
            warnings.warn("WARNING: Training data is also the test data! "
                          "Reversing order of test data. Consider passing "
                          "test_frac > 0 to automatically perform a "
                          "stratified train-test split.")
            dset.X_test = dset.X_test[::-1]
        else:
            # replace the aliased split with a proper stratified one
            X_train, X_test, y_train, y_test = stratified_split_train_test(
                dset.X_train, dset.y_train, train_frac=(1. - test_frac))
            dset = LabeledDataset(dset.name, X_train, y_train, X_test, y_test)
            train_is_test = False
            train_test_equal = False
            train_test_same = False

    if train_is_test:
        # decouple test arrays so later in-place ops don't hit train twice
        dset.X_test = np.copy(dset.X_test)
        dset.y_test = np.copy(dset.y_test)
        train_is_test = False

    if flatten:
        dset.X_train = dset.X_train.reshape(dset.X_train.shape[0], -1)
        dset.X_test = dset.X_test.reshape(dset.X_test.shape[0], -1)

    dset.X_train = dset.X_train.astype(np.float32)
    dset.X_test = dset.X_test.astype(np.float32)

    # keep references to the full (pre-truncation) arrays; ensure_channels
    # below reads shapes from these
    X_train = dset.X_train
    X_test = dset.X_test
    if Ntrain > 0:
        dset.X_train = X_train[:Ntrain]
        dset.y_train = dset.y_train[:Ntrain]
    if Ntest > 0:
        dset.X_test = np.copy(X_test[:Ntest])
        dset.y_test = np.copy(dset.y_test[:Ntest])

    if scale_to_0_1:
        # NOTE(review): dividing by max_X (not max_X - min_X) only maps to
        # [0, 1] when min_X == 0 (true for uint8 images) — confirm intent
        min_X = min(np.min(dset.X_train), np.min(dset.X_test))
        max_X = max(np.max(dset.X_train), np.max(dset.X_test))
        dset.X_train = (dset.X_train - min_X) / max_X
        # if not train_is_test:
        dset.X_test = (dset.X_test - min_X) / max_X

    if norm_mean:
        means = np.mean(dset.X_train, axis=0)
        dset.X_train -= means
        # if not train_is_test:  # don't subtract means twice from same array
        dset.X_test -= means
    if norm_len:
        dset.X_train /= np.linalg.norm(dset.X_train, axis=1, keepdims=True)
        # if not train_is_test:  # don't divide by norms twice on same array
        dset.X_test /= np.linalg.norm(dset.X_test, axis=1, keepdims=True)

    if ensure_channels:
        import keras.backend as K  # don't import keras unless we need it
        if len(X_train.shape) == 3:  # no channels; e.g., MNIST
            img_rows, img_cols = X_train.shape[-2], X_train.shape[-1]
            # K.set_image_data_format('channels_last')  # for argmax layer
            if K.image_data_format() == 'channels_first':
                dset.X_train = dset.X_train.reshape(
                    X_train.shape[0], 1, img_rows, img_cols)
                dset.X_test = dset.X_test.reshape(
                    X_test.shape[0], 1, img_rows, img_cols)
            else:
                dset.X_train = dset.X_train.reshape(
                    X_train.shape[0], img_rows, img_cols, 1)
                dset.X_test = dset.X_test.reshape(
                    X_test.shape[0], img_rows, img_cols, 1)

    return dset

    # if D_multiple_of > 1:
    #     X_train = ensure_num_cols_multiple_of(X_train, D_multiple_of)
    #     X_test = ensure_num_cols_multiple_of(X_test, D_multiple_of)
    #     Q = ensure_num_cols_multiple_of(Q, D_multiple_of)
    # return X_train, Q, X_test, true_nn
|
#!/bin/env python
from __future__ import division, print_function
import numpy as np
# import pyedflib as edf # pip install pyedflib
# import mne
from . import paths
from . import files
# directory containing the raw UCD ECG recordings (see the paths module)
ECG_DIR = paths.UCD_ECG
# expected number of recording files in ECG_DIR (sanity-checked in main)
NUM_RECORDINGS = 25
def main():
    """Sanity-check the UCD ECG recordings: list the files and parse each
    one's raw samples as uint16, printing basic stats along the way.

    NOTE(review): the 1025-byte header offset was reverse-engineered from
    the raw binary; confirm against the actual file-format documentation.
    """
    # fix: removed a stray leftover `pass` that preceded the body
    print("ecg dir: ", ECG_DIR)
    fpaths = files.list_files(ECG_DIR, abs_paths=True)
    assert len(fpaths) == NUM_RECORDINGS

    magical_start_offset = 1025  # from looking at raw binary
    for path in fpaths:
        print("------------------------ ", path)
        with open(path, 'rb') as f:
            raw = f.read()
        print("length of raw: ", len(raw))
        print("type(raw)", type(raw))
        # NOTE(review): np.frombuffer requires len(raw) - offset to be a
        # multiple of 2 for uint16; confirm the files guarantee this
        a = np.frombuffer(raw, offset=magical_start_offset, dtype=np.uint16)
        print(len(a))
        print(len(a) / 3)
# run the ECG inspection when executed as a script
if __name__ == '__main__':
    main()
|
#!/usr/env/python
import os
# root directory under which all raw datasets live
DATASETS_DIR = os.path.expanduser("~/Desktop/datasets/")


def to_path(*args):
    """Join path components relative to DATASETS_DIR."""
    return os.path.join(DATASETS_DIR, *args)
# ------------------------ straightforward datasets
MSRC_12 = to_path('MSRC-12', 'origData')
UCR = to_path('ucr/UCRArchive_2018')
UCR_INFO = to_path('ucr/DataSummary.csv')  # per-dataset metadata csv
UWAVE = to_path('uWave', 'extracted')
PAMAP = to_path('PAMAP_Dataset')
PAMAP2 = to_path('PAMAP2_Dataset')
WARD = to_path('WARD1.0')
UCI_GAS = to_path('uci-gas-sensor')

# ------------------------ ampds2 (one directory per measurement type)
AMPD2_POWER = to_path('ampds2', 'electric')
AMPD2_GAS = to_path('ampds2', 'gas')
AMPD2_WEATHER = to_path('ampds2', 'weather')
AMPD2_WATER = to_path('ampds2', 'water')

# ------------------------ caltech-{101,256}
CALTECH_101 = to_path('caltech', '101_ObjectCategories')
CALTECH_256 = to_path('caltech', '256_ObjectCategories')

# ------------------------ ECG data
SHAREE_ECG = to_path('sharee-ecg-database')
INCART_ECG = to_path('incart-12-lead-ecg')
|
#!/bin/env python
import os
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from joblib import Memory
# joblib disk cache for expensive loads (cache dir = current directory)
_memory = Memory('.', verbose=1)

# root directory for all nearest-neighbor-search benchmark datasets
DATA_DIR = os.path.expanduser('~/Desktop/datasets/nn-search')
join = os.path.join  # local alias to keep the path constants below short
class Random:
    """Names of the synthetic (randomly generated) datasets."""
    UNIFORM = 'uniform'  # i.i.d. uniform samples
    GAUSS = 'gauss'      # i.i.d. standard-normal samples
    WALK = 'walk'        # random walks (cumulative sums of gaussian noise)
    BLOBS = 'blobs'      # gaussian blobs around grid-based centers
class Gist:
    """File paths for the GIST1M benchmark (960-D gist descriptors)."""
    DIR = join(DATA_DIR, 'gist')
    TRAIN = join(DIR, 'gist_train.npy')  # noqa
    TEST = join(DIR, 'gist.npy')  # noqa
    TEST_100 = join(DIR, 'gist_100k.npy')  # noqa
    TEST_200 = join(DIR, 'gist_200k.npy')  # noqa
    QUERIES = join(DIR, 'gist_queries.npy')  # noqa
    TRUTH = join(DIR, 'gist_truth.npy')  # noqa
class Sift1M:
    """File paths for the SIFT1M benchmark (128-D sift descriptors)."""
    DIR = join(DATA_DIR, 'sift1m')
    TRAIN = join(DIR, 'sift_learn.npy')  # noqa
    TEST = join(DIR, 'sift_base.npy')  # noqa
    TEST_100 = join(DIR, 'sift_100k.txt')  # noqa
    TEST_200 = join(DIR, 'sift_200k.txt')  # noqa
    QUERIES = join(DIR, 'sift_queries.npy')  # noqa
    TRUTH = join(DIR, 'sift_groundtruth.npy')  # noqa
class Sift10M:
    """File paths for a 10M-vector slice of the SIFT1B benchmark."""
    DIR = join(DATA_DIR, 'sift1b')
    # TRAIN = join(DIR, 'big_ann_learn_10M.npy')  # noqa
    TRAIN = join(DIR, 'big_ann_learn_1M.npy')  # noqa # TODO use 10M?
    TRAIN_1M = join(DIR, 'big_ann_learn_1M.npy')  # noqa
    TEST = join(DIR, 'sift_10M.npy')  # noqa
    QUERIES = join(DIR, 'sift_queries.npy')  # noqa
    TRUTH = join(DIR, 'true_nn_idxs_10M.npy')  # noqa
class Deep1M:
    """256D PCA of convnet activations; see OTQ paper supporting
    website, http://sites.skoltech.ru/compvision/projects/aqtq/"""
    DIR = join(DATA_DIR, 'deep1m')  # noqa
    TRAIN = join(DIR, 'deep1M_learn.npy')  # noqa
    TEST = join(DIR, 'deep1M_base.npy')  # noqa
    TEST_100 = join(DIR, 'deep1M_test_100k.npy')  # noqa
    QUERIES = join(DIR, 'deep1M_queries.npy')  # noqa
    TRUTH_TRAIN = join(DIR, 'deep1M_truth_train.npy')  # noqa
    TRUTH = join(DIR, 'deep1M_groundtruth.npy')  # noqa
class Convnet1M:
    """File paths for the 1M convnet-activations benchmark."""
    DIR = join(DATA_DIR, 'convnet1m')  # noqa
    TRAIN = join(DIR, 'convnet_train.npy')  # noqa
    TEST = join(DIR, 'convnet_test.npy')  # noqa
    TEST_100 = join(DIR, 'convnet_test_100k.npy')  # noqa
    QUERIES = join(DIR, 'convnet_queries.npy')  # noqa
    TRUTH_TRAIN = join(DIR, 'truth_train.npy')  # noqa
    TRUTH = join(DIR, 'truth_test.npy')  # noqa
class Mnist:
    # following other papers (eg, "revisiting additive quantization"),
    # use mnist test set as queries and training set as database
    DIR = join(DATA_DIR, 'mnist')  # noqa
    TEST = join(DIR, 'X_train.npy')  # noqa  (training images = database)
    QUERIES = join(DIR, 'X_test.npy')  # noqa  (test images = queries)
    TRUTH = join(DIR, 'truth_Q=test_X=train.npy')  # noqa
class LabelMe:
    """File paths for the LabelMe descriptor benchmark."""
    DIR = join(DATA_DIR, 'labelme')  # noqa
    TRAIN = join(DIR, 'labelme_train.npy')  # noqa
    # NOTE(review): TEST points at the same file as TRAIN — confirm whether
    # this dataset intentionally uses the train set as the database
    TEST = join(DIR, 'labelme_train.npy')  # noqa
    QUERIES = join(DIR, 'labelme_test.npy')  # noqa
    TRUTH = join(DIR, 'labelme_truth.npy')  # noqa
class Glove:
    """File paths for the GloVe word-embedding benchmark."""
    DIR = join(DATA_DIR, 'glove')  # noqa
    TEST = join(DIR, 'glove_test.npy')  # noqa
    TEST_100 = join(DIR, 'glove_100k.txt')  # noqa
    TEST_200 = join(DIR, 'glove_200k.txt')  # noqa
    QUERIES = join(DIR, 'glove_queries.npy')  # noqa
    TRUTH = join(DIR, 'glove_truth.npy')  # noqa
# note that we've only run the real experiments on the ones reported
# in the paper (i.e., no cherrypicking)
ALL_REAL_DATASETS = [
    Gist, Sift1M, Sift10M, Deep1M, Convnet1M, Mnist, LabelMe, Glove]
def load_file(fname, *args, **kwargs):
    """Load an array from fname: np.loadtxt for .txt files, np.load otherwise."""
    extension = fname.split('.')[-1]
    if extension == 'txt':
        return np.loadtxt(fname, *args, **kwargs)
    return np.load(fname, *args, **kwargs)
def extract_random_rows(X, how_many, remove_from_X=True):
    """Copy `how_many` consecutive rows of X starting at a random offset.

    Returns (X_without_those_rows, rows) when remove_from_X, else (X, rows).
    """
    start = np.random.randint(len(X) - how_many - 1)
    end = start + how_many
    rows = np.copy(X[start:end])
    if not remove_from_X:
        return X, rows
    remainder = np.vstack((X[:start], X[end:]))
    return remainder, rows
def _load_complete_dataset(which_dataset, num_queries=10):
X_test = np.load(which_dataset.TEST)
try:
X_train = np.load(which_dataset.TRAIN)
print("using separate test set!")
except AttributeError:
print("No training set found for dataset {}".format(str(which_dataset)))
X_train = np.copy(X_test)
try:
Q = np.load(which_dataset.QUERIES)
except AttributeError:
assert num_queries > 1
X_train, Q = extract_random_rows(X_train, how_many=num_queries)
try:
true_nn = np.load(which_dataset.TRUTH).astype(np.int)
except AttributeError:
true_nn = None
return X_train, Q, X_test, true_nn
def _ground_truth_for_dataset(which_dataset):
return None # TODO
# XXX: not clear whether this function is correct in general, but works for
# 784D with the nzeros we get for 32 and 64 codebooks
def _insert_zeros(X, nzeros):
    """Return a copy of X with `nzeros` all-zero columns spliced in at
    (roughly) evenly spaced positions, preserving the order of X's columns.

    Output shape is (N, D + nzeros); the first and last input columns are
    asserted to remain first and last in the output.
    """
    N, D = X.shape
    D_new = D + nzeros
    X_new = np.zeros((N, D_new), dtype=X.dtype)
    # copy `step` input columns, then leave one output column zero, repeat
    step = int(D / (nzeros + 1)) - 1
    for i in range(nzeros):
        in_start = step * i
        in_end = in_start + step
        # out_start = in_start + i + 1
        out_start = (step + 1) * i
        out_end = out_start + step
        X_new[:, out_start:out_end] = X[:, in_start:in_end]
        # out_start = out_end
        # out_end += step

    # copy whatever input columns remain after the last inserted zero
    out_end += 1  # account for the last 0
    remaining_len = D - in_end
    out_remaining_len = D_new - out_end
    # print "step", step
    # print "in_start, in_end", in_start, in_end
    # print "out_start, out_end", out_start, out_end
    # print "D, D_new", D, D_new
    # print "remaining_len, out_remaining_len", remaining_len, out_remaining_len
    assert remaining_len == out_remaining_len
    assert remaining_len >= 0
    if remaining_len:
        # X_new[:, out_end:out_end+remaining_len] = X[:, in_end:D]
        X_new[:, out_end:] = X[:, in_end:]

    assert np.array_equal(X[:, 0], X_new[:, 0])
    assert np.array_equal(X[:, -1], X_new[:, -1])
    return X_new
def ensure_num_cols_multiple_of(X, multiple_of):
    """Pad X with zero columns (via _insert_zeros) until ncols % multiple_of == 0.

    Returns X unchanged (same object) when no padding is needed.
    """
    leftover = X.shape[1] % multiple_of
    if leftover == 0:
        return X
    return _insert_zeros(X, multiple_of - leftover)
# @_memory.cache  # uncomment to get same randomness each time
def load_dataset(which_dataset, N=-1, D=-1, norm_mean=False, norm_len=False,
                 num_queries=10, Ntrain=-1, D_multiple_of=-1):
    """Load a nearest-neighbor-search dataset and return (X_train, Q,
    X_test, true_nn).

    Parameters:
        which_dataset: a Random.* name (synthetic), a file path string, or
            one of the classes in ALL_REAL_DATASETS.
        N, D: rows/cols to generate or truncate to (<1 means "use all").
        norm_mean: subtract X_train's per-feature means from all arrays.
        norm_len: scale rows of all arrays to unit L2 norm.
        num_queries: number of query rows (generated or extracted).
        Ntrain: if > 0, generate/keep a separate training set of this size.
        D_multiple_of: if > 1, zero-pad columns to a multiple of this.

    Returns float32 arrays; true_nn is None unless the dataset provides
    ground-truth neighbor indices (or blob labels for Random.BLOBS).
    """
    true_nn = None

    # randomly generated datasets
    if which_dataset == Random.UNIFORM:
        X_test = np.random.rand(N, D)
        X_train = np.random.rand(Ntrain, D) if Ntrain > 0 else X_test
        Q = np.random.rand(num_queries, D)
    elif which_dataset == Random.GAUSS:
        X_test = np.random.randn(N, D)
        X_train = np.random.randn(Ntrain, D) if Ntrain > 0 else X_test
        Q = np.random.randn(num_queries, D)
    elif which_dataset == Random.WALK:
        X_test = np.random.randn(N, D)
        X_test = np.cumsum(X_test, axis=1)
        X_train = np.copy(X_test)
        if Ntrain > 0:
            X_train = np.random.randn(Ntrain, D)
            # NOTE(review): cumsum without axis flattens the array here,
            # unlike the axis=1 used for X_test above — confirm intent
            X_train = np.cumsum(X_train)
        Q = np.random.randn(num_queries, D)
        Q = np.cumsum(Q, axis=-1)
    elif which_dataset == Random.BLOBS:
        # centers is D x D, and centers[i, j] = (i + j)
        centers = np.arange(D)
        centers = np.sum(np.meshgrid(centers, centers), axis=0)
        X_test, _ = make_blobs(n_samples=N, centers=centers)
        X_train = np.copy(X_test)
        if Ntrain > 0:
            X_train, _ = make_blobs(n_samples=Ntrain, centers=centers)
        Q, true_nn = make_blobs(n_samples=num_queries, centers=centers)

    # datasets that are just one block of a "real" dataset
    elif isinstance(which_dataset, str):
        # assert False # TODO rm after real experiments
        X_test = load_file(which_dataset)
        X_test, Q = extract_random_rows(X_test, how_many=num_queries)
        X_train = np.copy(X_test)
        true_nn = _ground_truth_for_dataset(which_dataset)

    # "real" datasets with predefined train, test, queries, truth
    elif which_dataset in ALL_REAL_DATASETS:
        X_train, Q, X_test, true_nn = _load_complete_dataset(
            which_dataset, num_queries=num_queries)
    else:
        raise ValueError("unrecognized dataset {}".format(which_dataset))

    # truncate to the requested size; copy X_test so later in-place ops
    # can't alias the on-disk / training arrays
    N = X_test.shape[0] if N < 1 else N
    D = X_test.shape[1] if D < 1 else D
    X_test, X_train = np.copy(X_test)[:N, :D], X_train[:N, :D]
    Q = Q[:, :D] if len(Q.shape) > 1 else Q[:D]

    train_is_test = X_train.base is X_test or X_test.base is X_train
    train_test_equal = np.array_equal(X_train[:100], X_test[:100])
    train_test_same = train_is_test or train_test_equal
    if train_test_same:
        print("WARNING: Training data is also the test data!")
    if train_is_test:
        # decouple so the in-place normalizations below don't hit the
        # same memory twice
        X_test = np.copy(X_test)

    if norm_mean:
        means = np.mean(X_train, axis=0)
        X_train -= means
        X_test -= means
        Q -= means
    if norm_len:
        X_test /= np.linalg.norm(X_test, axis=1, keepdims=True)
        X_train /= np.linalg.norm(X_train, axis=1, keepdims=True)
        Q /= np.linalg.norm(Q, axis=-1, keepdims=True)

    # np.set_printoptions(precision=6)
    # print "start of Q:", Q[:5, :5]
    # print "start of X_test:", X_test[:5, :5]

    # TODO don't convert datasets that are originally uint8s to floats
    X_train = X_train.astype(np.float32)
    X_test = X_test.astype(np.float32)
    # Q = np.squeeze(Q.astype(np.float32))
    Q = Q.astype(np.float32)

    if D_multiple_of > 1:
        X_train = ensure_num_cols_multiple_of(X_train, D_multiple_of)
        X_test = ensure_num_cols_multiple_of(X_test, D_multiple_of)
        Q = ensure_num_cols_multiple_of(Q, D_multiple_of)

    return X_train, Q, X_test, true_nn
def read_yael_vecs(path, c_contiguous=True, limit_rows=-1, dtype=None):
    """Parse a yael/texmex .{f,i,b}vecs file into a 2D numpy array.

    Each row on disk is a 4-byte int32 dimension followed by `dim` values.
    The dtype is inferred from the filename unless given explicitly;
    limit_rows > 0 caps how many rows are read.
    """
    dim = np.fromfile(path, dtype=np.int32, count=2)[0]
    print("vector length = {}".format(dim))

    if dtype is None:
        for marker, candidate in (('fvecs', np.float32),
                                  ('ivecs', np.int32),
                                  ('bvecs', np.uint8)):
            if marker in path:
                dtype = candidate
                break
        else:
            raise ValueError("couldn't infer dtype from path {}".format(path))
    itemsize = np.dtype(dtype).itemsize

    assert dim > 0
    assert itemsize in (1, 2, 4)
    cols_for_dim = 4 // itemsize  # leading elements that hold the dim field
    row_size_elems = (4 + dim * itemsize) // itemsize
    count = int(limit_rows) * row_size_elems if limit_rows > 0 else -1

    flat = np.fromfile(path, dtype=dtype, count=count)
    rows = flat.reshape((-1, row_size_elems))
    # every row's leading int32 must repeat the advertised dimension
    if not all(rows.view(np.int32)[:, 0] == dim):
        raise IOError("Non-uniform vector sizes in " + path)
    rows = rows[:, cols_for_dim:]
    return rows.copy() if c_contiguous else rows
|
#!/usr/bin/env python
import os
# import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
from joblib import Memory
from . import paths
from . import files
# joblib disk cache (used by the CSV readers below)
_memory = Memory('./')


def _list_csvs(directory):
    """Absolute paths of the .csv files under `directory` (via files.list_files)."""
    return files.list_files(directory, endswith='.csv', abs_paths=True)
# per-measurement-type CSV paths (resolved at import time)
ELECTRIC_PATHS = _list_csvs(paths.AMPD2_POWER)
GAS_PATHS = _list_csvs(paths.AMPD2_GAS)
WATER_PATHS = _list_csvs(paths.AMPD2_WATER)
WEATHER_PATHS = _list_csvs(paths.AMPD2_WEATHER)

# column names in the electricity CSVs; first col is the unix timestamp
ELECTRIC_COLS = 'UNIX_TS,WHE,RSE,GRE,MHE,B1E,BME,CWE,DWE,EQE,FRE,HPE,OFE,' \
    'UTE,WOE,B2E,CDE,DNE,EBE,FGE,HTE,OUE,TVE,UNE'.split(',')
ELECTRIC_DATA_COLS = ELECTRIC_COLS[1:]
# ELECTRIC_DATA_COLS.remove('MHE')  # linear combo of other cols
# ELECTRIC_DATA_COLS.remove('UNE')  # linear combo of other cols
GAS_DATA_COLS = ['counter', 'avg_rate', 'inst_rate']
WATER_DATA_COLS = ['counter', 'avg_rate']
WEATHER_TIME_COL = 'Date/Time'
WEATHER_DATA_COLS = ['Temp (C)', 'Dew Point Temp (C)', 'Rel Hum (%)',
                     'Wind Dir (10s deg)', 'Wind Spd (km/h)',
                     'Visibility (km)', 'Stn Press (kPa)']
WEATHER_ALL_COLS = [WEATHER_TIME_COL] + WEATHER_DATA_COLS

# where plot_recordings() saves its figures
FIG_SAVE_DIR = os.path.join('figs', 'ampds')
# ================================================================ public
class HouseRecording(object):
    """One AMPds2 house CSV: sample timestamps plus named data columns."""

    def __init__(self, path, cols=None):
        data = _read_file(path)
        self.path = path
        self.name = os.path.basename(path).split('.')[0]
        self.col_names = cols
        # first CSV column is the timestamp; everything after is data
        self.sampleTimes = data[:, 0]
        values = data[:, 1:]  # XXX have to use all cols after the first
        # hack to deal with DWW water not having inst_rate: keep only as
        # many data columns as we have names for
        self.data = values[:, :len(self.col_names)]
class WeatherRecording(object):
    """The single AMPds2 weather CSV, parsed into timestamps + float data."""

    def __init__(self):
        frame = _load_weather_data()
        self.name = 'weather'
        self.col_names = WEATHER_DATA_COLS
        self.sampleTimes = _datetime_strs_to_unix_timestamps(
            frame[WEATHER_TIME_COL])
        self.data = frame[WEATHER_DATA_COLS].values.astype(np.float32)
# ------------------------ top-level data loading functions
def all_power_recordings():
    """One HouseRecording per electricity CSV."""
    return [HouseRecording(p, cols=ELECTRIC_DATA_COLS)
            for p in ELECTRIC_PATHS]
def all_gas_recordings():
    """One HouseRecording per gas CSV."""
    return [HouseRecording(p, cols=GAS_DATA_COLS) for p in GAS_PATHS]
def all_water_recordings():
    """One HouseRecording per water CSV."""
    return [HouseRecording(p, cols=WATER_DATA_COLS) for p in WATER_PATHS]
def all_weather_recordings():
    """Weather recordings; just one data file, so a one-element list."""
    return [WeatherRecording()]
def all_timestamp_recordings():
    """All recordings, with each one's data replaced by its own timestamps."""
    recordings = (all_power_recordings() + all_gas_recordings() +
                  all_water_recordings() + all_weather_recordings())
    # all_recordings = all_weather_recordings()  # TODO rm
    for rec in recordings:
        rec.data = rec.sampleTimes.astype(np.float64)
        rec.name += '_timestamps'
    return recordings
# ================================================================ private
# def _read_file(path, cols=None):
@_memory.cache
def _read_file(path):
    """Read one CSV into a float64 array (disk-cached via joblib).

    NOTE(review): fillna(method='backfill') fills gaps with the *next*
    valid value; the original comment said "hold prev val", which would
    be ffill — confirm which is intended.
    """
    df = pd.read_csv(path).fillna(method='backfill')
    # if cols is not None and len(cols) > 0:
    #     timestamps = df[df.columns[0]]
    # return df.values.astype(np.int32)
    return df.values.astype(np.float64)  # need f64 to not lose timestamps
@_memory.cache
def _load_weather_data():
    """Read the single weather CSV (disk-cached), keeping only known columns.

    NOTE(review): as with _read_file, method='backfill' fills from the
    *next* valid value, not the previous one — confirm intent.
    """
    path = WEATHER_PATHS[0]
    df = pd.read_csv(path, sep=',').fillna(method='backfill')
    return df[WEATHER_ALL_COLS]
def _datetimes_to_unix_timestamps(datetimes):
# https://stackoverflow.com/q/34038273
return (datetimes.astype(np.int64) / 1e6).astype(np.uint64)
def _datetime_strs_to_unix_timestamps(strs):
    """Parse datetime strings and convert them to integer epoch offsets."""
    return _datetimes_to_unix_timestamps(pd.to_datetime(strs))
# ================================================================ main
def save_fig_png(path):
    """Save the current matplotlib figure as a tightly-cropped 300dpi image."""
    plt.savefig(path, dpi=300, bbox_inches='tight')
def _prev_corrs_stats(corr):
assert corr.shape[0] == corr.shape[1] # needs to be a correlation mat
abs_corr = np.abs(corr)
prev_corrs = np.zeros(len(corr) - 1)
best_corrs = np.zeros(len(corr) - 1)
for i, row in enumerate(abs_corr[1:]): # each row after the first
prev_corrs[i] = row[i] # note that i is row index - 1
try:
best_corr_idx = np.nanargmax(row[:i+1])
best_corrs[i] = row[best_corr_idx]
except ValueError: # if row all nans
best_corrs[i] = prev_corrs[i]
assert not (best_corrs[i] < prev_corrs[i]) # double neg for nans
# avg corr with prev variable, avg highest corr with any preceding variable
return np.nanmean(prev_corrs), np.nanmean(best_corrs)
def _plot_corr(data, fig, ax, add_title=True):
    """Heatmap the correlation matrix of `data` onto `ax`.

    Assumes data is row-major; ie, each col is one variable over time.
    When add_title, annotates the axes with the mean |corr| of each
    variable vs its predecessor and vs its best preceding variable.
    """
    # cov = np.cov(data.T)
    corr = np.corrcoef(data.T)
    # im = ax.imshow(corr, interpolation='nearest',
    #                cmap=plt.cm.RdBu,
    #                norm=mpl.colors.Normalize(vmin=-1., vmax=1.))
    # fig.colorbar(im, ax=ax)
    # sb.heatmap(corr, center=0, ax=ax, square=True)
    sb.heatmap(corr, vmin=-1, vmax=1, center=0, ax=ax, square=True)
    if add_title:
        mean_prev_corr, mean_best_corr = _prev_corrs_stats(corr)
        ax.set_title("|rho| prev, best prev =\n{:.2f}, {:.2f}".format(
            mean_prev_corr, mean_best_corr))
def plot_recordings(recordings, interval_len=1000, norm_means=False,
                    mins_zero=False, savedir=None):
    """Plot each recording's first/last `interval_len` samples, their first
    derivatives, and correlation heatmaps; optionally save one png per
    recording into `savedir`.

    NOTE(review): assumes len(r.data) >= interval_len; shorter recordings
    would produce a negative start index — confirm inputs.
    """
    for r in recordings:
        print(("recording {} has data of shape {}".format(r.name, r.data.shape)))
        # 2x4 grid: corr mats in the outer columns, time series in between
        fig, axes = plt.subplots(2, 4, figsize=(13, 7))
        start_idxs = [0, len(r.data) - interval_len]
        end_idxs = [interval_len, len(r.data)]
        # any_nans_in_row = np.isnan(r.data).sum(axis=1)
        # print np.where(any_nans_in_row)[0]
        # continue
        for i, (start, end) in enumerate(zip(start_idxs, end_idxs)):
            timestamps = r.sampleTimes[start:end]
            data = r.data[start:end]
            if norm_means:
                data -= np.mean(data, axis=0).astype(data.dtype)
            elif mins_zero:
                data -= np.min(data, axis=0).astype(data.dtype)
            # print "data shape", data.shape
            # print "data final vals", data[-20:]
            # continue
            col = i + 1
            axes[0, col].plot(timestamps, data, lw=1)
            axes[1, col].plot(timestamps[1:], np.diff(data, axis=0), lw=1)
            axes[0, col].set_title('data')
            axes[1, col].set_title('first derivs')

        # plot correlation matrices for orig data and first derivs
        cor_sample_length = max(10000, len(r.data) // 5)
        data = r.data[:cor_sample_length]
        _plot_corr(data, fig, axes[0, 0])
        _plot_corr(np.diff(data, axis=0), fig, axes[1, 0])
        data = r.data[-cor_sample_length:]
        _plot_corr(data, fig, axes[0, -1])
        _plot_corr(np.diff(data, axis=0), fig, axes[1, -1])
        # _plot_corr(r.data[:cor_sample_length], fig, axes[0, 0])
        # data = r.data[-cor_sample_length:]
        # _plot_corr(data, fig, axes[2, 1])

        plt.tight_layout()
        # plt.show()
        if savedir is not None:
            files.ensure_dir_exists(savedir)
            # plt.savefig(os.path.join(savedir, r.name))
            save_fig_png(os.path.join(savedir, r.name))
def main():
    """Render diagnostic plots for every AMPds2 recording type."""
    recordings = (all_gas_recordings() + all_water_recordings() +
                  all_power_recordings() + all_weather_recordings())
    # zero each column's minimum rather than centering on the mean
    plot_recordings(recordings, norm_means=False, mins_zero=True,
                    savedir=FIG_SAVE_DIR)
    # plt.show()
# generate and save the plots when executed as a script
if __name__ == '__main__':
    main()
|
#!/bin/env python
from __future__ import absolute_import, division, print_function
import numpy as np
import os
import PIL
import pickle
import psutil # pip install psutil
import shutil
import sys # just for stderr for warnings
# import warnings
from PIL import Image
from python import files
from python import image_utils
from joblib import Memory
_memory = Memory('.', verbose=1)
# on-disk locations for the various ImageNet variants; relative paths are
# resolved against the working directory at run time
IMAGENET_ONE_OF_EACH_PATH = '../datasets/one-of-each-imagenet'
IMAGENET_ONE_OF_EACH_FLOW_PATH = '../datasets/one-of-each-imagenet-as-folders'
# IMAGENET_64_PATH = os.path.expanduser("~/Desktop/datasets/imagenet64")
# IMAGENET_TINY_PATH = os.path.expanduser("~/Desktop/datasets/tiny-imagenet-200")
IMAGENET_64_PATH = '../datasets/imagenet64'
IMAGENET_TINY_PATH = '../datasets/tiny-imagenet-200'
# number of images in each of the 10 pickled ImageNet64 training chunks
IMAGENET_64_TRAIN_CHUNK_NSAMPLES = 128116
IMAGENET_TRAIN_PATH = '../datasets/ILSVRC2012/ILSVRC2012_img_train'
IMAGENET_TEST_PATH = '/home/dblalock/datasets/ILSVRC2012/ILSVRC2012_img_val'
if not os.path.exists(IMAGENET_TEST_PATH):  # try to load local version
    IMAGENET_TEST_PATH = '../datasets/ILSVRC2012/ILSVRC2012_img_val'
IMAGENET_10_CLASSES_TRAIN_PATH = '../datasets/ILSVRC2012_10/ILSVRC2012_img_train'
IMAGENET_10_CLASSES_TEST_PATH = '../datasets/ILSVRC2012_10/ILSVRC2012_img_val'
IMAGENET_100_CLASSES_TRAIN_PATH = '../datasets/ILSVRC2012_100/ILSVRC2012_img_train'
IMAGENET_100_CLASSES_TEST_PATH = '../datasets/ILSVRC2012_100/ILSVRC2012_img_val'
# fixed-number-of-examples-per-class subsets
IMAGENET_1_EXAMPLE_TRAIN_PATH = '../datasets/imagenet-001-of-each'
IMAGENET_10_EXAMPLES_TRAIN_PATH = '../datasets/imagenet-010-of-each'
IMAGENET_25_EXAMPLES_TRAIN_PATH = '../datasets/imagenet-025-of-each'
IMAGENET_50_EXAMPLES_TRAIN_PATH = '../datasets/imagenet-050-of-each'
IMAGENET_100_EXAMPLES_TRAIN_PATH = '../datasets/imagenet-100-of-each'
# ================================================================ Downsampled
def _unpickle_file(path):
with open(path, 'rb') as f:
pydict = pickle.load(f)
return pydict
# @_memory.cache
def _load_downsampled_data_file(path, layout='nhwc', dtype=None,
                                X_out=None, y_out=None, start_row=None):
    """Load one pickled chunk of downsampled ImageNet.

    Parameters:
        path: pickle file containing 'data' (N x npixels*3) and 'labels'.
        layout: 'nhwc' (default) or 'nchw' output image layout.
        dtype: optional dtype to cast X to; taken from X_out when given.
        X_out, y_out: optional preallocated arrays to also write into,
            starting at row start_row (start_row required in that case).
        start_row: row offset into X_out / y_out.

    Returns (X, y); y is int32 with labels shifted down by 1 to be 0-based.
    """
    d = _unpickle_file(path)
    X = d['data']
    # NOTE: subtracting 1 so min idx is 0; this breaks synset lookup
    y = np.array(d['labels'], dtype=np.int32) - 1
    y = y.ravel()  # shouldn't be necessary, but might as well
    assert X.shape[0] == y.shape[0]
    assert len(X.shape) == 2

    nchan = 3
    # fix: use integer division; `/` is float division here (the module
    # has `from __future__ import division`), which made npixels a float
    npixels = X.shape[1] // nchan
    assert npixels * nchan == X.shape[1]  # each row not one img?
    side_len = int(np.sqrt(npixels))
    assert side_len * side_len == npixels
    X = X.reshape(X.shape[0], nchan, side_len, side_len)

    layout = 'nhwc' if layout is None else layout
    assert layout in ('nhwc', 'nchw')
    if layout == 'nhwc':
        X = np.moveaxis(X, 1, -1)  # make channels last axis
        X = np.ascontiguousarray(X)

    if X_out is not None:
        assert dtype in (None, X_out.dtype)
        dtype = X_out.dtype
    if dtype is not None:
        X = X.astype(dtype)

    # print("X shape: ", X.shape)
    # print("y shape: ", y.shape)

    if start_row is not None:
        end_row = start_row + X.shape[0]
    if X_out is not None:
        assert start_row is not None
        X_out[start_row:end_row] = X
    if y_out is not None:
        assert start_row is not None
        y_out[start_row:end_row] = y

    return X, y
def load_train_file_64x64(idx, verbose=0, **kwargs):
    """Load training chunk number `idx` (1-10) of ImageNet 64x64."""
    assert idx in np.arange(1, 11)  # valid indices are 1 thru 10
    fpath = os.path.join(IMAGENET_64_PATH, "train_data_batch_{}".format(idx))
    if verbose > 1:
        print("loading train file: ", fpath)
    return _load_downsampled_data_file(fpath, **kwargs)
def _clean_which_file_idxs(which_file_idxs=None, dtype=None):
    """Validate (and possibly truncate) the requested ImageNet64 chunk
    indices so the corresponding data will actually fit in available RAM.

    Returns an int32 array of chunk indices, each in 1..10.
    Raises MemoryError if even a single chunk cannot be loaded.
    """
    if which_file_idxs is None:
        which_file_idxs = np.arange(1, 11)
    which_file_idxs = np.asarray(which_file_idxs, dtype=np.int32)
    # don't try to load more training data than we can actually fit in RAM
    mem_available = psutil.virtual_memory().available
    itemsize = 1 if dtype is None else dtype.itemsize
    one_img_nbytes = 64 * 64 * 3 * itemsize
    one_file_nbytes = IMAGENET_64_TRAIN_CHUNK_NSAMPLES * one_img_nbytes
    # keep one file's worth of headroom
    max_nfiles = (mem_available // one_file_nbytes) - 1
    if max_nfiles < 1:
        raise MemoryError(
            "Minimum amount of RAM needed to load one chunk of ImageNet64x64 "
            "is {}B, but only {}B are available".format(
                one_file_nbytes, mem_available))
    requested_nfiles = len(which_file_idxs)
    if requested_nfiles > max_nfiles:
        requested_MB = ((requested_nfiles + 1) * one_file_nbytes) // int(1e6)
        available_MB = mem_available // int(1e6)
        print("imagenet.load_train_data_64x64: MemoryWarning: "
              "Only loading {}/10 chunks of ImageNet64 instead of requested "
              "{}/10 since not enough memory; would need {:}MB, but only {:}MB "
              "are available".format(
                  max_nfiles, requested_nfiles, requested_MB, available_MB),
              file=sys.stderr)
        which_file_idxs = which_file_idxs[:max_nfiles]
    assert np.min(which_file_idxs) >= 1
    assert np.max(which_file_idxs) <= 10
    return which_file_idxs
# NOTE: total size of training data is around 16GB
def load_train_data_64x64(which_file_idxs=None, layout='nhwc', dtype=None,
                          verbose=1):
    """Load and concatenate the requested ImageNet64 training chunks.

    Returns (X, y) where X holds all images in the requested layout and
    y holds int32 labels.
    """
    which_file_idxs = _clean_which_file_idxs(which_file_idxs, dtype=dtype)
    if verbose > 0:
        print("load_train_data_64x64: loading file numbers: ", which_file_idxs)
    if dtype is None:
        dtype = np.uint8  # default dtype
    assert layout in ('nhwc', 'nchw')
    # preallocate one big output array and let each file write its slice
    # directly into it, so only a single copy of the data is ever in memory
    # (instead of loading every chunk and then concatenating)
    nrows_per_file = IMAGENET_64_TRAIN_CHUNK_NSAMPLES
    img_shape = (3, 64, 64) if layout == 'nchw' else (64, 64, 3)
    nrows_total = nrows_per_file * len(which_file_idxs)
    X_all = np.zeros((nrows_total,) + img_shape, dtype=dtype)
    y_all = np.zeros(nrows_total, dtype=np.int32)
    for i, idx in enumerate(which_file_idxs):
        load_train_file_64x64(
            idx, layout=layout, X_out=X_all, y_out=y_all,
            start_row=nrows_per_file * i, verbose=verbose)
    return X_all, y_all
def load_test_data_64x64(layout='nhwc', dtype=None):
    """Load the ImageNet 64x64 validation split ("val_data" file)."""
    val_path = os.path.join(IMAGENET_64_PATH, "val_data")
    return _load_downsampled_data_file(val_path, layout=layout, dtype=dtype)
# ================================================================ Tiny
# # adapted from https://github.com/keras-team/keras-preprocessing/blob/master/
# # keras_preprocessing/image/utils.py under MIT license
# def img_to_array(img, layout='nhwc', dtype='float32', mode='RGB'):
# """Converts a PIL Image instance to a Numpy array.
# # Arguments
# img: PIL Image instance.
# layout: Image data format, either "nchw" or "nhwc".
# dtype: Dtype to use for the returned array.
# # Returns
# A 3D Numpy array.
# # Raises
# ValueError: if invalid `img` or `layout` is passed.
# """
# # print("img info:", img.format, img.size, img.mode)
# # if img.mode == 'L':
# if img.mode != mode:
# img = img.convert(mode=mode)
# if layout not in ('nchw', 'nhwc'):
# raise ValueError('Unknown layout: %s' % layout)
# # Numpy array x has format (height, width, channel)
# # or (channel, height, width)
# # but original PIL image has format (width, height, channel)
# x = np.asarray(img, dtype=dtype)
# if len(x.shape) == 3:
# if layout == 'nchw':
# x = x.transpose(2, 0, 1)
# elif len(x.shape) == 2:
# # print("x is only rank 2...WTF!?")
# if layout == 'nchw':
# x = x.reshape((1, x.shape[0], x.shape[1]))
# else:
# x = x.reshape((x.shape[0], x.shape[1], 1))
# else:
# raise ValueError('Unsupported image shape: %s' % (x.shape,))
# return x
# def _resize_img(img, ratio_or_size):
# if ratio_or_size is None or np.min(ratio_or_size) < 0:
# return img
# try:
# nrows = ratio_or_size[0]
# ncols = ratio_or_size[1]
# except AttributeError:
# nrows = img.height * ratio_or_size
# ncols = img.width * ratio_or_size
# new_size = (nrows, ncols)
# is_downsampling = (nrows < img.height) or (ncols < img.width)
# interp = PIL.Image.LANCZOS if is_downsampling else PIL.Image.BICUBIC
# return img.resize(new_size, resample=interp)
# def image_utils.load_jpg(path, layout='nhwc', dtype='float32', resample=None):
# img = Image.open(path)
# img = _resize_img(img, ratio_or_size=resamp)
# return img_to_array(img, layout=layout, dtype=dtype)
@_memory.cache
def _load_tiny_clsids_to_nums():
    """Map each WordNet id listed in wnids.txt to its line index."""
    wnids_path = os.path.join(IMAGENET_TINY_PATH, 'wnids.txt')
    with open(wnids_path) as f:
        wnids = [line.strip() for line in f]
    return dict(zip(wnids, range(len(wnids))))
def _imagenet_tiny_cls_to_number(classnames):
    """Translate wnid classname(s) to integer label(s).

    Accepts either a single string (returns an int) or an iterable of
    strings (returns a list of ints).
    """
    # hoisted: fetch the lookup table once instead of once per element
    cls_to_num = _load_tiny_clsids_to_nums()
    if isinstance(classnames, str):
        return cls_to_num[classnames]
    return [cls_to_num[name] for name in classnames]
@_memory.cache
def load_train_data_tiny(layout='nhwc', dtype=None, verbose=1):
    """Load the full ImageNet-tiny training set as (X, y)."""
    train_dir = os.path.join(IMAGENET_TINY_PATH, 'train')
    class_dirs = files.list_subdirs(train_dir)
    assert len(class_dirs) == 200  # wrong number of classes??
    subdir_paths = files.list_subdirs(train_dir, abs_paths=True)
    all_imgs = []
    all_labels = []
    for pth in np.sort(subdir_paths):
        classname = os.path.basename(pth)
        if verbose > 0:
            print("loading images for class {}...".format(classname))
        img_paths = files.list_files(
            os.path.join(pth, 'images'), endswith='.JPEG', abs_paths=True)
        assert len(img_paths) == 500  # supposed to be 500 examples per class...
        for f in img_paths:
            all_imgs.append(image_utils.load_jpg(
                f, layout=layout, dtype=dtype)[np.newaxis, :, :, :])
        lbl = _imagenet_tiny_cls_to_number(classname)
        all_labels.extend([lbl] * len(img_paths))
    X = np.concatenate(all_imgs, axis=0)
    y = np.array(all_labels, dtype=np.int32)
    return X, y
@_memory.cache
def load_test_data_tiny(layout='nhwc', dtype=None):
    """Load the ImageNet-tiny "val" split as the labeled test set (X, y)."""
    # no labels given for "true" test set, so use the "val" subset as the
    # test set
    test_dir = os.path.join(IMAGENET_TINY_PATH, 'val')
    imgs_subdir = os.path.join(test_dir, 'images')
    img_paths = files.list_files(
        imgs_subdir, endswith='.JPEG', abs_paths=True)
    assert len(img_paths) == 10000  # wrong number of val images?
    # load images
    imgs = [image_utils.load_jpg(f, layout=layout,
                                 dtype=dtype)[np.newaxis, :, :, :]
            for f in img_paths]
    X = np.concatenate(imgs, axis=0)
    # load labels; build fname -> class id mapping so we don't rely on the
    # annotations file being sorted (and split each line only once)
    lbls_path = os.path.join(test_dir, 'val_annotations.txt')
    fname_to_class_id = {}
    with open(lbls_path, 'r') as f:
        for line in f:
            fields = line.split()
            fname_to_class_id[fields[0]] = fields[1]
    img_fnames = [os.path.basename(pth) for pth in img_paths]
    img_class_ids = [fname_to_class_id[fname] for fname in img_fnames]
    labels = _imagenet_tiny_cls_to_number(img_class_ids)
    y = np.array(labels, dtype=np.int32)
    return X, y
# ================================================================ K-of-each
# def load_data_one_of_each(layout='nhwc', dtype=None, size=None):
def load_data_one_of_each(layout='nhwc', dtype=None, size=(224, 224)):
    """Load the 1000-image "one of each class" ImageNet subset.

    With size=None the images keep their native (varying) shapes and X is
    returned as a list; otherwise they are resampled and stacked into one
    array.
    """
    img_paths = files.list_files(IMAGENET_ONE_OF_EACH_PATH, endswith='.JPEG',
                                 abs_paths=True)
    assert len(img_paths) == 1000  # should be 1000 images...
    imgs = [image_utils.load_jpg(f, layout=layout, dtype=dtype, resample=size)
            for f in img_paths]
    if size is None:
        X = imgs  # differing shapes; cannot concatenate
    else:  # can only concat if same size
        X = np.concatenate([img[np.newaxis, :, :, :] for img in imgs], axis=0)
    # XXX this is a total hack that will break if we get >1 img per class, and
    # already (probably) doesn't match up with the synsets
    y = np.arange(len(X))
    return X, y
# ================================================================ example
def load_flow_example(**kwargs):
    """Build keras (train, val, test) generators over the example ImageNet
    directory.  Note that the val and test generators both read 'val'."""
    IMAGENET_EXAMPLE_PATH = os.path.abspath('../datasets/imagenet-example')
    kwargs.setdefault('target_size', (224, 224))
    kwargs.setdefault('batch_size', 16)
    kwargs.setdefault('class_mode', 'categorical')
    import keras  # noqa
    from keras.preprocessing import image

    def _flow(subdir):
        # one fresh (no-augmentation) generator per split
        datagen = image.ImageDataGenerator()
        return datagen.flow_from_directory(
            os.path.join(IMAGENET_EXAMPLE_PATH, subdir), **kwargs)

    return _flow('train'), _flow('val'), _flow('val')
def example_imagenet_train():
    """Smoke-test training: build VGG16, swap its Conv2D layers for the
    project's MyConv2D, and train/evaluate a few steps on the example data."""
    import tensorflow as tf
    import keras
    from python import models
    from python import approx_conv_v2 as aconv
    # both are necessary to actually get consistent output
    np.random.seed(123)
    tf.random.set_random_seed(123)
    model = models.get_model(models.VGG16, weights=None, input_shape=(224, 224, 3))
    # swap out normal conv layer with our custom layer
    model = models.replace_layer_classes(
        model, {keras.layers.Conv2D: aconv.MyConv2D})
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    train_generator, val_generator, test_generator = load_flow_example()
    # tiny step counts: this is a wiring check, not real training
    model.fit_generator(train_generator, steps_per_epoch=10, epochs=1,
                        validation_steps=1, validation_data=val_generator)
    model.evaluate_generator(test_generator, steps=2)
def main():
    """Interactively plot random images from one of the loaders above."""
    # X, y = load_train_file_64x64(1)
    # X, y = load_train_data_64x64([1, 2, 3])
    # X, y = load_train_data_64x64([1, 2, 3, 4, 5])  # works
    # X, y = load_train_data_64x64()  # correctly yields mem warning
    # X, y = load_train_data_64x64([1])
    # X, y = load_test_data_64x64()
    # X, y = load_data_one_of_each(size=None)  # no resampling
    # X, y = load_data_one_of_each(size=(224, 224))
    # wow, imagenet-tiny looks like crap; lots of aliasing
    X, y = load_train_data_tiny()
    # X, y = load_test_data_tiny()
    print("X, y dtypes and shapes:")
    print(X.dtype)
    print(y.dtype)
    print(X.shape)
    print(y.shape)
    import matplotlib.pyplot as plt
    inp = 'y'
    count = 0
    # keep showing fresh 3x3 grids of random images until the user declines
    while inp == 'y':
        _, axes = plt.subplots(3, 3, figsize=(9, 9))
        idxs = np.random.choice(np.arange(len(X)), size=axes.size)
        # offset = 0
        # offset = 10000
        # idxs = np.arange(offset + 9*count, offset + 9 + 9*count)
        for i, ax in enumerate(axes.ravel()):
            idx = idxs[i]
            img, classId = X[idx], y[idx]
            ax.imshow(img, interpolation='nearest')
            ax.set_title("Idx = {}, class = {}".format(idx, classId))
        # plt.imshow(X[100*1000], interpolation='nearest')
        # plt.imshow(X[300*1000], interpolation='nearest')
        plt.tight_layout()
        plt.show()
        count += 1
        inp = input("enter y to plot more random imgs; anything else to stop: ")
def _folderize_imagenet_one_of_each():  # one-off script
    """Copy the flat one-of-each JPEGs into a one-subdir-per-class layout
    (the structure keras' flow_from_directory expects).

    Fix: removed the unused `old_files` variable, which also performed a
    redundant second listing of the source directory.
    """
    olddir = IMAGENET_ONE_OF_EACH_PATH
    newdir = IMAGENET_ONE_OF_EACH_FLOW_PATH
    files.ensure_dir_exists(newdir)
    for f in files.list_files(olddir, endswith='.JPEG', abs_paths=True):
        basename = os.path.basename(f)
        label = basename.split('_')[0]  # filename prefix encodes the class
        subdir = os.path.join(newdir, label)
        files.ensure_dir_exists(subdir)
        newpath = os.path.join(subdir, basename)
        shutil.copy(f, newpath)
def _make_imagenet_k_of_each(k=10):
    """Copy the (lexically) first k images of every synset into a new
    'imagenet-<k>-of-each' dataset directory."""
    out_path = '../datasets/imagenet-{:03d}-of-each'.format(k)
    print("writing to path: ", out_path)
    src_dir = IMAGENET_TRAIN_PATH
    for synset in files.list_subdirs(src_dir):
        src_subdir = os.path.join(src_dir, synset)
        keep_paths = sorted(files.list_files(src_subdir, abs_paths=True))[:k]
        dest_subdir = os.path.join(out_path, synset)
        files.ensure_dir_exists(dest_subdir)
        for src in keep_paths:
            fname = os.path.basename(src)
            shutil.copy(src, os.path.join(dest_subdir, fname))
if __name__ == '__main__':
    # uncomment exactly one entry point below:
    # example_imagenet_train()
    main()
    # _folderize_imagenet_one_of_each()
    # _make_imagenet_k_of_each(10)
    # _make_imagenet_k_of_each(25)
    # _make_imagenet_k_of_each(50)
    # _make_imagenet_k_of_each(100)
|
#!/usr/bin/env/python
import os
import numpy as np
from joblib import Memory
import pandas as pd
from . import paths
# joblib disk cache for the slow parse/load functions below;
# compress=9 trades CPU time for much smaller cache files
_memory = Memory('.', verbose=1, compress=9)
UCR_DATASETS_DIR = paths.UCR  # root dir: one subdir per UCR dataset
UCR_INFO_PATH = paths.UCR_INFO  # CSV of published per-dataset stats
# ================================================================
# Public
# ================================================================
def all_ucr_datasets():
    """Yield a UCRDataset for every dataset directory, in sorted order."""
    for dataset_dir in sorted(all_ucr_dataset_dirs()):
        dataset = UCRDataset(dataset_dir)
        yield dataset
class UCRDataset(object):
    """One UCR time-series classification dataset (train + test splits).

    On construction: labels are remapped to contiguous ints via their sort
    order, NaNs are imputed with column medians, and the data is either
    z-normalized per sample (znorm=True) or globally shifted/scaled
    (precondition=True).
    """

    def __init__(self, dataset_dir, sep='\t', precondition=True, znorm=True):
        self.name = name_from_dir(dataset_dir)
        self.X_train, y_train = read_ucr_train_data(dataset_dir, sep=sep)
        self.X_test, y_test = read_ucr_test_data(dataset_dir, sep=sep)
        # self.y_train = y_train
        # self.y_test = y_test
        # remap raw labels to 0..(nclasses-1) via their sorted order
        all_lbls = np.r_[y_train, y_test]
        uniq_lbls = np.unique(all_lbls)
        new_lbls = np.argsort(uniq_lbls)  # same if labels are 0..(nclasses-1)
        mapping = dict(zip(uniq_lbls, new_lbls))
        self.y_train = np.array([mapping[lbl] for lbl in y_train])
        self.y_test = np.array([mapping[lbl] for lbl in y_test])
        # self.nclasses = len(uniq_lbls)
        # impute NaNs column-by-column with the column median;
        # MelbournePedestrian has nans, even though not in missing data list
        for X in (self.X_train, self.X_test):
            for d in range(X.shape[1]):
                col = X[:, d]
                nan_idxs = np.isnan(col)
                if nan_idxs.sum() > 0:
                    # print("self.name: ", self.name)
                    # print("original number of nans: ", np.sum(nan_idxs))
                    # X[nan_idxs, d] = col.mean()
                    fillval = np.nanmedian(col)
                    if np.isnan(fillval):
                        # handle all-nan cols, which happens in Crop
                        fillval = np.nanmedian(X)
                    # col is a view into X, so this writes through to X
                    col[nan_idxs] = fillval
                    # np.nan_to_num(col, copy=False, nan=np.median(col))
                    # print("new number of nans: ", np.isnan(X[:, d]).sum())
                    # print("new number of nans: ", np.isnan(col).sum())
        if znorm:
            # per-sample (row-wise) z-normalization
            self.X_train -= self.X_train.mean(axis=1, keepdims=True)
            self.X_test -= self.X_test.mean(axis=1, keepdims=True)
            eps = 1e-20  # guards against zero-variance rows
            self.X_train *= 1 / (self.X_train.std(axis=1, keepdims=True) + eps)
            self.X_test *= 1 / (self.X_test.std(axis=1, keepdims=True) + eps)
        elif precondition:
            # weaker than znormalization since one offset and scale applied
            # to all dims and all samples in both train and test sets; this
            # is basically just here because the values in MelbournePedestrian
            # are huge and screw up numerical algorithms
            self.orig_mean = np.mean(self.X_train)
            self.X_train -= self.orig_mean
            self.X_test -= self.orig_mean
            self.orig_std = np.std(self.X_train)
            self.X_train /= self.orig_std
            self.X_test /= self.orig_std
        assert len(self.X_train) == len(self.y_train)
        assert len(self.X_test) == len(self.y_test)
        # if self.name == 'MelbournePedestrian':
        #     print("I am MelbournePedestrian!")
        #     print('new labels: ', new_lbls)
        #     print("X_train num nans", np.sum(np.isnan(self.X_train)))
        #     print("X_test num nans", np.sum(np.isnan(self.X_test)))
        #     # import sys; sys.exit()
        # if self.name == 'Wafer':
        #     print("original uniq labels train", np.unique(self.y_train))
        #     print("original uniq labels test", np.unique(self.y_test))
def all_ucr_dataset_dirs():
    """Absolute paths of every dataset directory under UCR_DATASETS_DIR."""
    return _ucr_datasets_in_dir(UCR_DATASETS_DIR)
# ================================================================
# Private
# ================================================================
def _ucr_datasets_in_dir(dirpath):
datasetsPath = os.path.expanduser(dirpath)
files = os.listdir(datasetsPath)
rm_dir = 'Missing_value_and_variable_length_datasets_adjusted'
if rm_dir in files:
files.remove(rm_dir)
for i in range(len(files)):
files[i] = os.path.join(datasetsPath, files[i])
dirs = list(filter(os.path.isdir, files))
return dirs
# cached on disk: np.genfromtxt on the bigger UCR files is slow
@_memory.cache
def _readtxt(path, sep=None):
    """Parse a delimited numeric text file into a float32 2D array."""
    return np.genfromtxt(path, delimiter=sep).astype(np.float32)
def read_data_file(path, sep=None, mean_norm=False):
    """Read one UCR-format file: column 0 is the label, the rest is the
    time series.  Returns (X, labels)."""
    D = _readtxt(path, sep=sep)
    # use the builtin int: the deprecated np.int alias was removed in
    # NumPy 1.24 and crashes on modern NumPy
    labels = D[:, 0].astype(int)
    X = D[:, 1:]
    if mean_norm:
        X -= np.mean(X, axis=1, keepdims=True)
    return (X, labels)
def name_from_dir(datasetDir):
    """A dataset's name is the final component of its directory path."""
    head_tail = os.path.split(datasetDir)
    return head_tail[1]
def dir_from_name(datasetName):
    """Inverse of name_from_dir: dataset name -> its directory path."""
    return os.path.join(paths.UCR, datasetName)
def read_ucr_data_in_dir(datasetDir, train, sep=None):
    """Load the TRAIN (train=True) or TEST split inside `datasetDir`."""
    datasetName = name_from_dir(datasetDir)
    suffix = "_TRAIN.tsv" if train else "_TEST.tsv"
    filePath = os.path.join(datasetDir, datasetName + suffix)
    return read_data_file(filePath, sep=sep)
def read_ucr_train_data(datasetDir, sep=None):
    """Load the TRAIN split of the dataset in `datasetDir`."""
    return read_ucr_data_in_dir(datasetDir, train=True, sep=sep)
def read_ucr_test_data(datasetDir, sep=None):
    """Load the TEST split of the dataset in `datasetDir`."""
    return read_ucr_data_in_dir(datasetDir, train=False, sep=sep)
# combines train and test data
def read_all_ucr_data(ucrDatasetDir):
    """Return (X, Y) with the TRAIN split stacked on top of the TEST split."""
    X_train, Y_train = read_ucr_train_data(ucrDatasetDir)
    X_test, Y_test = read_ucr_test_data(ucrDatasetDir)
    X = np.vstack((X_train, X_test))
    Y = np.concatenate((Y_train, Y_test))
    return X, Y
@_memory.cache
def load_ucr_dset_stats():
    """Load the per-dataset info CSV and derive a 'l2-1nn-acc' column from
    the published Euclidean 1-NN error rate column 'ED (w=0)'."""
    df = pd.read_csv(UCR_INFO_PATH)
    df['l2-1nn-acc'] = 1. - df['ED (w=0)']
    return df
# ================================================================ Main
@_memory.cache
def _load_ucr_stats_df():
    """One row per UCR dataset: name, split sizes, length, and #classes."""
    records = []
    for datasetDir in all_ucr_dataset_dirs():
        dset = UCRDataset(datasetDir)
        ntrain, ndims = dset.X_train.shape
        ntest = dset.X_test.shape[0]
        nclasses = len(np.unique(dset.y_test))
        records.append({'Dataset': dset.name, 'N': ntrain, 'D': ndims,
                        'M': ntest, 'nclasses': nclasses})
    return pd.DataFrame.from_records(records)
def main():
    """Print summary statistics over all UCR datasets and explore test-set
    size cutoffs."""
    # dsets = all_ucr_datasets()
    # for dset in dsets:
    #     print("loaded ucr dset:", dset.name)
    # # return
    df = _load_ucr_stats_df()
    # df = df.sort_values(axis=1)
    # df = df.loc[df['N'] > 100]
    # df = df.loc[df['M'] > 100]
    print("ucr dset stats:")
    # print(df['M'].sort_values(ascending=False))
    print("number of dsets:", df.shape[0])
    print("mean, median Ntrain: ", df['N'].mean(), df['N'].median())
    print("mean, median Ntest: ", df['M'].mean(), df['M'].median())
    print("mean, median length: ", df['D'].mean(), df['D'].median())
    # find the test-set-size cutoff m maximizing (#dsets kept) * m, i.e. the
    # total number of test samples across the datasets that survive the cut
    mvals = df['M'].to_numpy()
    mvals = np.sort(mvals)
    length = len(mvals)
    total_sizes = np.array([m * (length - i) for i, m in enumerate(mvals)])
    max_idx = np.argmax(total_sizes)
    best_m_cutoff = mvals[max_idx]
    print("best num dsets, m, sz = ",
          length - max_idx, best_m_cutoff, total_sizes[max_idx])
    print("type of mvals: ", type(mvals))
    for cutoff in [100, 200, 256, 300, 400, 500, 512, 1000]:
        ndsets = (mvals >= cutoff).sum()
        # NOTE(review): if ndsets were 0 this would index total_sizes[-1];
        # presumably every cutoff keeps at least one dataset -- confirm
        total_sz = total_sizes[ndsets-1]
        print(f"n >= {cutoff}: {ndsets} dsets, total sz = {total_sz}")
    # import matplotlib.pyplot as plt
    # xvals = length - np.arange(length)
    # # xvals = np.arange(length)
    # # plt.plot(xvals, total_sizes[::-1])
    # plt.plot(xvals, total_sizes)
    # plt.plot(xvals, mvals)
    # plt.show()
    # df = df.loc[df['M'] >= best_m_cutoff]
    # print("---- after cutting off M to maximize mat sizes:")
    df = df.loc[df['N'] >= 128]
    print("---- after cutting off N to number of centroids:")
    print("number of dsets: ", len(df))
    print("mean, median Ntrain: ", df['N'].mean(), df['N'].median())
    print("mean, median Ntest: ", df['M'].mean(), df['M'].median())
    print("mean, median length: ", df['D'].mean(), df['D'].median())
    print("mean, median nclasses: ", df['nclasses'].mean(), df['nclasses'].median())
    print("min, max nclasses: ", df['nclasses'].min(), df['nclasses'].max())
if __name__ == '__main__':
    # compact float formatting makes the printed stats easier to scan
    np.set_printoptions(formatter={'float': lambda f: "{:.3}".format(f)})
    main()
|
#!/bin/env python
from __future__ import absolute_import, division, print_function
from scipy import io
import numpy as np
import os
from joblib import Memory
_memory = Memory('.', verbose=1)  # joblib disk cache for load_data()
# locations of the SVHN cropped-digits .mat files
DATADIR = '../datasets/svhn'
TRAIN_PATH = os.path.join(DATADIR, 'train_32x32.mat')
TEST_PATH = os.path.join(DATADIR, 'test_32x32.mat')
EXTRA_PATH = os.path.join(DATADIR, 'extra_32x32.mat')
def extract_data_from_mat_file(path):
    """Read one SVHN .mat file; returns (X, y) with X as (N, 32, 32, 3)."""
    contents = io.loadmat(path)
    X = contents['X']
    y = contents['y'].ravel()
    # stored as (H, W, C, N); move the sample axis to the front
    X = np.transpose(X, (3, 0, 1, 2))
    # make classes be 0-9 instead of 1-10; this way the classes line up
    # with the actual digits
    y[y == 10] = 0
    assert len(y.shape) == 1
    assert X.shape[0] == len(y)
    assert X.shape[1] == 32
    assert X.shape[2] == 32
    assert X.shape[-1] == 3
    return X, y
@_memory.cache
def load_data():
    """Return ((X_train, y_train), (X_test, y_test)) for SVHN."""
    X_train, y_train = extract_data_from_mat_file(TRAIN_PATH)
    X_test, y_test = extract_data_from_mat_file(TEST_PATH)
    return (X_train, y_train), (X_test, y_test)
def load_extra_data():
    """Return (X, y) for SVHN's large "extra" split (deliberately uncached)."""
    return extract_data_from_mat_file(EXTRA_PATH)
def main():
    """Plot a 4x4 grid of random SVHN digits, alternating test/train."""
    import matplotlib.pyplot as plt
    (X_train, y_train), (X_test, y_test) = load_data()
    # hacky way to visualize extra data using same code
    # X_extra, y_extra = load_extra_data()
    # X_train, X_test = X_extra, X_extra
    # y_train, y_test = y_extra, y_extra
    _, axes = plt.subplots(4, 4, figsize=(9, 9))
    for i, ax in enumerate(axes.ravel()):
        # odd panels sample the test set, even panels the train set
        X = X_test if i % 2 else X_train
        y = y_test if i % 2 else y_train
        idx = np.random.choice(X.shape[0])
        ax.imshow(X[idx])
        ax.set_title("class = {}".format(y[idx]))
    plt.tight_layout()
    plt.show()
if __name__ == '__main__':
    main()  # plot a grid of random SVHN digits
|
#!/bin/env/python
"""utility functions for data munging"""
from __future__ import absolute_import, division, print_function
import numpy as np
import sklearn
def split_train_test(X, Y, train_frac=.8, random_state=123):
    """Returns X_train, X_test, y_train, y_test"""
    # local import: a bare `import sklearn` does not reliably expose the
    # `model_selection` submodule as an attribute
    from sklearn.model_selection import train_test_split
    # BUG FIX: removed the old `np.random.seed(123)` call -- it ignored the
    # `random_state` argument and clobbered global RNG state for the whole
    # process; `random_state` already makes the split deterministic
    return train_test_split(
        X, Y, train_size=train_frac, random_state=random_state)
def stratified_split_train_test(X, Y, train_frac=.8, random_state=123):
    """Returns X_train, X_test, y_train, y_test"""
    # local import: a bare `import sklearn` does not reliably expose the
    # `model_selection` submodule as an attribute
    from sklearn.model_selection import train_test_split
    return train_test_split(
        X, Y, train_size=train_frac, stratify=Y, random_state=random_state)
|
#!/bin/env python
# Load 3-lead ECG recordings from SHAREE Database:
# https://physionet.org/content/shareedb/1.0.0/
from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np
import os
from . import paths
from . import files
from joblib import Memory
_memory = Memory('.', verbose=0)  # joblib disk cache for the loaders below
DATA_DIR = paths.SHAREE_ECG
NUM_RECORDINGS = 139  # expected number of .dat files in the database
NUM_CHANNELS = 3  # 3-lead ECG
RAW_DTYPE = np.uint16  # on-disk sample format
# RAW_DTYPE = np.int16
SAMPLES_PER_SEC = 128
SAMPLES_PER_MIN = SAMPLES_PER_SEC * 60
SAMPLES_PER_HOUR = SAMPLES_PER_MIN * 60
@_memory.cache
def load_recording_ids():
    """List the filenames (relative, not absolute) of all .dat recordings."""
    rec_ids = files.list_files(DATA_DIR, abs_paths=False, endswith='.dat')
    assert len(rec_ids) == NUM_RECORDINGS
    return rec_ids
@_memory.cache
def load_recording(rec_id, limit_nhours=None, dtype=np.float32):
    """Load one 3-channel ECG recording as an (nsamples, 3) float array.

    Optionally truncates to `limit_nhours`, drops the first minute, and
    lightly low-pass filters each channel.
    """
    raw = np.fromfile(os.path.join(DATA_DIR, rec_id), dtype=RAW_DTYPE)
    assert len(raw) % NUM_CHANNELS == 0
    a = raw.reshape(-1, NUM_CHANNELS)  # looks like it's rowmajor
    if limit_nhours and limit_nhours > 0:
        a = a[:int(limit_nhours * SAMPLES_PER_HOUR)]
    a = a[SAMPLES_PER_MIN:]  # often a bunch of garbage at the beginning
    a = a.astype(dtype)
    # small amount of smoothing since heavily oversampled + noisy
    filt = np.hamming(5).astype(np.float32)
    filt /= np.sum(filt)
    for ch in range(a.shape[1]):
        a[:, ch] = np.convolve(a[:, ch], filt, mode='same')
    return a
# def load_recordings(generator=False, plot=False, **kwargs):
def load_recordings(plot=False, **kwargs):
    """Load every recording; kwargs are forwarded to load_recording().

    With plot=True, shows the first 5 recordings and then stops.
    """
    rec_ids = load_recording_ids()
    recs = []
    for i, rec_id in enumerate(rec_ids):
        print("loading rec id: ", rec_id)
        rec = load_recording(rec_id, **kwargs)
        recs.append(rec)
        if plot:
            if i < 5:
                offset = SAMPLES_PER_MIN
                a = rec[offset:(offset + 1000)]
                print('about to plot recording', rec_id)
                plt.figure(figsize=(9, 7))
                plt.plot(a)
                plt.show()
            else:
                # NOTE(review): returns None (not `recs`) after plotting the
                # first 5 recordings -- confirm this early bailout is intended
                return
    return recs
if __name__ == '__main__':
    # print("done")
    print("about to call load_recordings")
    # with plot=True this displays the first 5 recordings; return discarded
    load_recordings(plot=True)
    # print("rec ids: ", load_recording_ids())
    print("called load_recordings")
|
#!/bin/env python
# Load 3-lead ECG recordings from SHAREE Database:
# https://physionet.org/content/shareedb/1.0.0/
from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np
import os
from . import paths
from . import files
from joblib import Memory
_memory = Memory('.', verbose=0)  # joblib disk cache for the loaders below
DATA_DIR = paths.INCART_ECG
NUM_RECORDINGS = 75  # expected number of .dat files in the database
NUM_CHANNELS = 12  # 12-lead ECG
RAW_DTYPE = np.int16  # on-disk sample format
SAMPLES_PER_SEC = 257
SAMPLES_PER_MIN = SAMPLES_PER_SEC * 60
SAMPLES_PER_HOUR = SAMPLES_PER_MIN * 60
@_memory.cache
def load_recording_ids():
    """Filenames (relative paths) of every .dat recording in the database."""
    rec_ids = files.list_files(DATA_DIR, abs_paths=False, endswith='.dat')
    assert len(rec_ids) == NUM_RECORDINGS
    return rec_ids
@_memory.cache
def load_recording(rec_id, limit_nhours=None, dtype=np.float32):
    """Load one 12-lead ECG recording as an (nsamples, 12) float array,
    truncated to `limit_nhours` if given, minus the first minute, with the
    per-channel mean removed."""
    raw = np.fromfile(os.path.join(DATA_DIR, rec_id), dtype=RAW_DTYPE)
    assert len(raw) % NUM_CHANNELS == 0
    a = raw.reshape(-1, NUM_CHANNELS)  # yep, clearly rowmajor when plotted
    if limit_nhours and limit_nhours > 0:
        a = a[:int(limit_nhours * SAMPLES_PER_HOUR)]
    a = a[SAMPLES_PER_MIN:]  # often a bunch of garbage at the beginning
    a = a.astype(dtype)
    a -= a.mean(axis=0)  # just so r_sq values are more meaningful
    # NOTE: unlike the SHAREE loader, no smoothing filter is applied here
    return a
def load_recordings(plot=False, **kwargs):
    """Load every recording; kwargs are forwarded to load_recording().

    With plot=True, shows the first 5 recordings and then stops.
    """
    rec_ids = load_recording_ids()
    recs = []
    for i, rec_id in enumerate(rec_ids):
        print("loading rec id: ", rec_id)
        rec = load_recording(rec_id, **kwargs)
        recs.append(rec)
        if plot:
            if i < 5:
                offset = 0
                a = rec[offset:(offset + 1000)]
                print("plotting recording {} with shape: {}".format(
                    rec_id, rec.shape))
                plt.figure(figsize=(9, 7))
                plt.plot(a)
                plt.show()
            else:
                # NOTE(review): returns None (not `recs`) after plotting the
                # first 5 recordings -- confirm this early bailout is intended
                return
    return recs
if __name__ == '__main__':
    print("about to call load_recordings")
    # with plot=True this displays the first 5 recordings; return discarded
    load_recordings(plot=True)
    print("called load_recordings")
|
#!/bin/env python
# from __future__ import absolute_import, division, print_function
from __future__ import division, print_function
import numpy as np
from . import paths
from . import image_utils as imgs
from joblib import Memory
_memory = Memory('.', verbose=1)  # joblib disk cache for the loaders below
DATADIR_101 = paths.CALTECH_101
DATADIR_256 = paths.CALTECH_256
# defaults applied by every loader: resample to 224x224 with a center crop
# _DEFAULT_CALTECH_KWARGS = dict(resample=(224, 224), crop='center', verbose=2)
_DEFAULT_CALTECH_KWARGS = dict(resample=(224, 224), crop='center')
# each dataset drops its "background"/"clutter" pseudo-class
_CALTECH_101_KWARGS = dict(
    dirpath=DATADIR_101, remove_classes='BACKGROUND_Google')
_CALTECH_256_KWARGS = dict(
    dirpath=DATADIR_256, remove_classes='257.clutter')
@_memory.cache
def load_caltech101(**kwargs):
    """Load Caltech-101 (minus the background class); kwargs override the
    default resample/crop settings."""
    # plain loop instead of a list comprehension used only for side effects
    for key, val in _DEFAULT_CALTECH_KWARGS.items():
        kwargs.setdefault(key, val)
    return imgs.load_jpegs_from_dir(**_CALTECH_101_KWARGS, **kwargs)
@_memory.cache
def load_caltech256(**kwargs):
    """Load Caltech-256 (minus the clutter class); kwargs override the
    default resample/crop settings."""
    # plain loop instead of a list comprehension used only for side effects
    for key, val in _DEFAULT_CALTECH_KWARGS.items():
        kwargs.setdefault(key, val)
    return imgs.load_jpegs_from_dir(**_CALTECH_256_KWARGS, **kwargs)
@_memory.cache
def load_caltech101_ids(**kwargs):
    """Like load_caltech101, but return image ids (paths) instead of pixels."""
    return imgs.load_jpegs_from_dir(
        **_CALTECH_101_KWARGS, only_return_path=True, **kwargs)
@_memory.cache
def load_caltech256_ids(**kwargs):
    """Like load_caltech256, but return image ids (paths) instead of pixels."""
    return imgs.load_jpegs_from_dir(
        **_CALTECH_256_KWARGS, only_return_path=True, **kwargs)
# @_memory.cache
def load_caltech_img(img_id, **kwargs):
    """Load one caltech image (by id, i.e. its path) as float32 pixels,
    using the shared default resample/crop settings."""
    # plain loop instead of a list comprehension used only for side effects
    for key, val in _DEFAULT_CALTECH_KWARGS.items():
        kwargs.setdefault(key, val)
    path = img_id  # load_jpegs_from_dir returns abs path as id
    return imgs.load_jpg(path, **kwargs).astype(np.float32)
def main():
    """Visual sanity check: load caltech-101 and plot 16 random images."""
    import matplotlib.pyplot as plt
    # caltech 101
    (X, y), label2cls = imgs.load_jpegs_from_dir(
        # DATADIR_101, remove_classes='BACKGROUND_Google')
        # DATADIR_101, remove_classes='BACKGROUND_Google', crop='center')
        DATADIR_101, remove_classes='BACKGROUND_Google', pad='square')
    # # DATADIR_101, remove_classes='BACKGROUND_Google', resample=(224, 224))
    # caltech 256
    # (X, y), label2cls = imgs.load_jpegs_from_dir(
    #     DATADIR_256, remove_classes='257.clutter', verbose=2)
    # X is a single ndarray only when all images share one shape;
    # otherwise it is a list of differently-sized arrays
    if isinstance(X, np.ndarray):
        print("X shape: ", X.shape)
    else:
        print("X is a list of length", len(X))
        print("X[0] has shape: ", X[0].shape)
    print("y shape: ", y.shape)
    _, axes = plt.subplots(4, 4, figsize=(9, 9))
    for i, ax in enumerate(axes.ravel()):
        idx = np.random.choice(len(X))
        ax.imshow(X[idx])
        label = label2cls[y[idx]]
        ax.set_title(label)
    plt.tight_layout()
    plt.show()
if __name__ == '__main__':
    main()  # visual sanity check of the caltech loaders
|
#!/bin/env python
from __future__ import absolute_import, division, print_function
import numpy as np
from python import image_utils as imgs
from joblib import Memory
_memory = Memory('.', verbose=1)  # joblib disk cache (currently unused here)
DATADIR_101 = '../datasets/caltech/101_ObjectCategories'
def main():
    """Demo stub: load a caltech dataset and plot 16 random images.

    NOTE(review): the load_jpegs_from_dir() call below is an unfinished
    TODO -- it is invoked with no arguments and will presumably raise;
    fill in the dataset directory (e.g. DATADIR_101) first.
    """
    import matplotlib.pyplot as plt
    # caltech 101
    (X, y), label2cls = imgs.load_jpegs_from_dir(
        # TODO
    )
    # X is an ndarray only when all images share one shape; else a list
    if isinstance(X, np.ndarray):
        print("X shape: ", X.shape)
    else:
        print("X is a list of length", len(X))
        print("X[0] has shape: ", X[0].shape)
    print("y shape: ", y.shape)
    _, axes = plt.subplots(4, 4, figsize=(9, 9))
    for i, ax in enumerate(axes.ravel()):
        idx = np.random.choice(len(X))
        ax.imshow(X[idx])
        label = label2cls[y[idx]]
        ax.set_title(label)
    plt.tight_layout()
    plt.show()
if __name__ == '__main__':
    main()  # NOTE(review): the loader call inside main() is an unfinished TODO
|
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
from sklearn.datasets import load_digits
import timeit
import bolt
# ================================================================ utils
def _dists_sq(X, q):
diffs = X - q
return np.sum(diffs * diffs, axis=-1)
def _dists_l1(X, q):
diffs = np.abs(X - q)
return np.sum(diffs, axis=-1)
def _element_size_bytes(x):
return np.dtype(x.dtype).itemsize
def _corr(x, y):
x, y = x.astype(np.float64), y.astype(np.float64)
x = x.ravel() - np.mean(x)
y = y.ravel() - np.mean(y)
r = np.mean(x * y) / (np.std(x) * np.std(y))
assert -1.00001 <= r <= 1.00001
return r
def _sq_dists_to_vectors(X, queries, rowNorms=None, queryNorms=None):
    """Matrix of squared L2 distances, shape (len(X), len(queries)).

    Precomputed rowNorms / queryNorms may be passed to avoid recomputation.
    """
    Q = queries.shape[0]
    mat_size = X.shape[0] * Q
    # BUG FIX: warn based on the total output size in bytes; previously this
    # compared a single element's itemsize against 1GB, so the warning could
    # never fire (also added the missing space in the message)
    mat_size_bytes = mat_size * _element_size_bytes(X[0] + queries[0])
    if mat_size_bytes > int(1e9):
        print("WARNING: _sq_dists_to_vectors: attempting to create a matrix "
              "of size {} ({}B)".format(mat_size, mat_size_bytes))
    if rowNorms is None:
        rowNorms = np.sum(X * X, axis=1, keepdims=True)
    if queryNorms is None:
        queryNorms = np.sum(queries * queries, axis=1)
    # ||x - q||^2 == ||x||^2 - 2 x.q + ||q||^2
    dotProds = np.dot(X, queries.T)
    return (-2 * dotProds) + rowNorms + queryNorms  # len(X) x len(queries)
def top_k_idxs(elements, k, smaller_better=True, axis=-1):
    """Indices of the k smallest (or largest) entries along `axis`,
    best-first."""
    if smaller_better:  # return indices of lowest elements
        # partially sort the first k positions, then take them
        which_nn = np.arange(k)
        return np.argpartition(elements, kth=which_nn, axis=axis)[:k]
    # return indices of highest elements: partially sort the last k
    # positions, then take them reversed so the largest comes first
    n = elements.shape[axis]
    which_nn = (n - 1 - np.arange(k))[::-1]
    return np.argpartition(elements, kth=which_nn, axis=axis)[-k:][::-1]
def _knn(X, Q, k=1000, print_every=5, block_sz=128):
    """Exact k-nearest-neighbor indices in X (squared L2) for each query row.

    Queries are handled `block_sz` at a time to bound the size of the
    temporary distance matrix; each block recurses into the base case.
    """
    nqueries = Q.shape[0]
    truth = np.full((nqueries, k), -999, dtype=np.int32)

    # base case: few enough queries to compute all distances at once
    if nqueries <= block_sz:
        dists = _sq_dists_to_vectors(Q, X)
        assert dists.shape == (Q.shape[0], X.shape[0])
        for row in range(nqueries):
            truth[row, :] = top_k_idxs(dists[row, :], k)
        return truth

    # otherwise recurse on one block of queries at a time
    nblocks = int(np.ceil(nqueries / float(block_sz)))
    for b in range(nblocks):
        start = b * block_sz
        end = min(start + block_sz, nqueries)
        truth[start:end, :] = _knn(X, Q[start:end, :], k=k, block_sz=block_sz)
        if b % print_every == 0:
            print("computing top k for query block "
                  "{} (queries {}-{})...".format(b, start, end))

    assert np.all(truth != -999)  # every row must have been filled in
    return truth
def _create_randn_encoder(Ntrain=100, Ntest=20, D=64):
    """Build a bolt.Encoder trained on and populated with random Gaussian data."""
    encoder = bolt.Encoder()
    # keep the two randn() calls in this order so the RNG stream is unchanged
    encoder.fit(np.random.randn(Ntrain, D), just_train=True)
    encoder.set_data(np.random.randn(Ntest, D))
    return encoder
# ================================================================ tests
def test_smoketest():
    """Test that `bolt.Encoder`'s methods don't crash"""
    D = 64
    enc = _create_randn_encoder(D=D)

    queries = np.random.randn(5, D)
    for q in queries:
        enc.transform(q)
    for k in (1, 3):
        for q in queries:
            enc.knn(q, k)
def _fmt_float(x):
return '{}.'.format(int(x)) if x == int(x) else '{:.3f}'.format(x)
def _load_digits_X_Q(nqueries):
    """Split the sklearn digits data: all but the last `nqueries` rows form
    the database X, the last `nqueries` rows the queries Q."""
    digits, _ = load_digits(return_X_y=True)
    split = len(digits) - nqueries
    return digits[:split], digits[split:]  # X, Q
def test_time_space_savings():  # mostly to verify readme code
    """Sanity-check the space and time savings quoted in the README.

    Prints raw vs. encoded byte counts, and wall-clock time of exact numpy
    dot products vs. bolt's approximate transform over the same queries.
    """
    np.set_printoptions(formatter={'float_kind': _fmt_float})
    nqueries = 20
    X, Q = _load_digits_X_Q(nqueries)
    enc = bolt.Encoder(accuracy='lowest', reduction=bolt.Reductions.DOT_PRODUCT)
    enc.fit(X)
    # massive space savings
    print("original space usage: {}B".format(X.nbytes))  # 1777 * 64 * 8B = 909KB
    print("bolt space usage: {}B".format(enc.nbytes))  # 1777 * 2B = 3.55KB
    # massive time savings (~10x here, but often >100x on larger datasets
    # with less Python overhead; see the Bolt paper)
    t_np = timeit.Timer(lambda: [np.dot(X, q) for q in Q]).timeit(5)  # ~8ms
    t_bolt = timeit.Timer(lambda: [enc.transform(q) for q in Q]).timeit(5)  # ~800us
    print("Numpy / BLAS time, Bolt time: {:.3f}ms, {:.3f}ms".format(
        t_np * 1000, t_bolt * 1000))
def test_unquantize():
    """Check that unquantized bolt outputs approximate the exact values.

    For both dot products and squared L2 distances, the mean squared error
    of the reconstruction must be under 1% of the true values' variance.
    """
    X, Q = _load_digits_X_Q(nqueries=20)

    # ------------------------ dot products
    enc = bolt.Encoder('dot', accuracy='high').fit(X)
    dots_true = [np.dot(X, q) for q in Q]
    dots_bolt = [enc.transform(q, unquantize=True) for q in Q]
    diffs = [true_vals - bolt_vals
             for true_vals, bolt_vals in zip(dots_true, dots_bolt)]
    mse = np.mean([np.mean(diff * diff) for diff in diffs])
    var = np.mean([np.var(true_vals) for true_vals in dots_true])
    print("dot product unquantize mse / variance: ", mse / var)
    assert (mse / var) < .01

    # ------------------------ squared l2 distances
    enc = bolt.Encoder('l2', accuracy='high').fit(X)
    dists_true = [_dists_sq(X, q) for q in Q]
    dists_bolt = [enc.transform(q, unquantize=True) for q in Q]
    diffs = [true_vals - bolt_vals
             for true_vals, bolt_vals in zip(dists_true, dists_bolt)]
    mse = np.mean([np.mean(diff * diff) for diff in diffs])
    # BUG FIX: normalize by the variance of the l2 distances; previously this
    # reused `dots_true` from the dot-product section above.
    var = np.mean([np.var(true_vals) for true_vals in dists_true])
    print("squared l2 unquantize mse / variance: ", mse / var)
    assert (mse / var) < .01
def test_basic():
    """End-to-end accuracy checks for each reduction bolt supports.

    Verifies that bolt's approximate distances / dot products correlate
    strongly (> .95) with the exact values, and that its knn results mostly
    agree (precision > .6) with brute force.

    BUG FIX: `np.int` and `np.bool` were removed in NumPy 1.24; the builtin
    `int` / `bool` (which the aliases pointed at) are used instead.
    """
    np.set_printoptions(formatter={'float_kind': _fmt_float})
    nqueries = 20
    X, Q = _load_digits_X_Q(nqueries)

    # ------------------------------------------------ squared l2
    enc = bolt.Encoder(accuracy='low', reduction=bolt.Reductions.SQUARED_EUCLIDEAN)
    enc.fit(X)

    l2_corrs = np.empty(len(Q))
    for i, q in enumerate(Q):
        l2_true = _dists_sq(X, q).astype(int)
        l2_bolt = enc.transform(q)
        l2_corrs[i] = _corr(l2_true, l2_bolt)
        if i == nqueries - 1:
            print("l2 true: ", l2_true)
            print("l2 bolt: ", l2_bolt)
            print("corr: ", l2_corrs[i])

    mean_l2 = np.mean(l2_corrs)
    std_l2 = np.std(l2_corrs)
    assert mean_l2 > .95
    print("--> squared l2 dist correlation: {} +/- {}".format(mean_l2, std_l2))

    # ------------------------------------------------ dot product
    enc = bolt.Encoder(accuracy='low', reduction=bolt.Reductions.DOT_PRODUCT)
    enc.fit(X)

    dot_corrs = np.empty(nqueries)
    for i, q in enumerate(Q):
        dots_true = np.dot(X, q)
        dots_bolt = enc.transform(q)
        dot_corrs[i] = _corr(dots_true, dots_bolt)

    mean_dot = np.mean(dot_corrs)
    std_dot = np.std(dot_corrs)
    assert mean_dot > .95
    print("--> dot product correlation: {} +/- {}".format(mean_dot, std_dot))

    # ------------------------------------------------ l2 knn
    enc = bolt.Encoder(accuracy='low', reduction='l2')
    enc.fit(X)

    k_bolt = 10  # tell bolt to search for true knn
    k_true = 10  # compute this many true neighbors
    true_knn = _knn(X, Q, k_true)
    bolt_knn = [enc.knn(q, k_bolt) for q in Q]

    contained = np.empty((nqueries, k_bolt), dtype=bool)
    for i in range(nqueries):
        true_neighbors = true_knn[i]
        bolt_neighbors = bolt_knn[i]
        for j in range(k_bolt):
            contained[i, j] = bolt_neighbors[j] in true_neighbors

    precision = np.mean(contained)
    print("--> l2 knn precision@{}: {}".format(k_bolt, precision))
    assert precision > .6

    # ------------------------------------------------ dot knn
    enc = bolt.Encoder(accuracy='low', reduction='dot')
    enc.fit(X)

    k_bolt = 10  # tell bolt to search for true knn
    k_true = 10  # compute this many true neighbors
    true_dists = np.dot(X, Q.T)
    true_knn = np.empty((nqueries, k_true), dtype=np.int64)
    for i in range(nqueries):
        true_knn[i, :] = top_k_idxs(
            true_dists[:, i], k_true, smaller_better=False)
    bolt_knn = [enc.knn(q, k_bolt) for q in Q]

    contained = np.empty((len(Q), k_bolt), dtype=bool)
    for i in range(len(Q)):
        true_neighbors = true_knn[i]
        bolt_neighbors = bolt_knn[i]
        for j in range(k_bolt):
            contained[i, j] = bolt_neighbors[j] in true_neighbors

    precision = np.mean(contained)
    print("--> max inner product knn precision@{}: {}".format(
        k_bolt, precision))
    assert precision > .6
# Run the full accuracy test when executed as a script.
if __name__ == '__main__':
    test_basic()
|
# This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2012 Keir Mierle <mierle@gmail.com>
#
# This Source Code Form is subject to the terms of the Mozilla
# Public License v. 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: mierle@gmail.com (Keir Mierle)
#
# Make the long-awaited conversion to MPL.
# Old LGPL3/GPL2 dual-license header that this script replaces. Matched
# verbatim, including the leading/trailing newlines inside the quotes.
lgpl3_header = '''
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
'''

# Replacement MPL2 header.
mpl2_header = """
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""

import os
import sys

# Files that must never be rewritten (notably this script itself).
exclusions = set(['relicense.py'])
def update(text):
    """Return (new_text, changed): swap the LGPL3 header for the MPL2 one."""
    if lgpl3_header not in text:
        return text, False
    return text.replace(lgpl3_header, mpl2_header), True
rootdir = sys.argv[1]
for root, sub_folders, files in os.walk(rootdir):
for basename in files:
if basename in exclusions:
print 'SKIPPED', filename
continue
filename = os.path.join(root, basename)
fo = file(filename)
text = fo.read()
fo.close()
text, updated = update(text)
if updated:
fo = file(filename, "w")
fo.write(text)
fo.close()
print 'UPDATED', filename
else:
print ' ', filename
|
# Intentionally empty
|
# -*- coding: utf-8 -*-
# This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2009 Benjamin Schindler <bschindler@inf.ethz.ch>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Pretty printers for Eigen::Matrix
# This is still pretty basic as the python extension to gdb is still pretty basic.
# It cannot handle complex eigen types and it doesn't support any of the other eigen types
# Such as quaternion or some other type.
# This code supports fixed size as well as dynamic size matrices
# To use it:
#
# * Create a directory and put the file as well as an empty __init__.py in
# that directory.
# * Create a ~/.gdbinit file, that contains the following:
# python
# import sys
# sys.path.insert(0, '/path/to/eigen/printer/directory')
# from printers import register_eigen_printers
# register_eigen_printers (None)
# end
import gdb
import re
import itertools
class EigenMatrixPrinter:
    "Print Eigen Matrix or Array of some kind"

    def __init__(self, variety, val):
        "Extract all the necessary information"
        # Save the variety (presumably "Matrix" or "Array") for later usage
        self.variety = variety

        # The gdb extension does not support value template arguments -
        # need to extract them by hand from the type's tag string.
        type = val.type
        if type.code == gdb.TYPE_CODE_REF:
            type = type.target()
        self.type = type.unqualified().strip_typedefs()
        tag = self.type.tag
        # BUG FIX: use a raw-string regex; '\<' and '\>' are invalid escape
        # sequences in Python 3 (and '<' / '>' need no escaping in a regex).
        regex = re.compile(r'<.*>')
        m = regex.findall(tag)[0][1:-1]
        template_params = m.split(',')
        template_params = [x.replace(" ", "") for x in template_params]

        # A rows/cols template parameter of -1 (Eigen::Dynamic, which gdb
        # prints in several hex spellings) means the size is stored at
        # runtime inside m_storage.
        if template_params[1] in ('-0x00000000000000001', '-0x000000001', '-1'):
            self.rows = val['m_storage']['m_rows']
        else:
            self.rows = int(template_params[1])

        if template_params[2] in ('-0x00000000000000001', '-0x000000001', '-1'):
            self.cols = val['m_storage']['m_cols']
        else:
            self.cols = int(template_params[2])

        self.options = 0  # default value
        if len(template_params) > 3:
            self.options = template_params[3]
        # bit 0 of the Options template parameter selects row-major storage
        self.rowMajor = (int(self.options) & 0x1)

        self.innerType = self.type.template_argument(0)
        self.val = val

        # Fixed size matrices have a struct as their storage, so we need to
        # walk through this struct to reach the raw element array.
        self.data = self.val['m_storage']['m_data']
        if self.data.type.code == gdb.TYPE_CODE_STRUCT:
            self.data = self.data['array']
        self.data = self.data.cast(self.innerType.pointer())

    class _iterator:
        """Iterate over the elements in memory (storage) order, yielding
        ('[row,col]', value) pairs as gdb expects from children()."""

        def __init__(self, rows, cols, dataPtr, rowMajor):
            self.rows = rows
            self.cols = cols
            self.dataPtr = dataPtr
            self.currentRow = 0
            self.currentCol = 0
            self.rowMajor = rowMajor

        def __iter__(self):
            return self

        def next(self):
            return self.__next__()  # Python 2.x compatibility

        def __next__(self):
            row = self.currentRow
            col = self.currentCol
            if self.rowMajor == 0:
                # column-major: walk down the current column first
                if self.currentCol >= self.cols:
                    raise StopIteration
                self.currentRow = self.currentRow + 1
                if self.currentRow >= self.rows:
                    self.currentRow = 0
                    self.currentCol = self.currentCol + 1
            else:
                # row-major: walk along the current row first
                if self.currentRow >= self.rows:
                    raise StopIteration
                self.currentCol = self.currentCol + 1
                if self.currentCol >= self.cols:
                    self.currentCol = 0
                    self.currentRow = self.currentRow + 1

            item = self.dataPtr.dereference()
            self.dataPtr = self.dataPtr + 1
            if (self.cols == 1):  # if it's a column vector
                return ('[%d]' % (row,), item)
            elif (self.rows == 1):  # if it's a row vector
                return ('[%d]' % (col,), item)
            return ('[%d,%d]' % (row, col), item)

    def children(self):
        # gdb calls this to enumerate the element values.
        return self._iterator(self.rows, self.cols, self.data, self.rowMajor)

    def to_string(self):
        return "Eigen::%s<%s,%d,%d,%s> (data ptr: %s)" % (
            self.variety, self.innerType, self.rows, self.cols,
            "RowMajor" if self.rowMajor else "ColMajor", self.data)
class EigenQuaternionPrinter:
    "Print an Eigen Quaternion"
    def __init__(self, val):
        "Extract all the necessary information"
        # The gdb extension does not support value template arguments - need to extract them by hand
        type = val.type
        if type.code == gdb.TYPE_CODE_REF:
            type = type.target()
        self.type = type.unqualified().strip_typedefs()
        # First template argument is the scalar type of the coefficients.
        self.innerType = self.type.template_argument(0)
        self.val = val
        # Quaternions have a struct as their storage, so we need to walk through this
        self.data = self.val['m_coeffs']['m_storage']['m_data']['array']
        self.data = self.data.cast(self.innerType.pointer())
    class _iterator:
        # Iterates over the four coefficients in storage order, labeling
        # them x, y, z, w for gdb's children() protocol.
        def __init__ (self, dataPtr):
            self.dataPtr = dataPtr
            self.currentElement = 0
            self.elementNames = ['x', 'y', 'z', 'w']
        def __iter__ (self):
            return self
        def next(self):
            return self.__next__()  # Python 2.x compatibility
        def __next__(self):
            element = self.currentElement
            if self.currentElement >= 4:  # there are 4 elements in a quaternion
                raise StopIteration
            self.currentElement = self.currentElement + 1
            item = self.dataPtr.dereference()
            self.dataPtr = self.dataPtr + 1
            return ('[%s]' % (self.elementNames[element],), item)
    def children(self):
        # gdb calls this to enumerate the coefficient values.
        return self._iterator(self.data)
    def to_string(self):
        return "Eigen::Quaternion<%s> (data ptr: %s)" % (self.innerType, self.data)
def build_eigen_dictionary ():
    """Populate the type-name-regex -> printer-factory lookup table."""
    pretty_printers_dict[re.compile('^Eigen::Quaternion<.*>$')] = \
        lambda val: EigenQuaternionPrinter(val)
    pretty_printers_dict[re.compile('^Eigen::Matrix<.*>$')] = \
        lambda val: EigenMatrixPrinter("Matrix", val)
    pretty_printers_dict[re.compile('^Eigen::Array<.*>$')] = \
        lambda val: EigenMatrixPrinter("Array", val)
def register_eigen_printers(obj):
    "Register eigen pretty-printers with objfile Obj"
    # BUG FIX: compare against None with 'is', not '==' (PEP 8; '==' may
    # be hijacked by a custom __eq__ on the objfile wrapper).
    if obj is None:
        obj = gdb
    obj.pretty_printers.append(lookup_function)
def lookup_function(val):
    "Look-up and return a pretty-printer that can print val."
    # Resolve references and typedefs down to the underlying type name.
    # (Also renamed the local so it no longer shadows the builtin `type`.)
    valtype = val.type
    if valtype.code == gdb.TYPE_CODE_REF:
        valtype = valtype.target()
    valtype = valtype.unqualified().strip_typedefs()

    typename = valtype.tag
    # Anonymous / fundamental types have no tag; nothing for us to print.
    # BUG FIX: 'is None' rather than '== None'.
    if typename is None:
        return None

    for pattern in pretty_printers_dict:
        if pattern.search(typename):
            return pretty_printers_dict[pattern](val)
    return None
# Module-level registry mapping compiled type-name regexes to printer
# factories; filled in immediately below at import time.
pretty_printers_dict = {}
build_eigen_dictionary ()
|
from attention_tensorflow_mesh.attention_tensorflow_mesh import transformer_lm, transformer, attention |
import math
import mesh_tensorflow as mtf
import tensorflow.compat.v1 as tf
# helpers
def default(val, d):
    """Return `val` unless it is None, in which case return the fallback `d`."""
    if val is None:
        return d
    return val
# simple linear layer
def linear(x, dim_out, scope = 'linear', bias = True):
    """Dense layer projecting the last mesh dimension of `x` to `dim_out`."""
    with tf.variable_scope(scope):
        *_, dim_in = x.shape
        # scale init by 1/sqrt(fan_in)
        stddev = 1 / math.sqrt(dim_in.size)
        init = tf.random_normal_initializer(stddev=stddev, dtype=tf.float32)
        return mtf.layers.dense(
            x, new_dims=[dim_out], reduced_dims=[dim_in], name=scope,
            use_bias=bias, kernel_initializer=init)
# norm
def norm(x, axis = None, epsilon=1e-5):
    """Normalize `x` to zero mean and unit variance along `axis`
    (defaults to the last mesh dimension)."""
    if axis is None:
        axis = x.shape[-1]
    mean = mtf.broadcast(mtf.reduce_mean(x, reduced_dim=axis), x.shape)
    variance = mtf.broadcast(
        mtf.reduce_mean(mtf.square(x - mean), reduced_dim=axis), x.shape)
    return (x - mean) * mtf.rsqrt(variance + epsilon)
def scale_norm(x, scope, *, axis=None, epsilon=1e-5, params=None):
    """Normalize `x` then multiply by a single learned scalar gain `g`.

    `params` is accepted for interface compatibility but is unused here.
    (Removed the unused local `n_state` from the original.)
    """
    if axis is None:
        axis = x.shape[-1]
    with tf.variable_scope(scope):
        dt = tf.float32
        # one scalar variable rescaling the normalized activations
        g = mtf.get_variable(x.mesh, 'g', [],
                             initializer=tf.constant_initializer(1, dtype=dt),
                             dtype=dt)
        x = norm(x, axis, epsilon)
        x = x * g
        return x
def prenorm(fn, scope):
    """Wrap `fn` so its input is scale-normalized first (pre-norm style)."""
    def wrapped(x, *args, **kwargs):
        return fn(scale_norm(x, scope), *args, **kwargs)
    return wrapped
def residual(fn):
    """Wrap `fn` with a residual (skip) connection: x -> fn(x) + x."""
    def wrapped(x, *args, **kwargs):
        out = fn(x, *args, **kwargs)
        return out + x
    return wrapped
# full multi-head attention
def attention(x, dim_head, dim_features_head, scope = 'attn', causal = False):
    """Multi-head self-attention over mesh-tensorflow tensors.

    `x` carries mesh dimensions (batch, seq, dim) — taken directly from
    x.shape below. `dim_head` is the heads Dimension, `dim_features_head`
    the per-head feature Dimension. With `causal`, a position may only
    attend to itself and earlier positions.
    """
    with tf.variable_scope(scope):
        mesh, batch, seq, dim = x.mesh, *x.shape
        dim_heads = mtf.Dimension('dim_heads', dim_head.size * dim_features_head.size)
        dim_intermediate = mtf.Dimension('qkv_dimension', dim_heads.size * 3)
        # one projection produces q, k and v stacked along the last dim
        qkv = linear(x, dim_intermediate, bias = False, scope='to_qkv')
        q, k, v = mtf.split(qkv, dim_intermediate, 3)
        q, k, v = map(lambda t: mtf.reshape(t, [batch, seq, dim_head, dim_features_head]), (q, k, v))
        q, k, v = map(lambda t: mtf.transpose(t, [batch, dim_head, seq, dim_features_head]), (q, k, v))
        # rename k/v's sequence dim so it stays distinct from q's in einsums
        k, v = map(lambda t: mtf.rename_dimension(t, seq.name, 'memory_length'), (k, v))
        mem_len_dim = v.shape[-2]
        dots = mtf.layers.us_einsum([q, k], [batch, dim_head, seq, mem_len_dim])
        if causal:
            # additive mask: large negative on attention to future positions
            i = mtf.range(mesh, seq, tf.int32)
            j = mtf.range(mesh, mem_len_dim, tf.int32)
            i, j = map(lambda t: mtf.broadcast(t, [seq, mem_len_dim]), (i, j))
            mask = mtf.less(i + mem_len_dim.size - seq.size, j)
            mask = mtf.cast(mask, tf.float32) * -1e10
            dots += mask
        attn = mtf.softmax(dots, mem_len_dim)
        out = mtf.einsum([attn, v], [batch, dim_head, seq, dim_features_head])
        # merge heads back into a single feature dimension and project out
        out = mtf.transpose(out, [batch, seq, dim_head, dim_features_head])
        out = mtf.reshape(out, [batch, seq, dim_heads])
        combined_out = linear(out, dim, scope='combine_output')
        return combined_out
# feed forward
def ff(x, mult = 4, scope = 'ff'):
    """Position-wise feed-forward: expand by `mult`, GELU, project back."""
    *_, dim = x.shape
    with tf.variable_scope(scope):
        hidden = mtf.Dimension('ff_intermediate', dim.size * mult)
        out = linear(x, hidden, scope='w1')
        out = mtf.gelu(out)
        out = linear(out, dim, scope='w2')
        return out
# block
def transformer(x, *, depth, dim_head, dim_features_head, causal = False):
    """Stack of `depth` pre-norm residual attention + feed-forward blocks."""
    attn_fn = residual(prenorm(attention, 'norm1'))
    ff_fn = residual(prenorm(ff, 'norm2'))
    for layer_idx in range(depth):
        with tf.variable_scope(f'layer_{layer_idx}'):
            x = attn_fn(x, dim_head, dim_features_head, causal = causal)
            x = ff_fn(x)
    return x
# language model
def transformer_lm(x, *, dim, num_tokens, depth, max_seq_len, dim_head, dim_features_head, causal = False):
    """Transformer language model: embed token ids `x`, add positional
    embeddings, run the transformer stack, project to vocabulary logits.

    `x` is an mtf Tensor of token ids with mesh shape (batch, seq); the
    remaining arguments are plain ints wrapped into mtf.Dimension here.
    """
    mesh, batch, seq_dim = x.mesh, *x.shape
    dim = mtf.Dimension('dim', dim)
    dim_head = mtf.Dimension('dim_head', dim_head)
    dim_features_head = mtf.Dimension('dim_features_head', dim_features_head)
    dim_num_tokens = mtf.Dimension('vocab_size', num_tokens)
    dim_max_seq_len = mtf.Dimension('max_seq_len', max_seq_len)
    wte = mtf.get_variable(mesh, name='wte', shape=mtf.Shape([dim_num_tokens, dim]), dtype=tf.float32)
    wpe = mtf.get_variable(mesh, name='wpe', shape=mtf.Shape([seq_dim, dim]), dtype=tf.float32)
    x = mtf.gather(wte, x, dim_num_tokens)
    # NOTE(review): wpe is declared over seq_dim but gathered along
    # dim_max_seq_len below; these only line up when the input sequence
    # length equals max_seq_len -- confirm intended.
    p = mtf.gather(wpe, mtf.range(mesh, seq_dim, dtype=tf.int32), dim_max_seq_len)
    x = x + p
    x = transformer(x, depth = depth, dim_head = dim_head, dim_features_head = dim_features_head, causal = causal)
    logits = linear(x, dim_num_tokens, scope='to_logits')
    return logits
|
# helpers
def exists(val):
    """True when `val` is anything other than None."""
    return not (val is None)
def lcm(*numbers):
    """Least common multiple of the given integers (1 when called with none)."""
    result = 1
    for num in numbers:
        result = int((result * num) / gcd(result, num))
    return result
def masked_mean(tensor, mask, dim = -1):
    """Mean of `tensor` along `dim`, counting only positions where `mask`
    is True.

    WARNING: mutates `tensor` in place (masked positions are zeroed via
    masked_fill_); callers must not rely on the original values afterwards.
    Positions whose mask is entirely False yield a mean of 0, not NaN.
    """
    # broadcast mask up to tensor's rank by appending singleton dims
    diff_len = len(tensor.shape) - len(mask.shape)
    mask = mask[(..., *((None,) * diff_len))]
    tensor.masked_fill_(~mask, 0.)

    total_el = mask.sum(dim = dim)
    # clamp avoids division by zero for all-masked slices
    mean = tensor.sum(dim = dim) / total_el.clamp(min = 1.)
    mean.masked_fill_(total_el == 0, 0.)
    return mean
def next_divisible_length(seqlen, multiple):
    """Smallest multiple of `multiple` that is >= `seqlen`.

    Uses integer ceiling division rather than math.ceil on a float
    quotient, which silently loses precision for lengths above 2**53.
    """
    return -(-seqlen // multiple) * multiple
def pad_to_multiple(tensor, multiple, *, seq_dim, dim = -1, value = 0.):
    """Right-pad `tensor` along (negative) `dim` so its length at `seq_dim`
    becomes a multiple of `multiple`; no-op when it already is."""
    seqlen = tensor.shape[seq_dim]
    target = next_divisible_length(seqlen, multiple)
    if target == seqlen:
        return tensor
    # F.pad takes pairs from the last dim backwards; skip dims after `dim`
    offset = (0,) * (-1 - dim) * 2
    return F.pad(tensor, (*offset, 0, target - seqlen), value = value)
# helper classes
class Pad(nn.Module):
    """Module wrapper around F.pad with a fixed padding spec and fill value."""

    def __init__(self, padding, value = 0.):
        super().__init__()
        self.padding, self.value = padding, value

    def forward(self, x):
        return F.pad(x, self.padding, value = self.value)
class DepthwiseConv1d(nn.Module):
    """Depthwise conv (groups == input channels) followed by a 1x1
    pointwise projection."""

    def __init__(self, dim_in, dim_out, kernel_size):
        super().__init__()
        self.conv = nn.Conv1d(dim_in, dim_out, kernel_size, groups = dim_in)
        self.proj_out = nn.Conv1d(dim_out, dim_out, 1)

    def forward(self, x):
        return self.proj_out(self.conv(x))
# main class
class GBST(nn.Module):
    """Gradient-based subword tokenization (Charformer GBST).

    Scores several candidate block sizes at every character position, mixes
    the block representations by their softmax scores, then mean-pool
    downsamples the sequence by `downsample_factor`.
    """

    def __init__(
        self,
        *,
        num_tokens,
        dim,
        max_block_size = None,
        blocks = None,
        downsample_factor = 4,
        score_consensus_attn = True
    ):
        super().__init__()
        # exactly one of max_block_size / blocks must be provided
        assert exists(max_block_size) ^ exists(blocks), 'either max_block_size or blocks are given on initialization'
        self.token_emb = nn.Embedding(num_tokens, dim)
        if exists(blocks):
            # normalize entries to (block_size, offset) pairs
            assert isinstance(blocks, tuple), 'blocks must be a tuple of block sizes'
            self.blocks = tuple(map(lambda el: el if isinstance(el, tuple) else (el, 0), blocks))
            assert all([(offset < block_size) for block_size, offset in self.blocks]), 'offset must be always smaller than the block size'
            max_block_size = max(list(map(lambda t: t[0], self.blocks)))
        else:
            # default: every block size from 1 to max_block_size, no offsets
            self.blocks = tuple(map(lambda el: (el, 0), range(1, max_block_size + 1)))
        # depthwise conv over the sequence gives position-dependent embeddings
        self.pos_conv = nn.Sequential(
            Pad((0, 0, 0, max_block_size - 1)),
            Rearrange('b n d -> b d n'),
            DepthwiseConv1d(dim, dim, kernel_size = max_block_size),
            Rearrange('b d n -> b n d')
        )
        # scalar score per (position, candidate block)
        self.score_fn = nn.Sequential(
            nn.Linear(dim, 1),
            Rearrange('... () -> ...')
        )
        self.score_consensus_attn = score_consensus_attn
        assert downsample_factor <= max_block_size, 'final downsample factor should be less than the maximum block size'
        # pad sequences to a common multiple so every block size tiles exactly
        self.block_pad_multiple = lcm(*[block_size for block_size, _ in self.blocks])
        self.downsample_factor = downsample_factor

    def forward(self, x, mask = None):
        """x: (batch, seq) token ids; mask: optional (batch, seq) boolean.
        Returns (downsampled embeddings, downsampled mask-or-None)."""
        b, n, block_mult, ds_factor, device = *x.shape, self.block_pad_multiple, self.downsample_factor, x.device
        m = next_divisible_length(n, ds_factor)
        # get character token embeddings
        x = self.token_emb(x)
        # do a conv to generate the positions for the tokens
        x = self.pos_conv(x)
        # pad both sequence and mask to length visible by all block sizes from 0 to max block size
        x = pad_to_multiple(x, block_mult, seq_dim = 1, dim = -2)
        if exists(mask):
            mask = pad_to_multiple(mask, block_mult, seq_dim = 1, dim = -1, value = False)
        # compute representations for all blocks by mean pooling
        block_masks = []
        block_reprs = []
        for block_size, offset in self.blocks:
            # clone the input sequence as well as the mask, in order to pad for offsets
            block_x = x.clone()
            if exists(mask):
                block_mask = mask.clone()
            # pad for offsets, if needed
            need_padding = offset > 0
            if need_padding:
                left_offset, right_offset = (block_size - offset), offset
                block_x = F.pad(block_x, (0, 0, left_offset, right_offset), value = 0.)
                if exists(mask):
                    block_mask = F.pad(block_mask, (left_offset, right_offset), value = False)
            # group input sequence into blocks
            blocks = rearrange(block_x, 'b (n m) d -> b n m d', m = block_size)
            # either mean pool the blocks, or do a masked mean
            if exists(mask):
                mask_blocks = rearrange(block_mask, 'b (n m) -> b n m', m = block_size)
                block_repr = masked_mean(blocks, mask_blocks, dim = -2)
            else:
                block_repr = blocks.mean(dim = -2)
            # append the block representations, as well as the pooled block masks
            block_repr = repeat(block_repr, 'b n d -> b (n m) d', m = block_size)
            if need_padding:
                block_repr = block_repr[:, left_offset:-right_offset]
            block_reprs.append(block_repr)
            if exists(mask):
                mask_blocks = torch.any(mask_blocks, dim = -1)
                mask_blocks = repeat(mask_blocks, 'b n -> b (n m)', m = block_size)
                if need_padding:
                    mask_blocks = mask_blocks[:, left_offset:-right_offset]
                block_masks.append(mask_blocks)
        # stack all the block representations
        block_reprs = torch.stack(block_reprs, dim = 2)
        # calculate scores and softmax across the block size dimension
        scores = self.score_fn(block_reprs)
        if exists(mask):
            block_masks = torch.stack(block_masks, dim = 2)
            max_neg_value = -torch.finfo(scores.dtype).max
            scores = scores.masked_fill(~block_masks, max_neg_value)
        scores = scores.softmax(dim = 2)
        # do the cheap consensus attention, eq (5) in paper
        if self.score_consensus_attn:
            score_sim = einsum('b i d, b j d -> b i j', scores, scores)
            if exists(mask):
                cross_mask = rearrange(mask, 'b i -> b i ()') * rearrange(mask, 'b j -> b () j')
                max_neg_value = -torch.finfo(score_sim.dtype).max
                score_sim = score_sim.masked_fill(~cross_mask, max_neg_value)
            score_attn = score_sim.softmax(dim = -1)
            scores = einsum('b i j, b j m -> b i m', score_attn, scores)
        # multiply the block representations by the position-wise scores
        scores = rearrange(scores, 'b n m -> b n m ()')
        x = (block_reprs * scores).sum(dim = 2)
        # truncate to length divisible by downsample factor
        x = x[:, :m]
        if exists(mask):
            mask = mask[:, :m]
        # final mean pooling downsample
        x = rearrange(x, 'b (n m) d -> b n m d', m = ds_factor)
        if exists(mask):
            mask = rearrange(mask, 'b (n m) -> b n m', m = ds_factor)
            x = masked_mean(x, mask, dim = 2)
            mask = torch.any(mask, dim = -1)
        else:
            x = x.mean(dim = -2)
        return x, mask
|
"""
Bonito Aligner
"""
def align_map(aligner, sequences, n_thread=4):
    """
    Align `sequences` with minimap using `n_thread` threads.
    """
    make_worker = partial(MappyWorker, aligner)
    return ThreadMap(make_worker, sequences, n_thread)
class MappyWorker(Thread):
    """
    Thread that reads items from an input_queue, applies a func to them and puts them on an output_queue
    """
    def __init__(self, aligner, input_queue=None, output_queue=None):
        super().__init__()
        self.aligner = aligner
        self.input_queue = input_queue
        self.output_queue = output_queue
    def run(self):
        # per-thread alignment buffer, reused across reads
        thrbuf = ThreadBuffer()
        while True:
            item = self.input_queue.get()
            # StopIteration is the shutdown sentinel: forward it downstream
            # so the consumer sees end-of-stream, then exit this thread.
            if item is StopIteration:
                self.output_queue.put(item)
                break
            k, v = item
            # keep only the first (best) hit, or None; MD=True fills the MD tag
            mapping = next(self.aligner.map(v['sequence'], buf=thrbuf, MD=True), None)
            self.output_queue.put((k, {**v, 'mapping': mapping}))
|
"""
Bonito Fast5 Utils
"""
class Read:
    """Raw signal plus metadata for one nanopore read, extracted from an
    open fast5 read handle. The stored signal is calibration-scaled,
    adapter-trimmed and med/mad normalised."""

    def __init__(self, read, filename):
        self.read_id = read.read_id
        self.filename = filename.name  # assumes `filename` is a pathlib.Path
        self.run_id = read.get_run_id()
        if type(self.run_id) in (bytes, np.bytes_):
            self.run_id = self.run_id.decode()
        read_attrs = read.handle[read.raw_dataset_group_name].attrs
        channel_info = read.handle[read.global_key + 'channel_id'].attrs
        # digitiser calibration: signal_pA = scaling * (raw + offset)
        self.offset = int(channel_info['offset'])
        self.sampling_rate = channel_info['sampling_rate']
        self.scaling = channel_info['range'] / channel_info['digitisation']
        self.mux = read_attrs['start_mux']
        self.channel = channel_info['channel_number']
        if type(self.channel) in (bytes, np.bytes_):
            self.channel = self.channel.decode()
        # convert sample counts to seconds
        self.start = read_attrs['start_time'] / self.sampling_rate
        self.duration = read_attrs['duration'] / self.sampling_rate
        raw = read.handle[read.raw_dataset_name][:]
        scaled = np.array(self.scaling * (raw + self.offset), dtype=np.float32)
        # drop the leading adapter/stall found within the first 8000 samples
        trim_start, _ = trim(scaled[:8000])
        scaled = scaled[trim_start:]
        self.template_start = self.start + (1 / self.sampling_rate) * trim_start
        self.template_duration = self.duration - (1 / self.sampling_rate) * trim_start
        # normalise with med/mad over the whole read when long enough,
        # otherwise fall back to the noisiest-section estimate
        if len(scaled) > 8000:
            med, mad = med_mad(scaled)
            self.signal = (scaled - med) / mad
        else:
            self.signal = norm_by_noisiest_section(scaled)
    def __repr__(self):
        return "Read('%s')" % self.read_id
class ReadChunk:
    """A fixed-size slice of a Read's signal that keeps the parent read's
    metadata; chunk ids are '<read_id>:<i>:<n>'."""

    def __init__(self, read, chunk, i, n):
        self.read_id = "%s:%i:%i" % (read.read_id, i, n)
        # carry the parent read's metadata over unchanged
        for attr in ('run_id', 'filename', 'mux', 'channel', 'start', 'duration'):
            setattr(self, attr, getattr(read, attr))
        self.template_start = self.start
        self.template_duration = self.duration
        self.signal = chunk

    def __repr__(self):
        return "ReadChunk('%s')" % self.read_id
def trim(signal, window_size=40, threshold_factor=2.4, min_elements=3):
    """Locate the end of the leading adapter/stall region of `signal`.

    Returns (start, end) indices into the original signal such that
    signal[start:end] is the usable region; end is always the length of
    the signal after the fixed min_trim offset.
    """
    min_trim = 10
    signal = signal[min_trim:]

    # threshold from the median/MAD of the tail of the signal
    med, mad = med_mad(signal[-(window_size * 100):])
    threshold = med + mad * threshold_factor
    num_windows = len(signal) // window_size

    seen_peak = False
    for pos in range(num_windows):
        begin = pos * window_size
        window = signal[begin:begin + window_size]
        if (window > threshold).sum() > min_elements or seen_peak:
            seen_peak = True
            if window[-1] > threshold:
                # still inside the peak; keep scanning
                continue
            return min(begin + window_size + min_trim, len(signal)), len(signal)

    return min_trim, len(signal)
def med_mad(x, factor=1.4826):
    """
    Calculate signal median and median absolute deviation
    """
    med = np.median(x)
    # factor 1.4826 makes the MAD a consistent estimator of sigma
    return med, np.median(np.absolute(x - med)) * factor
def norm_by_noisiest_section(signal, samples=100, threshold=6.0):
    """
    Normalise using the medmad from the longest continuous region where the
    noise is above some threshold relative to the std of the full signal.
    """
    # reinterpret `threshold` as an absolute std cutoff
    threshold = signal.std() / threshold
    noise = np.ones(signal.shape)

    # flag each `samples`-wide window as noisy (1) or quiet (0)
    for idx in np.arange(signal.shape[0] // samples):
        window = slice(idx * samples, (idx + 1) * samples)
        noise[window] = np.where(signal[window].std() > threshold, 1, 0)

    # start and end low for peak finding
    noise[0] = 0; noise[-1] = 0
    peaks, info = find_peaks(noise, width=(None, None))

    # med/mad over the widest noisy plateau; fall back to the whole signal
    if len(peaks):
        widest = np.argmax(info['widths'])
        med, mad = med_mad(signal[info['left_bases'][widest]: info['right_bases'][widest]])
    else:
        med, mad = med_mad(signal)
    return (signal - med) / mad
def read_chunks(read, chunksize=4000, overlap=400):
    """
    Split a Read in fixed sized ReadChunks
    """
    if len(read.signal) < chunksize:
        return

    stride = chunksize - overlap
    # drop leftover samples from the front so windows tile the tail exactly
    _, offset = divmod(len(read.signal) - chunksize, stride)
    windows = torch.from_numpy(read.signal[offset:]).unfold(0, chunksize, stride)
    total = windows.shape[0]
    for idx, window in enumerate(windows, start=1):
        yield ReadChunk(read, window.numpy(), idx, total)
def get_raw_data(filename, read_ids=None, skip=False):
    """
    Get the raw signal and read id from the fast5 files
    """
    with get_fast5_file(filename, 'r') as f5_fh:
        for read_id in f5_fh.get_read_ids():
            # XOR with `skip` turns the id set into an exclusion list
            wanted = read_ids is None or (read_id in read_ids) ^ skip
            if wanted:
                yield Read(f5_fh.get_read(read_id), filename)
def get_read_ids(filename, read_ids=None, skip=False):
    """
    Get all the read_ids from the file `filename`.
    """
    with get_fast5_file(filename, 'r') as f5_fh:
        ids = [(filename, rid) for rid in f5_fh.get_read_ids()]
    if read_ids is None:
        return ids
    # XOR with `skip` turns the id set into an exclusion list
    return [pair for pair in ids if (pair[1] in read_ids) ^ skip]
def get_raw_data_for_read(info):
    """
    Get the raw signal from the fast5 file for a given filename, read_id pair
    """
    filename, read_id = info
    with get_fast5_file(filename, 'r') as f5_fh:
        raw = f5_fh.get_read(read_id)
        return Read(raw, filename)
def get_reads(directory, read_ids=None, skip=False, max_read_size=0, n_proc=1, recursive=False, cancel=None):
    """
    Get all reads in a given `directory`.

    Reads longer than `max_read_size` samples (when > 0) are skipped with a
    warning on stderr. `cancel` may be an event-like object with is_set();
    when set, iteration stops after the current read. Signal loading is
    fanned out over a Pool of `n_proc` workers.
    """
    pattern = "**/*.fast5" if recursive else "*.fast5"
    get_filtered_reads = partial(get_read_ids, read_ids=read_ids, skip=skip)
    with Pool(n_proc) as pool:
        # NOTE(review): chain() over a single iterable is a no-op here, and
        # glob's recursive=True only has an effect when the pattern
        # contains '**' (i.e. when `recursive` is True).
        for job in chain(pool.imap(get_filtered_reads, (Path(x) for x in glob(directory + "/" + pattern, recursive=True)))):
            for read in pool.imap(get_raw_data_for_read, job):
                if max_read_size > 0 and len(read.signal) > max_read_size:
                    sys.stderr.write(
                        "> skipping long read %s (%s samples)\n" % (read.read_id, len(read.signal))
                    )
                    continue
                yield read
                if cancel is not None and cancel.is_set():
                    return
|
"""
Bonito utils
"""
try:
from claragenomics.bindings import cuda
from claragenomics.bindings.cudapoa import CudaPoaBatch
except ImportError:
pass
# Package-relative paths to the bundled data and model configurations.
__dir__ = os.path.dirname(os.path.realpath(__file__))
__data__ = os.path.join(__dir__, "data")
__models__ = os.path.join(__dir__, "models")
__configs__ = os.path.join(__dir__, "models/configs")

# Matches a single CIGAR operation, e.g. '12M' -> len='12', op='M'.
split_cigar = re.compile(r"(?P<len>\d+)(?P<op>\D+)")

default_data = os.path.join(__data__, "dna_r9.4.1")
default_config = os.path.join(__configs__, "dna_r9.4.1@v3.1.toml")
def init(seed, device):
    """
    Initialise random libs and setup cudnn

    https://pytorch.org/docs/stable/notes/randomness.html
    """
    # seed every RNG the pipeline touches
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if device == "cpu":
        return
    # deterministic cudnn kernels; benchmark off so algo choice is stable
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    assert(torch.cuda.is_available())
def permute(x, input_layout, output_layout):
    """
    Reorder the dimensions of `x` from `input_layout` to `output_layout`.

    >>> permute(x, 'TNC', 'NTC')
    """
    if input_layout == output_layout:
        return x
    order = [input_layout.index(dim) for dim in output_layout]
    return x.permute(*order)
def concat(xs, dim=0):
    """
    Type agnostic concatenation of tensors, arrays, lists, strings or dicts.
    """
    head = xs[0]
    if isinstance(head, torch.Tensor):
        return torch.cat(xs, dim=dim)
    if isinstance(head, np.ndarray):
        return np.concatenate(xs, axis=dim)
    if isinstance(head, list):
        return [item for sub in xs for item in sub]
    if isinstance(head, str):
        return ''.join(xs)
    if isinstance(head, dict):
        # Concatenate per-key across all dicts.
        return {key: concat([x[key] for x in xs], dim) for key in head.keys()}
    raise TypeError
def select_range(x, start, end, dim=0):
    """
    Type agnostic slice of `x` from `start` to `end` along dimension `dim`.
    """
    if isinstance(x, dict):
        return {key: select_range(val, start, end, dim) for key, val in x.items()}
    if dim == 0 or isinstance(x, list):
        return x[start:end]
    index = (slice(None),) * dim + (slice(start, end),)
    return x[index]
def size(x, dim=0):
    """
    Type agnostic length of `x` along `dim` (shape-aware for arrays/tensors).
    """
    if hasattr(x, 'shape'):
        return x.shape[dim]
    if dim == 0:
        return len(x)
    raise TypeError
def half_supported():
    """
    Return True if the current GPU supports FP16 (compute capability >= 7).
    """
    try:
        return get_device_capability()[0] >= 7
    except Exception:
        # Fix: was a bare `except:`. Any failure (no GPU, no CUDA build)
        # means we fall back to full precision.
        return False
def phred(prob, scale=1.0, bias=0.0):
    """
    Converts `prob` into a ascii encoded phred quality score between 0 and 40.
    """
    # Floor the error probability so the score caps at Q40.
    err = max(1 - prob, 1e-4)
    score = -10 * np.log10(err) * scale + bias
    return chr(int(np.round(score) + 33))
def mean_qscore_from_qstring(qstring):
    """
    Convert a phred quality string into its mean qscore.
    """
    if not qstring:
        return 0.0
    probs = [10 ** ((ord(ch) - 33) / -10) for ch in qstring]
    return -10 * np.log10(max(np.mean(probs), 1e-4))
def decode_ref(encoded, labels):
    """
    Map integer-encoded symbols back to a string, dropping blanks (code 0).
    """
    return ''.join(map(labels.__getitem__, (code for code in encoded if code)))
def column_to_set(filename, idx=0, skip_header=False):
    """
    Pull column `idx` from a whitespace-delimited file and return its values
    as a set. Returns None when `filename` is falsy or does not exist.
    """
    if not (filename and os.path.isfile(filename)):
        return None
    with open(filename, 'r') as tsv:
        if skip_header:
            next(tsv)
        # Iterate the file handle directly rather than readlines().
        return {line.strip().split()[idx] for line in tsv}
def chunk(signal, chunksize, overlap):
    """
    Split a 1D signal into overlapping chunks of `chunksize` samples,
    returning a (n_chunks, 1, chunksize) tensor.
    """
    T = signal.shape[0]
    if chunksize == 0:
        chunks = signal[None, :]
    elif T < chunksize:
        # Left-pad short signals up to a single full chunk.
        chunks = torch.nn.functional.pad(signal, (chunksize - T, 0))[None, :]
    else:
        step = chunksize - overlap
        stub = (T - overlap) % step
        chunks = signal[stub:].unfold(0, chunksize, step)
        if stub > 0:
            # Prepend a chunk covering the leftover prefix.
            chunks = torch.cat([signal[None, :chunksize], chunks], dim=0)
    return chunks.unsqueeze(1)
def stitch(chunks, chunksize, overlap, length, stride, reverse=False):
    """
    Stitch chunks together with a given overlap

    Interior chunks contribute their middle section (half the overlap,
    measured in output frames, is trimmed from each side); the first and
    last chunks keep their outer edges. The first chunk can be shorter when
    `length` does not divide evenly (see chunk()).

    :param chunks: stacked per-chunk model output, chunk index first.
    :param chunksize: chunk length in raw samples.
    :param overlap: overlap between neighbouring chunks in raw samples.
    :param length: total signal length in raw samples.
    :param stride: model downsampling factor (raw samples per output frame).
    :param reverse: stitch chunks back-to-front (for reversed model output).
    """
    if chunks.shape[0] == 1: return chunks.squeeze(0)
    semi_overlap = overlap // 2
    # Trim boundaries expressed in output frames.
    start, end = semi_overlap // stride, (chunksize - semi_overlap) // stride
    stub = (length - overlap) % (chunksize - overlap)
    first_chunk_end = (stub + semi_overlap) // stride if (stub > 0) else end
    if reverse:
        chunks = list(chunks)
        return concat([
            chunks[-1][:-start], *(x[-end:-start] for x in reversed(chunks[1:-1])), chunks[0][-first_chunk_end:]
        ])
    else:
        return concat([
            chunks[0, :first_chunk_end], *chunks[1:-1, start:end], chunks[-1, start:]
        ])
def batchify(items, batchsize, dim=0):
    """
    Pack a stream of (key, value) items into batches of exactly `batchsize`
    along `dim`; the final batch may be smaller. Yields (keys, batch) pairs
    where each key is (original_key, (start, end)) giving the span that
    piece occupies inside the emitted batch.
    """
    stack, pos = [], 0
    for key, value in items:
        # Break the value wherever it crosses a batch boundary.
        breaks = range(batchsize - pos, size(value, dim), batchsize)
        for lo, hi in zip([0, *breaks], [*breaks, size(value, dim)]):
            piece = select_range(value, lo, hi, dim)
            stack.append(((key, (pos, pos + hi - lo)), piece))
            if pos + hi - lo == batchsize:
                keys, values = zip(*stack)
                yield keys, concat(values, dim)
                stack, pos = [], 0
            else:
                pos += hi - lo
    if stack:
        keys, values = zip(*stack)
        yield keys, concat(values, dim)
def unbatchify(batches, dim=0):
    """
    Invert batchify: regroup batched outputs back into per-key values.
    """
    flat = (
        (key, select_range(batch, lo, hi, dim))
        for sub_batches, batch in batches
        for key, (lo, hi) in sub_batches
    )
    return (
        (key, concat([piece for _, piece in group], dim))
        for key, group in groupby(flat, itemgetter(0))
    )
def load_data(limit=None, directory=None):
    """
    Load training chunks, targets and lengths from `directory` (defaults to
    the bundled data dir). An optional indices.npy file selects and orders
    the examples used; `limit` caps the number of examples.
    """
    if directory is None:
        directory = default_data

    chunks = np.load(os.path.join(directory, "chunks.npy"), mmap_mode='r')
    targets = np.load(os.path.join(directory, "references.npy"), mmap_mode='r')
    lengths = np.load(os.path.join(directory, "reference_lengths.npy"), mmap_mode='r')

    indices_file = os.path.join(directory, "indices.npy")
    if os.path.exists(indices_file):
        idx = np.load(indices_file, mmap_mode='r')
        # Guard against indices pointing past the end of the arrays.
        idx = idx[idx < lengths.shape[0]]
        if limit:
            idx = idx[:limit]
        return chunks[idx, :], targets[idx, :], lengths[idx]

    if limit:
        chunks, targets, lengths = chunks[:limit], targets[:limit], lengths[:limit]
    # Materialise the memory-mapped arrays.
    return np.array(chunks), np.array(targets), np.array(lengths)
def load_symbol(config, symbol):
    """
    Dynamically load `symbol` from the python package named in a model
    config. `config` may be a parsed config dict, a model directory path,
    or a model name under the bundled models directory.
    """
    if not isinstance(config, dict):
        if os.path.isdir(config) or not os.path.isdir(os.path.join(__models__, config)):
            dirname = config
        else:
            dirname = os.path.join(__models__, config)
        config = toml.load(os.path.join(dirname, 'config.toml'))
    return getattr(import_module(config['model']['package']), symbol)
def match_names(state_dict, model):
    """
    Map checkpoint parameter names onto the model's parameter names by
    pairing tensors of identical shape (sorted by shape, then insertion
    order). Asserts that the two shape multisets agree.
    """
    def keys_and_shapes(sd):
        ordered = sorted((v.shape, i, k) for i, (k, v) in enumerate(sd.items()))
        return zip(*[(k, s) for s, i, k in ordered])

    k1, s1 = keys_and_shapes(state_dict)
    k2, s2 = keys_and_shapes(model.state_dict())
    assert s1 == s2
    remap = dict(zip(k1, k2))
    return OrderedDict([(k, remap[k]) for k in state_dict.keys()])
def load_model(dirname, device, weights=None, half=None, chunksize=0):
    """
    Load a model and its weights from a model directory (or a bundled
    model name).

    :param weights: checkpoint number to load; latest found if not given.
    :param half: force FP16; autodetected from the GPU when None.
    """
    if not os.path.isdir(dirname) and os.path.isdir(os.path.join(__models__, dirname)):
        dirname = os.path.join(__models__, dirname)

    if not weights:  # take the latest checkpoint
        weight_files = glob(os.path.join(dirname, "weights_*.tar"))
        if not weight_files:
            raise FileNotFoundError("no model weights found in '%s'" % dirname)
        weights = max(int(re.sub(".*_([0-9]+).tar", "\\1", w)) for w in weight_files)

    device = torch.device(device)
    config = toml.load(os.path.join(dirname, 'config.toml'))
    weights = os.path.join(dirname, 'weights_%s.tar' % weights)

    Model = load_symbol(config, "Model")
    model = Model(config)

    state_dict = torch.load(weights, map_location=device)
    state_dict = {k2: state_dict[k1] for k1, k2 in match_names(state_dict, model).items()}
    # Strip DataParallel's 'module.' prefix so names match a bare model.
    model.load_state_dict(OrderedDict(
        (k.replace('module.', ''), v) for k, v in state_dict.items()
    ))

    if half is None:
        half = half_supported()
    if half:
        model = model.half()
    model.eval()
    model.to(device)
    return model
def parasail_to_sam(result, seq):
    """
    Extract reference start and sam compatible cigar string.

    :param result: parasail alignment result.
    :param seq: query sequence.

    :returns: reference start coordinate, cigar string.
    """
    cigstr = result.cigar.decode.decode()
    first = re.search(split_cigar, cigstr)
    first_count, first_op = first.groups()
    prefix = first.group()

    rstart = result.cigar.beg_ref
    cliplen = result.cigar.beg_query
    clip = '{}S'.format(cliplen) if cliplen != 0 else ''

    if first_op == 'I':
        # A leading insertion folds into the soft clip.
        pre = '{}S'.format(int(first_count) + cliplen)
    elif first_op == 'D':
        # A leading deletion shifts the reference start instead.
        pre = clip
        rstart = int(first_count)
    else:
        pre = '{}{}'.format(clip, prefix)

    mid = cigstr[len(prefix):]
    end_clip = len(seq) - result.end_query - 1
    suf = '{}S'.format(end_clip) if end_clip > 0 else ''
    return rstart, ''.join((pre, mid, suf))
def accuracy(ref, seq, balanced=False, min_coverage=0.0):
    """
    Percent identity between `ref` and `seq` from a local alignment.
    Returns 0.0 when the reference coverage falls below `min_coverage`.
    """
    alignment = parasail.sw_trace_striped_32(seq, ref, 8, 4, parasail.dnafull)
    counts = defaultdict(int)

    q_coverage = len(alignment.traceback.query) / len(seq)
    r_coverage = len(alignment.traceback.ref) / len(ref)

    if r_coverage < min_coverage:
        return 0.0

    _, cigar = parasail_to_sam(alignment, seq)
    for count, op in re.findall(split_cigar, cigar):
        counts[op] += int(count)

    matches = counts['=']
    if balanced:
        score = (matches - counts['I']) / (matches + counts['X'] + counts['D'])
    else:
        score = matches / (matches + counts['I'] + counts['X'] + counts['D'])
    return score * 100
def print_alignment(ref, seq):
    """
    Pretty-print the local alignment between `ref` and `seq`; returns the score.
    """
    alignment = parasail.sw_trace_striped_32(seq, ref, 8, 4, parasail.dnafull)
    traceback = alignment.traceback
    print(traceback.ref)
    print(traceback.comp)
    print(traceback.query)
    print(" Score=%s" % alignment.score)
    return alignment.score
def poa(groups, max_poa_sequences=100, gpu_mem_per_batch=0.9):
    """
    Generate consensus for POA groups.

    Args:
        groups : A list of lists of sequences for which consensus is to be generated.
        max_poa_sequences : maximum number of POA groups per GPU batch.
        gpu_mem_per_batch : fraction of currently free GPU memory for the batch.
    """
    free, total = cuda.cuda_get_mem_info(cuda.cuda_get_device())
    gpu_mem_per_batch *= free
    batch = CudaPoaBatch(max_poa_sequences, gpu_mem_per_batch, stream=None, output_type="consensus")
    results = []
    for i, group in enumerate(groups, start=1):
        group_status, seq_status = batch.add_poa_group(group)
        # Once batch is full, run POA processing
        if group_status == 1 or i == len(groups):
            batch.generate_poa()
            consensus, coverage, status = batch.get_consensus()
            results.extend(consensus)
            batch.reset()
            # Re-add the group that triggered the flush so it goes into the
            # next batch. NOTE(review): if this is the final group, it is
            # re-added here but never processed — its consensus looks like
            # it can be dropped; confirm against claragenomics usage.
            group_status, seq_status = batch.add_poa_group(group)
    return results
|
"""
Bonito nn modules.
"""
# Registry mapping lowercased layer names to their classes.
layers = {}


def register(layer):
    """Register a layer class under its lowercased class name."""
    layer.name = layer.__name__.lower()
    layers[layer.name] = layer
    return layer


register(torch.nn.ReLU)
register(torch.nn.Tanh)
@register
class Swish(torch.nn.SiLU):
    """Swish activation (same as SiLU); registered under the name 'swish'."""
    pass
@register
class Serial(torch.nn.Sequential):
    """Sequential container built from a list of sublayers."""
    def __init__(self, sublayers):
        super().__init__(*sublayers)
    def to_dict(self, include_weights=False):
        """Serialise as the list of serialised sublayers."""
        return {
            'sublayers': [to_dict(layer, include_weights) for layer in self._modules.values()]
        }
@register
class Reverse(Module):
    """Run a sublayer on the time-reversed input and flip the result back."""
    def __init__(self, sublayers):
        super().__init__()
        # A list of sublayers is wrapped in a Serial container.
        self.layer = Serial(sublayers) if isinstance(sublayers, list) else sublayers
    def forward(self, x):
        return self.layer(x.flip(0)).flip(0)
    def to_dict(self, include_weights=False):
        if isinstance(self.layer, Serial):
            return self.layer.to_dict(include_weights)
        else:
            return {'sublayers': to_dict(self.layer, include_weights)}
@register
class Convolution(Module):
    """1D convolution with an optional registered activation on the output."""
    def __init__(self, insize, size, winlen, stride=1, padding=0, bias=True, activation=None):
        super().__init__()
        self.conv = torch.nn.Conv1d(insize, size, winlen, stride=stride, padding=padding, bias=bias)
        # If `activation` names a registered layer, instantiate it; otherwise
        # the value itself is used (e.g. None disables the activation).
        self.activation = layers.get(activation, lambda: activation)()
    def forward(self, x):
        if self.activation is not None:
            return self.activation(self.conv(x))
        return self.conv(x)
    def to_dict(self, include_weights=False):
        """Serialise constructor args (and optionally weights) to a dict."""
        res = {
            "insize": self.conv.in_channels,
            "size": self.conv.out_channels,
            "bias": self.conv.bias is not None,
            "winlen": self.conv.kernel_size[0],
            "stride": self.conv.stride[0],
            "padding": self.conv.padding[0],
            "activation": self.activation.name if self.activation else None,
        }
        if include_weights:
            res['params'] = {
                'W': self.conv.weight, 'b': self.conv.bias if self.conv.bias is not None else []
            }
        return res
@register
class LinearCRFEncoder(Module):
    """Linear projection producing CRF transition scores over k-mer states."""
    def __init__(self, insize, n_base, state_len, bias=True, scale=None, activation=None, blank_score=None):
        super().__init__()
        self.n_base = n_base
        self.state_len = state_len
        self.blank_score = blank_score
        # With a fixed blank score only the n_base moves per state are
        # predicted; otherwise the blank (stay) score is predicted too.
        size = (n_base + 1) * n_base**state_len if blank_score is None else n_base**(state_len + 1)
        self.linear = torch.nn.Linear(insize, size, bias=bias)
        # Instantiate a registered activation by name, else use the value itself.
        self.activation = layers.get(activation, lambda: activation)()
        self.scale = scale
    def forward(self, x):
        scores = self.linear(x)
        if self.activation is not None:
            scores = self.activation(scores)
        if self.scale is not None:
            scores = scores * self.scale
        if self.blank_score is not None:
            T, N, C = scores.shape
            # Interleave the constant blank score ahead of each state's
            # n_base transition scores.
            s = torch.tensor(self.blank_score, device=scores.device, dtype=scores.dtype)
            scores = torch.cat([s.expand(T, N, C//self.n_base, 1), scores.reshape(T, N, C//self.n_base, self.n_base)], axis=-1).reshape(T, N, -1)
        return scores
    def to_dict(self, include_weights=False):
        """Serialise constructor args (and optionally weights) to a dict."""
        res = {
            'insize': self.linear.in_features,
            'n_base': self.n_base,
            'state_len': self.state_len,
            'bias': self.linear.bias is not None,
            'scale': self.scale,
            'activation': self.activation.name if self.activation else None,
            'blank_score': self.blank_score,
        }
        if include_weights:
            res['params'] = {
                'W': self.linear.weight, 'b': self.linear.bias
                if self.linear.bias is not None else []
            }
        return res
@register
class SHA(Module):
    """Single-headed attention: queries from `x`, keys and values both `kv`."""
    def __init__(self, dim):
        super().__init__()
        self.scale = dim ** -0.5
        self.to_q = nn.Sequential(nn.Linear(dim, dim), nn.LayerNorm(dim))
    def forward(self, x, kv):
        # Inputs are time-major; attention is computed batch-major and
        # transposed back on the way out.
        x = x.transpose(0, 1)
        kv = kv.transpose(0, 1)
        q = self.to_q(x)
        sim = torch.matmul(q, kv.transpose(-1, -2)) * self.scale
        attn = sim.softmax(dim=-1)
        out = torch.matmul(attn, kv)
        return out.transpose(0, 1)
@register
class SHABlock(Module):
    """ https://arxiv.org/abs/1911.11423 """
    def __init__(self, dim, ff_mult=4):
        super().__init__()
        self.attn_query_norm = nn.LayerNorm(dim)
        self.attn_kv_norm = nn.LayerNorm(dim)
        self.attn = SHA(dim=dim)
        self.ff_residual_norm = nn.LayerNorm(dim)
        self.ff = Serial([
            nn.LayerNorm(dim),
            nn.Linear(dim, dim * ff_mult),
            nn.GELU(),
            nn.Linear(dim * ff_mult, dim),
        ])
    def forward(self, x):
        # Queries and keys/values are normalised separately; residual
        # connections wrap both the attention and feed-forward sublayers.
        kv = self.attn_kv_norm(x)
        x = self.attn_query_norm(x)
        x = self.attn(x, kv) + x
        x = self.ff(x) + self.ff_residual_norm(x)
        return x
@register
class Permute(Module):
    """Reorder tensor dimensions to the fixed permutation `dims`."""
    def __init__(self, dims):
        super().__init__()
        self.dims = dims
    def forward(self, x):
        return x.permute(*self.dims)
    def to_dict(self, include_weights=False):
        return {'dims': self.dims}
def truncated_normal(size, dtype=torch.float32, device=None, num_resample=5):
    """
    Sample a tensor of shape `size` from a standard normal truncated to (-2, 2).

    Draws `num_resample` candidates per element, keeps the first in-range one,
    and clamps in the unlikely case that all candidates fall outside.
    """
    # Bug fix: `dtype` was previously ignored (empty() hard-coded float32).
    x = torch.empty(size + (num_resample,), dtype=dtype, device=device).normal_()
    i = ((x < 2) & (x > -2)).max(-1, keepdim=True)[1]
    return torch.clamp_(x.gather(-1, i).squeeze(-1), -2, 2)
class RNNWrapper(Module):
    """
    Wraps a torch RNN with bonito's weight initialisation and optional
    input-sequence reversal.
    """
    def __init__(
            self, rnn_type, *args, reverse=False, orthogonal_weight_init=True, disable_state_bias=True, bidirectional=False, **kwargs
    ):
        super().__init__()
        if reverse and bidirectional:
            raise Exception("'reverse' and 'bidirectional' should not both be set to True")
        self.reverse = reverse
        self.rnn = rnn_type(*args, bidirectional=bidirectional, **kwargs)
        self.init_orthogonal(orthogonal_weight_init)
        self.init_biases()
        if disable_state_bias: self.disable_state_bias()
    def forward(self, x):
        # Flip time before and after to run the RNN over the reversed sequence.
        if self.reverse: x = x.flip(0)
        y, h = self.rnn(x)
        if self.reverse: y = y.flip(0)
        return y
    def init_biases(self, types=('bias_ih',)):
        """Initialise the input biases from a scaled truncated normal."""
        for name, param in self.rnn.named_parameters():
            if any(k in name for k in types):
                with torch.no_grad():
                    param.set_(0.5*truncated_normal(param.shape, dtype=param.dtype, device=param.device))
    def init_orthogonal(self, types=True):
        """Orthogonal init applied per-gate (hidden_size-row blocks)."""
        if not types: return
        if types == True: types = ('weight_ih', 'weight_hh')
        for name, x in self.rnn.named_parameters():
            if any(k in name for k in types):
                for i in range(0, x.size(0), self.rnn.hidden_size):
                    orthogonal_(x[i:i+self.rnn.hidden_size])
    def disable_state_bias(self):
        """Freeze and zero the recurrent (hidden-state) biases."""
        for name, x in self.rnn.named_parameters():
            if 'bias_hh' in name:
                x.requires_grad = False
                x.zero_()
@register
class LSTM(RNNWrapper):
    """Unidirectional LSTM layer (optionally run over the reversed sequence)."""
    def __init__(self, size, insize, bias=True, reverse=False):
        # NOTE(review): args are forwarded positionally to torch.nn.LSTM as
        # (size, insize) while to_dict labels hidden_size as 'size' — confirm
        # the intended (input, hidden) ordering against the model configs.
        super().__init__(torch.nn.LSTM, size, insize, bias=bias, reverse=reverse)
    def to_dict(self, include_weights=False):
        res = {
            'size': self.rnn.hidden_size,
            'insize': self.rnn.input_size,
            'bias': self.rnn.bias,
            'reverse': self.reverse,
        }
        if include_weights:
            # Weights reshaped to (gate, hidden, input); torch gate order i,f,g,o.
            res['params'] = {
                'iW': self.rnn.weight_ih_l0.reshape(4, self.rnn.hidden_size, self.rnn.input_size),
                'sW': self.rnn.weight_hh_l0.reshape(4, self.rnn.hidden_size, self.rnn.hidden_size),
                'b': self.rnn.bias_ih_l0.reshape(4, self.rnn.hidden_size)
            }
        return res
def to_dict(layer, include_weights=False):
    """
    Serialise a registered layer to a dict, merging in the layer's own
    to_dict() output when it defines one.
    """
    spec = {'type': layer.name}
    if hasattr(layer, 'to_dict'):
        spec.update(layer.to_dict(include_weights))
    return spec
def from_dict(model_dict, layer_types=None):
    """
    Build a layer from its dict representation (inverse of to_dict),
    recursively constructing any sublayers.
    """
    if layer_types is None:
        layer_types = layers
    spec = model_dict.copy()
    typ = layer_types[spec.pop('type')]
    if 'sublayers' in spec:
        sub = spec['sublayers']
        spec['sublayers'] = (
            [from_dict(s, layer_types) for s in sub]
            if isinstance(sub, list) else from_dict(sub, layer_types)
        )
    try:
        return typ(**spec)
    except Exception as e:
        raise Exception(f'Failed to build layer of type {typ} with args {spec}') from e
|
"""
Bonito Input/Output
"""
# Module-level logger shared by the writer classes below.
logger = getLogger('bonito')
class CSVLogger:
    """
    Append-only CSV writer that adopts the header of an existing file and
    periodically flushes rows to disk. Usable as a context manager.
    """
    def __init__(self, filename, sep=','):
        self.filename = str(filename)
        self.columns = None
        if os.path.exists(self.filename):
            # Resuming: adopt the existing file's column order.
            with open(self.filename) as existing:
                self.columns = csv.DictReader(existing).fieldnames
        self.fh = open(self.filename, 'a', newline='')
        self.csvwriter = csv.writer(self.fh, delimiter=sep)
        self.count = 0

    def set_columns(self, columns):
        """Fix the column order and emit the header row."""
        if self.columns:
            raise Exception('Columns already set')
        self.columns = list(columns)
        self.csvwriter.writerow(self.columns)

    def append(self, row):
        """Append a dict row, filling missing columns with '-'."""
        if self.columns is None:
            self.set_columns(row.keys())
        self.csvwriter.writerow([row.get(col, '-') for col in self.columns])
        self.count += 1
        if self.count > 100:
            # Flush periodically so progress survives a crash.
            self.count = 0
            self.fh.flush()

    def close(self):
        self.fh.close()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
@contextmanager
def devnull(*args, **kwds):
    """
    A context manager that sends all out stdout & stderr to devnull.
    """
    saved = [os.dup(1), os.dup(2)]
    nulls = [os.open(os.devnull, os.O_RDWR) for _ in range(2)]
    os.dup2(nulls[0], 1)
    os.dup2(nulls[1], 2)
    try:
        yield
    finally:
        # Restore the original stdout/stderr and release every descriptor.
        os.dup2(saved[0], 1)
        os.dup2(saved[1], 2)
        for fd in nulls + saved:
            os.close(fd)
def write_fasta(header, sequence, fd=sys.stdout):
    """
    Write a single fasta record to `fd`.
    """
    fd.write(">%s\n" % header)
    fd.write("%s\n" % sequence)
    fd.flush()
def write_fastq(header, sequence, qstring, fd=sys.stdout):
    """
    Write a single fastq record to `fd`.
    """
    record = "@%s\n%s\n+\n%s\n" % (header, sequence, qstring)
    fd.write(record)
    fd.flush()
def write_sam_header(aligner, fd=sys.stdout, sep='\t'):
    """
    Write the SQ & PG sam headers to a file descriptor.
    """
    sq_lines = [
        sep.join(['@SQ', 'SN:%s' % name, 'LN:%s' % len(aligner.seq(name))])
        for name in aligner.seq_names
    ]
    fd.write('%s\n' % os.linesep.join(sq_lines))
    pg_fields = [
        '@PG',
        'ID:bonito',
        'PN:bonito',
        'VN:%s' % bonito.__version__,
        'CL:%s' % ' '.join(sys.argv),
    ]
    fd.write('%s\n' % sep.join(pg_fields))
    fd.flush()
def write_sam(read_id, sequence, qstring, mapping, fd=sys.stdout, unaligned=False, sep='\t'):
    """
    Write a sam record to a file descriptor.
    """
    if unaligned:
        fields = [
            read_id, 4, '*', 0, 0, '*', '*', 0, 0, sequence, qstring, 'NM:i:0'
        ]
    else:
        softclip = [
            '%sS' % mapping.q_st if mapping.q_st else '',
            mapping.cigar_str,
            '%sS' % (len(sequence) - mapping.q_en) if len(sequence) - mapping.q_en else ''
        ]
        forward = mapping.strand == +1
        fields = [
            read_id,
            0 if forward else 16,
            mapping.ctg,
            mapping.r_st + 1,
            mapping.mapq,
            # Reverse-strand records get the clip order mirrored too.
            ''.join(softclip if forward else softclip[::-1]),
            '*', 0, 0,
            sequence if forward else revcomp(sequence),
            qstring,
            'NM:i:%s' % mapping.NM,
            'MD:Z:%s' % mapping.MD,
        ]
    fd.write("%s\n" % sep.join(map(str, fields)))
    fd.flush()
def summary_file():
    """
    Return the filename to use for the summary tsv.
    """
    target = realpath('/dev/fd/1')
    # Interactive session or stdout going to a pipe: use the working directory.
    if sys.stdout.isatty() or target.startswith('/proc'):
        return 'summary.tsv'
    base, _ = splitext(target)
    return '%s_summary.tsv' % base
# Column order for summary.tsv rows; the alignment_* columns are only
# populated when an aligner is in use (see summary_row).
summary_field_names = [
    'filename',
    'read_id',
    'run_id',
    'channel',
    'mux',
    'start_time',
    'duration',
    'template_start',
    'template_duration',
    'sequence_length_template',
    'mean_qscore_template',
    #if alignment
    'alignment_genome',
    'alignment_genome_start',
    'alignment_genome_end',
    'alignment_strand_start',
    'alignment_strand_end',
    'alignment_direction',
    'alignment_length',
    'alignment_num_aligned',
    'alignment_num_correct',
    'alignment_num_insertions',
    'alignment_num_deletions',
    'alignment_num_substitutions',
    'alignment_mapq',
    'alignment_strand_coverage',
    'alignment_identity',
    'alignment_accuracy',
]
def summary_row(read, seqlen, qscore, alignment=False):
    """
    Summary tsv row.

    `alignment` may be a mappy-style hit, None (unmapped: placeholder
    columns) or False (no aligner: alignment columns omitted entirely).
    """
    fields = [
        read.filename,
        read.read_id,
        read.run_id,
        read.channel,
        read.mux,
        read.start,
        read.duration,
        read.template_start,
        read.template_duration,
        seqlen,
        qscore,
    ]
    if alignment:
        # Cigar ops: 1 = insertion, 2 = deletion.
        ins = sum(n for n, op in alignment.cigar if op == 1)
        dels = sum(n for n, op in alignment.cigar if op == 2)
        subs = alignment.NM - ins - dels
        length = alignment.blen
        matches = length - ins - dels
        correct = alignment.mlen
        forward = alignment.strand == +1
        fields.extend([
            alignment.ctg,
            alignment.r_st,
            alignment.r_en,
            alignment.q_st if forward else seqlen - alignment.q_en,
            alignment.q_en if forward else seqlen - alignment.q_st,
            '+' if forward else '-',
            length, matches, correct,
            ins, dels, subs,
            alignment.mapq,
            (alignment.q_en - alignment.q_st) / seqlen,
            correct / matches,
            correct / length,
        ])
    elif alignment is None:
        fields.extend(
            ['*', -1, -1, -1, -1, '*', 0, 0, 0, 0, 0, 0, 0, 0.0, 0.0, 0.0]
        )
    return dict(zip(summary_field_names, fields))
# Column order for duplex summary.tsv rows (template + complement reads);
# the alignment_* columns are only populated when an aligner is in use.
duplex_summary_field_names = [
    'filename_template',
    'read_id_template',
    'filename_complement',
    'read_id_complement',
    'run_id',
    'channel_template',
    'mux_template',
    'channel_complement',
    'mux_complement',
    'sequence_length_duplex',
    'mean_qscore_duplex',
    #if alignment
    'alignment_genome',
    'alignment_genome_start',
    'alignment_genome_end',
    'alignment_strand_start',
    'alignment_strand_end',
    'alignment_direction',
    'alignment_length',
    'alignment_num_aligned',
    'alignment_num_correct',
    'alignment_num_insertions',
    'alignment_num_deletions',
    'alignment_num_substitutions',
    'alignment_mapq',
    'alignment_strand_coverage',
    'alignment_identity',
    'alignment_accuracy',
]
def duplex_summary_row(read_temp, comp_read, seqlen, qscore, alignment=False):
    """
    Duplex summary tsv row.

    `alignment` may be a mappy-style hit, None (unmapped: placeholder
    columns) or False (no aligner: alignment columns omitted entirely).
    """
    fields = [
        read_temp.filename,
        read_temp.read_id,
        comp_read.filename,
        comp_read.read_id,
        read_temp.run_id,
        read_temp.channel,
        read_temp.mux,
        comp_read.channel,
        comp_read.mux,
        seqlen,
        qscore,
    ]
    if alignment:
        # Cigar ops: 1 = insertion, 2 = deletion.
        ins = sum(n for n, op in alignment.cigar if op == 1)
        dels = sum(n for n, op in alignment.cigar if op == 2)
        subs = alignment.NM - ins - dels
        length = alignment.blen
        matches = length - ins - dels
        correct = alignment.mlen
        forward = alignment.strand == +1
        fields.extend([
            alignment.ctg,
            alignment.r_st,
            alignment.r_en,
            alignment.q_st if forward else seqlen - alignment.q_en,
            alignment.q_en if forward else seqlen - alignment.q_st,
            '+' if forward else '-',
            length, matches, correct,
            ins, dels, subs,
            alignment.mapq,
            (alignment.q_en - alignment.q_st) / seqlen,
            correct / matches,
            correct / length,
        ])
    elif alignment is None:
        fields.extend(
            ['*', -1, -1, -1, -1, '*', 0, 0, 0, 0, 0, 0, 0, 0.0, 0.0, 0.0]
        )
    return dict(zip(duplex_summary_field_names, fields))
class Writer(Thread):
    """
    Writer thread: drains (read, result) pairs from `iterator`, emitting
    basecalls (sam/fastq/fasta) to `fd` and per-read rows to summary.tsv.
    """
    def __init__(self, iterator, aligner, fd=sys.stdout, fastq=False, duplex=False):
        super().__init__()
        self.fd = fd
        self.log = []
        self.fastq = fastq
        self.duplex = duplex
        self.aligner = aligner
        self.iterator = iterator
        self.write_headers()

    def write_headers(self):
        # Only sam output needs a header block.
        if self.aligner:
            write_sam_header(self.aligner, fd=self.fd)

    def run(self):
        with CSVLogger(summary_file(), sep='\t') as summary:
            for read, res in self.iterator:
                seq = res['sequence']
                qstring = res.get('qstring', '*')
                mean_qscore = res.get('mean_qscore', 0.0)
                mapping = res.get('mapping', False)
                if self.duplex:
                    # Duplex results carry a (template, complement) read pair.
                    samples = len(read[0].signal) + len(read[1].signal)
                    read_id = '%s;%s' % (read[0].read_id, read[1].read_id)
                else:
                    samples = len(read.signal)
                    read_id = read.read_id
                if len(seq):
                    if self.aligner:
                        write_sam(read_id, seq, qstring, mapping, fd=self.fd, unaligned=mapping is None)
                    else:
                        if self.fastq:
                            write_fastq(read_id, seq, qstring, fd=self.fd)
                        else:
                            write_fasta(read_id, seq, fd=self.fd)
                    if self.duplex:
                        summary.append(duplex_summary_row(read[0], read[1], len(seq), mean_qscore, alignment=mapping))
                    else:
                        summary.append(summary_row(read, len(seq), mean_qscore, alignment=mapping))
                    self.log.append((read_id, samples))
                else:
                    # Fix: Logger.warn is a deprecated alias of Logger.warning.
                    logger.warning("> skipping empty sequence %s", read_id)
class CTCWriter(Thread):
    """
    CTC writer process that writes output numpy training data.

    Keeps only reads whose alignment passes the accuracy/coverage filters,
    then saves chunks.npy, references.npy and reference_lengths.npy next to
    the sam/summary output.
    """
    def __init__(self, iterator, aligner, min_coverage, min_accuracy, fd=sys.stdout):
        super().__init__()
        self.fd = fd
        self.log = []
        self.aligner = aligner
        self.iterator = iterator
        self.min_coverage = min_coverage
        self.min_accuracy = min_accuracy
        self.write_headers()
    def write_headers(self):
        if self.aligner:
            write_sam_header(self.aligner, fd=self.fd)
    def run(self):
        chunks = []
        targets = []
        lengths = []
        with CSVLogger(summary_file(), sep='\t') as summary:
            for read, ctc_data in self.iterator:
                seq = ctc_data['sequence']
                qstring = ctc_data['qstring']
                mean_qscore = ctc_data['mean_qscore']
                mapping = ctc_data.get('mapping', False)
                self.log.append((read.read_id, len(read.signal)))
                if len(seq) == 0 or mapping is None:
                    continue
                cov = (mapping.q_en - mapping.q_st) / len(seq)
                acc = mapping.mlen / mapping.blen
                refseq = self.aligner.seq(mapping.ctg, mapping.r_st, mapping.r_en)
                # Reject poor alignments and references containing Ns.
                if acc < self.min_accuracy or cov < self.min_coverage or 'N' in refseq:
                    continue
                write_sam(read.read_id, seq, qstring, mapping, fd=self.fd, unaligned=mapping is None)
                summary.append(summary_row(read, len(seq), mean_qscore, alignment=mapping))
                if mapping.strand == -1:
                    refseq = revcomp(refseq)
                # Encode A/C/G/T (codepoints 65/67/71/84) as 1/2/3/4.
                target = [int(x) for x in refseq.translate({65: '1', 67: '2', 71: '3', 84: '4'})]
                targets.append(target)
                chunks.append(read.signal)
                lengths.append(len(target))
        if len(chunks) == 0:
            sys.stderr.write("> no suitable ctc data to write\n")
            return
        chunks = np.array(chunks, dtype=np.float16)
        # Pad the ragged targets into one fixed-width uint8 matrix.
        targets_ = np.zeros((chunks.shape[0], max(lengths)), dtype=np.uint8)
        for idx, target in enumerate(targets): targets_[idx, :len(target)] = target
        lengths = np.array(lengths, dtype=np.uint16)
        # Shuffled subset chosen by typical_indices (presumably filters
        # outlier target lengths — confirm against its definition).
        indices = np.random.permutation(typical_indices(lengths))
        chunks = chunks[indices]
        targets_ = targets_[indices]
        lengths = lengths[indices]
        # Rewrite the summary so its rows stay aligned with the saved arrays.
        summary = pd.read_csv(summary_file(), sep='\t')
        summary.iloc[indices].to_csv(summary_file(), sep='\t', index=False)
        output_directory = '.' if sys.stdout.isatty() else dirname(realpath('/dev/fd/1'))
        np.save(os.path.join(output_directory, "chunks.npy"), chunks)
        np.save(os.path.join(output_directory, "references.npy"), targets_)
        np.save(os.path.join(output_directory, "reference_lengths.npy"), lengths)
        sys.stderr.write("> written ctc training data\n")
        sys.stderr.write(" - chunks.npy with shape (%s)\n" % ','.join(map(str, chunks.shape)))
        sys.stderr.write(" - references.npy with shape (%s)\n" % ','.join(map(str, targets_.shape)))
        sys.stderr.write(" - reference_lengths.npy shape (%s)\n" % ','.join(map(str, lengths.shape)))
    def stop(self):
        self.join()
|
# Subcommand modules exposed on the bonito command line (see main()).
modules = [
    'basecaller', 'train', 'evaluate', 'view', 'convert', 'download', 'export', 'duplex',
]
# Package version, reported by `bonito -v`.
__version__ = '0.4.0'
def main():
    """Bonito command line entry point: dispatch to the chosen subcommand."""
    parser = ArgumentParser(
        'bonito',
        formatter_class=ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        '-v', '--version', action='version',
        version='%(prog)s {}'.format(__version__)
    )
    subparsers = parser.add_subparsers(
        title='subcommands', description='valid commands',
        help='additional help', dest='command'
    )
    subparsers.required = True

    # Each subcommand module supplies its own argparser() and main().
    for name in modules:
        module = globals()[name]
        subparser = subparsers.add_parser(name, parents=[module.argparser()])
        subparser.set_defaults(func=module.main)

    args = parser.parse_args()
    args.func(args)
|
"""
Bonito Multiprocesing
"""
def process_iter(iterator, maxsize=1):
    """
    Run `iterator` on a background process, yielding its items here.
    """
    background = ProcessIterator(iterator, maxsize=maxsize)
    return iter(background)
def thread_iter(iterator, maxsize=1):
    """
    Run `iterator` on a background thread, yielding its items here.
    """
    background = ThreadIterator(iterator, maxsize=maxsize)
    return iter(background)
def process_cancel():
    """
    Create an Event that gets set when the process receives SIGINT.
    """
    event = Event()
    signal(SIGINT, lambda *args: event.set())
    return event
def process_map(func, iterator, n_proc=4, maxsize=0):
    """
    Apply `func` to the values of a (key, value) iterator using `n_proc`
    processes; with n_proc == 0 the mapping runs lazily in-process.
    """
    if n_proc == 0:
        return ((key, func(value)) for key, value in iterator)
    return iter(ProcessMap(func, iterator, n_proc, output_queue=Queue(maxsize)))
def thread_map(func, iterator, n_thread=4, maxsize=2):
    """
    Apply `func` to the values of a (key, value) iterator using `n_thread`
    threads; with n_thread == 0 the mapping runs lazily in the caller.
    """
    if n_thread == 0:
        return ((key, func(value)) for key, value in iterator)
    return iter(ThreadMap(partial(MapWorkerThread, func), iterator, n_thread, maxsize=maxsize))
class BackgroundIterator:
    """
    Runs an iterator in the background.

    Mixin: subclasses combine this with Thread or Process and provide a
    QueueClass; StopIteration is used as the end-of-stream sentinel.
    """
    def __init__(self, iterator, maxsize=10):
        super().__init__()
        self.iterator = iterator
        self.queue = self.QueueClass(maxsize)

    def __iter__(self):
        self.start()
        while True:
            item = self.queue.get()
            if item is StopIteration:
                return
            yield item

    def run(self):
        for item in self.iterator:
            self.queue.put(item)
        # Signal the consumer that the source is exhausted.
        self.queue.put(StopIteration)

    def stop(self):
        self.join()
class ThreadIterator(BackgroundIterator, Thread):
    """
    Runs an iterator in a separate thread.
    """
    # In-process queue; items never cross a process boundary.
    QueueClass = queue.Queue
class ProcessIterator(BackgroundIterator, Process):
    """
    Runs an iterator in a separate process.
    """
    # Cross-process queue for shipping items back to the consumer.
    QueueClass = Queue
class MapWorker(Process):
    """
    Worker process: pulls (key, value) items from an input queue, applies
    `func` to each value and pushes (key, func(value)) to an output queue.
    A StopIteration sentinel on the input queue terminates the worker.
    """
    def __init__(self, func, input_queue, output_queue):
        super().__init__()
        self.func = func
        self.input_queue = input_queue
        self.output_queue = output_queue

    def run(self):
        while True:
            item = self.input_queue.get()
            if item is StopIteration:
                return
            key, value = item
            self.output_queue.put((key, self.func(value)))
class ProcessMap(Thread):
    """
    Feeder thread plus a pool of MapWorker processes: applies a func to the
    values of a (key, value) iterator while preserving each item's key.
    """
    def __init__(self, func, iterator, n_proc, output_queue=None):
        super().__init__()
        self.key_map = {}
        self.iterator = iterator
        self.work_queue = Queue(n_proc * 2)
        self.output_queue = output_queue or Queue()
        self.processes = [MapWorker(func, self.work_queue, self.output_queue) for _ in range(n_proc)]
    def start(self):
        for process in self.processes:
            process.start()
        super().start()
    def run(self):
        # Keys may not be picklable, so id(k) crosses the process boundary
        # while key_map keeps the original object alive for lookup.
        for (k, v) in self.iterator:
            self.work_queue.put((id(k), v))
            self.key_map[id(k)] = k
        for _ in self.processes:
            self.work_queue.put(StopIteration)
        for process in self.processes:
            process.join()
        self.output_queue.put(StopIteration)
    def __iter__(self):
        self.start()
        while True:
            item = self.output_queue.get()
            if item is StopIteration:
                break
            k, v = item
            yield self.key_map.pop(k), v
class MapWorkerThread(Thread):
    """
    Worker thread: pulls (key, value) items from an input queue, applies
    `func` to each value and pushes results to an output queue. The
    StopIteration sentinel is forwarded downstream before exiting.
    """
    def __init__(self, func, input_queue=None, output_queue=None):
        super().__init__()
        self.func = func
        self.input_queue = input_queue
        self.output_queue = output_queue

    def run(self):
        while True:
            item = self.input_queue.get()
            if item is StopIteration:
                self.output_queue.put(item)
                return
            key, value = item
            self.output_queue.put((key, self.func(value)))
class ThreadMap(Thread):
    """
    Round-robin fan-out over `n_thread` workers, each with its own input and
    output queue so results are yielded in input order.
    """
    def __init__(self, worker_type, iterator, n_thread, maxsize=2):
        super().__init__()
        self.iterator = iterator
        self.n_thread = n_thread
        self.work_queues = [queue.Queue(maxsize) for _ in range(n_thread)]
        self.output_queues = [queue.Queue(maxsize) for _ in range(n_thread)]
        self.workers = [worker_type(input_queue=in_q, output_queue=out_q) for (in_q, out_q) in zip(self.work_queues, self.output_queues)]
    def start(self):
        for worker in self.workers:
            worker.start()
        super().start()
    def __iter__(self):
        self.start()
        # Poll output queues in the same round-robin order items were fed.
        for i in count():
            item = self.output_queues[i % self.n_thread].get()
            if item is StopIteration:
                # Drain the remaining sentinels from the other workers.
                #do we need to empty output_queues in order to join worker threads?
                for j in range(i + 1, i + self.n_thread):
                    self.output_queues[j % self.n_thread].get()
                break
            yield item
    def run(self):
        for i, (k, v) in enumerate(self.iterator):
            self.work_queues[i % self.n_thread].put((k, v))
        for q in self.work_queues:
            q.put(StopIteration)
        for worker in self.workers:
            worker.join()
|
"""
Bonito train
"""
class ChunkDataSet:
    """
    Torch-style dataset over (chunk, target, length) training triples.
    Chunks gain a channel dimension; items are cast to the dtypes the
    loss function expects.
    """
    def __init__(self, chunks, targets, lengths):
        self.chunks = np.expand_dims(chunks, axis=1)
        self.targets = targets
        self.lengths = lengths

    def __getitem__(self, i):
        chunk = self.chunks[i].astype(np.float32)
        target = self.targets[i].astype(np.int64)
        length = self.lengths[i].astype(np.int64)
        return chunk, target, length

    def __len__(self):
        return len(self.lengths)
def const_schedule(y):
    """
    Constant Scheduler
    """
    def schedule(t):
        return y
    return schedule
def linear_schedule(y0, y1):
    """
    Linear Scheduler
    """
    def schedule(t):
        # interpolate from y0 (t=0) to y1 (t=1)
        return y0 + t * (y1 - y0)
    return schedule
def cosine_decay_schedule(y0, y1):
    """
    Cosine Decay Scheduler
    """
    def schedule(t):
        # half-cosine decay from y0 (t=0) down to y1 (t=1)
        return y1 + 0.5 * (y0 - y1) * (np.cos(t * np.pi) + 1.0)
    return schedule
def piecewise_schedule(knots, funcs):
    """
    Piecewise Scheduler
    """
    def f(t):
        # locate the segment containing t
        i = np.searchsorted(knots, t)
        # segment endpoints, clamped to [0, 1] at the outer boundaries
        t0 = knots[i - 1] if i > 0 else 0.0
        t1 = knots[i] if i < len(knots) else 1.0
        # rescale t into the segment's local [0, 1] coordinate
        return funcs[i]((t - t0) / (t1 - t0))
    return f
def func_scheduler(optimizer, func, total_steps, warmup_steps=None, warmup_ratio=0.1, start_step=0):
    """
    Learning Rate Scheduler

    Wraps a schedule `func` (mapping t in [0, 1] to an lr factor) in a
    LambdaLR; optionally prepends a linear warmup ramping from
    warmup_ratio * func(0) up to func(0) over `warmup_steps`.
    """
    if warmup_steps:
        # ramp linearly up to the schedule's initial value during warmup
        y0 = func(0.0)
        warmup = linear_schedule(warmup_ratio * y0, y0)
        func = piecewise_schedule([warmup_steps / total_steps], [warmup, func])

    def lr_lambda(step):
        # start_step offsets the schedule when resuming training
        return func((step + start_step) / total_steps)

    return LambdaLR(optimizer, lr_lambda)
def load_state(dirname, device, model):
    """
    Load a model state dict from disk

    Looks for the highest-numbered weights_<n>.tar checkpoint in `dirname`,
    loads it into `model` (after remapping names via match_names and dropping
    any DataParallel 'module.' prefixes) and returns the epoch number, or 0
    when no checkpoint is found.
    """
    model.to(device)
    weight_files = glob(os.path.join(dirname, "weights_*.tar"))
    weight_no = max(
        (int(re.sub(".*_([0-9]+).tar", "\\1", w)) for w in weight_files),
        default=None,
    )
    if not weight_no:
        return 0
    print("[picking up from epoch %s]" % weight_no)
    state_dict = torch.load(
        os.path.join(dirname, 'weights_%s.tar' % weight_no), map_location=device
    )
    state_dict = {k2: state_dict[k1] for k1, k2 in match_names(state_dict, model).items()}
    # strip DataParallel's 'module.' prefix so keys match a bare model
    new_state_dict = OrderedDict(
        (k.replace('module.', ''), v) for k, v in state_dict.items()
    )
    model.load_state_dict(new_state_dict)
    return weight_no
class Trainer:
    """
    Training/validation loop for a Bonito model with optional mixed precision.

    The loss criterion defaults to the model's CRF ctc_loss when the model has
    a `seqdist`, otherwise to its ctc_label_smoothing_loss.
    """
    def __init__(self, model, device, train_loader, valid_loader, criterion=None, use_amp=True):
        self.model = model.to(device)
        self.device = device
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.criterion = criterion or (model.seqdist.ctc_loss if hasattr(model, 'seqdist') else model.ctc_label_smoothing_loss)
        self.use_amp = use_amp
        # GradScaler is a no-op when AMP is disabled
        self.scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
        # created lazily by init_optimizer / fit
        self.optimizer = None

    def train_one_step(self, batch):
        """Run one optimisation step; returns (losses dict, pre-clip grad norm)."""
        data, targets, lengths = batch
        self.optimizer.zero_grad()
        with amp.autocast(enabled=self.use_amp):
            scores = self.model(data.to(self.device))
            losses = self.criterion(scores, targets.to(self.device), lengths.to(self.device))
        # normalise criteria that return a bare tensor into a dict
        if not isinstance(losses, dict):
            losses = {'loss': losses}
        self.scaler.scale(losses['loss']).backward()
        # unscale before clipping so max_norm applies to the true gradients
        self.scaler.unscale_(self.optimizer)
        grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=2.0).item()
        self.scaler.step(self.optimizer)
        self.scaler.update()
        return losses, grad_norm

    def train_one_epoch(self, loss_log, lr_scheduler):
        """Train over the full train_loader; returns (smoothed loss, duration seconds)."""
        t0 = perf_counter()
        chunks = 0
        self.model.train()
        progress_bar = tqdm(
            total=len(self.train_loader), desc='[0/{}]'.format(len(self.train_loader.dataset)),
            ascii=True, leave=True, ncols=100, bar_format='{l_bar}{bar}| [{elapsed}{postfix}]'
        )
        smoothed_loss = None
        with progress_bar:
            for batch in self.train_loader:
                chunks += batch[0].shape[0]
                losses, grad_norm = self.train_one_step(batch)
                losses = {k: v.item() for k,v in losses.items()}
                if lr_scheduler is not None: lr_scheduler.step()
                # exponential moving average of the loss for display/return
                smoothed_loss = losses['loss'] if smoothed_loss is None else (0.01 * losses['loss'] + 0.99 * smoothed_loss)
                progress_bar.set_postfix(loss='%.4f' % smoothed_loss)
                progress_bar.set_description("[{}/{}]".format(chunks, len(self.train_loader.dataset)))
                progress_bar.update()
                if loss_log is not None:
                    loss_log.append({'chunks': chunks, 'time': perf_counter() - t0, 'grad_norm': grad_norm, **losses})
        return smoothed_loss, perf_counter() - t0

    def validate_one_step(self, batch):
        """Score one validation batch; returns (seqs, refs, accuracies, losses)."""
        data, targets, lengths = batch
        scores = self.model(data.to(self.device))
        losses = self.criterion(scores, targets.to(self.device), lengths.to(self.device))
        losses = {k: v.item() for k, v in losses.items()} if isinstance(losses, dict) else losses.item()
        if hasattr(self.model, 'decode_batch'):
            seqs = self.model.decode_batch(scores)
        else:
            seqs = [self.model.decode(x) for x in permute(scores, 'TNC', 'NTC')]
        refs = [decode_ref(target, self.model.alphabet) for target in targets]
        # empty decodes count as 0% accuracy
        accs = [
            accuracy(ref, seq, min_coverage=0.5) if len(seq) else 0. for ref, seq in zip(refs, seqs)
        ]
        return seqs, refs, accs, losses

    def validate_one_epoch(self):
        """Evaluate on valid_loader; returns (mean loss, mean accuracy, median accuracy)."""
        self.model.eval()
        with torch.no_grad():
            seqs, refs, accs, losses = zip(*(self.validate_one_step(batch) for batch in self.valid_loader))
        seqs, refs, accs = (sum(x, []) for x in (seqs, refs, accs))
        loss = np.mean([(x['ctc_loss'] if isinstance(x, dict) else x) for x in losses])
        return loss, np.mean(accs), np.median(accs)

    def init_optimizer(self, lr, **kwargs):
        self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=lr, **kwargs)

    def get_lr_scheduler(self, epochs, last_epoch=0):
        # cosine decay 1.0 -> 0.1 over all steps, with a 500-step linear warmup
        return func_scheduler(
            self.optimizer, cosine_decay_schedule(1.0, 0.1), epochs * len(self.train_loader),
            warmup_steps=500,
            start_step=last_epoch*len(self.train_loader)
        )

    def fit(self, workdir, epochs=1, lr=2e-3, last_epoch=0):
        """Train for `epochs`, checkpointing weights and logging losses per epoch."""
        if self.optimizer is None:
            self.init_optimizer(lr)
        lr_scheduler = self.get_lr_scheduler(epochs, last_epoch=last_epoch)
        for epoch in range(1 + last_epoch, epochs + 1 + last_epoch):
            try:
                with bonito.io.CSVLogger(os.path.join(workdir, 'losses_{}.csv'.format(epoch))) as loss_log:
                    train_loss, duration = self.train_one_epoch(loss_log, lr_scheduler)
                # unwrap DataParallel before saving so checkpoints load on bare models
                model_state = self.model.module.state_dict() if hasattr(self.model, 'module') else self.model.state_dict()
                torch.save(model_state, os.path.join(workdir, "weights_%s.tar" % epoch))
                val_loss, val_mean, val_median = self.validate_one_epoch()
            except KeyboardInterrupt:
                break
            print("[epoch {}] directory={} loss={:.4f} mean_acc={:.3f}% median_acc={:.3f}%".format(
                epoch, workdir, val_loss, val_mean, val_median
            ))
            with bonito.io.CSVLogger(os.path.join(workdir, 'training.csv')) as training_log:
                training_log.append({
                    'time': datetime.today(),
                    'duration': int(duration),
                    'epoch': epoch,
                    'train_loss': train_loss,
                    'validation_loss': val_loss,
                    'validation_mean': val_mean,
                    'validation_median': val_median
                })
"""
Bonito Download
"""
class File:
    """
    Small class for downloading models and training assets.
    """
    __url__ = "https://nanoporetech.box.com/shared/static/"

    def __init__(self, path, url_frag, force):
        # path: local destination directory; url_frag: Box shared-static fragment
        self.path = path
        self.force = force
        self.url = os.path.join(self.__url__, url_frag)

    @staticmethod
    def _strip_suffix(name, suffix):
        """
        Remove `suffix` from the end of `name` if present.

        BUG FIX: the previous code used name.strip('.zip'), which strips any
        run of the characters '.', 'z', 'i', 'p' from BOTH ends of the string
        (e.g. 'snip.zip'.strip('.zip') == 'sn'), not the literal suffix.
        """
        return name[:-len(suffix)] if name.endswith(suffix) else name

    def location(self, filename):
        return os.path.join(self.path, filename)

    def exists(self, filename):
        return os.path.exists(self.location(filename))

    def download(self):
        """
        Download the remote file
        """
        # create the requests for the file
        req = requests.get(self.url, stream=True)
        total = int(req.headers.get('content-length', 0))
        fname = re.findall('filename="([^"]+)', req.headers['content-disposition'])[0]

        stem = self._strip_suffix(fname, '.zip')

        # skip download if local file is found
        if self.exists(stem) and not self.force:
            print("[skipping %s]" % fname)
            return

        if self.exists(stem) and self.force:
            rmtree(self.location(stem))

        # download the file
        with tqdm(total=total, unit='iB', ascii=True, ncols=100, unit_scale=True, leave=False) as t:
            with open(self.location(fname), 'wb') as f:
                for data in req.iter_content(1024):
                    f.write(data)
                    t.update(len(data))

        print("[downloaded %s]" % fname)

        # unzip .zip files
        if fname.endswith('.zip'):
            with ZipFile(self.location(fname), 'r') as zfile:
                zfile.extractall(self.path)
            os.remove(self.location(fname))

        # convert chunkify training files to bonito
        if fname.endswith('.hdf5'):
            print("[converting %s]" % fname)
            args = cargparser().parse_args([
                self.location(fname),
                self._strip_suffix(self.location(fname), '.hdf5')
            ])
            convert(args)
# Box "shared/static" URL fragments for released R9.4.1 basecalling models,
# ordered oldest -> newest (main() treats the last entry as the latest).
r9_models = [
    "n8c07gc9ro09zt0ivgcoeuz6krnwsnf6.zip",  # dna_r9.4.1@v1
    "nas0uhf46fd1lh2jndhx2a54a9vvhxp4.zip",  # dna_r9.4.1@v2
    "1wodp3ur4jhvqvu5leowfg6lrw54jxp2.zip",  # dna_r9.4.1@v3
    "uetgwsnb8yfqvuyoka8p09mxilgskqc7.zip",  # dna_r9.4.1@v3.1
    "47t2y48zw4waly25lmzx6sagf4bbbqqz.zip",  # dna_r9.4.1@v3.2
    "hrv649cvx8lvomu1u0tsd47e5u2bbabt.zip",  # dna_r9.4.1@v3.3
    "arqi4qwcj9btsd6bbjsnlbai0s6dg8yd.zip",  # most recent r9 model (unlabelled; used by --latest)
]

# Box URL fragments for released R10.3 basecalling models.
r10_models = [
    "e70s615lh3i24rkhz006i0e4u4m8y2xa.zip",  # dna_r10.3_q20ea
    "hnr5mwlm8vmdsfpvn5fsxn3mvhbucy5f.zip",  # dna_r10.3@v3
    "yesf11tisfrncmod5hj2xtx9kbdveuqt.zip",  # dna_r10.3@v3.2
    "ci6xdu7d4wczmhorhw1sweyg4gczx97t.zip",  # dna_r10.3@v3.3
    "4cunv5z7nwjag7v2bun0g7vk2lf8rqnc.zip",  # most recent r10 model (unlabelled; used by --latest)
]

# Chunkify training data (.hdf5); converted to Bonito format after download.
training = [
    "cmh91cxupa0are1kc3z9aok425m75vrb.hdf5",
]
def main(args):
    """
    Download models and training sets
    """
    if args.models or args.all:
        print("[downloading models]")
        # --latest restricts each list to its final (newest) entry
        start = -1 if args.latest else 0
        for model in r9_models[start:] + r10_models[start:]:
            File(__models__, model, args.force).download()

    if args.training or args.all:
        print("[downloading training data]")
        for train in training:
            File(__data__, train, args.force).download()
def argparser():
    """Build the CLI parser for the download command."""
    parser = ArgumentParser(
        formatter_class=ArgumentDefaultsHelpFormatter,
        add_help=False,
    )
    # --all / --models / --training are mutually exclusive selectors
    group = parser.add_mutually_exclusive_group()
    for flag in ('--all', '--models', '--training'):
        group.add_argument(flag, action='store_true')
    parser.add_argument('-f', '--force', action='store_true')
    parser.add_argument('--latest', action='store_true')
    return parser
|
#!/usr/bin/env python
"""
Convert a Taiyaki chunkify training file to set of Bonito CTC .npy files
"""
def align(samples, pointers, reference):
    """ align to the start of the mapping """
    n_samples = len(samples)
    # reference positions whose signal pointer falls outside the raw signal
    off_start = int(np.count_nonzero(pointers < 0))
    off_end = int(np.count_nonzero(pointers >= n_samples))
    keep = slice(off_start, len(pointers) - off_end)
    pointers = pointers[keep]
    reference = reference[keep]
    # rebase both the signal and the pointers to the first mapped sample
    return samples[pointers[0]:pointers[-1]], pointers - pointers[0], reference
def scale(read, normalise=True):
    """ scale and normalise a read """
    raw = read['Dacs'][:]
    attrs = read.attrs
    # convert raw DAC values to picoamps
    scaling = attrs['range'] / attrs['digitisation']
    scaled = (scaling * (raw + attrs['offset'])).astype(np.float32)
    if not normalise:
        return scaled
    return (scaled - attrs['shift_frompA']) / attrs['scale_frompA']
def pad_lengths(ragged_array, max_len=None):
    """Right-pad a ragged list of 1-D arrays into a dense matrix; return (padded, lengths)."""
    lengths = np.array([len(row) for row in ragged_array], dtype=np.uint16)
    width = max_len or np.max(lengths)
    padded = np.zeros((len(ragged_array), width), dtype=ragged_array[0].dtype)
    for row, out_row in zip(ragged_array, padded):
        out_row[:len(row)] = row
    return padded, lengths
def regular_break_points(n, chunk_len, overlap=0, align='mid'):
    """Return (start, end) index pairs for evenly spaced chunks over n samples."""
    stride = chunk_len - overlap
    num_chunks, remainder = divmod(n - overlap, stride)
    # where the leftover samples go: dropped at the end, split, or at the start
    offsets = {'left': 0, 'mid': remainder // 2, 'right': remainder}
    first = offsets[align]
    starts = np.arange(first, first + num_chunks * stride, stride)
    return np.vstack([starts, starts + chunk_len]).T
def get_chunks(read, break_points):
    """Yield (signal_chunk, target_chunk) pairs for each (start, end) break point."""
    sample = scale(read)
    pointers = read['Ref_to_signal'][:]
    target = read['Reference'][:] + 1  # CTC convention
    # map signal break points onto reference coordinates
    target_bounds = np.searchsorted(pointers, break_points)
    for (i, j), (ti, tj) in zip(break_points, target_bounds):
        yield sample[i:j], target[ti:tj]
def chunk_dataset(reads, chunk_len, num_chunks=None):
    """Assemble a ChunkDataSet of fixed-length signal chunks from a reads mapping."""
    def generate():
        for read_id in reads:
            read = reads[read_id]
            break_points = regular_break_points(len(read['Dacs']), chunk_len)
            yield from get_chunks(read, break_points)

    chunks, targets = zip(*tqdm(take(generate(), num_chunks), total=num_chunks))
    targets, target_lens = pad_lengths(targets)  # convert refs from ragged array
    return ChunkDataSet(chunks, targets, target_lens)
def validation_split(reads, num_valid=1000):
    """Randomly split a reads mapping into (training, validation) OrderedDicts."""
    shuffled = np.random.permutation(sorted(reads.items()))
    # last num_valid shuffled reads form the validation set
    return OrderedDict(shuffled[:-num_valid]), OrderedDict(shuffled[-num_valid:])
def typical_indices(x, n=2.5):
    """Indices of values lying strictly within n standard deviations of the mean."""
    mu = np.mean(x)
    sd = np.std(x)
    lo, hi = mu - n * sd, mu + n * sd
    idx, = np.where((lo < x) & (x < hi))
    return idx
def filter_chunks(ds, idx):
    """Subset a ChunkDataSet by `idx` and trim target padding to the new max length."""
    subset = ChunkDataSet(ds.chunks.squeeze(1)[idx], ds.targets[idx], ds.lengths[idx])
    # drop padding columns beyond the longest remaining target
    subset.targets = subset.targets[:, :subset.lengths.max()]
    return subset
def save_chunks(chunks, output_directory):
    """Write chunks, references and reference lengths as .npy files in output_directory."""
    os.makedirs(output_directory, exist_ok=True)
    squeezed = chunks.chunks.squeeze(1)
    np.save(os.path.join(output_directory, "chunks.npy"), squeezed)
    np.save(os.path.join(output_directory, "references.npy"), chunks.targets)
    np.save(os.path.join(output_directory, "reference_lengths.npy"), chunks.lengths)
    print()
    print("> data written to %s:" % output_directory)
    print(" - chunks.npy with shape", squeezed.shape)
    print(" - references.npy with shape", chunks.targets.shape)
    print(" - reference_lengths.npy shape", chunks.lengths.shape)
def main(args):
    """Convert a chunkify .hdf5 training file into Bonito .npy training data."""
    random.seed(args.seed)
    np.random.seed(args.seed)

    reads = h5py.File(args.chunkify_file, 'r')['Reads']
    training, validation = validation_split(reads, args.validation_reads)

    print("> preparing training chunks\n")
    train_chunks = chunk_dataset(training, args.chunksize)
    keep = typical_indices(train_chunks.lengths)
    # shuffle the kept indices so chunk order on disk is randomised
    train_chunks = filter_chunks(train_chunks, np.random.permutation(keep))
    save_chunks(train_chunks, args.output_directory)

    print("\n> preparing validation chunks\n")
    valid_chunks = chunk_dataset(validation, args.chunksize)
    valid_chunks = filter_chunks(valid_chunks, typical_indices(valid_chunks.lengths))
    save_chunks(valid_chunks, os.path.join(args.output_directory, "validation"))
def argparser():
    """Build the CLI parser for the chunkify conversion command."""
    parser = ArgumentParser(
        formatter_class=ArgumentDefaultsHelpFormatter,
        add_help=False,
    )
    for positional in ("chunkify_file", "output_directory"):
        parser.add_argument(positional)
    parser.add_argument("--seed", default=25, type=int)
    parser.add_argument("--chunksize", default=3600, type=int)
    parser.add_argument("--validation-reads", default=1000, type=int)
    return parser
|
"""
Bonito Export
"""
class JsonEncoder(json.JSONEncoder):
    """JSON encoder handling numpy scalars/arrays and torch tensors/parameters."""

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, torch.nn.Parameter):
            # unwrap to the underlying tensor; serialized on the next pass
            return obj.data
        if isinstance(obj, torch.Tensor):
            # becomes an ndarray, then a list on the next pass
            return obj.detach().numpy()
        return super().default(obj)
def file_md5(filename, nblock=1024):
    """
    Get md5 string from file.
    """
    hasher = hashlib.md5()
    chunk_size = nblock * hasher.block_size
    with open(filename, "rb") as fh:
        while True:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.hexdigest()
def reformat_output_layer(layer_dict):
    """
    Rewrite a serialized output layer as a guppy 'GlobalNormTransducer'.

    Pops n_base/state_len/blank_score from `layer_dict` and, when a fixed
    blank_score is set, pads an extra output per state into W and b so the
    blank is emitted as a constant score.
    """
    n_base, state_len, blank_score = [layer_dict.pop(k) for k in ['n_base', 'state_len', 'blank_score']]
    # one output per (state, base) plus a blank per state
    layer_dict['size'] = (n_base + 1) * n_base**state_len
    layer_dict['type'] = 'GlobalNormTransducer'

    if blank_score is not None:
        assert layer_dict['activation'] == 'tanh'
        params = layer_dict['params']
        # insert a zero-weight row per state for the blank output
        params['W'] = torch.nn.functional.pad(
            params['W'].reshape([n_base**state_len, n_base, -1]),
            (0, 0, 1, 0),
            value=0.
        ).reshape((n_base + 1) * n_base**state_len, -1)

        # bias chosen so scale * tanh(bias) reproduces blank_score
        params['b'] = torch.nn.functional.pad(
            params['b'].reshape(n_base**state_len, n_base),
            (1, 0),
            value=np.arctanh(blank_score / layer_dict['scale'])
        ).reshape(-1)

    return layer_dict
def to_guppy_dict(model, include_weights=True):
    """
    Serialize `model.encoder` into a guppy-compatible layer dictionary.

    Drops permute layers, renames lstm -> LSTM (with explicit activations),
    expands convolution padding to a (left, right) pair, wraps reversed
    layers, and reformats the final layer as a global-norm transducer.
    """
    guppy_dict = bonito.nn.to_dict(model.encoder, include_weights=include_weights)
    # permute layers are a torch implementation detail; guppy has no equivalent
    guppy_dict['sublayers'] = [x for x in guppy_dict['sublayers'] if x['type'] != 'permute']
    guppy_dict['sublayers'] = [dict(x, type='LSTM', activation='tanh', gate='sigmoid') if x['type'] == 'lstm' else x for x in guppy_dict['sublayers']]
    guppy_dict['sublayers'] = [dict(x, padding=(x['padding'], x['padding'])) if x['type'] == 'convolution' else x for x in guppy_dict['sublayers']]
    # note: pop mutates x in place while building the new list
    guppy_dict['sublayers'] = [{'type': 'reverse', 'sublayers': x} if x.pop('reverse', False) else x for x in guppy_dict['sublayers']]
    guppy_dict['sublayers'][-1] = reformat_output_layer(guppy_dict['sublayers'][-1])
    return guppy_dict
def main(args):
    """Export a trained model directory as guppy-compatible JSON on stdout."""
    if not os.path.isdir(args.model):
        print("[error] file given - please provide a model directory to export.", file=sys.stderr)
        return 1

    model = bonito.util.load_model(args.model, device='cpu')
    jsn = to_guppy_dict(model)

    # checksum the most recent checkpoint so consumers can verify the weights
    weight_files = glob(os.path.join(args.model, "weights_*.tar"))
    latest = max(int(re.sub(".*_([0-9]+).tar", "\\1", w)) for w in weight_files)
    jsn["md5sum"] = file_md5(os.path.join(args.model, 'weights_%s.tar' % latest))

    json.dump(jsn, sys.stdout, cls=JsonEncoder)
def argparser():
    """Build the CLI parser for the export command."""
    parser = ArgumentParser(
        formatter_class=ArgumentDefaultsHelpFormatter,
        add_help=False,
    )
    parser.add_argument('model')
    return parser
|
"""
Bonito model viewer - display a model architecture for a given config.
"""
def main(args):
    """Instantiate the model described by a config and print its architecture."""
    config = toml.load(args.config)
    Model = load_symbol(config, "Model")
    model = Model(config)
    print(model)
    n_params = sum(p.numel() for p in model.parameters())
    print("Total parameters in model", n_params)
def argparser():
    """Build the CLI parser for the model viewer."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        add_help=False,
    )
    parser.add_argument("config")
    return parser
|
"""
Bonito Basecaller
"""
def main(args):
    """
    Basecall a directory of reads, writing calls (or CTC training data when
    --save-ctc) via a background writer thread.
    """
    if args.save_ctc and not args.reference:
        sys.stderr.write("> a reference is needed to output ctc training data\n")
        exit(1)

    sys.stderr.write("> loading model\n")
    model = load_model(args.model_directory, args.device, weights=int(args.weights))

    if args.reference:
        sys.stderr.write("> loading reference\n")
        aligner = Aligner(args.reference, preset='ont-map', best_n=1)
        if not aligner:
            sys.stderr.write("> failed to load/build index\n")
            exit(1)
    else:
        aligner = None

    reads = get_reads(
        args.reads_directory, n_proc=8, recursive=args.recursive,
        read_ids=column_to_set(args.read_ids), skip=args.skip,
        cancel=process_cancel()
    )

    if args.max_reads:
        reads = take(reads, args.max_reads)

    basecall = load_symbol(args.model_directory, "basecall")

    if args.save_ctc:
        # CTC mode: basecall fixed-size chunks and emit training data
        reads = (
            chunk for read in reads for chunk in read_chunks(read, chunksize=args.chunksize)
        )
        basecalls = basecall(
            model, reads, batchsize=64, chunksize=args.chunksize,
            aligner=aligner, qscores=args.fastq, reverse=args.revcomp,
        )
        writer = CTCWriter(
            tqdm(basecalls, desc="> calling", unit=" reads", leave=False),
            aligner, args.ctc_min_coverage, args.ctc_min_accuracy
        )
    else:
        basecalls = basecall(
            model, reads, aligner=aligner, reverse=args.revcomp,
            qscores=args.fastq, batchsize=args.batchsize, chunksize=args.chunksize,
        )
        writer = Writer(
            tqdm(basecalls, desc="> calling", unit=" reads", leave=False),
            aligner, fastq=args.fastq
        )

    t0 = perf_counter()
    writer.start()
    writer.join()
    duration = perf_counter() - t0

    # writer.log holds (read_id, num_samples) pairs for throughput stats
    num_samples = sum(num_samples for read_id, num_samples in writer.log)
    sys.stderr.write("> completed reads: %s\n" % len(writer.log))
    sys.stderr.write("> duration: %s\n" % timedelta(seconds=np.round(duration)))
    sys.stderr.write("> samples per second %.1E\n" % (num_samples / duration))
    sys.stderr.write("> done\n")
def argparser():
    """Build the CLI parser for the basecaller."""
    parser = ArgumentParser(
        formatter_class=ArgumentDefaultsHelpFormatter,
        add_help=False,
    )
    parser.add_argument("model_directory")
    parser.add_argument("reads_directory")
    parser.add_argument("--reference")
    parser.add_argument("--read-ids")
    parser.add_argument("--device", default="cuda")
    parser.add_argument("--weights", default="0", type=str)
    # boolean switches, all off by default
    for flag in ("--skip", "--fastq", "--save-ctc", "--revcomp", "--recursive"):
        parser.add_argument(flag, action="store_true", default=False)
    parser.add_argument("--ctc-min-coverage", default=0.9, type=float)
    parser.add_argument("--ctc-min-accuracy", default=0.9, type=float)
    parser.add_argument("--batchsize", default=32, type=int)
    parser.add_argument("--chunksize", default=4000, type=int)
    parser.add_argument("--max-reads", default=0, type=int)
    return parser
|
"""
Bonito Duplex consensus decoding.
https://www.biorxiv.org/content/10.1101/2020.02.25.956771v1
"""
def poagen(groups, gpu_percent=0.8):
    """
    Yield consensus sequences for `groups` (lists of sequences) using GPU
    partial-order alignment, packing as many groups per batch as fit in the
    requested fraction of free GPU memory.
    """
    free, total = cuda.cuda_get_mem_info(cuda.cuda_get_device())
    gpu_mem_per_batch = gpu_percent * free

    # size the batch for the largest group and longest sequence observed
    max_seq_sz = 0
    max_sequences_per_poa = 0

    for group in groups:
        longest_seq = len(max(group, key=len))
        max_seq_sz = longest_seq if longest_seq > max_seq_sz else max_seq_sz
        seq_in_poa = len(group)
        max_sequences_per_poa = seq_in_poa if seq_in_poa > max_sequences_per_poa else max_sequences_per_poa

    batch = CudaPoaBatch(
        max_sequences_per_poa,
        max_seq_sz,
        gpu_mem_per_batch,
        output_type="consensus",
        cuda_banded_alignment=True,
        alignment_band_width=256,
    )

    poa_index = 0
    # index of the first group in the current batch (for error reporting)
    initial_count = 0

    while poa_index < len(groups):
        group = groups[poa_index]
        group_status, seq_status = batch.add_poa_group(group)

        # If group was added and more space is left in batch, continue onto next group.
        if group_status == 0:
            for seq_index, status in enumerate(seq_status):
                if status != 0:
                    print("Could not add sequence {} to POA {} - error {}".format(seq_index, poa_index, status_to_str(status)), file=sys.stderr)
            poa_index += 1

        # Once batch is full or no groups are left, run POA processing.
        if ((group_status == 1) or ((group_status == 0) and (poa_index == len(groups)))):
            batch.generate_poa()

            consensus, coverage, con_status = batch.get_consensus()

            for p, status in enumerate(con_status):
                if status != 0:
                    print("Could not get consensus for POA group {} - {}".format(initial_count + p, status_to_str(status)), file=sys.stderr)

            yield from consensus
            initial_count = poa_index
            # empty the batch; the group that did not fit is retried next loop
            batch.reset()

        # In the case where POA group wasn't processed correctly.
        elif group_status != 0:
            print("Could not add POA group {} to batch - {}".format(poa_index, status_to_str(group_status)), file=sys.stderr)
            poa_index += 1
def get_read(readdir, summary, idx):
    """
    Get a single read from row `idx` in the `summary` dataframe.
    """
    row = summary.iloc[idx]
    return get_raw_data_for_read((readdir / row.filename_fast5, row.read_id))
def read_gen(directory, summary, n_proc=1, cancel=None):
    """
    Generate reads from the given `directory` listed in the `summary` dataframe.

    Reads are fetched in summary-row order by a pool of `n_proc` workers;
    setting the optional `cancel` event stops the generator early.
    """
    with Pool(n_proc) as pool:
        # imap preserves row order while overlapping the file reads
        for read in pool.imap(partial(get_read, Path(directory), summary), range(len(summary))):
            yield read
            if cancel is not None and cancel.is_set():
                return
def get_read_ids(filename):
    """
    Return a dictionary of read_id -> filename mappings.
    """
    fname = basename(filename)
    with get_fast5_file(filename, 'r') as f5:
        return dict((read.read_id, fname) for read in f5.get_reads())
def build_index(files, n_proc=1):
    """
    Build an index of read ids to filename mappings
    """
    index = {}
    with ProcessPoolExecutor(max_workers=n_proc) as pool:
        # each worker returns a {read_id: filename} mapping for one file
        results = pool.map(get_read_ids, files)
        for mapping in tqdm(results, leave=False):
            index.update(mapping)
    return index
def build_envelope(len1, seq1, path1, len2, seq2, path2, padding=15):
    """
    Build an alignment envelope mapping each signal position of strand 1 onto
    a [start, end) range of signal positions in strand 2, used to band the
    duplex beam search.
    """
    # needleman-wunsch alignment with constant gap penalty.
    aln = parasail.nw_trace_striped_32(seq2, seq1, 2, 2, parasail.dnafull)

    # pair up positions
    alignment = np.column_stack([
        np.cumsum([x != '-' for x in aln.traceback.ref]) - 1,
        np.cumsum([x != '-' for x in aln.traceback.query]) - 1
    ])

    # each base index expands to a [start, end) range in its signal path
    path_range1 = np.column_stack([path1, path1[1:] + [len1]])
    path_range2 = np.column_stack([path2, path2[1:] + [len2]])

    # -1 marks positions with no envelope assigned yet
    envelope = np.full((len1, 2), -1, dtype=int)

    for idx1, idx2 in alignment.clip(0):
        st_1, en_1 = path_range1[idx1]
        st_2, en_2 = path_range2[idx2]
        for idx in range(st_1, en_1):
            # widen the band to cover every aligned range
            if st_2 < envelope[idx, 0] or envelope[idx, 0] < 0:
                envelope[idx, 0] = st_2
            if en_2 > envelope[idx, 1] or envelope[idx, 1] < 0:
                envelope[idx, 1] = en_2

    # add a little padding to ensure some overlap
    envelope[:, 0] = envelope[:, 0] - padding
    envelope[:, 1] = envelope[:, 1] + padding
    envelope = np.clip(envelope, 0, len2)

    # repair degenerate/overlapping bands so starts never jump past the
    # previous band's end
    prev_end = 0
    for i in range(envelope.shape[0]):
        if envelope[i, 0] > envelope[i, 1]:
            envelope[i, 0] = 0
        if envelope[i, 0] > prev_end:
            envelope[i, 0] = prev_end
        prev_end = envelope[i, 1]

    return envelope.astype(np.uint64)
def find_follow_on(df, gap=5, distance=51, cov=0.85, min_len=100):
    """
    Find follow on reads from a sequencing summary file.

    Keeps consecutive rows (after sorting by run/channel/mux/start_time) that
    share a channel and mux, map to nearly the same reference locus on
    opposite strands, and start within `gap` seconds of the previous read
    ending — i.e. candidate template/complement pairs.
    """
    # restrict to well-mapped, long-enough reads
    df = df[
        df.alignment_coverage.astype('float32').gt(cov) &
        df.sequence_length_template.astype('int32').gt(min_len)
    ]
    df = df.sort_values(['run_id', 'channel', 'mux', 'start_time'])

    genome_start = np.array(df.alignment_genome_start, dtype=np.int32)
    genome_end = np.array(df.alignment_genome_end, dtype=np.int32)
    direction = np.array(df.alignment_direction)
    start_time = np.array(df.start_time, dtype=np.float32)
    end_time = np.array(df.start_time + df.duration, dtype=np.float32)
    channel = np.array(df.channel, dtype=np.int32)
    mux = np.array(df.mux, dtype=np.int32)

    # compare each row against its immediate successor
    filt = (
        (channel[1:] == channel[:-1]) &
        (mux[1:] == mux[:-1]) &
        (np.abs(genome_start[1:] - genome_start[:-1]) < distance) &
        (np.abs(genome_end[1:] - genome_end[:-1]) < distance) &
        (direction[1:] != direction[:-1]) &
        (start_time[1:] - end_time[:-1] < gap)
    )

    # select both members of every matching pair
    mask = np.full(len(filt) + 1, False)
    mask[:-1] = mask[:-1] | filt
    mask[1:] = mask[1:] | filt

    return df[mask]
def compute_scores(model, batch, reverse=False):
    """
    Run the model encoder over `batch` and return transition probabilities
    ('trans', batch-first) and initial state probabilities ('init').
    """
    with torch.no_grad():
        device = next(model.parameters()).device
        dtype = torch.float16 if half_supported() else torch.float32
        scores = model.encoder(batch.to(dtype).to(device))
        # complement strands are scored on the reverse-complement lattice
        if reverse: scores = model.seqdist.reverse_complement(scores)
        # backward scores are computed in float32 for numerical stability
        betas = model.seqdist.backward_scores(scores.to(torch.float32))
        trans, init = model.seqdist.compute_transition_probs(scores, betas)
    return {
        'trans': trans.to(dtype).transpose(0, 1),
        'init': init.to(dtype).unsqueeze(1),
    }
def basecall(model, reads, chunksize=4000, overlap=500, batchsize=32, reverse=False):
    """
    Lazily score `reads`: split -> chunk -> batch -> score -> stitch ->
    transfer -> concat. Yields (read, scores) pairs.
    """
    # split each read into batch-sized pieces (reversed order for complement strands)
    reads = (
        read_chunk for read in reads
        for read_chunk in split_read(read, chunksize * batchsize)[::-1 if reverse else 1]
    )
    # cut each piece into overlapping fixed-length chunks
    chunks = (
        ((read, start, end),
        chunk(torch.from_numpy(read.signal[start:end]), chunksize, overlap))
        for (read, start, end) in reads
    )
    batches = (
        (k, compute_scores(model, batch, reverse=reverse))
        for k, batch in batchify(chunks, batchsize=batchsize)
    )
    # rejoin overlapping chunk scores into per-piece scores
    stitched = (
        (read, stitch(x, chunksize, overlap, end - start, model.stride, reverse=reverse))
        for ((read, start, end), x) in unbatchify(batches)
    )
    # move results off the device on a single worker thread
    transferred = thread_map(transfer, stitched, n_thread=1)
    # concatenate all pieces belonging to the same read
    return (
        (read, concat([part for k, part in parts]))
        for read, parts in groupby(transferred, lambda x: x[0])
    )
def beam_search_duplex(seq1, path1, t1, b1, seq2, path2, t2, b2, alphabet='NACGT', beamsize=5, pad=40, T=0.01):
    """Duplex beam search constrained to the alignment envelope of the two strands."""
    envelope = build_envelope(t1.shape[0], seq1, path1, t2.shape[0], seq2, path2, padding=pad)
    return crf_beam_search_duplex(
        t1, b1, t2, b2,
        alphabet=alphabet,
        beam_size=beamsize,
        beam_cut_threshold=T,
        envelope=envelope,
    )
def decode(res, beamsize_1=5, pad_1=40, cut_1=0.01, beamsize_2=5, pad_2=40, cut_2=0.01, match=80, alphabet="NACGT"):
    """
    Decode a (template, complement) score pair into duplex calls.

    Returns [duplex1, duplex2, simplex1, simplex2] on success, or just
    [simplex1, simplex2] when either strand is too short or the simplex
    calls disagree by more than `match` percent accuracy.

    BUG FIX: the beamsize_1/2 and cut_1/2 parameters were previously
    accepted but ignored (beam size 5 and cut 0.01 were hard-coded);
    they are now wired through. Defaults are unchanged, so default-call
    behavior is identical.
    """
    temp_probs, init1 = res[0]['trans'].astype(np.float32), res[0]['init'][0].astype(np.float32)
    comp_probs, init2 = res[1]['trans'].astype(np.float32), res[1]['init'][0].astype(np.float32)

    simplex1, path1 = crf_beam_search(temp_probs, init1, alphabet, beam_size=beamsize_1, beam_cut_threshold=cut_1)
    simplex2, path2 = crf_beam_search(comp_probs, init2, alphabet, beam_size=beamsize_2, beam_cut_threshold=cut_2)

    # fall back to simplex calls when duplex decoding is not viable
    if len(simplex1) < 10 or len(simplex2) < 10:
        return [simplex1, simplex2]

    if accuracy(simplex1, simplex2) < match:
        return [simplex1, simplex2]

    duplex1 = beam_search_duplex(
        simplex1, path1, temp_probs, init1, simplex2, path2, comp_probs, init2,
        pad=pad_1, beamsize=beamsize_1, T=cut_1
    )
    duplex2 = beam_search_duplex(
        simplex2, path2, comp_probs, init2, simplex1, path1, temp_probs, init1,
        pad=pad_2, beamsize=beamsize_2, T=cut_2
    )
    return [duplex1, duplex2, simplex1, simplex2]
def poa(seqs, allseq=False):
    """Partial-order-align `seqs`; return (consensus,) or (consensus, *seqs)."""
    consensus, _ = spoa.poa(seqs, genmsa=False)
    return (consensus, *seqs) if allseq else (consensus, )
def call(model, reads_directory, templates, complements, aligner=None, cudapoa=True):
    """
    Duplex-call template/complement read pairs and form consensus sequences
    (GPU POA when `cudapoa`, otherwise CPU spoa); optionally align results.
    """
    temp_reads = read_gen(reads_directory, templates, n_proc=8, cancel=process_cancel())
    comp_reads = read_gen(reads_directory, complements, n_proc=8, cancel=process_cancel())

    temp_scores = basecall(model, temp_reads, reverse=False)
    comp_scores = basecall(model, comp_reads, reverse=True)

    # pair the two streams positionally: ((read1, read2), (scores1, scores2))
    scores = (((r1, r2), (s1, s2)) for (r1, s1), (r2, s2) in zip(temp_scores, comp_scores))
    calls = thread_map(decode, scores, n_thread=12)

    # decode() yields 4 sequences on duplex success, 2 on simplex fallback
    if cudapoa:
        sequences = ((reads, [seqs, ]) for reads, seqs in calls if len(seqs) > 2)
        consensus = (zip(reads, poagen(calls)) for reads, calls in batchify(sequences, 100))
        res = ((reads[0], {'sequence': seq}) for seqs in consensus for reads, seq in seqs)
    else:
        sequences = ((reads, seqs) for reads, seqs in calls if len(seqs) > 2)
        consensus = process_map(poa, sequences, n_proc=4)
        res = ((reads, {'sequence': seq}) for reads, seqs in consensus for seq in seqs)

    if aligner is None: return res
    return align_map(aligner, res)
def main(args):
    """
    Duplex basecalling entry point: pair template/complement reads (from a
    sequencing summary or an explicit pairs file), call them, and write the
    consensus sequences.
    """
    sys.stderr.write("> loading model\n")
    model = load_model(args.model, args.device)

    if args.reference:
        sys.stderr.write("> loading reference\n")
        aligner = Aligner(args.reference, preset='ont-map')
        if not aligner:
            sys.stderr.write("> failed to load/build index\n")
            exit(1)
    else:
        aligner = None

    if args.summary:
        # pair discovery mode: find follow-on strands in a sequencing summary
        sys.stderr.write("> finding follow on strands\n")
        pairs = pd.read_csv(args.summary, '\t', low_memory=False)
        pairs = pairs[pairs.sequence_length_template.gt(0)]
        # normalise column names across summary-file versions
        if 'filename' in pairs.columns:
            pairs = pairs.rename(columns={'filename': 'filename_fast5'})
        if 'alignment_strand_coverage' in pairs.columns:
            pairs = pairs.rename(columns={'alignment_strand_coverage': 'alignment_coverage'})
        valid_fast5s = [
            f for f in pairs.filename_fast5.unique()
            if ((args.reads_directory / Path(f)).exists())
        ]
        pairs = pairs[pairs.filename_fast5.isin(valid_fast5s)]
        pairs = find_follow_on(pairs)
        sys.stderr.write("> found %s follow strands in summary\n" % (len(pairs) // 2))

        if args.max_reads > 0: pairs = pairs.head(args.max_reads)

        # find_follow_on returns rows in template/complement order
        temp_reads = pairs.iloc[0::2]
        comp_reads = pairs.iloc[1::2]
    else:
        # explicit pairs mode: read ids resolved to files via an index
        if args.index is not None:
            sys.stderr.write("> loading read index\n")
            index = json.load(open(args.index, 'r'))
        else:
            sys.stderr.write("> building read index\n")
            files = list(glob(os.path.join(args.reads_directory, '*.fast5')))
            index = build_index(files, n_proc=8)
            if args.save_index:
                with open('bonito-read-id.idx', 'w') as f:
                    json.dump(index, f)

        pairs = pd.read_csv(args.pairs, sep=args.sep, names=['read_1', 'read_2'])

        if args.max_reads > 0: pairs = pairs.head(args.max_reads)

        pairs['file_1'] = pairs['read_1'].apply(index.get)
        pairs['file_2'] = pairs['read_2'].apply(index.get)
        # drop pairs where either read id was not found in the index
        pairs = pairs.dropna().reset_index()

        temp_reads = pairs[['read_1', 'file_1']].rename(
            columns={'read_1': 'read_id', 'file_1': 'filename_fast5'}
        )
        comp_reads = pairs[['read_2', 'file_2']].rename(
            columns={'read_2': 'read_id', 'file_2': 'filename_fast5'}
        )

    if len(pairs) == 0:
        print("> no matched pairs found in given directory", file=sys.stderr)
        exit(1)

    # https://github.com/clara-parabricks/GenomeWorks/issues/648
    with devnull(): CudaPoaBatch(1000, 1000, 3724032)

    basecalls = call(model, args.reads_directory, temp_reads, comp_reads, aligner=aligner)
    writer = Writer(tqdm(basecalls, desc="> calling", unit=" reads", leave=False), aligner, duplex=True)

    t0 = perf_counter()
    writer.start()
    writer.join()
    duration = perf_counter() - t0

    num_samples = sum(num_samples for read_id, num_samples in writer.log)
    print("> duration: %s" % timedelta(seconds=np.round(duration)), file=sys.stderr)
    print("> samples per second %.1E" % (num_samples / duration), file=sys.stderr)
def argparser():
    """Build the CLI parser for duplex calling."""
    parser = ArgumentParser(
        formatter_class=ArgumentDefaultsHelpFormatter,
        add_help=False,
    )
    parser.add_argument("model")
    parser.add_argument("reads_directory")
    # pairing input: either a sequencing summary or an explicit pairs file
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--summary", default=None)
    group.add_argument("--pairs", default=None)
    parser.add_argument("--sep", default=' ')
    parser.add_argument("--index", default=None)
    parser.add_argument("--save-index", action="store_true", default=False)
    parser.add_argument("--reference")
    parser.add_argument("--device", default="cuda")
    parser.add_argument("--max-reads", default=0, type=int)
    return parser
|
#!/usr/bin/env python3
"""
Bonito training.
"""
def main(args):
    """
    Train a bonito model.

    Loads chunked training data, optionally splits off a validation set,
    builds the model (fresh from config or from a pretrained directory),
    and runs the Trainer. Refuses to reuse an existing workdir unless -f.
    """
    workdir = os.path.expanduser(args.training_directory)

    if os.path.exists(workdir) and not args.force:
        print("[error] %s exists, use -f to force continue training." % workdir)
        exit(1)

    init(args.seed, args.device)
    device = torch.device(args.device)

    print("[loading data]")
    train_data = load_data(limit=args.chunks, directory=args.directory)
    if os.path.exists(os.path.join(args.directory, 'validation')):
        valid_data = load_data(directory=os.path.join(args.directory, 'validation'))
    else:
        print("[validation set not found: splitting training set]")
        # hold out the last 3% of the training chunks for validation
        split = np.floor(len(train_data[0]) * 0.97).astype(np.int32)
        valid_data = [x[split:] for x in train_data]
        train_data = [x[:split] for x in train_data]

    train_loader = DataLoader(ChunkDataSet(*train_data), batch_size=args.batch, shuffle=True, num_workers=4, pin_memory=True)
    valid_loader = DataLoader(ChunkDataSet(*valid_data), batch_size=args.batch, num_workers=4, pin_memory=True)

    if args.pretrained:
        dirname = args.pretrained
        # allow a bare model name that resolves inside the bundled models dir
        if not os.path.isdir(dirname) and os.path.isdir(os.path.join(__models__, dirname)):
            dirname = os.path.join(__models__, dirname)
        config_file = os.path.join(dirname, 'config.toml')
    else:
        config_file = args.config

    config = toml.load(config_file)

    argsdict = dict(training=vars(args))

    os.makedirs(workdir, exist_ok=True)
    # BUGFIX: the config file handle was opened inline and never closed;
    # use a context manager so it is flushed and closed deterministically.
    with open(os.path.join(workdir, 'config.toml'), 'w') as config_out:
        toml.dump({**config, **argsdict}, config_out)

    print("[loading model]")
    if args.pretrained:
        print("[using pretrained model {}]".format(args.pretrained))
        model = load_model(args.pretrained, device, half=False)
    else:
        model = load_symbol(config, 'Model')(config)

    last_epoch = load_state(workdir, args.device, model)

    if args.multi_gpu:
        from torch.nn import DataParallel
        model = DataParallel(model)
        # DataParallel hides module attributes; re-expose the ones callers use
        model.decode = model.module.decode
        model.alphabet = model.module.alphabet

    trainer = Trainer(model, device, train_loader, valid_loader, use_amp=half_supported() and not args.no_amp)
    trainer.fit(workdir, args.epochs, args.lr, last_epoch=last_epoch)
def argparser():
    """Build the CLI argument parser for bonito training."""
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter, add_help=False)
    parser.add_argument("training_directory")

    # either train from a config or fine-tune a pretrained model, not both
    model_source = parser.add_mutually_exclusive_group()
    model_source.add_argument('--config', default=default_config)
    model_source.add_argument('--pretrained', default="")

    parser.add_argument("--directory", default=default_data)
    parser.add_argument("--device", default="cuda")
    parser.add_argument("--lr", default=2e-3, type=float)
    parser.add_argument("--seed", default=25, type=int)
    parser.add_argument("--epochs", default=5, type=int)
    parser.add_argument("--batch", default=64, type=int)
    parser.add_argument("--chunks", default=0, type=int)
    parser.add_argument("--no-amp", action="store_true", default=False)
    parser.add_argument("--multi-gpu", action="store_true", default=False)
    parser.add_argument("-f", "--force", action="store_true", default=False)
    return parser
|
"""
Bonito model evaluator
"""
def main(args):
    """
    Evaluate one or more model checkpoints on a validation chunk set.

    For each checkpoint in --weights, basecalls the dataset and reports
    mean/median accuracy. With --poa, additionally builds a per-read
    consensus across all evaluated checkpoints and scores that too.
    """
    poas = []
    init(args.seed, args.device)

    print("* loading data")
    directory = args.directory
    if os.path.exists(os.path.join(directory, 'validation')):
        directory = os.path.join(directory, 'validation')

    testdata = ChunkDataSet(
        *load_data(
            limit=args.chunks, directory=directory
        )
    )
    dataloader = DataLoader(testdata, batch_size=args.batchsize)
    accuracy_with_cov = lambda ref, seq: accuracy(ref, seq, min_coverage=args.min_coverage)

    for w in [int(i) for i in args.weights.split(',')]:
        seqs = []
        print("* loading model", w)
        model = load_model(args.model_directory, args.device, weights=w)

        print("* calling")
        t0 = time.perf_counter()
        with torch.no_grad():
            for data, *_ in dataloader:
                if half_supported():
                    data = data.type(torch.float16).to(args.device)
                else:
                    data = data.to(args.device)
                log_probs = model(data)
                if hasattr(model, 'decode_batch'):
                    seqs.extend(model.decode_batch(log_probs))
                else:
                    seqs.extend([model.decode(p) for p in permute(log_probs, 'TNC', 'NTC')])
        duration = time.perf_counter() - t0

        refs = [decode_ref(target, model.alphabet) for target in dataloader.dataset.targets]
        accuracies = [accuracy_with_cov(ref, seq) if len(seq) else 0. for ref, seq in zip(refs, seqs)]

        # BUGFIX: was `poas.append(sequences)` — `sequences` is undefined
        if args.poa: poas.append(seqs)

        print("* mean      %.2f%%" % np.mean(accuracies))
        print("* median    %.2f%%" % np.median(accuracies))
        print("* time      %.2f" % duration)
        print("* samples/s %.2E" % (args.chunks * data.shape[2] / duration))

    if args.poa:
        print("* doing poa")
        t0 = time.perf_counter()
        # group each sequence prediction per model together
        poas = [list(seq) for seq in zip(*poas)]
        consensuses = poa(poas)
        duration = time.perf_counter() - t0
        # BUGFIX: was `starmap(accuracy_with_coverage_filter, zip(references, ...))`
        # — both names are undefined; use the local accuracy helper and refs.
        accuracies = list(starmap(accuracy_with_cov, zip(refs, consensuses)))

        print("* mean      %.2f%%" % np.mean(accuracies))
        print("* median    %.2f%%" % np.median(accuracies))
        print("* time      %.2f" % duration)
def argparser():
    """Build the CLI argument parser for the model evaluator."""
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter, add_help=False)
    parser.add_argument("model_directory")
    parser.add_argument("--directory", default=None)
    parser.add_argument("--device", default="cuda")
    parser.add_argument("--seed", default=9, type=int)
    parser.add_argument("--weights", default="0", type=str)
    parser.add_argument("--chunks", default=1000, type=int)
    parser.add_argument("--batchsize", default=96, type=int)
    parser.add_argument("--beamsize", default=5, type=int)
    parser.add_argument("--poa", action="store_true", default=False)
    parser.add_argument("--min-coverage", default=0.5, type=float)
    return parser
|
"""
Bonito CTC-CRF Model.
"""
def get_stride(m):
    """
    Return the temporal stride of a layer, recursing into containers.

    Serial containers multiply the strides of their children; layers with
    no stride contribute a factor of 1.
    """
    if hasattr(m, 'stride'):
        s = m.stride
        return s if isinstance(s, int) else s[0]
    if isinstance(m, Convolution):
        return get_stride(m.conv)
    if isinstance(m, Serial):
        return int(np.prod([get_stride(child) for child in m]))
    return 1
class CTC_CRF(SequenceDist):
    """
    Conditional random field over a CTC-style k-mer state space.

    States are the n_base**state_len k-mers; at each timestep a state either
    emits a blank (score index 0) or one of n_base bases, so the model has
    len(alphabet) scores per state per timestep.
    """
    def __init__(self, state_len, alphabet):
        super().__init__()
        self.alphabet = alphabet
        self.state_len = state_len
        # alphabet[0] is the blank symbol; the rest are bases
        self.n_base = len(alphabet[1:])
        # idx[s] = [s, predecessors of s]: for each state, itself (the stay
        # transition) plus the n_base states that can move into it.
        self.idx = torch.cat([
            torch.arange(self.n_base**(self.state_len))[:, None],
            torch.arange(
                self.n_base**(self.state_len)
            ).repeat_interleave(self.n_base).reshape(self.n_base, -1).T
        ], dim=1).to(torch.int32)

    def n_score(self):
        # total scores per timestep: states * (blank + n_base emissions)
        return len(self.alphabet) * self.n_base**(self.state_len)

    def logZ(self, scores, S:semiring=Log):
        """Log partition function per batch element, in semiring S."""
        T, N, _ = scores.shape
        Ms = scores.reshape(T, N, -1, len(self.alphabet))
        # uniform (semiring-one) initial and final state weights
        alpha_0 = Ms.new_full((N, self.n_base**(self.state_len)), S.one)
        beta_T = Ms.new_full((N, self.n_base**(self.state_len)), S.one)
        return seqdist.sparse.logZ(Ms, self.idx, alpha_0, beta_T, S)

    def normalise(self, scores):
        # spread logZ evenly over the T timesteps so every frame is shifted,
        # not just one (len(scores) == T)
        return (scores - self.logZ(scores)[:, None] / len(scores))

    def forward_scores(self, scores, S: semiring=Log):
        """Forward (alpha) scores for each timestep/state."""
        T, N, _ = scores.shape
        Ms = scores.reshape(T, N, -1, self.n_base + 1)
        alpha_0 = Ms.new_full((N, self.n_base**(self.state_len)), S.one)
        return seqdist.sparse.fwd_scores_cupy(Ms, self.idx, alpha_0, S, K=1)

    def backward_scores(self, scores, S: semiring=Log):
        """Backward (beta) scores for each timestep/state."""
        T, N, _ = scores.shape
        Ms = scores.reshape(T, N, -1, self.n_base + 1)
        beta_T = Ms.new_full((N, self.n_base**(self.state_len)), S.one)
        return seqdist.sparse.bwd_scores_cupy(Ms, self.idx, beta_T, S, K=1)

    def compute_transition_probs(self, scores, betas):
        """Posterior transition probabilities given edge scores and betas."""
        T, N, C = scores.shape
        # add bwd scores to edge scores
        log_trans_probs = (scores.reshape(T, N, -1, self.n_base + 1) + betas[1:, :, :, None])
        # transpose from (new_state, dropped_base) to (old_state, emitted_base) layout
        log_trans_probs = torch.cat([
            log_trans_probs[:, :, :, [0]],
            log_trans_probs[:, :, :, 1:].transpose(3, 2).reshape(T, N, -1, self.n_base)
        ], dim=-1)
        # convert from log probs to probs by exponentiating and normalising
        trans_probs = torch.softmax(log_trans_probs, dim=-1)
        #convert first bwd score to initial state probabilities
        init_state_probs = torch.softmax(betas[0], dim=-1)
        return trans_probs, init_state_probs

    def reverse_complement(self, scores):
        """Scores of the reverse-complement sequence model."""
        T, N, C = scores.shape
        # expand the flat state index into one axis per k-mer position
        expand_dims = T, N, *(self.n_base for _ in range(self.state_len)), self.n_base + 1
        scores = scores.reshape(*expand_dims)
        # reverse time and the k-mer axes; base complement is the axis flip
        blanks = torch.flip(scores[..., 0].permute(
            0, 1, *range(self.state_len + 1, 1, -1)).reshape(T, N, -1, 1), [0, 2]
        )
        emissions = torch.flip(scores[..., 1:].permute(
            0, 1, *range(self.state_len, 1, -1),
            self.state_len +2,
            self.state_len + 1).reshape(T, N, -1, self.n_base), [0, 2, 3]
        )
        return torch.cat([blanks, emissions], dim=-1).reshape(T, N, -1)

    def viterbi(self, scores):
        """Most-likely label index per timestep (0 is blank)."""
        # posteriors in the Max semiring give the viterbi traceback
        traceback = self.posteriors(scores, Max)
        paths = traceback.argmax(2) % len(self.alphabet)
        return paths

    def path_to_str(self, path):
        """Convert a label-index path to a string, dropping blanks (0)."""
        alphabet = np.frombuffer(''.join(self.alphabet).encode(), dtype='u1')
        seq = alphabet[path[path != 0]]
        return seq.tobytes().decode()

    def prepare_ctc_scores(self, scores, targets):
        """Gather per-timestep stay/move scores for a CTC target sequence."""
        # convert from CTC targets (with blank=0) to zero indexed
        targets = torch.clamp(targets - 1, 0)

        T, N, C = scores.shape
        scores = scores.to(torch.float32)
        n = targets.size(1) - (self.state_len - 1)
        # encode each length-state_len window of the target as a state index
        stay_indices = sum(
            targets[:, i:n + i] * self.n_base ** (self.state_len - i - 1)
            for i in range(self.state_len)
        ) * len(self.alphabet)
        move_indices = stay_indices[:, 1:] + targets[:, :n - 1] + 1
        stay_scores = scores.gather(2, stay_indices.expand(T, -1, -1))
        move_scores = scores.gather(2, move_indices.expand(T, -1, -1))
        return stay_scores, move_scores

    def ctc_loss(self, scores, targets, target_lengths, loss_clip=None, reduction='mean', normalise_scores=True):
        """Negative mean (per-base) log-likelihood of the targets."""
        if normalise_scores:
            scores = self.normalise(scores)
        stay_scores, move_scores = self.prepare_ctc_scores(scores, targets)
        logz = logZ_cupy(stay_scores, move_scores, target_lengths + 1 - self.state_len)
        loss = - (logz / target_lengths)
        if loss_clip:
            loss = torch.clamp(loss, 0.0, loss_clip)
        if reduction == 'mean':
            return loss.mean()
        elif reduction in ('none', None):
            return loss
        else:
            raise ValueError('Unknown reduction type {}'.format(reduction))

    def ctc_viterbi_alignments(self, scores, targets, target_lengths):
        """Viterbi alignment of each target sequence to the score matrix."""
        stay_scores, move_scores = self.prepare_ctc_scores(scores, targets)
        return viterbi_alignments(stay_scores, move_scores, target_lengths + 1 - self.state_len)
def conv(c_in, c_out, ks, stride=1, bias=False, activation=None):
    """Convolution helper with 'same'-style padding (ks // 2)."""
    return Convolution(
        c_in, c_out, ks,
        stride=stride, padding=ks // 2, bias=bias, activation=activation,
    )
def rnn_encoder(n_base, state_len, insize=1, stride=5, winlen=19, activation='swish', rnn_type='lstm', features=768, scale=5.0, blank_score=None, single_head_attn=False):
    """
    Build the default encoder: conv front-end, 5 alternating-direction RNN
    layers (optionally with a single-head attention block before the last),
    and a linear CRF output head.
    """
    rnn = layers[rnn_type]
    modules = [
        conv(insize, 4, ks=5, bias=True, activation=activation),
        conv(4, 16, ks=5, bias=True, activation=activation),
        conv(16, features, ks=winlen, stride=stride, bias=True, activation=activation),
        Permute([2, 0, 1]),  # (N, C, T) -> (T, N, C) for the RNN stack
        rnn(features, features, reverse=True), rnn(features, features),
        rnn(features, features, reverse=True), rnn(features, features),
    ]
    if single_head_attn:
        modules.append(SHABlock(features))
    modules.append(rnn(features, features, reverse=True))
    modules.append(
        LinearCRFEncoder(features, n_base, state_len, bias=True, activation='tanh', scale=scale, blank_score=blank_score)
    )
    return Serial(modules)
class SeqdistModel(Module):
    """
    Pairs an encoder network with a sequence distribution (e.g. CTC_CRF)
    that knows how to turn the encoder's scores into sequences.
    """
    def __init__(self, encoder, seqdist):
        super().__init__()
        self.seqdist = seqdist
        self.encoder = encoder
        self.stride = get_stride(encoder)
        self.alphabet = seqdist.alphabet

    def forward(self, x):
        scores = self.encoder(x)
        return scores.to(torch.float32)

    def decode_batch(self, x):
        """Viterbi-decode a (T, N, C) score batch into N strings."""
        posts = self.seqdist.posteriors(x.to(torch.float32)) + 1e-8
        paths = self.seqdist.viterbi(posts.log()).to(torch.int16).T
        return [self.seqdist.path_to_str(p) for p in paths.cpu().numpy()]

    def decode(self, x):
        """Decode a single read's (T, C) scores."""
        return self.decode_batch(x.unsqueeze(1))[0]
class Model(SeqdistModel):
    """SeqdistModel assembled from a bonito config dict."""
    def __init__(self, config):
        seqdist = CTC_CRF(
            state_len=config['global_norm']['state_len'],
            alphabet=config['labels']['labels'],
        )
        encoder_config = config['encoder']
        if 'type' in encoder_config:
            # new-style config: the encoder is fully described in the config
            encoder = from_dict(encoder_config)
        else:
            # old-style config: build the default rnn encoder
            encoder = rnn_encoder(seqdist.n_base, seqdist.state_len, insize=config['input']['features'], **encoder_config)
        super().__init__(encoder, seqdist)
        self.config = config
|
"""
Bonito CRF basecall
"""
def stitch(chunks, chunksize, overlap, length, stride, reverse=False):
    """
    Stitch chunks together with a given overlap.

    Dict inputs (e.g. {'scores': ..., 'betas': ...}) are stitched per key.
    """
    if not isinstance(chunks, dict):
        return bonito.util.stitch(chunks, chunksize, overlap, length, stride, reverse=reverse)
    return {
        key: stitch(value, chunksize, overlap, length, stride, reverse=reverse)
        for key, value in chunks.items()
    }
def compute_scores(model, batch, reverse=False):
    """
    Compute transition scores and backward scores for a batch.

    Returns a dict of (N, T, ...) tensors: raw model scores and the
    backward (beta) scores shifted so each timestep's max is 5.0 (keeps
    them in range for int8 quantisation downstream).
    """
    with torch.no_grad():
        device = next(model.parameters()).device
        dtype = torch.float16 if half_supported() else torch.float32
        scores = model(batch.to(dtype).to(device))
        if reverse:
            scores = model.seqdist.reverse_complement(scores)
        betas = model.seqdist.backward_scores(scores.to(torch.float32))
        betas -= (betas.max(2, keepdim=True)[0] - 5.0)
    return {
        'scores': scores.transpose(0, 1),
        'betas': betas.transpose(0, 1),
    }
def quantise_int8(x, scale=127/5):
    """
    Quantise scores to int8.

    Multiplies 'scores' and 'betas' by `scale` (in place on the input
    tensors) and rounds to int8. Betas are clamped to the representable
    int8 range first.

    NOTE(review): 'scores' are not clamped — assumes upstream normalisation
    keeps them within ±5 so scale=127/5 maps into int8 range; confirm
    against compute_scores.
    """
    scores = x['scores']
    scores *= scale
    scores = torch.round(scores).to(torch.int8).detach()
    betas = x['betas']
    betas *= scale
    # BUGFIX: clamp to 127 (int8 max), not 128 — casting 128.0 to int8
    # wraps around to -128.
    betas = torch.round(torch.clamp(betas, -127., 127.)).to(torch.int8).detach()
    return {'scores': scores, 'betas': betas}
def transfer(x):
    """
    Device to host transfer using pinned memory.

    Copies every tensor in the dict *x* into a freshly allocated pinned
    host buffer on a side CUDA stream and returns them as numpy arrays.
    """
    # wait for all queued GPU work so the copied values are final
    torch.cuda.synchronize()
    # run the D2H copies on their own stream so they can overlap with
    # compute issued later on the default stream
    with torch.cuda.stream(torch.cuda.Stream()):
        return {
            k: torch.empty(v.shape, pin_memory=True, dtype=v.dtype).copy_(v).numpy()
            for k, v in x.items()
        }
def decode_int8(scores, seqdist, scale=127/5, beamsize=40, beamcut=100.0):
    """
    Beamsearch decode of int8-quantised scores.

    Uses the quantised backward scores as a guide and converts the
    resulting state path to a base string.
    """
    path, _ = beamsearch(
        scores['scores'], scale, seqdist.n_base, beamsize,
        guide=scores['betas'], beam_cut=beamcut
    )
    try:
        # map each state to its emitted base (1..n_base; 0 is blank)
        # generalised: use seqdist.n_base instead of the hard-coded 4
        return seqdist.path_to_str(path % seqdist.n_base + 1)
    except IndexError:
        return ""
def split_read(read, split_read_length=400000):
    """
    Split large reads into manageable pieces.

    Returns a list of (read, start, end) signal spans; reads at or under
    the limit come back as a single full-length span.
    """
    signal_len = len(read.signal)
    if signal_len <= split_read_length:
        return [(read, 0, signal_len)]
    boundaries = np.arange(0, signal_len + split_read_length, split_read_length)
    return [
        (read, lo, min(hi, signal_len))
        for lo, hi in zip(boundaries[:-1], boundaries[1:])
    ]
def basecall(model, reads, aligner=None, beamsize=40, chunksize=4000, overlap=500, batchsize=32, qscores=False, reverse=False):
    """
    Basecalls a set of reads.

    Lazy pipeline: split long reads -> chunk the signal -> batch -> score
    and int8-quantise on the model device -> stitch chunk scores back
    together -> transfer to host -> beamsearch decode -> re-join split
    reads -> optionally align. Returns a generator of (read, result) pairs.
    """
    _decode = partial(decode_int8, seqdist=model.seqdist, beamsize=beamsize)
    # pieces are processed back-to-front when basecalling the reverse strand
    reads = (read_chunk for read in reads for read_chunk in split_read(read)[::-1 if reverse else 1])
    chunks = (
        ((read, start, end), chunk(torch.from_numpy(read.signal[start:end]), chunksize, overlap))
        for (read, start, end) in reads
    )
    batches = (
        (k, quantise_int8(compute_scores(model, batch, reverse=reverse)))
        for k, batch in thread_iter(batchify(chunks, batchsize=batchsize))
    )
    stitched = (
        (read, stitch(x, chunksize, overlap, end - start, model.stride, reverse=reverse))
        for ((read, start, end), x) in unbatchify(batches)
    )
    # one transfer thread (device -> host), eight decode threads
    transferred = thread_map(transfer, stitched, n_thread=1)
    basecalls = thread_map(_decode, transferred, n_thread=8)
    # re-join the pieces of split reads into one sequence per parent read
    basecalls = (
        (read, ''.join(seq for k, seq in parts))
        for read, parts in groupby(basecalls, lambda x: (x[0].parent if hasattr(x[0], 'parent') else x[0]))
    )
    # this decoder produces no per-base quality scores
    basecalls = (
        (read, {'sequence': seq, 'qstring': '?' * len(seq) if qscores else '*', 'mean_qscore': 0.0})
        for read, seq in basecalls
    )
    if aligner: return align_map(aligner, basecalls)
    return basecalls
|
"""
Bonito Model template
"""
class Model(Module):
    """
    Model template for QuartzNet style architectures
    https://arxiv.org/pdf/1910.10261.pdf
    """
    def __init__(self, config):
        super(Model, self).__init__()
        # optional qscore calibration; identity transform when absent
        if 'qscore' not in config:
            self.qbias = 0.0
            self.qscale = 1.0
        else:
            self.qbias = config['qscore']['bias']
            self.qscale = config['qscore']['scale']

        self.config = config
        self.stride = config['block'][0]['stride'][0]
        self.alphabet = config['labels']['labels']
        self.features = config['block'][-1]['filters']
        self.encoder = Encoder(config)
        self.decoder = Decoder(self.features, len(self.alphabet))

    def forward(self, x):
        encoded = self.encoder(x)
        return self.decoder(encoded)

    def decode(self, x, beamsize=5, threshold=1e-3, qscores=False, return_path=False):
        """
        Decode a single read's (T, C) log-probabilities into a sequence.

        Uses viterbi when beamsize == 1 or qscores are requested (beam
        search produces no usable qstring), beam search otherwise.
        """
        x = x.exp().cpu().numpy().astype(np.float32)
        if beamsize == 1 or qscores:
            seq, path = viterbi_search(x, self.alphabet, qscores, self.qscale, self.qbias)
        else:
            seq, path = beam_search(x, self.alphabet, beamsize, threshold)
        if return_path: return seq, path
        return seq

    def ctc_label_smoothing_loss(self, log_probs, targets, lengths, weights=None):
        """
        CTC loss plus a label-smoothing term that penalises confident
        per-class log-probabilities (blank weighted 0.4, rest uniform).
        """
        T, N, C = log_probs.shape
        # BUGFIX: `weights = weights or ...` raised "Boolean value of Tensor
        # is ambiguous" whenever a weights tensor was actually passed in;
        # test for None explicitly instead.
        if weights is None:
            weights = torch.cat([torch.tensor([0.4]), (0.1 / (C - 1)) * torch.ones(C - 1)])
        log_probs_lengths = torch.full(size=(N, ), fill_value=T, dtype=torch.int64)
        loss = ctc_loss(log_probs.to(torch.float32), targets, log_probs_lengths, lengths, reduction='mean')
        label_smoothing_loss = -((log_probs * weights.to(log_probs.device)).mean())
        return {'loss': loss + label_smoothing_loss, 'ctc_loss': loss, 'label_smooth_loss': label_smoothing_loss}
class Encoder(Module):
    """
    Builds the model encoder from the 'block' section of the config.
    """
    def __init__(self, config):
        super(Encoder, self).__init__()
        self.config = config

        in_features = self.config['input']['features']
        activation = layers[self.config['encoder']['activation']]()

        blocks = []
        for spec in self.config['block']:
            blocks.append(Block(
                in_features, spec['filters'], activation,
                repeat=spec['repeat'], kernel_size=spec['kernel'],
                stride=spec['stride'], dilation=spec['dilation'],
                dropout=spec['dropout'], residual=spec['residual'],
                separable=spec['separable'],
            ))
            # the next block consumes this block's output channels
            in_features = spec['filters']

        self.encoder = Sequential(*blocks)

    def forward(self, x):
        return self.encoder(x)
class TCSConv1d(Module):
    """
    Time-Channel Separable 1D Convolution

    With separable=True, factorises the convolution into a depthwise conv
    (one filter per input channel) followed by a 1x1 pointwise mix;
    otherwise a single standard Conv1d.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False, separable=False):
        super(TCSConv1d, self).__init__()
        self.separable = separable

        if separable:
            self.depthwise = Conv1d(
                in_channels, in_channels, kernel_size=kernel_size, stride=stride,
                padding=padding, dilation=dilation, bias=bias, groups=in_channels
            )
            self.pointwise = Conv1d(
                in_channels, out_channels, kernel_size=1, stride=1,
                dilation=dilation, bias=bias, padding=0
            )
        else:
            self.conv = Conv1d(
                in_channels, out_channels, kernel_size=kernel_size,
                stride=stride, padding=padding, dilation=dilation, bias=bias
            )

    def forward(self, x):
        if self.separable:
            return self.pointwise(self.depthwise(x))
        return self.conv(x)
class Block(Module):
    """
    TCSConv, Batch Normalisation, Activation, Dropout

    One QuartzNet block: `repeat` TCS convolutions each followed by batch
    norm (activation + dropout between them, final activation + dropout
    applied after the optional residual add). With residual=True the block
    input is projected by a 1x1 conv + batch norm and added before the
    final activation.
    """
    def __init__(self, in_channels, out_channels, activation, repeat=5, kernel_size=1, stride=1, dilation=1, dropout=0.0, residual=False, separable=False):
        super(Block, self).__init__()

        self.use_res = residual
        self.conv = ModuleList()

        _in_channels = in_channels
        # kernel_size/stride/dilation arrive as 1-element sequences from the
        # config; padding keeps the output length when stride == 1
        padding = self.get_padding(kernel_size[0], stride[0], dilation[0])

        # add the first n - 1 convolutions + activation
        for _ in range(repeat - 1):
            self.conv.extend(
                self.get_tcs(
                    _in_channels, out_channels, kernel_size=kernel_size,
                    stride=stride, dilation=dilation,
                    padding=padding, separable=separable
                )
            )

            self.conv.extend(self.get_activation(activation, dropout))
            _in_channels = out_channels

        # add the last conv and batch norm
        self.conv.extend(
            self.get_tcs(
                _in_channels, out_channels,
                kernel_size=kernel_size,
                stride=stride, dilation=dilation,
                padding=padding, separable=separable
            )
        )

        # add the residual connection
        if self.use_res:
            self.residual = Sequential(*self.get_tcs(in_channels, out_channels))

        # add the activation and dropout
        self.activation = Sequential(*self.get_activation(activation, dropout))

    def get_activation(self, activation, dropout):
        # NOTE(review): the same activation module instance is shared across
        # layers (stateless, so harmless)
        return activation, Dropout(p=dropout)

    def get_padding(self, kernel_size, stride, dilation):
        if stride > 1 and dilation > 1:
            raise ValueError("Dilation and stride can not both be greater than 1")
        return (kernel_size // 2) * dilation

    def get_tcs(self, in_channels, out_channels, kernel_size=1, stride=1, dilation=1, padding=0, bias=False, separable=False):
        # conv + batch-norm pair, as a list so callers can ModuleList.extend
        return [
            TCSConv1d(
                in_channels, out_channels, kernel_size,
                stride=stride, dilation=dilation, padding=padding,
                bias=bias, separable=separable
            ),
            BatchNorm1d(out_channels, eps=1e-3, momentum=0.1)
        ]

    def forward(self, x):
        _x = x
        for layer in self.conv:
            _x = layer(_x)
        if self.use_res:
            _x = _x + self.residual(x)
        return self.activation(_x)
class Decoder(Module):
    """
    Decoder head: 1x1 convolution projecting features to classes, a
    permute from (N, C, T) to (T, N, C), then log-softmax over classes.
    """
    def __init__(self, features, classes):
        super(Decoder, self).__init__()
        projection = Conv1d(features, classes, kernel_size=1, bias=True)
        to_tnc = Permute([2, 0, 1])
        self.layers = Sequential(projection, to_tnc)

    def forward(self, x):
        logits = self.layers(x)
        return log_softmax(logits, dim=-1)
|
"""
Bonito basecall
"""
def basecall(model, reads, aligner=None, beamsize=5, chunksize=0, overlap=0, batchsize=1, qscores=False, reverse=None):
    """
    Basecalls a set of reads.

    Lazy pipeline: chunk each read's signal -> batch -> run the model ->
    stitch per-chunk scores back to read length -> decode in a process
    pool -> optionally align. Returns a generator of (read, result) pairs.
    """
    chunks = (
        (read, chunk(torch.tensor(read.signal), chunksize, overlap)) for read in reads
    )
    scores = unbatchify(
        (k, compute_scores(model, v)) for k, v in batchify(chunks, batchsize)
    )
    scores = (
        (read, {'scores': stitch(v, chunksize, overlap, len(read.signal), model.stride)}) for read, v in scores
    )
    # decoding is CPU-heavy, so fan it out across worker processes
    decoder = partial(decode, decode=model.decode, beamsize=beamsize, qscores=qscores)
    basecalls = process_map(decoder, scores, n_proc=4)
    if aligner: return align_map(aligner, basecalls)
    return basecalls
def compute_scores(model, batch):
    """
    Run the model on a batch and return float32 (N, T, C) scores on the CPU.
    """
    with torch.no_grad():
        device = next(model.parameters()).device
        half_batch = batch.to(torch.half).to(device)
        probs = permute(model(half_batch), 'TNC', 'NTC')
    return probs.cpu().to(torch.float32)
def decode(scores, decode, beamsize=5, qscores=False):
    """
    Convert the network scores into a sequence.

    Always runs a greedy (beamsize=1) decode first to obtain a qstring for
    the mean qscore; when beam search is requested it replaces the sequence
    (but cannot provide a qstring/path), falling back to the greedy result
    if it fails.
    """
    # do a greedy decode to get a sensible qstring to compute the mean qscore from
    seq, path = decode(scores['scores'], beamsize=1, qscores=True, return_path=True)
    # the greedy call returns sequence and qstring concatenated
    seq, qstring = seq[:len(path)], seq[len(path):]
    mean_qscore = mean_qscore_from_qstring(qstring)

    # beam search will produce a better sequence but doesn't produce a sensible qstring/path
    if not (qscores or beamsize == 1):
        try:
            seq = decode(scores['scores'], beamsize=beamsize)
            path = None
            qstring = '*'
        except Exception:
            # BUGFIX: narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit); deliberately best-effort — keep
            # the greedy result when beam search fails
            pass
    return {'sequence': seq, 'qstring': qstring, 'mean_qscore': mean_qscore, 'path': path}
|
# constants

# Backend-selection flags forwarded to torch.backends.cuda.sdp_kernel()
# to choose which scaled-dot-product-attention kernels may run.
FlashAttentionConfig = namedtuple('FlashAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
    """Return True when *val* is not None."""
    return val is not None
def once(fn):
    """Wrap *fn* so only the first call runs; later calls return None."""
    has_run = False

    @wraps(fn)
    def wrapper(x):
        nonlocal has_run
        if not has_run:
            has_run = True
            return fn(x)

    return wrapper

# module-level print that only ever fires once (used for device notices)
print_once = once(print)
# main class
class Attend(nn.Module):
    """
    Scaled dot-product attention, optionally dispatched to torch 2.0's
    fused F.scaled_dot_product_attention ("flash") kernels.
    """
    def __init__(
        self,
        dropout = 0.,
        flash = False
    ):
        super().__init__()
        self.dropout = dropout
        self.attn_dropout = nn.Dropout(dropout)

        self.flash = flash
        assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'

        # determine efficient attention configs for cuda and cpu
        self.cpu_config = FlashAttentionConfig(True, True, True)
        self.cuda_config = None

        # cuda_config stays None when flash is off or no GPU is present
        if not torch.cuda.is_available() or not flash:
            return

        device_properties = torch.cuda.get_device_properties(torch.device('cuda'))

        if device_properties.major == 8 and device_properties.minor == 0:
            # compute capability 8.0 => enable only the flash kernel
            print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
            self.cuda_config = FlashAttentionConfig(True, False, False)
        else:
            print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
            self.cuda_config = FlashAttentionConfig(False, True, True)

    def flash_attn(self, q, k, v):
        # NOTE(review): heads/q_len/k_len/device are unpacked but unused here
        _, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device

        # Check if there is a compatible device for flash attention
        config = self.cuda_config if is_cuda else self.cpu_config

        # pytorch 2.0 flash attn: q, k, v, mask, dropout, softmax_scale
        with torch.backends.cuda.sdp_kernel(**config._asdict()):
            out = F.scaled_dot_product_attention(
                q, k, v,
                dropout_p = self.dropout if self.training else 0.
            )

        return out

    def forward(self, q, k, v):
        """
        einstein notation
        b - batch
        h - heads
        n, i, j - sequence length (base sequence length, source, target)
        d - feature dimension
        """
        # NOTE(review): q_len/k_len/device are computed but unused below
        q_len, k_len, device = q.shape[-2], k.shape[-2], q.device

        scale = q.shape[-1] ** -0.5

        if self.flash:
            return self.flash_attn(q, k, v)

        # similarity
        sim = einsum(f"b h i d, b h j d -> b h i j", q, k) * scale

        # attention
        attn = sim.softmax(dim=-1)
        attn = self.attn_dropout(attn)

        # aggregate values
        out = einsum(f"b h i j, b h j d -> b h i d", attn, v)

        return out
|
# helper functions
def exists(val):
    """Return True unless *val* is None."""
    return val is not None
# norm
class RMSNorm(Module):
    """
    RMS normalisation: unit-normalise the last dimension, then rescale by
    sqrt(dim) and a learned per-channel gain (initialised to ones).
    """
    def __init__(self, dim):
        super().__init__()
        self.scale = dim ** 0.5
        self.gamma = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        normed = F.normalize(x, dim = -1)
        return normed * self.scale * self.gamma
# attention
class FeedForward(Module):
    """Pre-norm MLP: RMSNorm -> Linear(dim, dim*mult) -> GELU -> Dropout -> Linear -> Dropout."""
    def __init__(
        self,
        dim,
        mult = 4,
        dropout = 0.
    ):
        super().__init__()
        hidden_dim = int(dim * mult)
        self.net = nn.Sequential(
            RMSNorm(dim),
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout),
        )

    def forward(self, x):
        return self.net(x)
class Attention(Module):
    """
    Multi-head self-attention with RMSNorm pre-norm, a fused qkv
    projection, and optional rotary position embeddings on q and k.
    """
    def __init__(
        self,
        dim,
        heads = 8,
        dim_head = 64,
        dropout = 0.,
        rotary_embed = None,
        flash = True
    ):
        super().__init__()
        self.heads = heads
        self.scale = dim_head **-0.5
        dim_inner = heads * dim_head

        self.rotary_embed = rotary_embed
        self.attend = Attend(flash = flash, dropout = dropout)

        self.norm = RMSNorm(dim)
        self.to_qkv = nn.Linear(dim, dim_inner * 3, bias = False)
        self.to_out = nn.Sequential(
            nn.Linear(dim_inner, dim, bias = False),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        normed = self.norm(x)

        qkv = rearrange(self.to_qkv(normed), 'b n (qkv h d) -> qkv b h n d', qkv = 3, h = self.heads)
        q, k, v = qkv

        if exists(self.rotary_embed):
            q = self.rotary_embed.rotate_queries_or_keys(q)
            k = self.rotary_embed.rotate_queries_or_keys(k)

        attended = self.attend(q, k, v)

        merged = rearrange(attended, 'b h n d -> b n (h d)')
        return self.to_out(merged)
class Transformer(Module):
    """Stack of pre-norm attention + feed-forward layers with residual adds."""
    def __init__(
        self,
        *,
        dim,
        depth,
        dim_head = 64,
        heads = 8,
        attn_dropout = 0.,
        ff_dropout = 0.,
        ff_mult = 4,
        norm_output = True,
        rotary_embed = None,
        flash_attn = True
    ):
        super().__init__()
        self.layers = ModuleList([
            ModuleList([
                Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout, rotary_embed = rotary_embed, flash = flash_attn),
                FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
            ])
            for _ in range(depth)
        ])

        self.norm = RMSNorm(dim) if norm_output else nn.Identity()

    def forward(self, x):
        for attn, ff in self.layers:
            x = x + attn(x)
            x = x + ff(x)
        return self.norm(x)
# bandsplit module
class BandSplit(Module):
    """
    Split the last dimension into frequency bands and project each band
    (RMSNorm + Linear) to a common feature size; bands stack along dim -2.
    """
    @beartype
    def __init__(
        self,
        dim,
        dim_inputs: Tuple[int, ...]
    ):
        super().__init__()
        self.dim_inputs = dim_inputs
        self.to_features = ModuleList([
            nn.Sequential(RMSNorm(band_dim), nn.Linear(band_dim, dim))
            for band_dim in dim_inputs
        ])

    def forward(self, x):
        bands = x.split(self.dim_inputs, dim = -1)
        features = [project(band) for band, project in zip(bands, self.to_features)]
        return torch.stack(features, dim = -2)
class LinearGLUWithTanH(Module):
    """GLU variant: project to 2*dim_out, then tanh(value) * sigmoid(gate)."""
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        value, gate = self.proj(x).chunk(2, dim = -1)
        return value.tanh() * gate.sigmoid()
class MaskEstimator(Module):
    """
    Per-band stack of LinearGLUWithTanH layers (the last layer maps back
    to the band's own width); band outputs concatenate along the last dim.
    """
    @beartype
    def __init__(
        self,
        dim,
        dim_inputs: Tuple[int, ...],
        depth
    ):
        super().__init__()
        self.dim_inputs = dim_inputs
        self.to_freqs = ModuleList([])

        for band_dim in dim_inputs:
            stack = []
            for layer_idx in range(depth):
                # intermediate layers stay at `dim`; the final layer
                # restores this band's frequency width
                out_dim = band_dim if layer_idx == depth - 1 else dim
                stack.append(LinearGLUWithTanH(dim, out_dim))
            self.to_freqs.append(nn.Sequential(*stack))

    def forward(self, x):
        bands = x.unbind(dim = -2)
        freqs = [estimate(band) for band, estimate in zip(bands, self.to_freqs)]
        return torch.cat(freqs, dim = -1)
# main class
class BSRoformer(Module):
    """
    Band-Split RoPE transformer for source separation.

    Forward path: STFT -> per-band split + projection -> alternating
    time-axis / frequency-axis transformers -> per-band mask estimation ->
    mask the STFT -> iSTFT. With a target, returns an L1 waveform loss plus
    a multi-resolution STFT L1 loss.
    """
    @beartype
    def __init__(
        self,
        dim,
        *,
        depth,
        time_transformer_depth = 2,
        freq_transformer_depth = 2,
        freqs_per_bands: Tuple[int, ...] = (256, 257),  # in the paper, they divide into ~60 bands, test with 1 for starters
        dim_head = 64,
        heads = 8,
        attn_dropout = 0.,
        ff_dropout = 0.,
        flash_attn = True,
        dim_freqs_in = 513,
        stft_n_fft = 1024,
        stft_hop_length = 256,
        stft_win_length = 1024,
        stft_normalized = False,
        mask_estimator_depth = 1,
        multi_stft_resolution_loss_weight = 1.,
        multi_stft_resolutions_window_sizes: Tuple[int, ...] = (4096, 2048, 1024, 512, 256),
        multi_stft_hop_size = 147,
        multi_stft_normalized = False
    ):
        super().__init__()

        self.layers = ModuleList([])

        transformer_kwargs = dict(
            dim = dim,
            heads = heads,
            dim_head = dim_head,
            attn_dropout = attn_dropout,
            ff_dropout = ff_dropout,
            flash_attn = flash_attn
        )

        # rotary embeddings are shared across depth, separate per axis
        time_rotary_embed = RotaryEmbedding(dim = dim_head)
        freq_rotary_embed = RotaryEmbedding(dim = dim_head)

        # each layer attends along time, then along frequency
        for _ in range(depth):
            self.layers.append(nn.ModuleList([
                Transformer(depth = time_transformer_depth, rotary_embed = time_rotary_embed, **transformer_kwargs),
                Transformer(depth = freq_transformer_depth, rotary_embed = freq_rotary_embed, **transformer_kwargs)
            ]))

        self.stft_kwargs = dict(
            n_fft = stft_n_fft,
            hop_length = stft_hop_length,
            win_length = stft_win_length,
            normalized = stft_normalized
        )

        # probe the STFT once to learn the actual frequency-bin count
        freqs = torch.stft(torch.randn(1, 1024), **self.stft_kwargs, return_complex = True).shape[1]

        assert len(freqs_per_bands) > 1
        assert sum(freqs_per_bands) == freqs, f'the number of freqs in the bands must equal {freqs} based on the STFT settings'

        # each complex bin contributes (real, imag), doubling the band width
        freqs_per_bands_with_complex = tuple(2 * f for f in freqs_per_bands)

        self.band_split = BandSplit(
            dim = dim,
            dim_inputs = freqs_per_bands_with_complex
        )

        self.mask_estimator = MaskEstimator(
            dim = dim,
            dim_inputs = freqs_per_bands_with_complex,
            depth = mask_estimator_depth
        )

        # for the multi-resolution stft loss
        self.multi_stft_resolution_loss_weight = multi_stft_resolution_loss_weight
        self.multi_stft_resolutions_window_sizes = multi_stft_resolutions_window_sizes
        self.multi_stft_n_fft = stft_n_fft

        self.multi_stft_kwargs = dict(
            hop_length = multi_stft_hop_size,
            normalized = multi_stft_normalized
        )

    def forward(
        self,
        raw_audio,
        target = None,
        return_loss_breakdown = False
    ):
        """
        einops

        b - batch
        f - freq
        t - time
        c - complex (2)
        d - feature dimension
        """
        # to stft

        stft_repr = torch.stft(raw_audio, **self.stft_kwargs, return_complex = True)
        stft_repr = torch.view_as_real(stft_repr)

        x = rearrange(stft_repr, 'b f t c -> b t (f c)')

        x = self.band_split(x)

        # axial / hierarchical attention

        for time_transformer, freq_transformer in self.layers:

            # fold frequency into batch, attend over time
            x = rearrange(x, 'b t f d -> b f t d')
            x, ps = pack([x], 'b * d')

            x = time_transformer(x)

            x, = unpack(x, ps, 'b * d')

            # fold time into batch, attend over frequency
            x = rearrange(x, 'b f t d -> b t f d')
            x, ps = pack([x], 'b * d')

            x = freq_transformer(x)

            x, = unpack(x, ps, 'b * d')

        mask = self.mask_estimator(x)
        mask = rearrange(mask, 'b t (f c) -> b f t c', c = 2)

        # modulate frequency representation

        stft_repr = stft_repr * mask

        # istft

        stft_repr = torch.view_as_complex(stft_repr)

        recon_audio = torch.istft(stft_repr, **self.stft_kwargs, return_complex = False)

        # if a target is passed in, calculate loss for learning

        if not exists(target):
            return recon_audio

        target = target[..., :recon_audio.shape[-1]] # protect against lost length on istft

        loss = F.l1_loss(recon_audio, target)

        multi_stft_resolution_loss = 0.

        for window_size in self.multi_stft_resolutions_window_sizes:

            res_stft_kwargs = dict(
                n_fft = max(window_size, self.multi_stft_n_fft),  # not sure what n_fft is across multi resolution stft
                win_length = window_size,
                return_complex = True,
                **self.multi_stft_kwargs,
            )

            recon_Y = torch.stft(recon_audio, **res_stft_kwargs)
            target_Y = torch.stft(target, **res_stft_kwargs)

            multi_stft_resolution_loss = multi_stft_resolution_loss + F.l1_loss(recon_Y, target_Y)

        weighted_multi_resolution_loss = multi_stft_resolution_loss * self.multi_stft_resolution_loss_weight

        total_loss = loss + weighted_multi_resolution_loss

        if not return_loss_breakdown:
            return total_loss

        return total_loss, (loss, multi_stft_resolution_loss)
|
class BlackHole(object):
    """
    Inert sink object: attribute writes are discarded, and every attribute
    access or call simply yields the object itself. Useful as a drop-in
    no-op replacement for loggers/writers.
    """
    def __getattr__(self, name):
        return self

    def __setattr__(self, name, value):
        pass

    def __call__(self, *args, **kwargs):
        return self
def seed_all(seed):
    """Seed every RNG used here (PyTorch CPU/CUDA, NumPy, stdlib
    ``random``) and force deterministic cuDNN kernels so runs are
    reproducible."""
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
def recursive_to(obj, device):
    """Recursively move every tensor inside *obj* to *device*.

    Supports arbitrarily nested lists / tuples / dicts; any non-tensor
    leaf is returned unchanged.

    Fix: the tuple branch previously returned a bare *generator
    expression* instead of a tuple, so callers received a one-shot
    iterator rather than a container.
    """
    if isinstance(obj, torch.Tensor):
        try:
            # Fast path: asynchronous copy onto a CUDA device.
            return obj.cuda(device=device, non_blocking=True)
        except RuntimeError:
            # Fall back for non-CUDA targets (e.g. 'cpu').
            return obj.to(device)
    elif isinstance(obj, list):
        return [recursive_to(o, device=device) for o in obj]
    elif isinstance(obj, tuple):
        # tuple(...) -- the original returned the generator itself here.
        return tuple(recursive_to(o, device=device) for o in obj)
    elif isinstance(obj, dict):
        return {k: recursive_to(v, device=device) for k, v in obj.items()}
    else:
        return obj
|
# Map of non-standard / chemically modified PDB residue names to the closest
# standard amino-acid three-letter code (e.g. selenomethionine 'MSE' -> 'MET').
NON_STANDARD_SUBSTITUTIONS = {
    '2AS':'ASP', '3AH':'HIS', '5HP':'GLU', 'ACL':'ARG', 'AGM':'ARG', 'AIB':'ALA', 'ALM':'ALA', 'ALO':'THR', 'ALY':'LYS', 'ARM':'ARG',
    'ASA':'ASP', 'ASB':'ASP', 'ASK':'ASP', 'ASL':'ASP', 'ASQ':'ASP', 'AYA':'ALA', 'BCS':'CYS', 'BHD':'ASP', 'BMT':'THR', 'BNN':'ALA',
    'BUC':'CYS', 'BUG':'LEU', 'C5C':'CYS', 'C6C':'CYS', 'CAS':'CYS', 'CCS':'CYS', 'CEA':'CYS', 'CGU':'GLU', 'CHG':'ALA', 'CLE':'LEU', 'CME':'CYS',
    'CSD':'ALA', 'CSO':'CYS', 'CSP':'CYS', 'CSS':'CYS', 'CSW':'CYS', 'CSX':'CYS', 'CXM':'MET', 'CY1':'CYS', 'CY3':'CYS', 'CYG':'CYS',
    'CYM':'CYS', 'CYQ':'CYS', 'DAH':'PHE', 'DAL':'ALA', 'DAR':'ARG', 'DAS':'ASP', 'DCY':'CYS', 'DGL':'GLU', 'DGN':'GLN', 'DHA':'ALA',
    'DHI':'HIS', 'DIL':'ILE', 'DIV':'VAL', 'DLE':'LEU', 'DLY':'LYS', 'DNP':'ALA', 'DPN':'PHE', 'DPR':'PRO', 'DSN':'SER', 'DSP':'ASP',
    'DTH':'THR', 'DTR':'TRP', 'DTY':'TYR', 'DVA':'VAL', 'EFC':'CYS', 'FLA':'ALA', 'FME':'MET', 'GGL':'GLU', 'GL3':'GLY', 'GLZ':'GLY',
    'GMA':'GLU', 'GSC':'GLY', 'HAC':'ALA', 'HAR':'ARG', 'HIC':'HIS', 'HIP':'HIS', 'HMR':'ARG', 'HPQ':'PHE', 'HTR':'TRP', 'HYP':'PRO',
    'IAS':'ASP', 'IIL':'ILE', 'IYR':'TYR', 'KCX':'LYS', 'LLP':'LYS', 'LLY':'LYS', 'LTR':'TRP', 'LYM':'LYS', 'LYZ':'LYS', 'MAA':'ALA', 'MEN':'ASN',
    'MHS':'HIS', 'MIS':'SER', 'MLE':'LEU', 'MPQ':'GLY', 'MSA':'GLY', 'MSE':'MET', 'MVA':'VAL', 'NEM':'HIS', 'NEP':'HIS', 'NLE':'LEU',
    'NLN':'LEU', 'NLP':'LEU', 'NMC':'GLY', 'OAS':'SER', 'OCS':'CYS', 'OMT':'MET', 'PAQ':'TYR', 'PCA':'GLU', 'PEC':'CYS', 'PHI':'PHE',
    'PHL':'PHE', 'PR3':'CYS', 'PRR':'ALA', 'PTR':'TYR', 'PYX':'CYS', 'SAC':'SER', 'SAR':'GLY', 'SCH':'CYS', 'SCS':'CYS', 'SCY':'CYS',
    'SEL':'SER', 'SEP':'SER', 'SET':'SER', 'SHC':'CYS', 'SHR':'LYS', 'SMC':'CYS', 'SOC':'CYS', 'STY':'TYR', 'SVA':'SER', 'TIH':'ALA',
    'TPL':'TRP', 'TPO':'THR', 'TPQ':'ALA', 'TRG':'LYS', 'TRO':'TRP', 'TYB':'TYR', 'TYI':'TYR', 'TYQ':'TYR', 'TYS':'TYR', 'TYY':'TYR'
}
# Side-chain atom-name suffixes per one-letter residue code, in the order
# they occupy pos14 rows after the four backbone atoms N, CA, C, O.
RESIDUE_SIDECHAIN_POSTFIXES = {
    'A': ['B'],
    'R': ['B', 'G', 'D', 'E', 'Z', 'H1', 'H2'],
    'N': ['B', 'G', 'D1', 'D2'],
    'D': ['B', 'G', 'D1', 'D2'],
    'C': ['B', 'G'],
    'E': ['B', 'G', 'D', 'E1', 'E2'],
    'Q': ['B', 'G', 'D', 'E1', 'E2'],
    'G': [],
    'H': ['B', 'G', 'D1', 'D2', 'E1', 'E2'],
    'I': ['B', 'G1', 'G2', 'D1'],
    'L': ['B', 'G', 'D1', 'D2'],
    'K': ['B', 'G', 'D', 'E', 'Z'],
    'M': ['B', 'G', 'D', 'E'],
    'F': ['B', 'G', 'D1', 'D2', 'E1', 'E2', 'Z'],
    'P': ['B', 'G', 'D'],
    'S': ['B', 'G'],
    'T': ['B', 'G1', 'G2'],
    'W': ['B', 'G', 'D1', 'D2', 'E1', 'E2', 'E3', 'Z2', 'Z3', 'H2'],
    'Y': ['B', 'G', 'D1', 'D2', 'E1', 'E2', 'Z', 'H'],
    'V': ['B', 'G1', 'G2'],
}
# Index of glycine in the residue vocabulary used by `aa` tensors.
# NOTE(review): assumes the same ordering as `three_to_index` -- confirm.
GLY_INDEX = 5
# Column indices of the backbone atoms (and CB) inside a pos14 array.
ATOM_N, ATOM_CA, ATOM_C, ATOM_O, ATOM_CB = 0, 1, 2, 3, 4
def augmented_three_to_one(three):
    """Three-letter -> one-letter residue code, first mapping any
    non-standard residue name onto its standard equivalent."""
    canonical = NON_STANDARD_SUBSTITUTIONS.get(three, three)
    return three_to_one(canonical)
def augmented_three_to_index(three):
    """Three-letter residue code -> vocabulary index, first mapping any
    non-standard residue name onto its standard equivalent."""
    canonical = NON_STANDARD_SUBSTITUTIONS.get(three, three)
    return three_to_index(canonical)
def augmented_is_aa(three):
    """True when *three* is (or substitutes for) a standard amino acid."""
    canonical = NON_STANDARD_SUBSTITUTIONS.get(three, three)
    return is_aa(canonical, standard=True)
def is_hetero_residue(res):
    """True for HETATM records: the hetero-flag field of a Biopython
    residue id is non-blank for hetero residues."""
    hetero_flag = res.id[0].strip()
    return len(hetero_flag) > 0
def get_atom_name_postfix(atom):
    """Reduce a PDB atom name to the suffix convention used by
    ``RESIDUE_SIDECHAIN_POSTFIXES`` (backbone names are kept whole)."""
    name = atom.get_name()
    # Backbone atoms keep their full name.
    if name in ('N', 'CA', 'C', 'O'):
        return name
    # 'CG1' -> 'G1'; 'CB' -> 'B'.
    return name[-2:] if name[-1].isnumeric() else name[-1:]
def get_residue_pos14(res):
    """Pack a Biopython residue into a (14, 3) coordinate tensor.

    Rows follow backbone order N, CA, C, O and then the residue-specific
    side-chain suffixes; rows for absent atoms are left at +inf so
    callers can build a validity mask with ``isfinite()``.
    """
    coords = torch.full([14, 3], float('inf'))
    atoms_by_suffix = {}
    for atom in res.get_atoms():
        atoms_by_suffix[get_atom_name_postfix(atom)] = atom
    one_letter = augmented_three_to_one(res.get_resname())
    order = ['N', 'CA', 'C', 'O'] + RESIDUE_SIDECHAIN_POSTFIXES[one_letter]
    for row, suffix in enumerate(order):
        atom = atoms_by_suffix.get(suffix)
        if atom is None:
            continue
        coords[row, 0], coords[row, 1], coords[row, 2] = atom.get_coord().tolist()
    return coords
def parse_pdb(path, model_id=0):
    """Parse a PDB file into the residue-tensor dict built by
    ``parse_complex``."""
    # PDB files routinely trigger benign parser warnings; silence them.
    warnings.simplefilter('ignore', BiopythonWarning)
    structure = PDBParser().get_structure(None, path)
    return parse_complex(structure, model_id)
def parse_complex(structure, model_id=None):
    """Flatten a Biopython structure into per-residue tensors.

    Args:
        structure: Bio.PDB structure (or a model, when model_id is None).
        model_id: optional model index selected from the structure.
    Returns:
        dict with chain ids, residue types, pos14 coordinates + mask and
        sequence numbering, or None when no usable residue is found.
    """
    if model_id is not None:
        structure = structure[model_id]
    chains = Selection.unfold_entities(structure, 'C')
    aa, resseq, icode, seq = [], [], [], []
    pos14, pos14_mask = [], []
    chain_id, chain_seq = [], []
    for i, chain in enumerate(chains):
        seq_this = 0
        for res in chain:
            resname = res.get_resname()
            # Skip waters/ligands and residues lacking a full backbone.
            if not augmented_is_aa(resname): continue
            if not (res.has_id('CA') and res.has_id('C') and res.has_id('N')): continue
            # Chain
            chain_id.append(chain.get_id())
            chain_seq.append(i+1)
            # Residue types
            restype = augmented_three_to_index(resname)
            aa.append(restype)
            # Atom coordinates: +inf rows (missing atoms) become the mask
            # and are replaced by a large sentinel value in pos14.
            pos14_this = get_residue_pos14(res)
            pos14_mask_this = pos14_this.isfinite()
            pos14.append(pos14_this.nan_to_num(posinf=99999))
            pos14_mask.append(pos14_mask_this)
            # Sequential number: 1-based per chain, preserving gaps in the
            # author-assigned residue numbering.
            resseq_this = int(res.get_id()[1])
            icode_this = res.get_id()[2]
            if seq_this == 0:
                seq_this = 1
            else:
                d_resseq = resseq_this - resseq[-1]
                # Insertion codes share a resseq; still advance by one.
                if d_resseq == 0: seq_this += 1
                else: seq_this += d_resseq
            resseq.append(resseq_this)
            icode.append(icode_this)
            seq.append(seq_this)
    if len(aa) == 0:
        return None
    return {
        'name': structure.get_id(),
        # Chain
        'chain_id': ''.join(chain_id),
        'chain_seq': torch.LongTensor(chain_seq),
        # Sequence
        'aa': torch.LongTensor(aa),
        'resseq': torch.LongTensor(resseq),
        'icode': ''.join(icode),
        'seq': torch.LongTensor(seq),
        # Atom positions
        'pos14': torch.stack(pos14),
        'pos14_mask': torch.stack(pos14_mask),
    }
|
class PaddingCollate(object):
    """Collate function that right-pads every sample of a batch to the
    batch maximum length (measured on ``length_ref_key``) and then
    delegates to ``default_collate``.

    Fix: ``pad_values`` / ``donot_pad`` previously defaulted to mutable
    ``dict`` / ``set`` literals shared across all instances; they are
    now ``None`` sentinels expanded to fresh copies per instance.
    """

    # Default per-key padding values / keys exempt from padding.
    DEFAULT_PAD_VALUES = {'aa': 20, 'pos14': float('999'), 'icode': ' ', 'chain_id': '-'}
    DEFAULT_DONOT_PAD = {'foldx'}

    def __init__(self, length_ref_key='mutation_mask', pad_values=None, donot_pad=None, eight=False):
        super().__init__()
        self.length_ref_key = length_ref_key
        # Copy the class defaults so instances never share mutable state.
        self.pad_values = dict(self.DEFAULT_PAD_VALUES) if pad_values is None else pad_values
        self.donot_pad = set(self.DEFAULT_DONOT_PAD) if donot_pad is None else donot_pad
        self.eight = eight

    def _pad_last(self, x, n, value=0):
        """Pad *x* up to length *n* along its first dimension with
        *value*. Tensors, lists and strings are padded; dicts recurse;
        anything else passes through unchanged."""
        if isinstance(x, torch.Tensor):
            assert x.size(0) <= n
            if x.size(0) == n:
                return x
            pad_size = [n - x.size(0)] + list(x.shape[1:])
            pad = torch.full(pad_size, fill_value=value).to(x)
            return torch.cat([x, pad], dim=0)
        elif isinstance(x, list):
            pad = [value] * (n - len(x))
            return x + pad
        elif isinstance(x, str):
            if value == 0:  # Won't pad strings if not specified
                return x
            pad = value * (n - len(x))
            return x + pad
        elif isinstance(x, dict):
            padded = {}
            for k, v in x.items():
                if k in self.donot_pad:
                    padded[k] = v
                else:
                    padded[k] = self._pad_last(v, n, value=self._get_pad_value(k))
            return padded
        else:
            return x

    @staticmethod
    def _get_pad_mask(l, n):
        """Boolean mask of length *n*: True for the *l* real positions,
        False for the padding tail."""
        return torch.cat([
            torch.ones([l], dtype=torch.bool),
            torch.zeros([n-l], dtype=torch.bool)
        ], dim=0)

    def _get_pad_value(self, key):
        """Padding value configured for *key* (0 when unspecified)."""
        return self.pad_values.get(key, 0)

    def __call__(self, data_list):
        """Pad every sample to the batch maximum length and collate."""
        max_length = max([data[self.length_ref_key].size(0) for data in data_list])
        if self.eight:
            # Round up to a multiple of 8 (hardware-friendly shapes).
            max_length = math.ceil(max_length / 8) * 8
        data_list_padded = []
        for data in data_list:
            data_padded = {
                k: self._pad_last(v, max_length, value=self._get_pad_value(k))
                for k, v in data.items() if k in ('wt', 'mut', 'ddG', 'mutation_mask', 'index', 'mutation')
            }
            data_padded['mask'] = self._get_pad_mask(data[self.length_ref_key].size(0), max_length)
            data_list_padded.append(data_padded)
        return default_collate(data_list_padded)
def _mask_list(l, mask):
return [l[i] for i in range(len(l)) if mask[i]]
def _mask_string(s, mask):
return ''.join([s[i] for i in range(len(s)) if mask[i]])
def _mask_dict_recursively(d, mask):
out = {}
for k, v in d.items():
if isinstance(v, torch.Tensor) and v.size(0) == mask.size(0):
out[k] = v[mask]
elif isinstance(v, list) and len(v) == mask.size(0):
out[k] = _mask_list(v, mask)
elif isinstance(v, str) and len(v) == mask.size(0):
out[k] = _mask_string(v, mask)
elif isinstance(v, dict):
out[k] = _mask_dict_recursively(v, mask)
else:
out[k] = v
return out
class KnnResidue(object):
    """Transform that crops a complex down to the ``num_neighbors``
    residues whose CA atoms lie closest to any mutated residue."""

    def __init__(self, num_neighbors=128):
        super().__init__()
        self.num_neighbors = num_neighbors

    def __call__(self, data):
        ca_all = data['wt']['pos14'][:, ATOM_CA]
        ca_mut = ca_all[data['mutation_mask']]
        # Pairwise CA distances: (L, M) for L residues vs M mutated ones.
        pairwise = torch.linalg.norm(ca_mut.view(1, -1, 3) - ca_all.view(-1, 1, 3), dim=-1)
        try:
            keep = torch.zeros([pairwise.size(0)], dtype=torch.bool)
            nearest = pairwise.min(dim=1)[0].argsort()[:self.num_neighbors]
            keep[nearest] = True
        except IndexError as e:
            # Dump the offending sample before propagating, to aid debugging.
            print(data)
            raise e
        return _mask_dict_recursively(data, keep)
def load_wt_mut_pdb_pair(wt_path, mut_path):
    """Parse a wild-type / mutant PDB pair and return a ready-to-run,
    KNN-cropped, padded batch of size 1."""
    wt = parse_pdb(wt_path)
    mut = parse_pdb(mut_path)
    # Mutated positions are those where the residue type differs.
    mutation_mask = (wt['aa'] != mut['aa'])
    sample = {'wt': wt, 'mut': mut, 'mutation_mask': mutation_mask}
    cropped = KnnResidue()(sample)
    return PaddingCollate()([cropped])
|
class ComplexEncoder(nn.Module):
    """Encode a protein complex into per-residue features: a residue-level
    embedding followed by geometric attention over backbone frames."""
    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg
        # Vocabulary: 2*max_relpos+1 clamped offsets plus one extra bucket
        # for cross-chain pairs.
        self.relpos_embedding = nn.Embedding(cfg.max_relpos*2+2, cfg.pair_feat_dim)
        self.residue_encoder = PerResidueEncoder(cfg.node_feat_dim)
        if cfg.geomattn is not None:
            self.ga_encoder = GAEncoder(
                node_feat_dim = cfg.node_feat_dim,
                pair_feat_dim = cfg.pair_feat_dim,
                num_layers = cfg.geomattn.num_layers,
                spatial_attn_mode = cfg.geomattn.spatial_attn_mode,
            )
        else:
            # NOTE(review): forward() below always uses self.ga_encoder; this
            # MLP branch appears unused there -- confirm intent.
            self.out_mlp = nn.Sequential(
                nn.Linear(cfg.node_feat_dim, cfg.node_feat_dim), nn.ReLU(),
                nn.Linear(cfg.node_feat_dim, cfg.node_feat_dim), nn.ReLU(),
                nn.Linear(cfg.node_feat_dim, cfg.node_feat_dim),
            )
    def forward(self, pos14, aa, seq, chain, mask_atom):
        """
        Args:
            pos14: (N, L, 14, 3).
            aa: (N, L).
            seq: (N, L).
            chain: (N, L).
            mask_atom: (N, L, 14)
        Returns:
            (N, L, node_ch)
        """
        # Pair features: clamped relative sequence offset, with a dedicated
        # embedding index for pairs on different chains.
        same_chain = (chain[:, None, :] == chain[:, :, None]) # (N, L, L)
        relpos = (seq[:, None, :] - seq[:, :, None]).clamp(min=-self.cfg.max_relpos, max=self.cfg.max_relpos) + self.cfg.max_relpos # (N, L, L)
        relpos = torch.where(same_chain, relpos, torch.full_like(relpos, fill_value=self.cfg.max_relpos*2+1))
        pair_feat = self.relpos_embedding(relpos) # (N, L, L, pair_ch)
        # Per-residue local frames built from CA, C, N backbone atoms.
        R = construct_3d_basis(pos14[:, :, ATOM_CA], pos14[:, :, ATOM_C], pos14[:, :, ATOM_N])
        # Residue encoder
        res_feat = self.residue_encoder(aa, pos14, mask_atom)
        # Geom encoder
        t = pos14[:, :, ATOM_CA]
        mask_residue = mask_atom[:, :, ATOM_CA]
        res_feat = self.ga_encoder(R, t, get_pos_CB(pos14, mask_atom), res_feat, pair_feat, mask_residue)
        return res_feat
class DDGReadout(nn.Module):
    """Antisymmetric per-residue ddG head: a shared MLP processes the
    (wt, mut) and (mut, wt) concatenations and the difference is
    projected to a scalar, so swapping wild-type and mutant flips the
    sign of the prediction."""

    def __init__(self, feat_dim):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(feat_dim*2, feat_dim), nn.ReLU(),
            nn.Linear(feat_dim, feat_dim), nn.ReLU(),
            nn.Linear(feat_dim, feat_dim), nn.ReLU(),
            nn.Linear(feat_dim, feat_dim)
        )
        self.project = nn.Linear(feat_dim, 1, bias=False)

    def forward(self, node_feat_wt, node_feat_mut, mask=None):
        """
        Args:
            node_feat_wt: (N, L, F).
            node_feat_mut: (N, L, F).
            mask: (N, L).
        Returns:
            (N,) predicted ddG per complex.
        """
        fwd = self.mlp(torch.cat([node_feat_wt, node_feat_mut], dim=-1))
        rev = self.mlp(torch.cat([node_feat_mut, node_feat_wt], dim=-1))
        per_residue = self.project(fwd - rev).squeeze(-1)  # (N, L)
        if mask is not None:
            per_residue = per_residue * mask
        return per_residue.sum(dim=1)  # (N,)
class DDGPredictor(nn.Module):
    """End-to-end ddG model: encode wild-type and mutant complexes with
    a shared ComplexEncoder, then read out a scalar ddG."""

    def __init__(self, cfg):
        super().__init__()
        self.encoder = ComplexEncoder(cfg)
        self.ddG_readout = DDGReadout(cfg.node_feat_dim)

    def forward(self, complex_wt, complex_mut, ddG_true=None):
        """Predict ddG; when ``ddG_true`` is given, also return an MSE
        loss dict alongside the prediction."""
        # An atom is valid only when all three coordinates are valid.
        mask_atom_wt = complex_wt['pos14_mask'].all(dim=-1)   # (N, L, 14)
        mask_atom_mut = complex_mut['pos14_mask'].all(dim=-1)
        feat_wt = self.encoder(complex_wt['pos14'], complex_wt['aa'], complex_wt['seq'], complex_wt['chain_seq'], mask_atom_wt)
        feat_mut = self.encoder(complex_mut['pos14'], complex_mut['aa'], complex_mut['seq'], complex_mut['chain_seq'], mask_atom_mut)
        # One residue mask is enough for both complexes.
        mask_res = mask_atom_wt[:, :, ATOM_CA]
        ddG_pred = self.ddG_readout(feat_wt, feat_mut, mask_res)
        if ddG_true is None:
            return ddG_pred
        losses = {
            'ddG': F.mse_loss(ddG_pred, ddG_true),
        }
        return losses, ddG_pred
|
def _alpha_from_logits(logits, mask, inf=1e5):
"""
Args:
logits: Logit matrices, (N, L_i, L_j, num_heads).
mask: Masks, (N, L).
Returns:
alpha: Attention weights.
"""
N, L, _, _ = logits.size()
mask_row = mask.view(N, L, 1, 1).expand_as(logits) # (N, L, *, *)
mask_pair = mask_row * mask_row.permute(0, 2, 1, 3) # (N, L, L, *)
logits = torch.where(mask_pair, logits, logits-inf)
alpha = torch.softmax(logits, dim=2) # (N, L, L, num_heads)
alpha = torch.where(mask_row, alpha, torch.zeros_like(alpha))
return alpha
def _heads(x, n_heads, n_ch):
"""
Args:
x: (..., num_heads * num_channels)
Returns:
(..., num_heads, num_channels)
"""
s = list(x.size())[:-1] + [n_heads, n_ch]
return x.view(*s)
class GeometricAttention(nn.Module):
    """Geometry-aware multi-head attention over residues.

    Attention logits are the scaled sum of three terms: node (query/key)
    logits, a pair-feature bias, and a spatial term that is either a
    CB-distance kernel ('CB' mode) or an IPA-style virtual-point
    distance ('vpoint' mode).

    Fixes relative to the reviewed version:
      * ``_node_aggregation`` reshaped the value projection with
        ``query_key_dim`` instead of ``value_dim``; that only worked
        because both default to 16 and crashed for any other config.
      * renamed the misspelled local ``logtis_beta``.
    """

    def __init__(self, node_feat_dim, pair_feat_dim, spatial_attn_mode='CB', value_dim=16, query_key_dim=16, num_query_points=8, num_value_points=8, num_heads=12):
        super().__init__()
        self.node_feat_dim = node_feat_dim
        self.pair_feat_dim = pair_feat_dim
        self.value_dim = value_dim
        self.query_key_dim = query_key_dim
        self.num_query_points = num_query_points
        self.num_value_points = num_value_points
        self.num_heads = num_heads
        assert spatial_attn_mode in ('CB', 'vpoint')
        self.spatial_attn_mode = spatial_attn_mode
        # Node
        self.proj_query = nn.Linear(node_feat_dim, query_key_dim*num_heads, bias=False)
        self.proj_key = nn.Linear(node_feat_dim, query_key_dim*num_heads, bias=False)
        self.proj_value = nn.Linear(node_feat_dim, value_dim*num_heads, bias=False)
        # Pair
        self.proj_pair_bias = nn.Linear(pair_feat_dim, num_heads, bias=False)
        # Spatial: per-head learnable weight stored pre-softplus,
        # initialized so that softplus(spatial_coef) == 1.
        self.spatial_coef = nn.Parameter(torch.full([1, 1, 1, self.num_heads], fill_value=np.log(np.exp(1.) - 1.)), requires_grad=True)
        if spatial_attn_mode == 'vpoint':
            # Key points use num_query_points: keys must pair with queries.
            self.proj_query_point = nn.Linear(node_feat_dim, num_query_points*num_heads*3, bias=False)
            self.proj_key_point = nn.Linear(node_feat_dim, num_query_points*num_heads*3, bias=False)
            self.proj_value_point = nn.Linear(node_feat_dim, num_value_points*num_heads*3, bias=False)
        # Output
        if spatial_attn_mode == 'CB':
            self.out_transform = nn.Linear(
                in_features = (num_heads*pair_feat_dim) + (num_heads*value_dim) + (num_heads*(3+3+1)),
                out_features = node_feat_dim,
            )
        elif spatial_attn_mode == 'vpoint':
            self.out_transform = nn.Linear(
                in_features = (num_heads*pair_feat_dim) + (num_heads*value_dim) + (num_heads*num_value_points*(3+3+1)),
                out_features = node_feat_dim,
            )
        self.layer_norm = nn.LayerNorm(node_feat_dim)

    def _node_logits(self, x):
        """Query-key dot-product logits, (N, L, L, n_heads)."""
        query_l = _heads(self.proj_query(x), self.num_heads, self.query_key_dim)  # (N, L, n_heads, qk_ch)
        key_l = _heads(self.proj_key(x), self.num_heads, self.query_key_dim)      # (N, L, n_heads, qk_ch)
        query_l = query_l.permute(0, 2, 1, 3)  # (N,L1,H,C) -> (N,H,L1,C)
        key_l = key_l.permute(0, 2, 3, 1)      # (N,L2,H,C) -> (N,H,C,L2)
        logits = torch.matmul(query_l, key_l)  # (N,H,L1,L2)
        logits = logits.permute(0, 2, 3, 1)    # (N,L1,L2,H)
        # NOTE(review): no 1/sqrt(qk_ch) scaling is applied here (a scaled
        # variant was commented out upstream); preserved as-is.
        return logits

    def _pair_logits(self, z):
        """Per-pair additive bias from pair features, (N, L, L, n_heads)."""
        return self.proj_pair_bias(z)

    def _beta_logits(self, R, t, p_CB):
        """Distance-kernel logits from CB coordinates ('CB' mode)."""
        N, L, _ = t.size()
        qk = p_CB[:, :, None, :].expand(N, L, self.num_heads, 3)
        sum_sq_dist = ((qk.unsqueeze(2) - qk.unsqueeze(1)) ** 2).sum(-1)  # (N, L, L, n_heads)
        gamma = F.softplus(self.spatial_coef)
        logits_beta = sum_sq_dist * ((-1 * gamma * np.sqrt(2 / 9)) / 2)
        return logits_beta

    def _spatial_logits(self, R, t, x):
        """Virtual-point distance logits ('vpoint' mode)."""
        N, L, _ = t.size()
        # Query points, projected into the global frame.
        query_points = _heads(self.proj_query_point(x), self.num_heads*self.num_query_points, 3)  # (N, L, n_heads * n_pnts, 3)
        query_points = local_to_global(R, t, query_points)
        query_s = query_points.reshape(N, L, self.num_heads, -1)  # (N, L, n_heads, n_pnts*3)
        # Key points likewise, (N, L, n_heads * n_pnts, 3).
        key_points = _heads(self.proj_key_point(x), self.num_heads*self.num_query_points, 3)
        key_points = local_to_global(R, t, key_points)
        key_s = key_points.reshape(N, L, self.num_heads, -1)      # (N, L, n_heads, n_pnts*3)
        # Summed squared distance between query and key point sets.
        sum_sq_dist = ((query_s.unsqueeze(2) - key_s.unsqueeze(1)) ** 2).sum(-1)  # (N, L, L, n_heads)
        gamma = F.softplus(self.spatial_coef)
        logits_spatial = sum_sq_dist * ((-1 * gamma * np.sqrt(2 / (9 * self.num_query_points))) / 2)  # (N, L, L, n_heads)
        return logits_spatial

    def _pair_aggregation(self, alpha, z):
        """Attention-weighted sum of pair features, (N, L, n_heads*C)."""
        N, L = z.shape[:2]
        feat_p2n = alpha.unsqueeze(-1) * z.unsqueeze(-2)  # (N, L, L, n_heads, C)
        feat_p2n = feat_p2n.sum(dim=2)                    # (N, L, n_heads, C)
        return feat_p2n.reshape(N, L, -1)

    def _node_aggregation(self, alpha, x):
        """Attention-weighted sum of node values, (N, L, n_heads*v_ch)."""
        N, L = x.shape[:2]
        # Fix: reshape with value_dim (proj_value emits value_dim*num_heads);
        # the previous query_key_dim only worked when the two were equal.
        value_l = _heads(self.proj_value(x), self.num_heads, self.value_dim)  # (N, L, n_heads, v_ch)
        feat_node = alpha.unsqueeze(-1) * value_l.unsqueeze(1)  # (N, L, L, n_heads, v_ch)
        feat_node = feat_node.sum(dim=2)                        # (N, L, n_heads, v_ch)
        return feat_node.reshape(N, L, -1)

    def _beta_aggregation(self, alpha, R, t, p_CB, x):
        """Aggregate CB positions under attention, expressed in each
        residue's local frame as (points, distances, directions)."""
        N, L, _ = t.size()
        v = p_CB[:, :, None, :].expand(N, L, self.num_heads, 3)           # (N, L, n_heads, 3)
        aggr = alpha.reshape(N, L, L, self.num_heads, 1) * v.unsqueeze(1) # (N, *, L, n_heads, 3)
        aggr = aggr.sum(dim=2)
        feat_points = global_to_local(R, t, aggr)  # (N, L, n_heads, 3)
        feat_distance = feat_points.norm(dim=-1)
        feat_direction = normalize_vector(feat_points, dim=-1, eps=1e-4)
        feat_spatial = torch.cat([
            feat_points.reshape(N, L, -1),
            feat_distance.reshape(N, L, -1),
            feat_direction.reshape(N, L, -1),
        ], dim=-1)
        return feat_spatial

    def _spatial_aggregation(self, alpha, R, t, x):
        """Aggregate value points under attention ('vpoint' mode)."""
        N, L, _ = t.size()
        value_points = _heads(self.proj_value_point(x), self.num_heads*self.num_value_points, 3)  # (N, L, n_heads * n_v_pnts, 3)
        value_points = local_to_global(R, t, value_points.reshape(N, L, self.num_heads, self.num_value_points, 3))  # (N, L, n_heads, n_v_pnts, 3)
        aggr_points = alpha.reshape(N, L, L, self.num_heads, 1, 1) * value_points.unsqueeze(1)  # (N, *, L, n_heads, n_pnts, 3)
        aggr_points = aggr_points.sum(dim=2)        # (N, L, n_heads, n_pnts, 3)
        feat_points = global_to_local(R, t, aggr_points)  # (N, L, n_heads, n_pnts, 3)
        feat_distance = feat_points.norm(dim=-1)          # (N, L, n_heads, n_pnts)
        feat_direction = normalize_vector(feat_points, dim=-1, eps=1e-4)  # (N, L, n_heads, n_pnts, 3)
        feat_spatial = torch.cat([
            feat_points.reshape(N, L, -1),
            feat_distance.reshape(N, L, -1),
            feat_direction.reshape(N, L, -1),
        ], dim=-1)
        return feat_spatial

    def forward_beta(self, R, t, p_CB, x, z, mask):
        """
        Args:
            R: Frame basis matrices, (N, L, 3, 3_index).
            t: Frame external (absolute) coordinates, (N, L, 3).
            p_CB: CB coordinates, (N, L, 3).
            x: Node-wise features, (N, L, F).
            z: Pair-wise features, (N, L, L, C).
            mask: Masks, (N, L).
        Returns:
            x': Updated node-wise features, (N, L, F).
        """
        # Attention logits
        logits_node = self._node_logits(x)
        logits_pair = self._pair_logits(z)
        logits_spatial = self._beta_logits(R, t, p_CB)
        # Sum the three logit terms (scaled by 1/sqrt(3)) and softmax.
        logits_sum = logits_node + logits_pair + logits_spatial
        alpha = _alpha_from_logits(logits_sum * np.sqrt(1 / 3), mask)  # (N, L, L, n_heads)
        # Aggregate features
        feat_p2n = self._pair_aggregation(alpha, z)
        feat_node = self._node_aggregation(alpha, x)
        feat_spatial = self._beta_aggregation(alpha, R, t, p_CB, x)
        # Output projection, masking, residual + layer norm.
        feat_all = self.out_transform(torch.cat([feat_p2n, feat_node, feat_spatial], dim=-1))  # (N, L, F)
        feat_all = mask_zero(mask.unsqueeze(-1), feat_all)
        x_updated = self.layer_norm(x + feat_all)
        return x_updated

    def forward_vpoint(self, R, t, p_CB, x, z, mask):
        """Same contract as ``forward_beta`` but using virtual-point
        spatial attention instead of the CB-distance kernel."""
        # Attention logits
        logits_node = self._node_logits(x)
        logits_pair = self._pair_logits(z)
        logits_spatial = self._spatial_logits(R, t, x)
        # Sum the three logit terms (scaled by 1/sqrt(3)) and softmax.
        logits_sum = logits_node + logits_pair + logits_spatial
        alpha = _alpha_from_logits(logits_sum * np.sqrt(1 / 3), mask)  # (N, L, L, n_heads)
        # Aggregate features
        feat_p2n = self._pair_aggregation(alpha, z)
        feat_node = self._node_aggregation(alpha, x)
        feat_spatial = self._spatial_aggregation(alpha, R, t, x)
        # Output projection, masking, residual + layer norm.
        feat_all = self.out_transform(torch.cat([feat_p2n, feat_node, feat_spatial], dim=-1))  # (N, L, F)
        feat_all = mask_zero(mask.unsqueeze(-1), feat_all)
        x_updated = self.layer_norm(x + feat_all)
        return x_updated

    def forward(self, R, t, p_CB, x, z, mask):
        """Dispatch to the configured spatial attention mode."""
        if self.spatial_attn_mode == 'CB':
            return self.forward_beta(R, t, p_CB, x, z, mask)
        else:
            return self.forward_vpoint(R, t, p_CB, x, z, mask)
class GAEncoder(nn.Module):
    """Stack of GeometricAttention blocks applied sequentially; each
    block already contains its own residual connection."""

    def __init__(self, node_feat_dim, pair_feat_dim, num_layers, spatial_attn_mode='CB'):
        super().__init__()
        layers = [
            GeometricAttention(node_feat_dim, pair_feat_dim, spatial_attn_mode=spatial_attn_mode)
            for _ in range(num_layers)
        ]
        self.blocks = nn.ModuleList(layers)

    def forward(self, R, t, p_CB, x, z, mask):
        """Thread the node features through every attention block."""
        for layer in self.blocks:
            x = layer(R, t, p_CB, x, z, mask)
        return x
|
class PerResidueEncoder(nn.Module):
    """Embed each residue from its amino-acid type plus all of its atom
    coordinates expressed in the residue's own backbone frame."""
    def __init__(self, feat_dim):
        super().__init__()
        self.aatype_embed = nn.Embedding(21, feat_dim)
        # NOTE(review): torsion_embed is constructed but never used in
        # forward() below -- confirm whether it is dead code.
        self.torsion_embed = PositionalEncoding()
        self.mlp = nn.Sequential(
            nn.Linear(21*14*3 + feat_dim, feat_dim * 2), nn.ReLU(),
            nn.Linear(feat_dim * 2, feat_dim), nn.ReLU(),
            nn.Linear(feat_dim, feat_dim), nn.ReLU(),
            nn.Linear(feat_dim, feat_dim)
        )
    def forward(self, aa, pos14, atom_mask):
        """
        Args:
            aa: (N, L).
            pos14: (N, L, 14, 3).
            atom_mask: (N, L, 14).
        Returns:
            (N, L, feat_dim) residue features.
        """
        N, L = aa.size()
        # Local backbone frame: CA origin, axes from the C and N positions.
        R = construct_3d_basis(pos14[:, :, 1], pos14[:, :, 2], pos14[:, :, 0]) # (N, L, 3, 3)
        t = pos14[:, :, 1] # (N, L, 3)
        crd14 = global_to_local(R, t, pos14) # (N, L, 14, 3)
        # Zero out coordinates of missing atoms.
        crd14_mask = atom_mask[:, :, :, None].expand_as(crd14)
        crd14 = torch.where(crd14_mask, crd14, torch.zeros_like(crd14))
        # Scatter local coordinates into a one-hot-by-residue-type layout:
        # only the slice matching this residue's type is nonzero.
        aa_expand = aa[:, :, None, None, None].expand(N, L, 21, 14, 3)
        rng_expand = torch.arange(0, 21)[None, None, :, None, None].expand(N, L, 21, 14, 3).to(aa_expand)
        place_mask = (aa_expand == rng_expand)
        crd_expand = crd14[:, :, None, :, :].expand(N, L, 21, 14, 3)
        crd_expand = torch.where(place_mask, crd_expand, torch.zeros_like(crd_expand))
        crd_feat = crd_expand.reshape(N, L, 21 * 14 * 3)
        aa_feat = self.aatype_embed(aa) # (N, L, feat)
        out_feat = self.mlp(torch.cat([crd_feat, aa_feat], dim=-1))
        return out_feat
|
def get_pos_CB(pos14, atom_mask):
    """Per-residue CB coordinate, substituting CA wherever CB is absent
    (e.g. glycine).

    Args:
        pos14: (N, L, 14, 3)
        atom_mask: (N, L, 14)
    Returns:
        (N, L, 3)
    """
    N, L = pos14.shape[:2]
    has_CB = atom_mask[:, :, ATOM_CB][:, :, None].expand(N, L, 3)
    return torch.where(has_CB, pos14[:, :, ATOM_CB], pos14[:, :, ATOM_CA])
def mask_zero(mask, value):
    """Zero out *value* wherever the boolean *mask* is False."""
    zeros = torch.zeros_like(value)
    return torch.where(mask, value, zeros)
class PositionalEncoding(nn.Module):
    """NeRF-style sinusoidal feature expansion: each input coordinate x
    becomes [x, sin(x * 2^k), cos(x * 2^k)] for k = 0..num_funcs-1."""

    def __init__(self, num_funcs=6):
        super().__init__()
        self.num_funcs = num_funcs
        self.register_buffer('freq_bands', 2.0 ** torch.linspace(0.0, num_funcs-1, num_funcs))

    def get_out_dim(self, in_dim):
        """Encoded width for an input of width *in_dim*."""
        return in_dim * (2 * self.num_funcs + 1)

    def forward(self, x):
        """Encode the last dimension: (..., d) -> (..., d * (2f + 1))."""
        out_shape = list(x.shape[:-1]) + [-1]
        expanded = x.unsqueeze(-1)  # (..., d, 1)
        scaled = expanded * self.freq_bands
        encoded = torch.cat([expanded, torch.sin(scaled), torch.cos(scaled)], dim=-1)  # (..., d, 2f+1)
        return encoded.reshape(out_shape)
def safe_norm(x, dim=-1, keepdim=False, eps=1e-8, sqrt=True):
    """Norm (or squared norm when ``sqrt=False``) along *dim*, clamped
    from below by *eps* so the gradient at zero vectors stays finite."""
    sq = torch.square(x).sum(dim=dim, keepdim=keepdim)
    sq = sq.clamp(min=eps)
    if sqrt:
        return torch.sqrt(sq)
    return sq
def normalize_vector(v, dim, eps=1e-6):
    """Unit-normalize *v* along *dim*; *eps* guards against division by
    zero for (near-)zero vectors."""
    length = torch.linalg.norm(v, ord=2, dim=dim, keepdim=True)
    return v / (length + eps)
def project_v2v(v, e, dim):
    """Project vector *v* onto *e*, i.e. (v . e) e along *dim*.

    Args:
        v: (N, L, 3).
        e: (N, L, 3); expected to be unit length.
    """
    component = (e * v).sum(dim=dim, keepdim=True)
    return component * e
def construct_3d_basis(center, p1, p2):
    """Build per-residue orthonormal frames by Gram-Schmidt.

    Args:
        center: (N, L, 3), usually the position of C_alpha.
        p1: (N, L, 3), usually the position of C.
        p2: (N, L, 3), usually the position of N.
    Returns:
        (N, L, 3, 3) rotation matrices whose columns are [e1, e2, e3].
    """
    e1 = normalize_vector(p1 - center, dim=-1)
    v2 = p2 - center
    # Remove the e1 component of v2, then normalize (Gram-Schmidt).
    e2 = normalize_vector(v2 - project_v2v(v2, e1, dim=-1), dim=-1)
    # Right-handed third axis.
    e3 = torch.cross(e1, e2, dim=-1)
    return torch.stack([e1, e2, e3], dim=-1)
def local_to_global(R, t, p):
    """Map local (frame) coordinates to global ones: q = R p + t.

    Args:
        R: (N, L, 3, 3) rotations.
        t: (N, L, 3) translations.
        p: (N, L, ..., 3) local coordinates.
    Returns:
        (N, L, ..., 3) global coordinates, same shape as *p*.
    """
    assert p.size(-1) == 3
    original_shape = p.size()
    N, L = original_shape[0], original_shape[1]
    flat = p.view(N, L, -1, 3).transpose(-1, -2)       # (N, L, 3, *)
    moved = torch.matmul(R, flat) + t.unsqueeze(-1)    # (N, L, 3, *)
    return moved.transpose(-1, -2).reshape(original_shape)
def global_to_local(R, t, q):
    """Inverse of ``local_to_global``: p = R^T (q - t).

    When *t* is None the transform is rotation-only.

    Args:
        R: (N, L, 3, 3) rotations.
        t: (N, L, 3) translations, or None.
        q: (N, L, ..., 3) global coordinates.
    Returns:
        (N, L, ..., 3) local coordinates, same shape as *q*.
    """
    assert q.size(-1) == 3
    original_shape = q.size()
    N, L = original_shape[0], original_shape[1]
    flat = q.reshape(N, L, -1, 3).transpose(-1, -2)    # (N, L, 3, *)
    Rt = R.transpose(-1, -2)
    if t is None:
        local = torch.matmul(Rt, flat)
    else:
        local = torch.matmul(Rt, flat - t.unsqueeze(-1))
    return local.transpose(-1, -2).reshape(original_shape)
|
# Make the package root importable when this file is run as a script.
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
if __name__ == '__main__':
    # CLI: predict ddG for a wild-type / mutant PDB pair.
    parser = argparse.ArgumentParser()
    parser.add_argument('wt_pdb', type=str)
    parser.add_argument('mut_pdb', type=str)
    parser.add_argument('--model', type=str, default='./data/model.pt')
    parser.add_argument('--device', type=str, default='cuda')
    args = parser.parse_args()
    # Build a padded, KNN-cropped batch of size 1 from the two structures.
    batch = load_wt_mut_pdb_pair(args.wt_pdb, args.mut_pdb)
    batch = recursive_to(batch, args.device)
    # The checkpoint bundles both the config and the model weights.
    ckpt = torch.load(args.model)
    config = ckpt['config']
    weight = ckpt['model']
    model = DDGPredictor(config.model).to(args.device)
    model.load_state_dict(weight)
    with torch.no_grad():
        model.eval()
        pred = model(batch['wt'], batch['mut'])
        print('Predicted ddG: %.2f' % pred.item())
|
# Short public alias for AttentionOnAttention.
# NOTE(review): in this concatenated listing the alias precedes the class
# definition; executed top-to-bottom it would raise NameError -- in the
# original package it follows the definition.
AoA = AttentionOnAttention
|
def exists(val):
    """Return True when *val* holds a value (i.e. is not None)."""
    if val is None:
        return False
    return True
def default(val, d):
    """Return *val* unless it is None, in which case fall back to *d*."""
    if val is None:
        return d
    return val
class AttentionOnAttention(nn.Module):
    """Multi-head attention followed by an 'Attention on Attention'
    gate: the attended values are concatenated with the raw queries and
    passed through a GLU."""

    def __init__(
        self,
        *,
        dim,
        dim_head = 64,
        heads = 8,
        dropout = 0.,
        aoa_dropout = 0.
    ):
        super().__init__()
        inner_dim = dim_head * heads
        self.heads = heads
        self.scale = dim_head ** -0.5
        self.to_q = nn.Linear(dim, inner_dim, bias = False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
        self.dropout = nn.Dropout(dropout)
        self.aoa = nn.Sequential(
            nn.Linear(2 * inner_dim, 2 * dim),
            nn.GLU(),
            nn.Dropout(aoa_dropout)
        )

    def forward(self, x, context = None):
        """Self-attention over *x*, or cross-attention onto *context*."""
        heads = self.heads
        queries_flat = self.to_q(x)
        context = default(context, x)
        keys_flat, values_flat = self.to_kv(context).chunk(2, dim = -1)
        # fold the head dimension out of the channel dimension
        q, k, v = (rearrange(t, 'b n (h d) -> b h n d', h = heads) for t in (queries_flat, keys_flat, values_flat))
        # scaled dot-product similarity
        sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
        attn = self.dropout(sim.softmax(dim = -1))
        # weighted average of values, heads merged back
        attended = einsum('b h i j, b h j d -> b h i d', attn, v)
        attended = rearrange(attended, 'b h n d -> b n (h d)', h = heads)
        # attention-on-attention gate conditioned on the raw queries
        return self.aoa(torch.cat((attended, queries_flat), dim = -1))
|
# helpers
def exists(val):
    """Helper predicate: is *val* something other than None?"""
    return not (val is None)
def batched_index_select(values, indices):
    """Gather rows of *values* (b, n, d) at *indices* (b, k) -> (b, k, d)."""
    feature_dim = values.shape[-1]
    expanded = indices[:, :, None].expand(-1, -1, feature_dim)
    return values.gather(1, expanded)
# helper classes
class Residual(nn.Module):
    """Wrap *fn* with a skip connection: x -> fn(x) + x."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, **kwargs):
        out = self.fn(x, **kwargs)
        return out + x
class PreNorm(nn.Module):
    """Apply LayerNorm to the input before calling *fn* (pre-norm block)."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, **kwargs):
        normed = self.norm(x)
        return self.fn(normed, **kwargs)
class FeedForward(nn.Module):
    """Standard transformer MLP: expand by *mult*, GELU, dropout,
    project back to *dim*."""

    def __init__(self, dim, mult = 4, dropout = 0.):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, dim * mult),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(dim * mult, dim)
        )

    def forward(self, x, **kwargs):
        # extra kwargs are accepted (and ignored) so this slots into
        # generic wrappers alongside attention layers
        return self.net(x)
# adjacent attention class
class AdjacentAttention(nn.Module):
    """Sparse attention where each node attends only to its adjacent
    nodes (plus one learned null key/value slot)."""
    def __init__(
        self,
        *,
        dim,
        dim_head = 64,
        heads = 4,
        dropout = 0.
    ):
        super().__init__()
        inner_dim = dim_head * heads
        self.scale = dim_head ** -0.5
        self.heads = heads
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
        self.to_out = nn.Linear(inner_dim, dim)
        # learned "attend to nothing" key/value, shared across batch and nodes
        self.null_k = nn.Parameter(torch.randn(heads, dim_head))
        self.null_v = nn.Parameter(torch.randn(heads, dim_head))
        self.dropout = nn.Dropout(dropout)
    def forward(
        self,
        x,
        adj_kv_indices,
        mask
    ):
        """
        Args:
            x: (b, n, dim) node features.
            adj_kv_indices: (b, n, a) neighbor indices per node.
            mask: (b, n, a) validity of each neighbor slot.
        Returns:
            (b, n, dim) updated node features.
        """
        b, n, d, h = *x.shape, self.heads
        # flatten neighbor indices so a single batched gather serves all heads
        flat_indices = repeat(adj_kv_indices, 'b n a -> (b h) (n a)', h = h)
        # derive query, key, value
        q, k, v = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
        # gather keys and values according to adjacency matrix
        k, v = map(lambda t: rearrange(t, 'b h n d -> (b h) n d'), (k, v))
        k = batched_index_select(k, flat_indices)
        v = batched_index_select(v, flat_indices)
        k, v = map(lambda t: rearrange(t, '(b h) (n a) d -> b h n a d', h = h, n = n), (k, v))
        # add null key / value, so a node can attend to nothing
        # have come across this in GNN literature as some other name
        nk, nv = map(lambda t: rearrange(t, 'h d -> () h () () d').expand(b, -1, n, 1, -1), (self.null_k, self.null_v))
        k = torch.cat((nk, k), dim = -2)
        v = torch.cat((nv, v), dim = -2)
        # the null slot is always valid -> prepend a 1 to the mask
        mask = F.pad(mask, (1, 0), value = 1)
        # similarity of each node to its neighbors
        sim = einsum('b h n d, b h n a d -> b h n a', q, k) * self.scale
        # mask out neighbors that are just padding
        mask_value = -torch.finfo(sim.dtype).max
        mask = rearrange(mask.bool(), 'b n a -> b () n a')
        sim.masked_fill_(~mask.bool(), mask_value)
        # attention
        attn = sim.softmax(dim = -1)
        # dropout
        attn = self.dropout(attn)
        # get weighted average of the values of all neighbors
        out = einsum('b h n a, b h n a d -> b h n d', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        # combine output
        return self.to_out(out)
# adjacent network (layers of adjacent attention)
class AdjacentAttentionNetwork(nn.Module):
    """Stack of adjacent-attention + feedforward blocks, optionally with
    interleaved global attention (ISAB) over induced points."""
    def __init__(
        self,
        *,
        dim,
        depth,
        dim_head = 64,
        heads = 4,
        num_neighbors_cutoff = None,
        num_global_nodes = 0,
        attn_dropout = 0.,
        ff_dropout = 0.
    ):
        super().__init__()
        self.num_neighbors_cutoff = num_neighbors_cutoff
        self.layers = nn.ModuleList([])
        for _ in range(depth):
            global_attn = PreNorm(dim, ISAB(
                dim = dim,
                heads = heads,
                num_induced_points = num_global_nodes
            )) if num_global_nodes > 0 else None
            self.layers.append(nn.ModuleList([
                Residual(PreNorm(dim, AdjacentAttention(
                    dim = dim,
                    dim_head = dim_head,
                    heads = heads,
                    dropout = attn_dropout
                ))),
                global_attn,
                Residual(PreNorm(dim, FeedForward(
                    dim = dim,
                    dropout = ff_dropout
                )))
            ]))
    def forward(self, x, adjacency_mat, mask = None):
        """
        Args:
            x: (b, n, dim) node features.
            adjacency_mat: (b, n, n) boolean adjacency matrix.
            mask: optional (b, n) node validity mask.
        """
        device, n = x.device, x.shape[1]
        diag = torch.eye(adjacency_mat.shape[-1], device = device).bool()
        # NOTE(review): |= and &= below mutate the caller's adjacency_mat
        # tensor in place -- confirm callers do not reuse it afterwards.
        adjacency_mat |= diag # nodes should pay attention itself (self-interacting)
        # zero out points on adjacency matrix
        # where the nodes are just padding
        if exists(mask):
            adjacency_mat &= (mask[:, :, None] * mask[:, None, :])
        adj_mat = adjacency_mat.float()
        # if we don't set a hard limit to the number of neighbors:
        #   - get the maximum number of neighbors and pad the rest of the nodes with less than that number of neighbors
        # else:
        #   - randomly sample the cutoff number of neighbors for any node that exceeds the max
        #   - this would be similar to random sparse attention (bigbird)
        # get the maximum number of neighbors
        max_neighbors = int(adj_mat.sum(dim = -1).max())
        if exists(self.num_neighbors_cutoff) and max_neighbors > self.num_neighbors_cutoff:
            # to randomly sample the neighbors, add a small uniform noise to the mask and topk
            # NOTE(review): noise is (n, n) and broadcast over the batch, so
            # every sample shares the same noise draw -- confirm intent.
            noise = torch.empty((n, n), device = device).uniform_(-0.01, 0.01)
            adj_mat = adj_mat + noise
            adj_mask, adj_kv_indices = adj_mat.topk(dim = -1, k = self.num_neighbors_cutoff)
            # cast the mask back to 0s and 1s
            adj_mask = (adj_mask > 0.5).float()
        else:
            # todo - get distribution of number of neighbors, and strategically break up attention (message passing) to multiple steps
            #      - start with a bimodal num neighbors test case, then generalize
            # use topk to get all the neighbors
            # also pass the mask into the attention, as some neighbors will be just padding and not actually neighbors
            adj_mask, adj_kv_indices = adj_mat.topk(dim = -1, k = max_neighbors)
        for attn, global_attn, ff in self.layers:
            x = attn(
                x,
                adj_kv_indices = adj_kv_indices,
                mask = adj_mask
            )
            if exists(global_attn):
                out, _ = global_attn(x, mask = mask)
                x = x + out
            x = ff(x)
        return x
|
# silence transformers' info/warning output (e.g. notices about unused weights)
logging.set_verbosity_error()
def exists(val):
    """True when a value is present, i.e. not None."""
    return not (val is None)
def map_values(fn, dictionary):
    """Apply *fn* to every value of *dictionary*, preserving the keys."""
    return dict((key, fn(value)) for key, value in dictionary.items())
# when the env var is set (to any value), contextual embeddings are computed on CPU
CONTEXT_EMBED_USE_CPU = os.getenv('CONTEXT_EMBED_USE_CPU', None) is not None
if CONTEXT_EMBED_USE_CPU:
    print('calculating context embed only on cpu')

# registry of supported text encoders: hidden dimension + huggingface model path
MODELS = dict(
    pubmed = dict(
        dim = 768,
        path = 'microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract',
    )
)

# lazily-initialized model / tokenizer singletons, filled by init_transformer
GLOBAL_VARIABLES = dict(model = None, tokenizer = None)
def get_contextual_dim(model_name):
    """Return the hidden dimension of the named contextual text model."""
    assert model_name in MODELS
    config = MODELS[model_name]
    return config['dim']
@run_once('init_transformer')
def init_transformer(model_name):
    """Load the tokenizer and masked-LM for *model_name* into GLOBAL_VARIABLES.

    Guarded by @run_once so repeated calls do not re-download / re-load.
    The model is moved to GPU unless CONTEXT_EMBED_USE_CPU is set.
    """
    path = MODELS[model_name]['path']
    GLOBAL_VARIABLES['tokenizer'] = AutoTokenizer.from_pretrained(path)
    model = AutoModelForMaskedLM.from_pretrained(path)
    if not CONTEXT_EMBED_USE_CPU:
        model = model.cuda()
    GLOBAL_VARIABLES['model'] = model
@torch.no_grad()
def tokenize_text(
    text,
    max_length = 256,
    model_name = 'pubmed',
    hidden_state_index = -1,
    return_cls_token = True
):
    """Embed a single string with the pretrained masked-LM.

    Returns the [CLS] token embedding of the chosen hidden layer when
    `return_cls_token` is True, otherwise the mean over all token embeddings.
    Output is a 1-D tensor of the model's hidden dimension.
    """
    init_transformer(model_name)
    model = GLOBAL_VARIABLES['model']
    tokenizer = GLOBAL_VARIABLES['tokenizer']
    encoding = tokenizer.batch_encode_plus(
        [text],
        add_special_tokens = True,
        padding = True,
        truncation = True,
        max_length = max_length,
        return_attention_mask = True,
        return_tensors = 'pt'
    )
    if not CONTEXT_EMBED_USE_CPU:
        encoding = map_values(lambda t: t.cuda(), encoding)
    model.eval()
    # redundant with the @torch.no_grad() decorator above, but harmless
    with torch.no_grad():
        outputs = model(**encoding, output_hidden_states = True)
        # [0]: the single sequence in the batch -> (seq_len, dim)
        hidden_state = outputs.hidden_states[hidden_state_index][0]
        if return_cls_token:
            # position 0 is the [CLS] token
            return hidden_state[0]
        return hidden_state.mean(dim = 0)
def get_text_repr(
    texts,
    *,
    device,
    max_length = 256,
    model_name = 'pubmed',
    hidden_state_index = -1,
    return_cls_token = True,
):
    """Embed one or more strings, caching each embedding on disk, and return
    a stacked (num_texts, dim) tensor moved to *device*."""
    assert model_name in MODELS, f'{model_name} not found in available text transformers to use'
    if isinstance(texts, str):
        texts = [texts]
    embed_fn = cache_fn(tokenize_text, path = f'contexts/{model_name}')
    representations = []
    for text in texts:
        representations.append(embed_fn(
            text,
            max_length = max_length,
            model_name = model_name,
            hidden_state_index = hidden_state_index,
            return_cls_token = return_cls_token
        ))
    return torch.stack(representations).to(device)
|
# helpers functions
def exists(x):
    """True when *x* is not None."""
    return not (x is None)
def default(val, d):
    """Return *val* when it is not None, otherwise resolve the fallback *d*
    (calling it first if it is callable, so expensive defaults stay lazy)."""
    if val is None:
        return d() if callable(d) else d
    return val
def cycle(dl):
    """Yield items from *dl* forever, restarting it each time it is exhausted
    (unlike itertools.cycle, re-iterates the source so fresh shuffles apply)."""
    while True:
        yield from dl
def has_int_squareroot(num):
    """Return True when integer *num* is a perfect square.

    Uses math.isqrt instead of floating-point sqrt so the check stays exact
    for arbitrarily large integers — `(math.sqrt(num) ** 2) == num` silently
    loses precision for values beyond 2**53.
    """
    return math.isqrt(num) ** 2 == num
def num_to_groups(num, divisor):
    """Split *num* into as many full groups of size *divisor* as possible,
    plus a final smaller group holding any remainder."""
    full, remainder = divmod(num, divisor)
    groups = [divisor for _ in range(full)]
    if remainder:
        groups.append(remainder)
    return groups
def convert_image_to(img_type, image):
    """Convert a PIL image to mode *img_type*; return it untouched when the
    mode already matches (avoids an unnecessary copy)."""
    return image if image.mode == img_type else image.convert(img_type)
# small helper modules
class Residual(nn.Module):
    """Wrap a module with a skip connection: forward(x) = fn(x) + x."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, *args, **kwargs):
        out = self.fn(x, *args, **kwargs)
        return out + x
def Upsample(dim, dim_out = None):
    """Nearest-neighbor 2x upsample followed by a 3x3 conv mapping
    dim -> dim_out (or dim -> dim when dim_out is None)."""
    out_channels = dim if dim_out is None else dim_out
    return nn.Sequential(
        nn.Upsample(scale_factor = 2, mode = 'nearest'),
        nn.Conv2d(dim, out_channels, 3, padding = 1),
    )
def Downsample(dim, dim_out = None):
    """Strided 4x4 conv that halves spatial resolution, mapping
    dim -> dim_out (or dim -> dim when dim_out is None)."""
    out_channels = dim if dim_out is None else dim_out
    return nn.Conv2d(dim, out_channels, 4, 2, 1)
class LayerNorm(nn.Module):
    """Channel-wise layer norm for NCHW tensors with a learned per-channel gain
    (no bias)."""

    def __init__(self, dim):
        super().__init__()
        self.g = nn.Parameter(torch.ones(1, dim, 1, 1))

    def forward(self, x):
        # larger epsilon when running in reduced precision
        eps = 1e-5 if x.dtype == torch.float32 else 1e-3
        mean = x.mean(dim = 1, keepdim = True)
        var = x.var(dim = 1, unbiased = False, keepdim = True)
        normed = (x - mean) * torch.rsqrt(var + eps)
        return normed * self.g
class PreNorm(nn.Module):
    """Apply LayerNorm to the input before running the wrapped module."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = LayerNorm(dim)

    def forward(self, x):
        return self.fn(self.norm(x))
# positional embeds
class LearnedSinusoidalPosEmb(nn.Module):
    """Sinusoidal time embedding with learned frequencies.

    Output is (batch, dim + 1): the raw time value concatenated with
    sin / cos of the learned frequencies.
    """

    def __init__(self, dim):
        super().__init__()
        assert (dim % 2) == 0
        self.weights = nn.Parameter(torch.randn(dim // 2))

    def forward(self, x):
        # (b,) -> (b, 1); broadcast against (1, half_dim) frequencies
        x = x[:, None]
        freqs = x * self.weights[None, :] * 2 * math.pi
        return torch.cat((x, freqs.sin(), freqs.cos()), dim = -1)
# building block modules
class Block(nn.Module):
    """conv3x3 -> group norm -> optional FiLM-style (scale, shift) -> SiLU."""

    def __init__(self, dim, dim_out, groups = 8):
        super().__init__()
        self.proj = nn.Conv2d(dim, dim_out, 3, padding = 1)
        self.norm = nn.GroupNorm(groups, dim_out)
        self.act = nn.SiLU()

    def forward(self, x, scale_shift = None):
        h = self.norm(self.proj(x))
        if scale_shift is not None:
            scale, shift = scale_shift
            # scale + 1 keeps the conditioning a no-op at zero
            h = h * (scale + 1) + shift
        return self.act(h)
class ResnetBlock(nn.Module):
    """Two conv Blocks with optional FiLM time conditioning and a residual
    1x1 projection when the channel count changes."""

    def __init__(self, dim, dim_out, *, time_emb_dim = None, groups = 8):
        super().__init__()
        self.mlp = None
        if time_emb_dim is not None:
            # projects the time embedding to per-channel (scale, shift)
            self.mlp = nn.Sequential(
                nn.SiLU(),
                nn.Linear(time_emb_dim, dim_out * 2)
            )
        self.block1 = Block(dim, dim_out, groups = groups)
        self.block2 = Block(dim_out, dim_out, groups = groups)
        self.res_conv = nn.Identity() if dim == dim_out else nn.Conv2d(dim, dim_out, 1)

    def forward(self, x, time_emb = None):
        scale_shift = None
        if self.mlp is not None and time_emb is not None:
            cond = self.mlp(time_emb)[:, :, None, None]  # (b, 2c) -> (b, 2c, 1, 1)
            scale_shift = cond.chunk(2, dim = 1)
        h = self.block1(x, scale_shift = scale_shift)
        h = self.block2(h)
        return h + self.res_conv(x)
class LinearAttention(nn.Module):
    """Linear-complexity attention over spatial positions of an NCHW tensor.

    Softmax is taken over the feature dim for q and the sequence dim for k,
    so a (d x e) feature-feature context can be aggregated once and applied
    to q — cost is linear in the number of pixels.
    """
    def __init__(self, dim, heads = 4, dim_head = 32):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.heads = heads
        hidden_dim = dim_head * heads
        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
        self.to_out = nn.Sequential(
            nn.Conv2d(hidden_dim, dim, 1),
            LayerNorm(dim)
        )

    def forward(self, x):
        b, c, h, w = x.shape
        qkv = self.to_qkv(x).chunk(3, dim = 1)
        q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
        q = q.softmax(dim = -2)
        k = k.softmax(dim = -1)
        q = q * self.scale
        v = v / (h * w)  # pre-normalize values by the number of positions
        # (d x e) summary of k against v, independent of sequence length
        context = torch.einsum('b h d n, b h e n -> b h d e', k, v)
        out = torch.einsum('b h d e, b h d n -> b h e n', context, q)
        out = rearrange(out, 'b h c (x y) -> b (h c) x y', h = self.heads, x = h, y = w)
        return self.to_out(out)
class Attention(nn.Module):
    """Full softmax self-attention across all spatial positions of an NCHW
    tensor (quadratic in pixel count — used only at the low-res bottleneck).
    """
    def __init__(self, dim, heads = 4, dim_head = 32):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.heads = heads
        hidden_dim = dim_head * heads
        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
        self.to_out = nn.Conv2d(hidden_dim, dim, 1)

    def forward(self, x):
        b, c, h, w = x.shape
        qkv = self.to_qkv(x).chunk(3, dim = 1)
        q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
        q = q * self.scale
        # pairwise similarity between all positions i, j
        sim = einsum('b h d i, b h d j -> b h i j', q, k)
        attn = sim.softmax(dim = -1)
        out = einsum('b h i j, b h d j -> b h i d', attn, v)
        out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)
        return self.to_out(out)
# model
class Unet(nn.Module):
    """Diffusion U-Net with self-conditioning.

    forward() takes the noised image, the diffusion time, and an optional
    previous x0 estimate (self-conditioning) that is concatenated channel-wise
    to the input — hence input_channels = channels * 2.
    """
    def __init__(
        self,
        dim,
        init_dim = None,
        dim_mults=(1, 2, 4, 8),
        channels = 3,
        resnet_block_groups = 8,
        learned_sinusoidal_dim = 16
    ):
        super().__init__()
        # determine dimensions
        self.channels = channels
        input_channels = channels * 2  # image + self-conditioning channels
        init_dim = default(init_dim, dim)
        self.init_conv = nn.Conv2d(input_channels, init_dim, 7, padding = 3)
        dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
        in_out = list(zip(dims[:-1], dims[1:]))
        block_klass = partial(ResnetBlock, groups = resnet_block_groups)
        # time embeddings
        time_dim = dim * 4
        sinu_pos_emb = LearnedSinusoidalPosEmb(learned_sinusoidal_dim)
        fourier_dim = learned_sinusoidal_dim + 1  # +1: raw time value is concatenated
        self.time_mlp = nn.Sequential(
            sinu_pos_emb,
            nn.Linear(fourier_dim, time_dim),
            nn.GELU(),
            nn.Linear(time_dim, time_dim)
        )
        # layers
        self.downs = nn.ModuleList([])
        self.ups = nn.ModuleList([])
        num_resolutions = len(in_out)
        for ind, (dim_in, dim_out) in enumerate(in_out):
            is_last = ind >= (num_resolutions - 1)
            self.downs.append(nn.ModuleList([
                block_klass(dim_in, dim_in, time_emb_dim = time_dim),
                block_klass(dim_in, dim_in, time_emb_dim = time_dim),
                Residual(PreNorm(dim_in, LinearAttention(dim_in))),
                # final stage keeps resolution, swapping downsample for a 3x3 conv
                Downsample(dim_in, dim_out) if not is_last else nn.Conv2d(dim_in, dim_out, 3, padding = 1)
            ]))
        mid_dim = dims[-1]
        self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
        self.mid_attn = Residual(PreNorm(mid_dim, Attention(mid_dim)))
        self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
        for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
            is_last = ind == (len(in_out) - 1)
            self.ups.append(nn.ModuleList([
                # dim_out + dim_in: skip features are concatenated in forward()
                block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
                block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
                Residual(PreNorm(dim_out, LinearAttention(dim_out))),
                Upsample(dim_out, dim_in) if not is_last else nn.Conv2d(dim_out, dim_in, 3, padding = 1)
            ]))
        # dim * 2: final long skip from the post-init_conv features
        self.final_res_block = block_klass(dim * 2, dim, time_emb_dim = time_dim)
        self.final_conv = nn.Conv2d(dim, channels, 1)

    def forward(self, x, time, x_self_cond = None):
        """Predict x0 from noised image `x` at `time`, optionally conditioned
        on the previous x0 estimate `x_self_cond` (zeros when absent)."""
        x_self_cond = default(x_self_cond, lambda: torch.zeros_like(x))
        x = torch.cat((x_self_cond, x), dim = 1)
        x = self.init_conv(x)
        r = x.clone()  # saved for the final long skip connection
        t = self.time_mlp(time)
        h = []
        for block1, block2, attn, downsample in self.downs:
            x = block1(x, t)
            h.append(x)
            x = block2(x, t)
            x = attn(x)
            h.append(x)
            x = downsample(x)
        x = self.mid_block1(x, t)
        x = self.mid_attn(x)
        x = self.mid_block2(x, t)
        for block1, block2, attn, upsample in self.ups:
            # pop skips in LIFO order to mirror the down path
            x = torch.cat((x, h.pop()), dim = 1)
            x = block1(x, t)
            x = torch.cat((x, h.pop()), dim = 1)
            x = block2(x, t)
            x = attn(x)
            x = upsample(x)
        x = torch.cat((x, r), dim = 1)
        x = self.final_res_block(x, t)
        return self.final_conv(x)
# chroma class
def log(t, eps = 1e-20):
    """Numerically-safe log: clamps the input below at *eps* first."""
    return torch.clamp(t, min = eps).log()
def right_pad_dims_to(x, t):
    """Append singleton dims to *t* until it has as many dims as *x*,
    enabling broadcasting against *x*. Returns *t* unchanged if it already
    has enough dims."""
    missing = x.ndim - t.ndim
    if missing <= 0:
        return t
    return t.reshape(t.shape + (1,) * missing)
def beta_linear_log_snr(t):
    """log(SNR) of the linear-beta noise schedule at continuous time t."""
    beta_term = 1e-4 + 10 * (t * t)
    return -torch.log(expm1(beta_term))
def alpha_cosine_log_snr(t, s: float = 0.008):
    """log(SNR) of the cosine noise schedule at continuous time t."""
    # not sure if this accounts for beta being clipped to 0.999 in discrete version
    cos_term = torch.cos((t + s) / (1 + s) * math.pi * 0.5)
    return -log(cos_term ** -2 - 1, eps = 1e-5)
def log_snr_to_alpha_sigma(log_snr):
    """Convert log(SNR) into the (alpha, sigma) diffusion coefficients,
    satisfying alpha^2 + sigma^2 = 1."""
    alpha = torch.sigmoid(log_snr).sqrt()
    sigma = torch.sigmoid(-log_snr).sqrt()
    return alpha, sigma
class Chroma(nn.Module):
    """Continuous-time Gaussian diffusion wrapper around a denoising model.

    The wrapped model predicts x0 directly and accepts a previous x0
    estimate as a third argument (self-conditioning). Sampling is either
    ancestral (DDPM) or deterministic (DDIM).
    """
    def __init__(
        self,
        model,
        *,
        image_size,
        timesteps = 1000,
        use_ddim = False,
        noise_schedule = 'cosine',
        time_difference = 0.
    ):
        super().__init__()
        self.model = model
        self.channels = self.model.channels
        self.image_size = image_size
        # continuous-time log(SNR) schedule
        if noise_schedule == "linear":
            self.log_snr = beta_linear_log_snr
        elif noise_schedule == "cosine":
            self.log_snr = alpha_cosine_log_snr
        else:
            raise ValueError(f'invalid noise schedule {noise_schedule}')
        self.timesteps = timesteps
        self.use_ddim = use_ddim
        # proposed in the paper, summed to time_next
        # as a way to fix a deficiency in self-conditioning and lower FID when the number of sampling timesteps is < 400
        self.time_difference = time_difference

    @property
    def device(self):
        """Device the wrapped model's parameters live on."""
        return next(self.model.parameters()).device

    def get_sampling_timesteps(self, batch, *, device):
        """Return (time, time_next) tensor pairs descending from 1 to 0."""
        times = torch.linspace(1., 0., self.timesteps + 1, device = device)
        times = repeat(times, 't -> b t', b = batch)
        times = torch.stack((times[:, :-1], times[:, 1:]), dim = 0)
        times = times.unbind(dim = -1)
        return times

    @torch.no_grad()
    def ddpm_sample(self, shape, time_difference = None):
        """Ancestral DDPM sampling from pure noise to an image of *shape*."""
        batch, device = shape[0], self.device
        time_difference = default(time_difference, self.time_difference)
        time_pairs = self.get_sampling_timesteps(batch, device = device)
        img = torch.randn(shape, device=device)
        x_start = None  # running self-conditioning estimate
        for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step', total = self.timesteps):
            # add the time delay
            # NOTE(review): uses self.time_difference, ignoring the local
            # time_difference resolved above (ddim_sample uses the local) — confirm intended
            time_next = (time_next - self.time_difference).clamp(min = 0.)
            noise_cond = self.log_snr(time)
            # get predicted x0
            x_start = self.model(img, noise_cond, x_start)
            # clip x0
            x_start.clamp_(-1., 1.)
            # get log(snr)
            log_snr = self.log_snr(time)
            log_snr_next = self.log_snr(time_next)
            log_snr, log_snr_next = map(partial(right_pad_dims_to, img), (log_snr, log_snr_next))
            # get alpha sigma of time and next time
            alpha, sigma = log_snr_to_alpha_sigma(log_snr)
            alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)
            # derive posterior mean and variance
            c = -expm1(log_snr - log_snr_next)
            mean = alpha_next * (img * (1 - c) / alpha + c * x_start)
            variance = (sigma_next ** 2) * c
            log_variance = log(variance)
            # get noise — none on the final step (time_next == 0)
            noise = torch.where(
                rearrange(time_next > 0, 'b -> b 1 1 1'),
                torch.randn_like(img),
                torch.zeros_like(img)
            )
            img = mean + (0.5 * log_variance).exp() * noise
        return img

    @torch.no_grad()
    def ddim_sample(self, shape, time_difference = None):
        """Deterministic DDIM sampling from pure noise to an image of *shape*."""
        batch, device = shape[0], self.device
        time_difference = default(time_difference, self.time_difference)
        time_pairs = self.get_sampling_timesteps(batch, device = device)
        img = torch.randn(shape, device = device)
        x_start = None  # running self-conditioning estimate
        for times, times_next in tqdm(time_pairs, desc = 'sampling loop time step'):
            # get times and noise levels
            log_snr = self.log_snr(times)
            log_snr_next = self.log_snr(times_next)
            padded_log_snr, padded_log_snr_next = map(partial(right_pad_dims_to, img), (log_snr, log_snr_next))
            alpha, sigma = log_snr_to_alpha_sigma(padded_log_snr)
            alpha_next, sigma_next = log_snr_to_alpha_sigma(padded_log_snr_next)
            # add the time delay
            times_next = (times_next - time_difference).clamp(min = 0.)
            # predict x0
            x_start = self.model(img, log_snr, x_start)
            # clip x0
            x_start.clamp_(-1., 1.)
            # get predicted noise (clamp avoids division blow-up as sigma -> 0)
            pred_noise = (img - alpha * x_start) / sigma.clamp(min = 1e-8)
            # calculate x next
            img = x_start * alpha_next + pred_noise * sigma_next
        return img

    @torch.no_grad()
    def sample(self, batch_size = 16):
        """Generate a batch of images with the configured sampler."""
        image_size, channels = self.image_size, self.channels
        sample_fn = self.ddpm_sample if not self.use_ddim else self.ddim_sample
        return sample_fn((batch_size, channels, image_size, image_size))

    def forward(self, img, *args, **kwargs):
        """Training step: noise `img` at a random time and return the MSE
        between the model's x0 prediction and the clean image."""
        batch, c, h, w, device, img_size, = *img.shape, img.device, self.image_size
        assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
        # sample random times
        times = torch.zeros((batch,), device = device).float().uniform_(0, 1.)
        # noise sample
        noise = torch.randn_like(img)
        noise_level = self.log_snr(times)
        padded_noise_level = right_pad_dims_to(img, noise_level)
        alpha, sigma = log_snr_to_alpha_sigma(padded_noise_level)
        noised_img = alpha * img + sigma * noise
        # if doing self-conditioning, 50% of the time, predict x_start from current set of times
        # and condition with unet with that
        # this technique will slow down training by 25%, but seems to lower FID significantly
        self_cond = None
        if random() < 0.5:
            with torch.no_grad():
                self_cond = self.model(noised_img, noise_level).detach_()
        # predict and take gradient step
        pred = self.model(noised_img, noise_level, self_cond)
        return F.mse_loss(pred, img)
# trainer class
class Trainer(object):
    """Training loop for the diffusion model, built on HF Accelerate.

    Handles gradient accumulation, EMA tracking (main process only),
    periodic grid-sampling into `results_folder`, and checkpointing.

    NOTE(review): `EMA` here is constructed with `beta=`/`update_every=` and
    later read via `.ema_model` — this assumes the ema_pytorch package's EMA,
    not the EMA class defined elsewhere in this file; confirm the import.
    """
    def __init__(
        self,
        diffusion_model,
        folder,
        *,
        train_batch_size = 16,
        gradient_accumulate_every = 1,
        augment_horizontal_flip = True,
        train_lr = 1e-4,
        train_num_steps = 100000,
        ema_update_every = 10,
        ema_decay = 0.995,
        adam_betas = (0.9, 0.99),
        save_and_sample_every = 1000,
        num_samples = 25,
        results_folder = './results',
        amp = False,
        fp16 = False,
        split_batches = True,
        convert_image_to = None
    ):
        super().__init__()
        self.accelerator = Accelerator(
            split_batches = split_batches,
            mixed_precision = 'fp16' if fp16 else 'no'
        )
        self.accelerator.native_amp = amp
        self.model = diffusion_model
        # samples are tiled into a square grid when saved
        assert has_int_squareroot(num_samples), 'number of samples must have an integer square root'
        self.num_samples = num_samples
        self.save_and_sample_every = save_and_sample_every
        self.batch_size = train_batch_size
        self.gradient_accumulate_every = gradient_accumulate_every
        self.train_num_steps = train_num_steps
        self.image_size = diffusion_model.image_size
        # dataset and dataloader
        self.ds = Dataset(folder, self.image_size, augment_horizontal_flip = augment_horizontal_flip, convert_image_to = convert_image_to)
        dl = DataLoader(self.ds, batch_size = train_batch_size, shuffle = True, pin_memory = True, num_workers = cpu_count())
        dl = self.accelerator.prepare(dl)
        self.dl = cycle(dl)
        # optimizer
        self.opt = Adam(diffusion_model.parameters(), lr = train_lr, betas = adam_betas)
        # for logging results in a folder periodically (EMA lives on the main process only)
        if self.accelerator.is_main_process:
            self.ema = EMA(diffusion_model, beta = ema_decay, update_every = ema_update_every)
        self.results_folder = Path(results_folder)
        self.results_folder.mkdir(exist_ok = True)
        # step counter state
        self.step = 0
        # prepare model, dataloader, optimizer with accelerator
        self.model, self.opt = self.accelerator.prepare(self.model, self.opt)

    def save(self, milestone):
        """Checkpoint step/model/optimizer/EMA/scaler state (local main process only)."""
        if not self.accelerator.is_local_main_process:
            return
        data = {
            'step': self.step,
            'model': self.accelerator.get_state_dict(self.model),
            'opt': self.opt.state_dict(),
            'ema': self.ema.state_dict(),
            'scaler': self.accelerator.scaler.state_dict() if exists(self.accelerator.scaler) else None
        }
        torch.save(data, str(self.results_folder / f'model-{milestone}.pt'))

    def load(self, milestone):
        """Restore training state from a milestone checkpoint in results_folder."""
        data = torch.load(str(self.results_folder / f'model-{milestone}.pt'))
        model = self.accelerator.unwrap_model(self.model)
        model.load_state_dict(data['model'])
        self.step = data['step']
        self.opt.load_state_dict(data['opt'])
        self.ema.load_state_dict(data['ema'])
        if exists(self.accelerator.scaler) and exists(data['scaler']):
            self.accelerator.scaler.load_state_dict(data['scaler'])

    def train(self):
        """Main optimization loop with periodic EMA sampling and checkpointing."""
        accelerator = self.accelerator
        device = accelerator.device
        with tqdm(initial = self.step, total = self.train_num_steps, disable = not accelerator.is_main_process) as pbar:
            while self.step < self.train_num_steps:
                total_loss = 0.
                # gradient accumulation: loss is pre-divided so grads sum correctly
                for _ in range(self.gradient_accumulate_every):
                    data = next(self.dl).to(device)
                    with self.accelerator.autocast():
                        loss = self.model(data)
                        loss = loss / self.gradient_accumulate_every
                        total_loss += loss.item()
                    self.accelerator.backward(loss)
                pbar.set_description(f'loss: {total_loss:.4f}')
                accelerator.wait_for_everyone()
                self.opt.step()
                self.opt.zero_grad()
                accelerator.wait_for_everyone()
                if accelerator.is_main_process:
                    self.ema.to(device)
                    self.ema.update()
                    if self.step != 0 and self.step % self.save_and_sample_every == 0:
                        self.ema.ema_model.eval()
                        with torch.no_grad():
                            milestone = self.step // self.save_and_sample_every
                            # split num_samples into batches no larger than batch_size
                            batches = num_to_groups(self.num_samples, self.batch_size)
                            all_images_list = list(map(lambda n: self.ema.ema_model.sample(batch_size=n), batches))
                        all_images = torch.cat(all_images_list, dim = 0)
                        utils.save_image(all_images, str(self.results_folder / f'sample-{milestone}.png'), nrow = int(math.sqrt(self.num_samples)))
                        self.save(milestone)
                self.step += 1
                pbar.update(1)
        accelerator.print('training complete')
|
# --- experiment driver: run several Imagine optimizations back to back ---

# flag intended to be flipped by a signal handler to request early shutdown
terminate = False

def signal_handling(signum,frame):
    # NOTE(review): this handler is never registered via signal.signal() in
    # this chunk, so `terminate` is currently never set — confirm registration
    # happens elsewhere or that this is dead code
    global terminate
    terminate = True

num_attempts = 4
for attempt in range(num_attempts):
    # run one full text-to-image optimization; keep only the best frame
    dream = Imagine(
        text = "an armchair in the form of pikachu\\an armchair imitating pikachu\\abstract",
        text_min = "blur\\zoom",
        lr = 7e-2,
        image_size = 512,
        gradient_accumulate_every = 1,
        save_every = 50,
        epochs = 5,
        iterations = 50,
        save_progress = False,
        bilinear = False,
        open_folder = False,
        seed = None,
        torch_deterministic = False,
        max_classes = 20,
        class_temperature = 2.,
        save_date_time = False,
        save_best = True,
        experimental_resample = True,
        ema_decay = 0.99
    )
    dream()
    # preserve the best image of this attempt before the model is torn down
    shutil.copy(dream.textpath + ".best.png", f"{attempt}.png")
    try:
        # brief pauses around teardown before reclaiming GPU memory
        time.sleep(2)
        del dream
        time.sleep(2)
        torch.cuda.empty_cache()
    except Exception:
        torch.cuda.empty_cache()
# package version string
__version__ = '0.9.1'
|
"""Good differentiable image resampling for PyTorch."""
def sinc(x):
    """Normalized sinc: sin(pi*x) / (pi*x), with sinc(0) defined as 1."""
    pix = math.pi * x
    return torch.where(x != 0, torch.sin(pix) / pix, x.new_ones([]))
def lanczos(x, a):
    """Lanczos window of order *a* evaluated at *x*, normalized to sum to 1
    so it can be used directly as a low-pass filter kernel."""
    inside = torch.logical_and(-a < x, x < a)
    windowed = torch.where(inside, sinc(x) * sinc(x / a), x.new_zeros([]))
    return windowed / windowed.sum()
def ramp(ratio, width):
    """Symmetric sample positions spaced by *ratio*, spanning roughly
    [-width, width] with the endpoints trimmed off.

    The positions are accumulated iteratively (cur += ratio) rather than via
    arange * ratio, matching the original float rounding exactly.
    """
    n = math.ceil(width / ratio + 1)
    out = torch.empty([n])
    cur = 0
    for i in range(n):
        out[i] = cur
        cur += ratio
    negative = -out[1:].flip([0])
    return torch.cat([negative, out])[1:-1]
def odd(fn):
    """Extend *fn* to an odd function: sign(x) * fn(|x|). Metadata of *fn*
    is copied onto the wrapper via update_wrapper."""
    def wrapped(x):
        return torch.sign(x) * fn(abs(x))
    return update_wrapper(wrapped, fn)
def _to_linear_srgb(input):
cond = input <= 0.04045
a = input / 12.92
b = ((input + 0.055) / 1.055)**2.4
return torch.where(cond, a, b)
def _to_nonlinear_srgb(input):
cond = input <= 0.0031308
a = 12.92 * input
b = 1.055 * input**(1/2.4) - 0.055
return torch.where(cond, a, b)
# odd-extended sRGB transfer functions, well-defined for negative values too
to_linear_srgb = odd(_to_linear_srgb)
to_nonlinear_srgb = odd(_to_nonlinear_srgb)
def resample(input, size, align_corners=True, is_srgb=False):
    """Good differentiable resampling of an NCHW batch to `size` = (dh, dw).

    When an axis shrinks, a separable Lanczos-3 low-pass filter is applied
    along that axis first to suppress aliasing; the actual size change is
    then done with bicubic interpolation. When `is_srgb`, filtering happens
    in linear light and the result is re-encoded.
    """
    n, c, h, w = input.shape
    dh, dw = size
    if is_srgb:
        input = to_linear_srgb(input)
    # fold batch and channels together so conv2d filters each channel independently
    input = input.view([n * c, 1, h, w])
    if dh < h:
        # vertical anti-aliasing pre-filter (only when downsampling)
        kernel_h = lanczos(ramp(dh / h, 3), 3).to(input.device, input.dtype)
        pad_h = (kernel_h.shape[0] - 1) // 2
        input = F.pad(input, (0, 0, pad_h, pad_h), 'reflect')
        input = F.conv2d(input, kernel_h[None, None, :, None])
    if dw < w:
        # horizontal anti-aliasing pre-filter (only when downsampling)
        kernel_w = lanczos(ramp(dw / w, 3), 3).to(input.device, input.dtype)
        pad_w = (kernel_w.shape[0] - 1) // 2
        input = F.pad(input, (pad_w, pad_w, 0, 0), 'reflect')
        input = F.conv2d(input, kernel_w[None, None, None, :])
    # restore (n, c, h, w) layout before the final interpolation
    input = input.view([n, c, h, w])
    input = F.interpolate(input, size, mode='bicubic', align_corners=align_corners)
    if is_srgb:
        input = to_nonlinear_srgb(input)
    return input
|
# Exponential Moving Average (from https://gist.github.com/crowsonkb/76b94d5238272722290734bf4725d204)
"""Exponential moving average for PyTorch. Adapted from
https://www.zijianhu.com/post/pytorch/ema/ by crowsonkb
"""
class EMA(nn.Module):
    """Exponential moving average of a model's parameters.

    `_biased` accumulates decay-weighted parameters starting from zero;
    `average` holds the bias-corrected result (`_biased / (1 - accum)`,
    where accum = decay ** num_updates). Buffers (e.g. batchnorm stats)
    are copied verbatim, not averaged. In train mode forward() runs the
    live model; in eval mode it runs the averaged copy.
    """
    def __init__(self, model, decay):
        super().__init__()
        self.model = model
        self.decay = decay
        # accum = decay ** (number of updates); drives the bias correction
        self.register_buffer('accum', torch.tensor(1.))
        self._biased = deepcopy(self.model)
        self.average = deepcopy(self.model)
        # both copies start at zero so the first updates are purely model-driven
        for param in self._biased.parameters():
            param.detach_().zero_()
        for param in self.average.parameters():
            param.detach_().zero_()
        self.update()

    @torch.no_grad()
    def update(self):
        """Fold the current model parameters into the running average."""
        assert self.training, 'Update should only be called during training'
        self.accum *= self.decay
        model_params = dict(self.model.named_parameters())
        biased_params = dict(self._biased.named_parameters())
        average_params = dict(self.average.named_parameters())
        assert model_params.keys() == biased_params.keys() == average_params.keys(), f'Model parameter keys incompatible with EMA stored parameter keys'
        for name, param in model_params.items():
            # standard EMA step, then bias-corrected copy into `average`
            biased_params[name].mul_(self.decay)
            biased_params[name].add_((1 - self.decay) * param)
            average_params[name].copy_(biased_params[name])
            average_params[name].div_(1 - self.accum)
        model_buffers = dict(self.model.named_buffers())
        biased_buffers = dict(self._biased.named_buffers())
        average_buffers = dict(self.average.named_buffers())
        assert model_buffers.keys() == biased_buffers.keys() == average_buffers.keys()
        for name, buffer in model_buffers.items():
            # buffers are mirrored, not averaged
            biased_buffers[name].copy_(buffer)
            average_buffers[name].copy_(buffer)

    def forward(self, *args, **kwargs):
        # live model during training, averaged weights during evaluation
        if self.training:
            return self.model(*args, **kwargs)
        return self.average(*args, **kwargs)
|
# this code is a copy from huggingface
# with some minor modifications
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BIGGAN_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BIGGAN_CACHE',
Path.home() / '.pytorch_pretrained_biggan'))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BIGGAN_CACHE = os.getenv('PYTORCH_PRETRAINED_BIGGAN_CACHE',
os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_biggan'))
logger = logging.getLogger(__name__)  # pylint: disable=invalid-name

# download URLs for pretrained BigGAN weights, keyed by model name
PRETRAINED_MODEL_ARCHIVE_MAP = {
    'biggan-deep-128': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-128-pytorch_model.bin",
    'biggan-deep-256': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-256-pytorch_model.bin",
    'biggan-deep-512': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-512-pytorch_model.bin",
}
# matching configuration files for each pretrained model
PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'biggan-deep-128': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-128-config.json",
    'biggan-deep-256': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-256-config.json",
    'biggan-deep-512': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-512-config.json",
}
# canonical filenames used inside archives / the local cache
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.json'
def url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    """
    parts = [sha256(url.encode('utf-8')).hexdigest()]
    if etag:
        parts.append(sha256(etag.encode('utf-8')).hexdigest())
    return '.'.join(parts)
def filename_to_url(filename, cache_dir=None):
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + '.json'
    # the cached file is checked first, then its sidecar metadata
    for required in (cache_path, meta_path):
        if not os.path.exists(required):
            raise EnvironmentError("file {} not found".format(required))

    with open(meta_path, encoding="utf-8") as meta_file:
        metadata = json.load(meta_file)
    return metadata['url'], metadata['etag']
def cached_path(url_or_filename, cache_dir=None):
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE
    if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # URL: fetch through the cache (downloading if necessary)
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # local file that exists
        return url_or_filename
    if scheme == '':
        # looks like a local path, but nothing is there
        raise EnvironmentError("file {} not found".format(url_or_filename))
    # something unknown
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
    """Split a full s3 path into the bucket name and path."""
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    bucket, key = parsed.netloc, parsed.path
    # drop exactly one leading '/' from the object key
    if key.startswith("/"):
        key = key[1:]
    return bucket, key
def s3_request(func):
    """
    Wrapper function for s3 requests in order to create more helpful error
    messages.
    """
    @wraps(func)
    def wrapper(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            # surface a 404 as a friendlier "file not found"
            if int(exc.response["Error"]["Code"]) == 404:
                raise EnvironmentError("file {} not found".format(url))
            raise
    return wrapper
@s3_request
def s3_etag(url):
    """Check ETag on S3 object."""
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    # a HEAD-style metadata fetch; does not download the object body
    s3_object = s3_resource.Object(bucket_name, s3_path)
    return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
    """Pull a file directly from S3, streaming it into *temp_file*."""
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
    """Stream *url* over HTTP into *temp_file*, showing a byte progress bar."""
    req = requests.get(url, stream=True)
    content_length = req.headers.get('Content-Length')
    # total may be None when the server does not advertise a length
    total = int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", total=total)
    for chunk in req.iter_content(chunk_size=1024):
        if chunk: # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if response.status_code != 200:
            raise IOError("HEAD request failed for url {} with status code {}"
                          .format(url, response.status_code))
        etag = response.headers.get("ETag")
    # cache key combines the url hash and the etag hash, so new remote
    # versions get fresh cache entries
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            # sidecar .json stores the url/etag so filename_to_url can invert the hash
            logger.info("creating metadata file for %s", cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w', encoding="utf-8") as meta_file:
                json.dump(meta, meta_file)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path
def read_set_from_file(filename):
    '''
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line.
    '''
    with open(filename, 'r', encoding='utf-8') as file_:
        return {line.rstrip() for line in file_}
def get_file_extension(path, dot=True, lower=True):
    """Return the extension of *path*; optionally drop the dot / keep case."""
    _, ext = os.path.splitext(path)
    if not dot:
        ext = ext[1:]
    if lower:
        ext = ext.lower()
    return ext
class BigGANConfig(object):
    """ Configuration class to store the configuration of a `BigGAN`.
        Defaults are for the 128x128 model.
        layers tuple are (up-sample in the layer ?, input channels, output channels)
    """
    def __init__(self,
                 output_dim=128,
                 z_dim=128,
                 class_embed_dim=128,
                 channel_width=128,
                 num_classes=1000,
                 layers=None,
                 attention_layer_position=8,
                 eps=1e-4,
                 n_stats=51):
        """Constructs BigGANConfig.

        `layers` defaults to the 128x128 layout. The default is built fresh on
        every call (the original used a mutable list default argument, which is
        shared across instances and across calls).
        """
        if layers is None:
            layers = [(False, 16, 16),
                      (True, 16, 16),
                      (False, 16, 16),
                      (True, 16, 8),
                      (False, 8, 8),
                      (True, 8, 4),
                      (False, 4, 4),
                      (True, 4, 2),
                      (False, 2, 2),
                      (True, 2, 1)]
        self.output_dim = output_dim
        self.z_dim = z_dim
        self.class_embed_dim = class_embed_dim
        self.channel_width = channel_width
        self.num_classes = num_classes
        self.layers = layers
        self.attention_layer_position = attention_layer_position
        self.eps = eps
        self.n_stats = n_stats

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BigGANConfig` from a Python dictionary of parameters."""
        config = cls()  # use cls() so subclasses round-trip correctly
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BigGANConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def snconv2d(eps=1e-12, **kwargs):
    """Build a Conv2d wrapped in spectral normalization."""
    conv = nn.Conv2d(**kwargs)
    return nn.utils.spectral_norm(conv, eps=eps)
def snlinear(eps=1e-12, **kwargs):
    """Build a Linear layer wrapped in spectral normalization."""
    linear = nn.Linear(**kwargs)
    return nn.utils.spectral_norm(linear, eps=eps)
def sn_embedding(eps=1e-12, **kwargs):
    """Build an Embedding table wrapped in spectral normalization."""
    emb = nn.Embedding(**kwargs)
    return nn.utils.spectral_norm(emb, eps=eps)
class SelfAttn(nn.Module):
    """ Self attention Layer (SAGAN-style).

    Query (theta), key (phi) and value (g) are spectral-normalized 1x1 conv
    projections of the input. phi and g are 2x max-pooled, so attention is
    computed against a 4x smaller set of key/value positions. The attended
    features are projected back to `in_channels` and added to the input,
    gated by a learned `gamma` initialized to 0 (block starts as identity).
    """
    def __init__(self, in_channels, eps=1e-12):
        super(SelfAttn, self).__init__()
        self.in_channels = in_channels
        self.snconv1x1_theta = snconv2d(in_channels=in_channels, out_channels=in_channels//8,
                                        kernel_size=1, bias=False, eps=eps)
        self.snconv1x1_phi = snconv2d(in_channels=in_channels, out_channels=in_channels//8,
                                      kernel_size=1, bias=False, eps=eps)
        self.snconv1x1_g = snconv2d(in_channels=in_channels, out_channels=in_channels//2,
                                    kernel_size=1, bias=False, eps=eps)
        self.snconv1x1_o_conv = snconv2d(in_channels=in_channels//2, out_channels=in_channels,
                                         kernel_size=1, bias=False, eps=eps)
        self.maxpool = nn.MaxPool2d(2, stride=2, padding=0)
        self.softmax  = nn.Softmax(dim=-1)
        self.gamma = nn.Parameter(torch.zeros(1))
    def forward(self, x):
        _, ch, h, w = x.size()
        # Theta path: queries, (B, ch//8, h*w)
        theta = self.snconv1x1_theta(x)
        theta = theta.view(-1, ch//8, h*w)
        # Phi path: keys, pooled 2x -> (B, ch//8, h*w//4)
        # NOTE(review): the h*w//4 view assumes h and w are both even — holds
        # for BigGAN's power-of-two feature maps; confirm before reusing.
        phi = self.snconv1x1_phi(x)
        phi = self.maxpool(phi)
        phi = phi.view(-1, ch//8, h*w//4)
        # Attn map: (B, h*w, h*w//4), softmax over pooled key positions
        attn = torch.bmm(theta.permute(0, 2, 1), phi)
        attn = self.softmax(attn)
        # g path: values, pooled like phi -> (B, ch//2, h*w//4)
        g = self.snconv1x1_g(x)
        g = self.maxpool(g)
        g = g.view(-1, ch//2, h*w//4)
        # Attn_g - o_conv: weighted sum of values, projected back to ch channels
        attn_g = torch.bmm(g, attn.permute(0, 2, 1))
        attn_g = attn_g.view(-1, ch//2, h, w)
        attn_g = self.snconv1x1_o_conv(attn_g)
        # Out: gated residual connection
        out = x + self.gamma*attn_g
        return out
class BigGANBatchNorm(nn.Module):
    """ This is a batch norm module that can handle conditional input and can be provided with pre-computed
        activation means and variances for various truncation parameters.

        We cannot just rely on torch.batch_norm since it cannot handle
        batched weights (pytorch 1.0.1). We computate batch_norm our-self without updating running means and variances.
        If you want to train this model you should add running means and variance computation logic.
    """
    def __init__(self, num_features, condition_vector_dim=None, n_stats=51, eps=1e-4, conditional=True):
        super(BigGANBatchNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.conditional = conditional
        # We use pre-computed statistics for n_stats values of truncation between 0 and 1
        self.register_buffer('running_means', torch.zeros(n_stats, num_features))
        self.register_buffer('running_vars', torch.ones(n_stats, num_features))
        # spacing between consecutive precomputed truncation values
        self.step_size = 1.0 / (n_stats - 1)
        if conditional:
            assert condition_vector_dim is not None
            # conditional case: per-sample scale/offset predicted from the
            # condition vector via spectral-normalized linear layers
            self.scale = snlinear(in_features=condition_vector_dim, out_features=num_features, bias=False, eps=eps)
            self.offset = snlinear(in_features=condition_vector_dim, out_features=num_features, bias=False, eps=eps)
        else:
            # unconditional case: plain learned affine parameters
            self.weight = torch.nn.Parameter(torch.Tensor(num_features))
            self.bias = torch.nn.Parameter(torch.Tensor(num_features))
    def forward(self, x, truncation, condition_vector=None):
        # Retrieve pre-computed statistics associated to this truncation
        coef, start_idx = math.modf(truncation / self.step_size)
        start_idx = int(start_idx)
        if coef != 0.0:  # Interpolate between the two neighbouring stat rows
            # NOTE(review): weights look swapped vs. standard lerp (the nearer
            # row usually gets 1 - coef); this matches the upstream
            # pytorch-pretrained-BigGAN release, so it is kept unchanged for
            # checkpoint/output compatibility — confirm before "fixing".
            running_mean = self.running_means[start_idx] * coef + self.running_means[start_idx + 1] * (1 - coef)
            running_var = self.running_vars[start_idx] * coef + self.running_vars[start_idx + 1] * (1 - coef)
        else:
            running_mean = self.running_means[start_idx]
            running_var = self.running_vars[start_idx]
        if self.conditional:
            # reshape stats to (1, C, 1, 1) for broadcasting over NCHW input
            running_mean = running_mean.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
            running_var = running_var.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
            # scale is predicted as a delta around 1
            weight = 1 + self.scale(condition_vector).unsqueeze(-1).unsqueeze(-1)
            bias = self.offset(condition_vector).unsqueeze(-1).unsqueeze(-1)
            out = (x - running_mean) / torch.sqrt(running_var + self.eps) * weight + bias
        else:
            # inference-only batch norm: stats are never updated (momentum=0)
            out = F.batch_norm(x, running_mean, running_var, self.weight, self.bias,
                               training=False, momentum=0.0, eps=self.eps)
        return out
class GenBlock(nn.Module):
    """BigGAN-deep residual generator block.

    Bottleneck of four spectral-norm convs (1x1 -> 3x3 -> 3x3 -> 1x1), each
    preceded by conditional batch norm + ReLU. When `up_sample` is set, the
    activations are 2x nearest-upsampled mid-block (before conv_1) and the
    skip path is upsampled to match. When in_size != out_size the skip path
    keeps only the first half of its channels (BigGAN-deep's channel drop).
    """
    def __init__(self, in_size, out_size, condition_vector_dim, reduction_factor=4, up_sample=False,
                 n_stats=51, eps=1e-12):
        super(GenBlock, self).__init__()
        self.up_sample = up_sample
        self.drop_channels = (in_size != out_size)
        # bottleneck width
        middle_size = in_size // reduction_factor
        self.bn_0 = BigGANBatchNorm(in_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
        self.conv_0 = snconv2d(in_channels=in_size, out_channels=middle_size, kernel_size=1, eps=eps)
        self.bn_1 = BigGANBatchNorm(middle_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
        self.conv_1 = snconv2d(in_channels=middle_size, out_channels=middle_size, kernel_size=3, padding=1, eps=eps)
        self.bn_2 = BigGANBatchNorm(middle_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
        self.conv_2 = snconv2d(in_channels=middle_size, out_channels=middle_size, kernel_size=3, padding=1, eps=eps)
        self.bn_3 = BigGANBatchNorm(middle_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
        self.conv_3 = snconv2d(in_channels=middle_size, out_channels=out_size, kernel_size=1, eps=eps)
        self.relu = nn.ReLU()
    def forward(self, x, cond_vector, truncation):
        x0 = x  # keep the input for the residual/skip connection
        x = self.bn_0(x, truncation, cond_vector)
        x = self.relu(x)
        x = self.conv_0(x)
        x = self.bn_1(x, truncation, cond_vector)
        x = self.relu(x)
        if self.up_sample:
            # upsample mid-block so conv_1..conv_3 run at the new resolution
            x = F.interpolate(x, scale_factor=2, mode='nearest')
        x = self.conv_1(x)
        x = self.bn_2(x, truncation, cond_vector)
        x = self.relu(x)
        x = self.conv_2(x)
        x = self.bn_3(x, truncation, cond_vector)
        x = self.relu(x)
        x = self.conv_3(x)
        if self.drop_channels:
            # skip path keeps only the first half of the channels
            new_channels = x0.shape[1] // 2
            x0 = x0[:, :new_channels, ...]
        if self.up_sample:
            x0 = F.interpolate(x0, scale_factor=2, mode='nearest')
        out = x + x0
        return out
class Generator(nn.Module):
    """BigGAN-deep generator: a linear stem, a stack of GenBlocks with one
    SelfAttn at `attention_layer_position`, then batch norm + 3x3 conv to RGB
    with a tanh output in [-1, 1]."""
    def __init__(self, config):
        super(Generator, self).__init__()
        self.config = config
        ch = config.channel_width
        # condition vector = concat(z, class embedding), both z_dim wide
        condition_vector_dim = config.z_dim * 2
        # stem: project condition vector to a 4x4 map with 16*ch channels
        self.gen_z = snlinear(in_features=condition_vector_dim,
                              out_features=4 * 4 * 16 * ch, eps=config.eps)
        layers = []
        for i, layer in enumerate(config.layers):
            if i == config.attention_layer_position:
                layers.append(SelfAttn(ch*layer[1], eps=config.eps))
            # layer = (up_sample?, in channel multiplier, out channel multiplier)
            layers.append(GenBlock(ch*layer[1],
                                   ch*layer[2],
                                   condition_vector_dim,
                                   up_sample=layer[0],
                                   n_stats=config.n_stats,
                                   eps=config.eps))
        self.layers = nn.ModuleList(layers)
        self.bn = BigGANBatchNorm(ch, n_stats=config.n_stats, eps=config.eps, conditional=False)
        self.relu = nn.ReLU()
        self.conv_to_rgb = snconv2d(in_channels=ch, out_channels=ch, kernel_size=3, padding=1, eps=config.eps)
        self.tanh = nn.Tanh()
    def forward(self, cond_vector, truncation):
        # NOTE(review): cond_vector is indexed per layer (row 0 feeds the stem,
        # one row per GenBlock) — big-sleep's layer-wise latents, unlike stock
        # BigGAN's single shared vector; confirm the caller's contract.
        z = self.gen_z(cond_vector[0].unsqueeze(0))
        # We use this conversion step to be able to use TF weights:
        # TF convention on shape is [batch, height, width, channels]
        # PT convention on shape is [batch, channels, height, width]
        z = z.view(-1, 4, 4, 16 * self.config.channel_width)
        z = z.permute(0, 3, 1, 2).contiguous()
        next_available_latent_index = 1
        for layer in self.layers:
            if isinstance(layer, GenBlock):
                # each GenBlock consumes its own row of the condition matrix
                z = layer(z, cond_vector[next_available_latent_index].unsqueeze(0), truncation)
                next_available_latent_index += 1
            else:
                z = layer(z)
        z = self.bn(z, truncation)
        z = self.relu(z)
        z = self.conv_to_rgb(z)
        # conv_to_rgb keeps `ch` channels; take the first 3 as RGB
        z = z[:, :3, ...]
        z = self.tanh(z)
        return z
class BigGAN(nn.Module):
    """BigGAN Generator."""
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
        # Resolve either a known model name (via the archive maps) or a local
        # directory expected to contain the weights and config files.
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            model_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
            config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            model_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        try:
            # cached_path downloads (or reuses the cache) and returns local paths
            resolved_model_file = cached_path(model_file, cache_dir=cache_dir)
            resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error("Wrong model name, should be a valid path to a folder containing "
                         "a {} file and a {} file or a model name in {}".format(
                         WEIGHTS_NAME, CONFIG_NAME, PRETRAINED_MODEL_ARCHIVE_MAP.keys()))
            raise
        logger.info("loading model {} from cache at {}".format(pretrained_model_name_or_path, resolved_model_file))
        # Load config
        config = BigGANConfig.from_json_file(resolved_config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        state_dict = torch.load(resolved_model_file, map_location='cpu' if not torch.cuda.is_available() else None)
        # strict=False: tolerate checkpoint/module key mismatches (e.g. buffers)
        model.load_state_dict(state_dict, strict=False)
        return model
    def __init__(self, config):
        super(BigGAN, self).__init__()
        self.config = config
        # class "embedding" as a bias-free linear layer, so soft (non-one-hot)
        # class probability vectors can be fed in directly
        self.embeddings = nn.Linear(config.num_classes, config.z_dim, bias=False)
        self.generator = Generator(config)
    def forward(self, z, class_label, truncation):
        # truncation in (0, 1] selects/interpolates the precomputed BN stats
        assert 0 < truncation <= 1
        embed = self.embeddings(class_label)
        cond_vector = torch.cat((z, embed), dim=1)
        z = self.generator(cond_vector, truncation)
        return z
|
def train(
    text=None,
    img=None,
    text_min="",
    lr = .07,
    image_size = 512,
    gradient_accumulate_every = 1,
    epochs = 20,
    iterations = 1050,
    save_every = 50,
    overwrite = False,
    save_progress = False,
    save_date_time = False,
    bilinear = False,
    open_folder = True,
    seed = 0,
    append_seed = False,
    random = False,
    torch_deterministic = False,
    max_classes = None,
    class_temperature = 2.,
    save_best = False,
    experimental_resample = False,
    ema_decay = 0.5,
    num_cutouts = 128,
    center_bias = False,
    larger_model = False
):
    """CLI entry point: dream up an image matching a text and/or image prompt.

    Most arguments are forwarded verbatim to `Imagine`. CLI-only extras:
    `overwrite` skips the "already exists" prompt, `random` draws a random
    seed, and `larger_model` selects the ViT-L/14 CLIP perceptor.
    """
    print(f'Starting up... v{__version__}')

    if random:
        # NB: the `random` *parameter* shadows the stdlib module here, hence
        # the `rnd` alias. Bounds must be ints: random.randrange rejects
        # floats such as 1e6 on Python 3.12+.
        seed = rnd.randint(0, 1_000_000)

    imagine = Imagine(
        text=text,
        img=img,
        text_min=text_min,
        lr = lr,
        image_size = image_size,
        gradient_accumulate_every = gradient_accumulate_every,
        epochs = epochs,
        iterations = iterations,
        save_every = save_every,
        save_progress = save_progress,
        bilinear = bilinear,
        seed = seed,
        append_seed = append_seed,
        torch_deterministic = torch_deterministic,
        open_folder = open_folder,
        max_classes = max_classes,
        class_temperature = class_temperature,
        save_date_time = save_date_time,
        save_best = save_best,
        experimental_resample = experimental_resample,
        ema_decay = ema_decay,
        num_cutouts = num_cutouts,
        center_bias = center_bias,
        larger_clip = larger_model
    )

    # Refuse to clobber an existing result unless the user opts in.
    if not overwrite and imagine.filename.exists():
        answer = input('Imagined image already exists, do you want to overwrite? (y/n) ').lower()
        if answer not in ('yes', 'y'):
            exit()

    imagine()
def main():
    # CLI entry point: Fire exposes every `train` keyword as a command-line flag.
    fire.Fire(train)
|
# Big Sleep drives BigGAN + CLIP on GPU; fail fast when CUDA is missing.
assert torch.cuda.is_available(), 'CUDA must be available in order to use Big Sleep'
# graceful keyboard interrupt
terminate = False
def signal_handling(signum,frame):
    # SIGINT handler: flip the module-level flag; the training loops check it
    # every iteration and stop cleanly instead of dying mid-step.
    print('detecting keyboard interrupt, gracefully exiting')
    global terminate
    terminate = True
signal.signal(signal.SIGINT,signal_handling)
# helpers
def exists(val):
    """Return True when *val* is set, i.e. anything other than None."""
    return val is not None
def open_folder(path):
    """Best-effort: reveal *path* in the OS file browser.

    Silently does nothing when the path does not exist or the platform is
    unrecognized, and swallows launcher failures — opening the folder is a
    convenience, never a requirement.
    """
    if os.path.isfile(path):
        path = os.path.dirname(path)

    if not os.path.isdir(path):
        return

    cmd_list = None
    if sys.platform == 'darwin':
        cmd_list = ['open', '--', path]
    elif sys.platform == 'linux2' or sys.platform == 'linux':
        cmd_list = ['xdg-open', path]
    elif sys.platform in ['win32', 'win64']:
        cmd_list = ['explorer', path.replace('/', '\\')]

    if cmd_list is None:  # was `== None`; `is None` is the correct identity check
        return

    try:
        subprocess.check_call(cmd_list)
    except (subprocess.CalledProcessError, OSError):
        # merged duplicate handlers: any launcher failure is ignored on purpose
        pass
def create_text_path(text=None, img=None, encoding=None):
    """Build a filesystem-safe base name from the prompt text and/or image."""
    name = ""
    if text is not None:
        name += text
    if img is not None:
        if isinstance(img, str):
            stem = "".join(img.split(".")[:-1])  # drop the file extension
            stem = stem.split("/")[-1]           # drop any directory prefix
        else:
            stem = "PIL_img"
        name += "_" + stem
    if encoding is not None:
        # a raw encoding carries no readable name; use a fixed placeholder
        name = "your_encoding"
    sanitized = name.replace("-", "_").replace(",", "").replace(" ", "_").replace("|", "--")
    return sanitized.strip('-_')[:255]
# tensor helpers
def differentiable_topk(x, k, temperature=1.):
    """Soft, differentiable top-k over the last dim of a 2D tensor.

    Runs k sequential passes: each takes the softmax of x / temperature,
    scatters the single largest probability into a zero tensor, then masks
    that position with -inf so the next pass picks a different one. The k
    sparse selections are summed into one (n, dim) tensor.
    """
    n, dim = x.shape
    selections = []
    for step in range(k):
        probs = (x / temperature).softmax(dim=-1)
        values, indices = probs.topk(1, dim=-1)
        selections.append(torch.zeros_like(x).scatter_(-1, indices, values))
        if step != k - 1:
            # mask the chosen position so the next pass selects another one
            x = x.scatter(-1, indices, float('-inf'))
    stacked = torch.cat(selections, dim=-1)
    return stacked.reshape(n, k, dim).sum(dim=1)
def create_clip_img_transform(image_width):
    """CLIP preprocessing: resize + center-crop to `image_width`, tensorize,
    then normalize with CLIP's RGB statistics."""
    clip_mean = [0.48145466, 0.4578275, 0.40821073]
    clip_std = [0.26862954, 0.26130258, 0.27577711]
    steps = [
        T.Resize(image_width),
        T.CenterCrop((image_width, image_width)),
        T.ToTensor(),
        T.Normalize(mean=clip_mean, std=clip_std),
    ]
    return T.Compose(steps)
def rand_cutout(image, size, center_bias=False, center_focus=2):
    """Crop a random `size` x `size` patch from a square NCHW image tensor.

    With center_bias, offsets are drawn from a gaussian around the image
    center (tighter for larger center_focus), falling back to a uniform
    draw whenever the sample lands outside the valid offset range.
    """
    width = image.shape[-1]
    lo, hi = 0, width - size
    if center_bias:
        center = hi / 2
        sigma = center / center_focus
        off_a = int(random.gauss(mu=center, sigma=sigma))
        off_b = int(random.gauss(mu=center, sigma=sigma))
        # resample uniformly if the gaussian draw fell outside the image
        if off_a < lo or off_a > hi:
            off_a = random.randint(lo, hi)
        if off_b < lo or off_b > hi:
            off_b = random.randint(lo, hi)
    else:
        off_a = random.randint(lo, hi)
        off_b = random.randint(lo, hi)
    return image[:, :, off_a:off_a + size, off_b:off_b + size]
# load biggan
class Latents(torch.nn.Module):
    """Trainable BigGAN inputs: per-layer noise vectors plus class logits.

    normu: (num_latents, z_dim) gaussian noise, one row per generator layer.
    cls:   (num_latents, num_classes) logits initialized strongly negative
           (mean -3.9) so sigmoid starts near zero — almost no class active.
    """
    def __init__(
        self,
        num_latents = 15,
        num_classes = 1000,
        z_dim = 128,
        max_classes = None,
        class_temperature = 2.
    ):
        super().__init__()
        self.normu = torch.nn.Parameter(torch.zeros(num_latents, z_dim).normal_(std = 1))
        self.cls = torch.nn.Parameter(torch.zeros(num_latents, num_classes).normal_(mean = -3.9, std = .3))
        # non-trainable threshold used by BigSleep's latent loss
        self.register_buffer('thresh_lat', torch.tensor(1))
        assert not exists(max_classes) or max_classes > 0 and max_classes <= num_classes, f'max_classes must be between 0 and {num_classes}'
        self.max_classes = max_classes
        self.class_temperature = class_temperature
    def forward(self):
        if exists(self.max_classes):
            # differentiable hard cap on how many classes may be active
            classes = differentiable_topk(self.cls, self.max_classes, temperature = self.class_temperature)
        else:
            classes = torch.sigmoid(self.cls)
        # returns (noise latents, soft class probabilities)
        return self.normu, classes
class Model(nn.Module):
    """Pretrained BigGAN generator paired with EMA-tracked trainable latents.

    Only the latents are optimized; the BigGAN weights stay frozen (the
    generator is put in eval mode on every forward pass).
    """
    def __init__(
        self,
        image_size,
        max_classes = None,
        class_temperature = 2.,
        ema_decay = 0.99
    ):
        super().__init__()
        assert image_size in (128, 256, 512), 'image size must be one of 128, 256, or 512'
        self.biggan = BigGAN.from_pretrained(f'biggan-deep-{image_size}')
        self.max_classes = max_classes
        self.class_temperature = class_temperature
        # (original split this assignment over two lines with a stray "\")
        self.ema_decay = ema_decay

        self.init_latents()

    def init_latents(self):
        """(Re)create the latent parameters, wrapped in an EMA copy."""
        latents = Latents(
            num_latents = len(self.biggan.config.layers) + 1,
            num_classes = self.biggan.config.num_classes,
            z_dim = self.biggan.config.z_dim,
            max_classes = self.max_classes,
            class_temperature = self.class_temperature
        )
        self.latents = EMA(latents, self.ema_decay)

    def forward(self):
        self.biggan.eval()
        # latents() yields (noise, class probabilities); truncation fixed at 1
        out = self.biggan(*self.latents(), 1)
        # map BigGAN's tanh output from [-1, 1] into [0, 1]
        return (out + 1) / 2
class BigSleep(nn.Module):
    """Couples the BigGAN generator with a CLIP perceptor.

    forward() renders the current latents, takes random cutouts, embeds them
    with CLIP, and returns the image plus a (latent, class, similarity) loss
    triple used to optimize the latents toward the text/image prompts.
    """
    def __init__(
        self,
        num_cutouts = 128,
        loss_coef = 100,
        image_size = 512,
        bilinear = False,
        max_classes = None,
        class_temperature = 2.,
        experimental_resample = False,
        ema_decay = 0.99,
        center_bias = False,
        larger_clip = False
    ):
        super().__init__()
        self.loss_coef = loss_coef
        self.image_size = image_size
        self.num_cutouts = num_cutouts
        self.experimental_resample = experimental_resample
        self.center_bias = center_bias
        # settings forwarded to F.interpolate when resizing cutouts to 224x224
        self.interpolation_settings = {'mode': 'bilinear', 'align_corners': False} if bilinear else {'mode': 'nearest'}
        model_name = 'ViT-B/32' if not larger_clip else 'ViT-L/14'
        self.perceptor, self.normalize_image = load(model_name, jit = False)
        self.model = Model(
            image_size = image_size,
            max_classes = max_classes,
            class_temperature = class_temperature,
            ema_decay = ema_decay
        )
    def reset(self):
        # re-randomize the latents (BigGAN weights are untouched)
        self.model.init_latents()
    def sim_txt_to_img(self, text_embed, img_embed, text_type="max"):
        # negative cosine similarity for "max" prompts (minimize loss =
        # maximize similarity); positive sign pushes "min" prompts away
        sign = -1
        if text_type == "min":
            sign = 1
        return sign * self.loss_coef * torch.cosine_similarity(text_embed, img_embed, dim = -1).mean()
    def forward(self, text_embeds, text_min_embeds=[], return_loss = True):
        width, num_cutouts = self.image_size, self.num_cutouts
        out = self.model()
        if not return_loss:
            return out
        pieces = []
        for ch in range(num_cutouts):
            # sample cutout size: roughly 50%-95% of the full width
            size = int(width * torch.zeros(1,).normal_(mean=.8, std=.3).clip(.5, .95))
            # get cutout, resized to CLIP's 224x224 input resolution
            apper = rand_cutout(out, size, center_bias=self.center_bias)
            if (self.experimental_resample):
                apper = resample(apper, (224, 224))
            else:
                apper = F.interpolate(apper, (224, 224), **self.interpolation_settings)
            pieces.append(apper)
        into = torch.cat(pieces)
        into = self.normalize_image(into)
        image_embed = self.perceptor.encode_image(into)
        latents, soft_one_hot_classes = self.model.latents()
        num_latents = latents.shape[0]
        latent_thres = self.model.latents.model.thresh_lat
        # latent regularizer: keep each latent row near unit std / zero mean,
        # and penalize overall magnitude above the threshold
        lat_loss = torch.abs(1 - torch.std(latents, dim=1)).mean() + \
                   torch.abs(torch.mean(latents, dim = 1)).mean() + \
                   4 * torch.max(torch.square(latents).mean(), latent_thres)
        for array in latents:
            # additionally push each latent row toward gaussian shape by
            # penalizing skewness and excess kurtosis
            mean = torch.mean(array)
            diffs = array - mean
            var = torch.mean(torch.pow(diffs, 2.0))
            std = torch.pow(var, 0.5)
            zscores = diffs / std
            skews = torch.mean(torch.pow(zscores, 3.0))
            kurtoses = torch.mean(torch.pow(zscores, 4.0)) - 3.0
            lat_loss = lat_loss + torch.abs(kurtoses) / num_latents + torch.abs(skews) / num_latents
        # class sparsity: squash all but the top class probability per latent
        cls_loss = ((50 * torch.topk(soft_one_hot_classes, largest = False, dim = 1, k = 999)[0]) ** 2).mean()
        results = []
        for txt_embed in text_embeds:
            results.append(self.sim_txt_to_img(txt_embed, image_embed))
        for txt_min_embed in text_min_embeds:
            results.append(self.sim_txt_to_img(txt_min_embed, image_embed, "min"))
        sim_loss = sum(results).mean()
        return out, (lat_loss, cls_loss, sim_loss)
class Imagine(nn.Module):
    """High-level training driver: owns a BigSleep model, an Adam optimizer
    over its latents, prompt encoding, checkpoint/image saving, and the
    epoch/iteration loops (callable via forward()).
    """
    def __init__(
        self,
        *,
        text=None,
        img=None,
        encoding=None,
        text_min = "",
        lr = .07,
        image_size = 512,
        gradient_accumulate_every = 1,
        save_every = 50,
        epochs = 20,
        iterations = 1050,
        save_progress = False,
        bilinear = False,
        open_folder = True,
        seed = None,
        append_seed = False,
        torch_deterministic = False,
        max_classes = None,
        class_temperature = 2.,
        save_date_time = False,
        save_best = False,
        experimental_resample = False,
        ema_decay = 0.99,
        num_cutouts = 128,
        center_bias = False,
        larger_clip = False
    ):
        super().__init__()
        if torch_deterministic:
            assert not bilinear, 'the deterministic (seeded) operation does not work with interpolation (PyTorch 1.7.1)'
            torch.set_deterministic(True)
        self.seed = seed
        self.append_seed = append_seed
        if exists(seed):
            print(f'setting seed of {seed}')
            if seed == 0:
                print('you can override this with --seed argument in the command line, or --random for a randomly chosen one')
            torch.manual_seed(seed)
        self.epochs = epochs
        self.iterations = iterations
        model = BigSleep(
            image_size = image_size,
            bilinear = bilinear,
            max_classes = max_classes,
            class_temperature = class_temperature,
            experimental_resample = experimental_resample,
            ema_decay = ema_decay,
            num_cutouts = num_cutouts,
            center_bias = center_bias,
            larger_clip = larger_clip
        ).cuda()
        self.model = model
        self.lr = lr
        # optimize only the (non-EMA) latent parameters
        self.optimizer = Adam(model.model.latents.model.parameters(), lr)
        self.gradient_accumulate_every = gradient_accumulate_every
        self.save_every = save_every
        self.save_progress = save_progress
        self.save_date_time = save_date_time
        self.save_best = save_best
        self.current_best_score = 0
        self.open_folder = open_folder
        self.total_image_updates = (self.epochs * self.iterations) / self.save_every
        # encoded prompt embeddings: "max" are attracted, "min" repelled
        self.encoded_texts = {
            "max": [],
            "min": []
        }
        # create img transform
        self.clip_transform = create_clip_img_transform(224)
        # create starting encoding
        self.set_clip_encoding(text=text, img=img, encoding=encoding, text_min=text_min)
    @property
    def seed_suffix(self):
        # filename suffix like ".1234" when seed tagging is enabled
        return f'.{self.seed}' if self.append_seed and exists(self.seed) else ''
    def set_text(self, text):
        self.set_clip_encoding(text = text)
    def create_clip_encoding(self, text=None, img=None, encoding=None):
        # priority: explicit encoding > text+img average > text > img
        self.text = text
        self.img = img
        if encoding is not None:
            encoding = encoding.cuda()
        #elif self.create_story:
        #    encoding = self.update_story_encoding(epoch=0, iteration=1)
        elif text is not None and img is not None:
            encoding = (self.create_text_encoding(text) + self.create_img_encoding(img)) / 2
        elif text is not None:
            encoding = self.create_text_encoding(text)
        elif img is not None:
            encoding = self.create_img_encoding(img)
        return encoding
    def create_text_encoding(self, text):
        # encode a text prompt with the frozen CLIP text tower
        tokenized_text = tokenize(text).cuda()
        with torch.no_grad():
            text_encoding = self.model.perceptor.encode_text(tokenized_text).detach()
        return text_encoding
    def create_img_encoding(self, img):
        # encode an image prompt (path or PIL image) with the CLIP image tower
        if isinstance(img, str):
            img = Image.open(img)
        normed_img = self.clip_transform(img).unsqueeze(0).cuda()
        with torch.no_grad():
            img_encoding = self.model.perceptor.encode_image(normed_img).detach()
        return img_encoding
    def encode_multiple_phrases(self, text, img=None, encoding=None, text_type="max"):
        # "|"-separated prompts are encoded independently and all applied
        if text is not None and "|" in text:
            self.encoded_texts[text_type] = [self.create_clip_encoding(text=prompt_min, img=img, encoding=encoding) for prompt_min in text.split("|")]
        else:
            self.encoded_texts[text_type] = [self.create_clip_encoding(text=text, img=img, encoding=encoding)]
    def encode_max_and_min(self, text, img=None, encoding=None, text_min=""):
        self.encode_multiple_phrases(text, img=img, encoding=encoding)
        if text_min is not None and text_min != "":
            self.encode_multiple_phrases(text_min, img=img, encoding=encoding, text_type="min")
    def set_clip_encoding(self, text=None, img=None, encoding=None, text_min=""):
        # (re)target the run: rebuild prompt encodings and the output filename
        self.current_best_score = 0
        self.text = text
        self.text_min = text_min
        if len(text_min) > 0:
            text = text + "_wout_" + text_min[:255] if text is not None else "wout_" + text_min[:255]
        text_path = create_text_path(text=text, img=img, encoding=encoding)
        if self.save_date_time:
            text_path = datetime.now().strftime("%y%m%d-%H%M%S-") + text_path
        self.text_path = text_path
        self.filename = Path(f'./{text_path}{self.seed_suffix}.png')
        self.encode_max_and_min(text, img=img, encoding=encoding, text_min=text_min) # Tokenize and encode each prompt
    def reset(self):
        self.model.reset()
        self.model = self.model.cuda()
        # NOTE(review): __init__ optimizes `latents.model.parameters()` but
        # this uses `latents.parameters()` (the EMA wrapper) — possibly
        # inconsistent; confirm against the EMA implementation before changing.
        self.optimizer = Adam(self.model.model.latents.parameters(), self.lr)
    def train_step(self, epoch, i, pbar=None):
        # one optimizer step with gradient accumulation; periodically renders
        # and saves the current image (and optionally the best-scoring one)
        total_loss = 0
        for _ in range(self.gradient_accumulate_every):
            out, losses = self.model(self.encoded_texts["max"], self.encoded_texts["min"])
            loss = sum(losses) / self.gradient_accumulate_every
            total_loss += loss
            loss.backward()
        self.optimizer.step()
        # advance the EMA copy of the latents after each optimizer step
        self.model.model.latents.update()
        self.optimizer.zero_grad()
        if (i + 1) % self.save_every == 0:
            with torch.no_grad():
                self.model.model.latents.eval()
                out, losses = self.model(self.encoded_texts["max"], self.encoded_texts["min"])
                top_score, best = torch.topk(losses[2], k=1, largest=False)
                image = self.model.model()[best].cpu()
                self.model.model.latents.train()
                save_image(image, str(self.filename))
                if pbar is not None:
                    pbar.update(1)
                else:
                    print(f'image updated at "./{str(self.filename)}"')
                if self.save_progress:
                    total_iterations = epoch * self.iterations + i
                    num = total_iterations // self.save_every
                    save_image(image, Path(f'./{self.text_path}.{num}{self.seed_suffix}.png'))
                if self.save_best and top_score.item() < self.current_best_score:
                    self.current_best_score = top_score.item()
                    save_image(image, Path(f'./{self.text_path}{self.seed_suffix}.best.png'))
        return out, total_loss
    def forward(self):
        penalizing = ""
        if len(self.text_min) > 0:
            penalizing = f'penalizing "{self.text_min}"'
        print(f'Imagining "{self.text_path}" {penalizing}...')
        with torch.no_grad():
            self.model(self.encoded_texts["max"][0]) # one warmup step due to issue with CLIP and CUDA
        if self.open_folder:
            open_folder('./')
            self.open_folder = False
        image_pbar = tqdm(total=self.total_image_updates, desc='image update', position=2, leave=True)
        epoch_pbar = trange(self.epochs, desc = '      epochs', position=0, leave=True)
        # both loops honour the SIGINT-driven module-level `terminate` flag
        for epoch in (ep for ep in epoch_pbar if not terminate):
            pbar = trange(self.iterations, desc='   iteration', position=1, leave=True)
            image_pbar.update(0)
            for i in (it for it in pbar if not terminate):
                out, loss = self.train_step(epoch, i, image_pbar)
                pbar.set_description(f'loss: {loss.item():04.2f}')
|
# Public CLIP checkpoints. The second-to-last URL path component is the file's
# SHA-256 digest, which _download() verifies after fetching.
_MODELS = {
    "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
    "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
    "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
    "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
    "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"
}
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _transform():
    # Unlike stock CLIP preprocessing there is no Resize/CenterCrop/ToTensor
    # here: big-sleep feeds already-sized image tensors, so only the CLIP
    # RGB normalization is applied.
    return Compose([
        Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
    ])
def available_models() -> List[str]:
    """Returns the names of available CLIP models"""
    # unpack preserves the registry's insertion order, same as list(keys())
    return [*_MODELS]
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True):
    """Load a CLIP model
    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
    device : Union[str, torch.device]
        The device to put the loaded model
    jit : bool
        Whether to load the optimized JIT model (default) or more hackable non-JIT model.
    Returns
    -------
    model : torch.nn.Module
        The CLIP model
    preprocess : Callable[[PIL.Image], torch.Tensor]
        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
    """
    if name in _MODELS:
        model_path = _download(_MODELS[name])
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
    try:
        # loading JIT archive
        model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
        state_dict = None
    except RuntimeError:
        # loading saved state dict
        if jit:
            warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
            jit = False
        state_dict = torch.load(model_path, map_location="cpu")
    if not jit:
        # non-JIT path: rebuild the python model from the checkpoint weights
        model = build_model(state_dict or model.state_dict()).to(device)
        if str(device) == "cpu":
            model.float()
        return model, _transform()
    # patch the device names
    # Trace a tiny graph whose only Device constant is the target device, then
    # copy that constant over every hard-coded "cuda" constant in the model.
    device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
    device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
    def patch_device(module):
        graphs = [module.graph] if hasattr(module, "graph") else []
        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)
        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
                    node.copyAttributes(device_node)
    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)
    # patch dtype to float32 on CPU
    if str(device) == "cpu":
        # same trick: grab a float32 dtype constant from a traced stub and
        # overwrite half-precision (value == 5) dtype args of aten::to calls
        float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()
        def patch_float(module):
            graphs = [module.graph] if hasattr(module, "graph") else []
            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)
            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [1, 2]: # dtype can be the second or third argument to aten::to()
                        if inputs[i].node()["value"] == 5:
                            inputs[i].node().copyAttributes(float_node)
        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)
        model.float()
    return model, _transform()
def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
    """
    Returns the tokenized representation of given input string(s)
    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize
    context_length : int
        The context length to use; all CLIP models use 77 as the context length
    Returns
    -------
    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
    """
    if isinstance(texts, str):
        texts = [texts]
    # every sequence is wrapped in start/end-of-text markers
    sot_token = _tokenizer.encoder["<|startoftext|>"]
    eot_token = _tokenizer.encoder["<|endoftext|>"]
    all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
    # zero-padded up to context_length; zeros act as padding tokens
    result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
    for i, tokens in enumerate(all_tokens):
        if len(tokens) > context_length:
            raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
        result[i, :len(tokens)] = torch.tensor(tokens)
    return result
class Bottleneck(nn.Module):
    """Anti-aliased ResNet bottleneck used by CLIP's modified ResNet.

    Every conv has stride 1; when stride > 1 the spatial downsampling is done
    by an average pool placed after the second conv (and in front of the
    shortcut projection), instead of a strided convolution.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

        needs_projection = stride > 1 or inplanes != planes * Bottleneck.expansion
        if needs_projection:
            # shortcut: avgpool (holds the stride) then a 1x1 projection conv
            self.downsample = nn.Sequential(OrderedDict([
                ("-1", nn.AvgPool2d(stride)),
                ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
                ("1", nn.BatchNorm2d(planes * self.expansion))
            ]))
        else:
            self.downsample = None

    def forward(self, x: torch.Tensor):
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(self.avgpool(y)))
        y += shortcut
        return self.relu(y)
class AttentionPool2d(nn.Module):
    """Pool a spatial feature map via multi-head attention.

    The mean over all spatial positions is prepended as a query token; the
    attended output for that token is returned, optionally projected to
    `output_dim`.
    """

    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        batch, channels = x.shape[0], x.shape[1]

        # NCHW -> (HW)NC
        tokens = x.reshape(batch, channels, -1).permute(2, 0, 1)
        # prepend the mean over positions: (HW+1)NC
        tokens = torch.cat([tokens.mean(dim=0, keepdim=True), tokens], dim=0)
        tokens = tokens + self.positional_embedding[:, None, :].to(tokens.dtype)

        attended, _ = F.multi_head_attention_forward(
            query=tokens, key=tokens, value=tokens,
            embed_dim_to_check=tokens.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )

        # keep only the pooled (mean-query) token
        return attended[0]
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """

    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution

        # the 3-layer stem
        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.avgpool = nn.AvgPool2d(2)
        self.relu = nn.ReLU(inplace=True)

        # residual stages; _inplanes mutates as each stage is constructed
        self._inplanes = width
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)

        embed_dim = width * 32  # the ResNet feature dimension
        self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)

    def _make_layer(self, planes, blocks, stride=1):
        # first block carries the stride; the rest are stride-1
        stage = [Bottleneck(self._inplanes, planes, stride)]
        self._inplanes = planes * Bottleneck.expansion
        stage.extend(Bottleneck(self._inplanes, planes) for _ in range(blocks - 1))
        return nn.Sequential(*stage)

    def _stem(self, x):
        # three conv-bn-relu stages followed by a 2x2 average pool
        for conv, bn in ((self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)):
            x = self.relu(bn(conv(x)))
        return self.avgpool(x)

    def forward(self, x):
        x = self._stem(x.type(self.conv1.weight.dtype))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        return self.attnpool(x)
class LayerNorm(nn.LayerNorm):
    """LayerNorm that normalizes in fp32 and casts back, keeping fp16 inputs stable."""

    def forward(self, x: torch.Tensor):
        original_dtype = x.dtype
        normalized = super().forward(x.type(torch.float32))
        return normalized.type(original_dtype)
class QuickGELU(nn.Module):
    """Fast sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""

    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return gate * x
class ResidualAttentionBlock(nn.Module):
    """Pre-LN transformer block: self-attention and a 4x-wide QuickGELU MLP,
    each wrapped in a residual connection."""

    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask

    def attention(self, x: torch.Tensor):
        mask = self.attn_mask
        if mask is not None:
            # keep the cached mask on the input's dtype/device, as the original does
            mask = mask.to(dtype=x.dtype, device=x.device)
            self.attn_mask = mask
        return self.attn(x, x, x, need_weights=False, attn_mask=mask)[0]

    def forward(self, x: torch.Tensor):
        x = x + self.attention(self.ln_1(x))
        return x + self.mlp(self.ln_2(x))
class Transformer(nn.Module):
    """A stack of `layers` ResidualAttentionBlocks sharing a single attention mask."""

    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.width = width
        self.layers = layers
        blocks = [ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]
        self.resblocks = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor):
        return self.resblocks(x)
class VisualTransformer(nn.Module):
    """ViT image encoder: patchify with a strided conv, prepend a class token,
    run a transformer, and project the class token to `output_dim`."""

    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)

        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)

        self.transformer = Transformer(width, layers, heads)

        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))

    def forward(self, x: torch.Tensor):
        patches = self.conv1(x)  # [*, width, grid, grid]
        patches = patches.reshape(patches.shape[0], patches.shape[1], -1)  # [*, width, grid ** 2]
        patches = patches.permute(0, 2, 1)  # [*, grid ** 2, width]

        # broadcast the class embedding across the batch and prepend it
        cls = self.class_embedding.to(patches.dtype) + torch.zeros(
            patches.shape[0], 1, patches.shape[-1], dtype=patches.dtype, device=patches.device)
        tokens = torch.cat([cls, patches], dim=1)  # [*, grid ** 2 + 1, width]
        tokens = tokens + self.positional_embedding.to(tokens.dtype)

        tokens = self.ln_pre(tokens)
        tokens = tokens.permute(1, 0, 2)  # NLD -> LND
        tokens = self.transformer(tokens)
        tokens = tokens.permute(1, 0, 2)  # LND -> NLD

        pooled = self.ln_post(tokens[:, 0, :])
        if self.proj is not None:
            pooled = pooled @ self.proj
        return pooled
class CLIP(nn.Module):
    """Contrastive Language-Image Pre-training model.

    Pairs an image encoder (ModifiedResNet when `vision_layers` is a
    tuple/list, otherwise a VisualTransformer) with a causal text
    transformer; both encoders project into a shared `embed_dim` space and
    forward() returns scaled cosine-similarity logits.
    """

    def __init__(self,
                 embed_dim: int,
                 # vision
                 image_resolution: int,
                 vision_layers: Union[Tuple[int, int, int, int], int],
                 vision_width: int,
                 vision_patch_size: int,
                 # text
                 context_length: int,
                 vocab_size: int,
                 transformer_width: int,
                 transformer_heads: int,
                 transformer_layers: int
                 ):
        super().__init__()

        self.context_length = context_length

        # a tuple/list of per-stage block counts selects the ResNet backbone;
        # a plain int selects the ViT backbone
        if isinstance(vision_layers, (tuple, list)):
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width
            )
        else:
            vision_heads = vision_width // 64
            self.visual = VisualTransformer(
                input_resolution=image_resolution,
                patch_size=vision_patch_size,
                width=vision_width,
                layers=vision_layers,
                heads=vision_heads,
                output_dim=embed_dim
            )

        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask()
        )

        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
        self.ln_final = LayerNorm(transformer_width)

        self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        # learnable temperature applied to the contrastive logits in forward()
        self.logit_scale = nn.Parameter(torch.ones([]))

        self.initialize_parameters()

    def initialize_parameters(self):
        """Initialize embeddings, attention-pool and transformer weights in place."""
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)

        if isinstance(self.visual, ModifiedResNet):
            if self.visual.attnpool is not None:
                std = self.visual.attnpool.c_proj.in_features ** -0.5
                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)

            # zero the final BN gamma of every bottleneck so each residual
            # block starts out close to the identity mapping
            for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
                for name, param in resnet_block.named_parameters():
                    if name.endswith("bn3.weight"):
                        nn.init.zeros_(param)

        # depth-scaled init: projection stds shrink with the number of layers
        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
        attn_std = self.transformer.width ** -0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)

        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)

    def build_attention_mask(self):
        """Return an additive causal mask (-inf above the diagonal) for the text transformer."""
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    @property
    def dtype(self):
        # dtype of the visual stem's first conv weight; used to cast inputs consistently
        return self.visual.conv1.weight.dtype

    def encode_image(self, image):
        """Embed a batch of images; returns [batch, embed_dim]."""
        return self.visual(image.type(self.dtype))

    def encode_text(self, text):
        """Embed a batch of token-id sequences; returns [batch, embed_dim]."""
        x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]

        x = x + self.positional_embedding.type(self.dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x).type(self.dtype)

        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection

        return x

    def forward(self, image, text):
        """Return (logits_per_image, logits_per_text) cosine-similarity logits."""
        image_features = self.encode_image(image)
        text_features = self.encode_text(text)

        # normalized features
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logit_scale * text_features @ image_features.t()

        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
    """Cast the applicable parameters of `model` to fp16, in place."""

    def _to_half(layer):
        # conv / linear weights and biases
        if isinstance(layer, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            layer.weight.data = layer.weight.data.half()
            if layer.bias is not None:
                layer.bias.data = layer.bias.data.half()

        # multi-head attention projection tensors (some may be absent/None)
        if isinstance(layer, nn.MultiheadAttention):
            attention_tensors = ["in_proj_weight", "q_proj_weight", "k_proj_weight", "v_proj_weight",
                                 "in_proj_bias", "bias_k", "bias_v"]
            for attr_name in attention_tensors:
                tensor = getattr(layer, attr_name)
                if tensor is not None:
                    tensor.data = tensor.data.half()

        # bare projection parameters carried by CLIP modules
        for attr_name in ("text_projection", "proj"):
            if hasattr(layer, attr_name):
                param = getattr(layer, attr_name)
                if param is not None:
                    param.data = param.data.half()

    model.apply(_to_half)
def build_model(state_dict: dict):
    """Construct a CLIP model whose architecture is inferred from `state_dict`,
    convert its weights to fp16, load the state dict, and return it in eval mode."""
    is_vit = "visual.proj" in state_dict

    if is_vit:
        # ViT backbone: count transformer blocks and read patch/grid geometry
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        block_keys = [k for k in state_dict.keys()
                      if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]
        vision_layers = len(block_keys)
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        image_resolution = vision_patch_size * grid_size
    else:
        # ResNet backbone: number of blocks per stage from the key indices present
        counts: list = [
            len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}")))
            for b in [1, 2, 3, 4]
        ]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
        vision_patch_size = None
        assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
        image_resolution = output_width * 32

    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))

    model = CLIP(
        embed_dim,
        image_resolution, vision_layers, vision_width, vision_patch_size,
        context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
    )

    # drop metadata entries that are not model parameters
    for key in ("input_resolution", "context_length", "vocab_size"):
        state_dict.pop(key, None)

    convert_weights(model)
    model.load_state_dict(state_dict)
    return model.eval()
@lru_cache()
def default_bpe():
    """Path to the bundled BPE vocabulary file, resolved relative to this module."""
    module_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(module_dir, "data/bpe_simple_vocab_16e6.txt")
@lru_cache()
def bytes_to_unicode():
    """
    Build the reversible byte -> unicode-character table used by the BPE.

    The reversible bpe codes work on unicode strings, and you need a large
    number of unicode characters in your vocab to avoid UNKs. Printable
    latin-1 bytes map to themselves; every remaining byte is assigned a
    code point starting at 256, so no symbol ever maps to a
    whitespace/control character (which the bpe code barfs on).
    """
    kept_bytes = list(range(ord("!"), ord("~") + 1)) \
               + list(range(ord("¡"), ord("¬") + 1)) \
               + list(range(ord("®"), ord("ÿ") + 1))
    code_points = kept_bytes[:]

    offset = 0
    for byte in range(2 ** 8):
        if byte not in kept_bytes:
            kept_bytes.append(byte)
            code_points.append(2 ** 8 + offset)
            offset += 1

    return dict(zip(kept_bytes, (chr(cp) for cp in code_points)))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word`.

    `word` is a tuple of variable-length string symbols.
    """
    previous = word[0]
    pairs = set()
    for current in word[1:]:
        pairs.add((previous, current))
        previous = current
    return pairs
def basic_clean(text):
    """Fix mojibake via ftfy, unescape HTML entities, and strip surrounding whitespace."""
    fixed = ftfy.fix_text(text)
    # double-unescape handles doubly-encoded entities like "&amp;amp;"
    return html.unescape(html.unescape(fixed)).strip()
def whitespace_clean(text):
    """Collapse every run of whitespace to a single space and trim the ends."""
    return re.sub(r'\s+', ' ', text).strip()
class SimpleTokenizer(object):
    """Byte-level BPE tokenizer (GPT-2 style) used for CLIP text inputs.

    Text is mapped to unicode-safe byte symbols, merged according to the
    learned BPE ranks, and looked up in a 49152-entry vocabulary containing
    the 256 base symbols, their '</w>'-suffixed word-final variants, the
    merge results, and the two special tokens.
    """

    def __init__(self, bpe_path: str = default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        # first line of the merges file is a header; cap at the vocab size
        # minus the 2*256 base symbols and the 2 special tokens
        merges = Path(bpe_path).read_text(encoding='utf8').split('\n')
        merges = merges[1:49152-256-2+1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v+'</w>' for v in vocab]
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # cache of token -> merged BPE string; special tokens map to themselves
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        # NOTE: the \p{...} classes require the third-party `regex` module
        self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)

    def bpe(self, token):
        """Apply the learned merges to one token; returns a space-joined symbol string."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + ( token[-1] + '</w>',)
        pairs = get_pairs(word)

        if not pairs:
            return token+'</w>'

        while True:
            # merge the lowest-ranked (earliest-learned) adjacent pair
            bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    # tuple.index raises ValueError when `first` does not occur
                    # again; keep the remaining tail as-is. (Was a bare except,
                    # which also swallowed KeyboardInterrupt/SystemExit.)
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word)-1 and word[i+1] == second:
                    new_word.append(first+second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Tokenize `text` into a list of BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens):
        """Inverse of encode(): token ids -> text, with word boundaries restored from '</w>'."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
        return text
import gzip  # NOTE(review): gzip appears unused in this chunk — confirm it is needed downstream
# module-level tokenizer singleton used by tokenize() above
_tokenizer = SimpleTokenizer()
|
class AxialPositionalEmbedding(nn.Module):
    """Axial positional embedding: one learned embedding per axis of
    `axial_shape`, combined by summation (when `axial_dims` is None) or by
    concatenation along the feature dimension (when it is given)."""

    def __init__(self, dim, axial_shape, axial_dims = None):
        super().__init__()

        self.dim = dim
        self.shape = axial_shape
        self.max_seq_len = reduce(mul, axial_shape, 1)

        self.summed = axial_dims is None
        axial_dims = ((dim,) * len(axial_shape)) if self.summed else axial_dims

        assert len(self.shape) == len(axial_dims), 'number of axial dimensions must equal the number of dimensions in the shape'
        assert self.summed or not self.summed and sum(axial_dims) == dim, f'axial dimensions must sum up to the target dimension {dim}'

        # mock ParameterList registers each parameter as an attribute on this module
        self.weights = ParameterList(self, 'weights', len(axial_shape))

        for axis, (axis_len, axis_dim) in enumerate(zip(self.shape, axial_dims)):
            # parameter broadcasts over every axis except its own
            template = [1] * len(self.shape)
            template[axis] = axis_len
            param_shape = (1, *template, axis_dim)
            self.weights.append(nn.Parameter(torch.zeros(param_shape).normal_(0, 1)))

    def forward(self, x):
        batch, seq_len, _ = x.shape
        assert (seq_len <= self.max_seq_len), f'Sequence length ({seq_len}) must be less than the maximum sequence length allowed ({self.max_seq_len})'

        pieces = []
        for ax_emb in self.weights.to_list():
            axis_dim = ax_emb.shape[-1]
            expanded = ax_emb.expand((batch, *self.shape, axis_dim))
            pieces.append(expanded.reshape(batch, self.max_seq_len, axis_dim))

        pos_emb = sum(pieces) if self.summed else torch.cat(pieces, dim=-1)
        return pos_emb[:, :seq_len].to(x)
# a mock parameter list object until below issue is resolved
# https://github.com/pytorch/pytorch/issues/36035
class ParameterList(object):
    """Stores items as attributes `<prefix>_<i>` on a host object `kls`,
    so nn.Parameters assigned this way are registered on the host module."""

    def __init__(self, kls, prefix, length):
        self.ind = 0
        self.kls = kls
        self.prefix = prefix
        self.length = length

    def _keyname(self, prefix, ind):
        return f'{prefix}_{ind}'

    def append(self, x):
        key = self._keyname(self.prefix, self.ind)
        setattr(self.kls, key, x)
        self.ind += 1

    def to_list(self):
        items = []
        for i in range(self.length):
            items.append(getattr(self.kls, self._keyname(self.prefix, i)))
        return items
# Axial Positional Embedding for Images
class AxialPositionalEmbeddingImage(nn.Module):
    """2-D axial positional embedding for NCHW image feature maps."""

    def __init__(self, dim, axial_shape, axial_dims = None):
        super().__init__()
        assert len(axial_shape) == 2, 'Axial shape must have 2 dimensions for images'
        self.pos_emb = AxialPositionalEmbedding(dim, axial_shape, axial_dims)

    def forward(self, img):
        batch, channels, height, width = img.shape
        # NCHW -> (B, H*W, C) for the sequence-style embedding, then back
        flat = img.permute(0, 2, 3, 1).reshape(batch, height * width, channels)
        emb = self.pos_emb(flat)
        return emb.reshape(batch, height, width, channels).permute(0, 3, 1, 2)
|
# constants
DEVICE = None  # NOTE(review): dead placeholder — immediately overwritten by constants.DEVICE below
NUM_BATCHES = int(1e5)
GRADIENT_ACCUMULATE_EVERY = 16
LEARNING_RATE = 3e-4
IGNORE_INDEX = -100  # label value skipped by cross_entropy below
THRESHOLD_LENGTH = 250  # batches with longer sequences are filtered out by data_cond

# set device
DISTOGRAM_BUCKETS = constants.DISTOGRAM_BUCKETS
DEVICE = constants.DEVICE

# helpers
def cycle(loader, cond = lambda x: True):
    """Yield batches from `loader` forever, skipping any batch for which cond(batch) is False."""
    while True:
        for data in loader:
            if not cond(data):
                continue
            yield data

# get data
data = scn.load(
    casp_version = 12,
    thinning = 30,
    with_pytorch = 'dataloaders',
    batch_size = 1,
    dynamic_batching = False
)

data = iter(data['train'])
# assumes t[1] is the per-batch tensor whose dim 1 is sequence length — TODO confirm
data_cond = lambda t: t[1].shape[1] < THRESHOLD_LENGTH
dl = cycle(data, data_cond)

# model
model = Alphafold2(
    dim = 256,
    depth = 1,
    heads = 8,
    dim_head = 64
).to(DEVICE)

# optimizer
optim = Adam(model.parameters(), lr = LEARNING_RATE)

# training loop (gradient accumulation over GRADIENT_ACCUMULATE_EVERY micro-batches)
for _ in range(NUM_BATCHES):
    for _ in range(GRADIENT_ACCUMULATE_EVERY):
        batch = next(dl)
        seq, coords, mask = batch.seqs, batch.crds, batch.msks

        b, l, _ = seq.shape

        # prepare mask, labels: one-hot residues -> token ids, mask -> bool
        seq, coords, mask = seq.argmax(dim = -1).to(DEVICE), coords.to(DEVICE), mask.to(DEVICE).bool()
        # regroup flat per-residue atom coordinates into (batch, length, atoms, 3)
        coords = rearrange(coords, 'b (l c) d -> b l c d', l = l)
        # bucketed pairwise distances from atom index 1 (presumably the C-alpha atom — confirm)
        discretized_distances = get_bucketed_distance_matrix(coords[:, :, 1], mask, DISTOGRAM_BUCKETS, IGNORE_INDEX)

        # predict
        distogram = model(seq, mask = mask)
        # move bucket dimension to channel position for cross_entropy
        distogram = rearrange(distogram, 'b i j c -> b c i j')

        # loss
        loss = F.cross_entropy(
            distogram,
            discretized_distances,
            ignore_index = IGNORE_INDEX
        )

        loss.backward()

    # NOTE(review): loss is not divided by GRADIENT_ACCUMULATE_EVERY before
    # backward — the effective gradient is the sum, not the mean; confirm intended
    print('loss:', loss.item())

    optim.step()
    optim.zero_grad()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.