import itertools
from collections import OrderedDict

import networkx as nx
import numpy as np


class JunctionTree():
"""A JunctionTree is a transformation of a GraphicalModel into a tree structure. It is used
to find the maximal cliques in the graphical model, and for specifying the message passing
order for belief propagation. The JunctionTree is characterized by an elimination_order,
which is chosen greedily by default, but may be passed in if desired.
"""
def __init__(self, domain, cliques, elimination_order=None):
self.cliques = [tuple(cl) for cl in cliques]
self.domain = domain
self.graph = self._make_graph()
(self.tree, self.order) = self._make_tree(elimination_order)
def maximal_cliques(self):
' return the list of maximal cliques in the model '
return list(nx.dfs_preorder_nodes(self.tree))
def mp_order(self):
' return a valid message passing order '
edges = set()
messages = ([(a, b) for (a, b) in self.tree.edges()] + [(b, a) for (a, b) in self.tree.edges()])
for m1 in messages:
for m2 in messages:
if ((m1[1] == m2[0]) and (m1[0] != m2[1])):
edges.add((m1, m2))
G = nx.DiGraph()
G.add_nodes_from(messages)
G.add_edges_from(edges)
return list(nx.topological_sort(G))
def separator_axes(self):
return {(i, j): tuple((set(i) & set(j))) for (i, j) in self.mp_order()}
def neighbors(self):
return {i: set(self.tree.neighbors(i)) for i in self.maximal_cliques()}
def _make_graph(self):
G = nx.Graph()
G.add_nodes_from(self.domain.attrs)
for cl in self.cliques:
G.add_edges_from(itertools.combinations(cl, 2))
return G
def _triangulated(self, order):
edges = set()
G = nx.Graph(self.graph)
for node in order:
tmp = set(itertools.combinations(G.neighbors(node), 2))
edges |= tmp
G.add_edges_from(tmp)
G.remove_node(node)
tri = nx.Graph(self.graph)
tri.add_edges_from(edges)
cliques = [tuple(c) for c in nx.find_cliques(tri)]
cost = sum((self.domain.project(cl).size() for cl in cliques))
return (tri, cost)
def _greedy_order(self, stochastic=True):
order = []
(domain, cliques) = (self.domain, self.cliques)
unmarked = list(domain.attrs)
cliques = set(cliques)
total_cost = 0
for k in range(len(domain)):
cost = OrderedDict()
for a in unmarked:
neighbors = list(filter((lambda cl: (a in cl)), cliques))
variables = tuple(set.union(set(), *map(set, neighbors)))
newdom = domain.project(variables)
cost[a] = newdom.size()
if stochastic:
choices = list(unmarked)
costs = np.array([cost[a] for a in choices], dtype=float)
probas = ((np.max(costs) - costs) + 1)
probas /= probas.sum()
i = np.random.choice(probas.size, p=probas)
a = choices[i]
else:
a = min(cost, key=(lambda a: cost[a]))
order.append(a)
unmarked.remove(a)
neighbors = list(filter((lambda cl: (a in cl)), cliques))
variables = tuple((set.union(set(), *map(set, neighbors)) - {a}))
cliques -= set(neighbors)
cliques.add(variables)
total_cost += cost[a]
return (order, total_cost)
def _make_tree(self, order=None):
if (order is None):
order = self._greedy_order(stochastic=False)[0]
elif (type(order) is int):
orders = ([self._greedy_order(stochastic=False)] + [self._greedy_order(stochastic=True) for _ in range(order)])
order = min(orders, key=(lambda x: x[1]))[0]
self.elimination_order = order
(tri, cost) = self._triangulated(order)
cliques = sorted([self.domain.canonical(c) for c in nx.find_cliques(tri)])
complete = nx.Graph()
complete.add_nodes_from(cliques)
for (c1, c2) in itertools.combinations(cliques, 2):
wgt = len((set(c1) & set(c2)))
complete.add_edge(c1, c2, weight=(- wgt))
spanning = nx.minimum_spanning_tree(complete)
return (spanning, order)
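# Usage sketch (illustrative, not part of the original file): build a
# junction tree over a toy domain and inspect its structure. Assumes the
# Domain class exported by mbi; attribute names and sizes are made up.
if __name__ == '__main__':
    from mbi import Domain
    toy = Domain(['a', 'b', 'c'], [2, 3, 4])
    jt = JunctionTree(toy, [('a', 'b'), ('b', 'c')])
    print(jt.maximal_cliques())   # maximal cliques of the triangulated graph
    print(jt.mp_order())          # schedule where each message's inputs come first
    print(jt.separator_axes())    # shared attributes along each message edge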
|
import numpy as np
from collections import defaultdict
from copy import deepcopy
from scipy import sparse
from scipy.sparse.linalg import lsmr

# imports below inferred from usage in this snippet
from mbi import callbacks, CliqueVector, FactorGraph, RegionGraph


class LocalInference():
def __init__(self, domain, backend='numpy', structural_zeros={}, metric='L2', log=False, iters=1000, warm_start=False, marginal_oracle='convex', inner_iters=1):
"""Class for learning a GraphicalModel from noisy measurements on a data distribution

:param domain: The domain information (A Domain object)
:param backend: numpy or torch backend
:param structural_zeros: An encoding of the known (structural) zeros in the distribution.
    Specified as a dictionary where
    - each key is a subset of attributes of size r
    - each value is a list of r-tuples corresponding to impossible attribute settings
:param metric: The optimization metric. May be L1, L2 or a custom callable function
    - a custom callable function must consume the marginals and produce the loss and gradient
    - see FactoredInference._marginal_loss for more information
:param log: flag to log iterations of optimization
:param iters: number of iterations to optimize for
:param warm_start: initialize new model or reuse last model when calling infer multiple times
:param marginal_oracle: One of
    - convex (Region graph, convex Kikuchi entropy)
    - approx (Region graph, Kikuchi entropy)
    - pairwise-convex (Factor graph, convex Bethe entropy)
    - pairwise (Factor graph, Bethe entropy)
    - can also pass any FactorGraph or RegionGraph object
:param inner_iters: number of iterations the marginal oracle runs per call
"""
self.domain = domain
self.backend = backend
self.metric = metric
self.log = log
self.iters = iters
self.warm_start = warm_start
self.history = []
self.marginal_oracle = marginal_oracle
self.inner_iters = inner_iters
if (backend == 'torch'):
from mbi.torch_factor import Factor
self.Factor = Factor
else:
from mbi import Factor
self.Factor = Factor
self.structural_zeros = CliqueVector({})
for cl in structural_zeros:
dom = self.domain.project(cl)
fact = structural_zeros[cl]
self.structural_zeros[cl] = self.Factor.active(dom, fact)
def estimate(self, measurements, total=None, callback=None, options={}):
"""Estimate a GraphicalModel from the given measurements

:param measurements: a list of (Q, y, noise, proj) tuples, where
    Q is the measurement matrix (a numpy array, scipy sparse matrix, or LinearOperator)
    y is the noisy answers to the measurement queries
    noise is the standard deviation of the noise added to y
    proj defines the marginal used for this measurement set (a subset of attributes)
:param total: The total number of records (if known)
:param callback: a function to be called after each iteration of optimization
:param options: solver-specific options passed as a dictionary { param_name : param_value }

:return model: A GraphicalModel that best matches the measurements taken
"""
options['callback'] = callback
if ((callback is None) and self.log):
options['callback'] = callbacks.Logger(self)
self.mirror_descent(measurements, total, **options)
return self.model
def mirror_descent_auto(self, alpha, iters, callback=None):
model = self.model
theta0 = model.potentials
messages0 = deepcopy(model.messages)
theta = theta0
mu = model.belief_propagation(theta)
(l0, _) = self._marginal_loss(mu)
prev_l = np.inf
for t in range(iters):
if (callback is not None):
callback(mu)
(l, dL) = self._marginal_loss(mu)
theta = (theta - (alpha * dL))
mu = model.belief_propagation(theta)
if (l > prev_l):
if (t <= 50):
if self.log:
print('Reducing learning rate and restarting', (alpha / 2))
model.potentials = theta0
model.messages = messages0
return self.mirror_descent_auto((alpha / 2), iters, callback)
else:
model.damping = ((0.9 + model.damping) / 2.0)
if self.log:
print('Increasing damping and continuing', model.damping)
alpha *= 0.5
prev_l = l
for _ in range(1000):
if (model.primal_feasibility(mu) < 1.0):
break
mu = model.belief_propagation(theta)
if (callback is not None):
callback(mu)
return (l, theta, mu)
def mirror_descent(self, measurements, total=None, initial_alpha=10.0, callback=None):
"""Use the mirror descent algorithm to estimate the GraphicalModel
See https://web.iem.technion.ac.il/images/user-files/becka/papers/3.pdf

:param measurements: a list of (Q, y, noise, proj) tuples, where
    Q is the measurement matrix (a numpy array, scipy sparse matrix, or LinearOperator)
    y is the noisy answers to the measurement queries
    noise is the standard deviation of the noise added to y
    proj defines the marginal used for this measurement set (a subset of attributes)
:param total: The total number of records (if known)
:param initial_alpha: the initial learning rate, halved on each restart
:param callback: a function to be called after each iteration of optimization
"""
self._setup(measurements, total)
(l, theta, mu) = self.mirror_descent_auto(alpha=initial_alpha, iters=self.iters, callback=callback)
self.model.potentials = theta
self.model.marginals = mu
return l
def _marginal_loss(self, marginals, metric=None):
"""Compute the loss and gradient for a given dictionary of marginals

:param marginals: A dictionary with keys as projections and values as Factors
:return loss: the loss value
:return grad: A dictionary with the gradient for each marginal
"""
if (metric is None):
metric = self.metric
if callable(metric):
return metric(marginals)
loss = 0.0
gradient = {}
for cl in marginals:
mu = marginals[cl]
gradient[cl] = self.Factor.zeros(mu.domain)
for (Q, y, noise, proj) in self.groups[cl]:
c = (1.0 / noise)
mu2 = mu.project(proj)
x = mu2.datavector()
diff = (c * ((Q @ x) - y))
if (metric == 'L1'):
loss += abs(diff).sum()
sign = (diff.sign() if hasattr(diff, 'sign') else np.sign(diff))
grad = (c * (Q.T @ sign))
else:
loss += (0.5 * (diff @ diff))
grad = (c * (Q.T @ diff))
gradient[cl] += self.Factor(mu2.domain, grad)
return (float(loss), CliqueVector(gradient))
def _setup(self, measurements, total):
"""Perform necessary setup for running estimation algorithms

1. If total is None, find the minimum variance unbiased estimate for total and use that
2. Construct the GraphicalModel
   * If there are structural_zeros in the distribution, initialize factors appropriately
3. Pre-process measurements into groups so that _marginal_loss may be evaluated efficiently
"""
if (total is None):
variances = np.array([])
estimates = np.array([])
for (Q, y, noise, proj) in measurements:
o = np.ones(Q.shape[1])
v = lsmr(Q.T, o, atol=0, btol=0)[0]
if np.allclose(Q.T.dot(v), o):
variances = np.append(variances, ((noise ** 2) * np.dot(v, v)))
estimates = np.append(estimates, np.dot(v, y))
if (estimates.size == 0):
total = 1
else:
variance = (1.0 / np.sum((1.0 / variances)))
estimate = (variance * np.sum((estimates / variances)))
total = max(1, estimate)
cliques = [m[3] for m in measurements]
if (self.structural_zeros is not None):
cliques += list(self.structural_zeros.keys())
if (self.marginal_oracle == 'approx'):
model = RegionGraph(self.domain, cliques, total, convex=False, iters=self.inner_iters)
elif (self.marginal_oracle == 'convex'):
model = RegionGraph(self.domain, cliques, total, convex=True, iters=self.inner_iters)
elif (self.marginal_oracle == 'pairwise'):
model = FactorGraph(self.domain, cliques, total, convex=False, iters=self.inner_iters)
elif (self.marginal_oracle == 'pairwise-convex'):
model = FactorGraph(self.domain, cliques, total, convex=True, iters=self.inner_iters)
else:
model = self.marginal_oracle
model.total = total
if (type(self.marginal_oracle) is str):
model.potentials = CliqueVector.zeros(self.domain, model.cliques)
model.potentials.combine(self.structural_zeros)
if (self.warm_start and hasattr(self, 'model')):
model.potentials.combine(self.model.potentials)
self.model = model
cliques = self.model.cliques
self.groups = defaultdict((lambda : []))
for (Q, y, noise, proj) in measurements:
if (self.backend == 'torch'):
import torch
device = self.Factor.device
y = torch.tensor(y, dtype=torch.float32, device=device)
if isinstance(Q, np.ndarray):
Q = torch.tensor(Q, dtype=torch.float32, device=device)
elif sparse.issparse(Q):
Q = Q.tocoo()
idx = torch.LongTensor([Q.row, Q.col])
vals = torch.FloatTensor(Q.data)
Q = torch.sparse.FloatTensor(idx, vals).to(device)
m = (Q, y, noise, proj)
for cl in sorted(cliques, key=model.domain.size):
if (set(proj) <= set(cl)):
self.groups[cl].append(m)
break
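# Usage sketch (illustrative, not part of the original file): measure two
# noisy one-way marginals of a synthetic dataset and fit a local model.
# Assumes Domain and Dataset from mbi; sizes and noise scale are made up.
if __name__ == '__main__':
    from mbi import Domain, Dataset
    dom = Domain(['a', 'b'], [3, 4])
    data = Dataset.synthetic(dom, 1000)
    measurements = []
    for proj in [('a',), ('b',)]:
        x = data.project(proj).datavector()
        y = x + np.random.normal(loc=0, scale=1.0, size=x.size)
        measurements.append((sparse.eye(x.size), y, 1.0, proj))
    engine = LocalInference(dom, iters=100)
    model = engine.estimate(measurements, total=1000)
    print(model.project(('a',)).datavector())  # estimated marginal of 'a'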
|
import numpy as np
from scipy import sparse
from scipy.stats import laplace, norm

# imports below inferred from usage in this snippet
from mbi import FactoredInference, LocalInference
from mbi.callbacks import Logger


def run(dataset, measurements, eps=1.0, delta=0.0, bounded=True, engine='MD', options={}, iters=10000, seed=None, metric='L2', elim_order=None, frequency=1, workload=None, oracle='exact'):
"""Run a mechanism that measures the given measurements and runs inference.
This is a convenience method for running end-to-end experiments.
"""
domain = dataset.domain
total = None
state = np.random.RandomState(seed)
if ((len(measurements) >= 1) and (type(measurements[0][0]) is str)):
matrix = (lambda proj: sparse.eye(domain.project(proj).size()))
measurements = [(proj, matrix(proj)) for proj in measurements]
l1 = 0
l2 = 0
for (_, Q) in measurements:
l1 += np.abs(Q).sum(axis=0).max()
try:
l2 += Q.power(2).sum(axis=0).max()
except AttributeError:
l2 += np.square(Q).sum(axis=0).max()
if bounded:
total = dataset.df.shape[0]
l1 *= 2
l2 *= 2
if (delta > 0):
noise = norm(loc=0, scale=(np.sqrt(((l2 * 2) * np.log((2 / delta)))) / eps))
else:
noise = laplace(loc=0, scale=(l1 / eps))
if (workload is None):
workload = measurements
truth = []
for (proj, W) in workload:
x = dataset.project(proj).datavector()
y = W.dot(x)
truth.append((W, y, proj))
answers = []
for (proj, Q) in measurements:
x = dataset.project(proj).datavector()
z = noise.rvs(size=Q.shape[0], random_state=state)
y = Q.dot(x)
answers.append((Q, (y + z), 1.0, proj))
if (oracle == 'exact'):
estimator = FactoredInference(domain, metric=metric, iters=iters, warm_start=False, elim_order=elim_order)
else:
estimator = LocalInference(domain, metric=metric, iters=iters, warm_start=False, marginal_oracle=oracle)
logger = Logger(estimator, true_answers=truth, frequency=frequency)
model = estimator.estimate(answers, total, engine=engine, callback=logger, options=options)
return (model, logger, answers)
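# Usage sketch (illustrative, not part of the original file): end-to-end run
# on synthetic data, measuring two marginals under pure DP (delta = 0).
# Assumes Domain and Dataset from mbi; epsilon and sizes are made up.
if __name__ == '__main__':
    from mbi import Domain, Dataset
    dom = Domain(['a', 'b', 'c'], [2, 3, 4])
    data = Dataset.synthetic(dom, 1000)
    model, logger, answers = run(data, [('a', 'b'), ('b', 'c')], eps=1.0, iters=250)
    print(model.project(('a', 'b')).datavector())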
|
import numpy as np
from scipy.sparse.linalg import lsmr


def estimate_total(measurements):
variances = np.array([])
estimates = np.array([])
for (Q, y, noise, proj) in measurements:
o = np.ones(Q.shape[1])
v = lsmr(Q.T, o, atol=0, btol=0)[0]
if np.allclose(Q.T.dot(v), o):
variances = np.append(variances, ((noise ** 2) * np.dot(v, v)))
estimates = np.append(estimates, np.dot(v, y))
if (estimates.size == 0):
return 1
else:
variance = (1.0 / np.sum((1.0 / variances)))
estimate = (variance * np.sum((estimates / variances)))
return max(1, estimate)
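# Worked example (illustrative): with an identity measurement of a single
# marginal, v = (1, ..., 1) solves Q^T v = 1, so the estimate of the total
# is sum(y), with variance noise^2 * len(y).
if __name__ == '__main__':
    Q = np.eye(4)
    y = np.array([10.0, 20.0, 30.0, 40.0])
    print(estimate_total([(Q, y, 1.0, ('a',))]))  # 100.0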
|
import numpy as np


def adam(loss_and_grad, x0, iters=250):
a = 1.0
(b1, b2) = (0.9, 0.999)
eps = 1e-07
x = x0
m = np.zeros_like(x)
v = np.zeros_like(x)
for t in range(1, (iters + 1)):
(l, g) = loss_and_grad(x)
m = ((b1 * m) + ((1 - b1) * g))
v = ((b2 * v) + ((1 - b2) * (g ** 2)))
mhat = (m / (1 - (b1 ** t)))
vhat = (v / (1 - (b2 ** t)))
x = (x - ((a * mhat) / (np.sqrt(vhat) + eps)))
return x
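# Usage sketch (illustrative): minimize the quadratic 0.5 * ||x - b||^2 with
# the Adam updates above. The fixed step size a = 1.0 is aggressive, but the
# toy problem still converges.
if __name__ == '__main__':
    b = np.array([1.0, 2.0, 3.0])
    loss_and_grad = lambda x: (0.5 * np.sum((x - b) ** 2), x - b)
    print(adam(loss_and_grad, np.zeros(3)))  # approaches [1, 2, 3]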
|
import numpy as np


def synthetic_col(counts, total):
counts *= (total / counts.sum())
(frac, integ) = np.modf(counts)
integ = integ.astype(int)
extra = (total - integ.sum())
if (extra > 0):
idx = np.random.choice(counts.size, extra, False, (frac / frac.sum()))
integ[idx] += 1
vals = np.repeat(np.arange(counts.size), integ)
np.random.shuffle(vals)
return vals
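# Worked example (illustrative): counts are rescaled to the target total,
# integer parts are kept, and the leftover rows are assigned at random in
# proportion to the fractional parts.
if __name__ == '__main__':
    column = synthetic_col(np.array([1.5, 2.5, 6.0]), total=10)
    print(np.bincount(column, minlength=3))  # sums to 10, close to [1.5, 2.5, 6]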
|
import numpy as np
import pandas as pd

from mbi import Dataset  # inferred from usage


class MixtureOfProducts():
def __init__(self, products, domain, total):
self.products = products
self.domain = domain
self.total = total
self.num_components = next(iter(products.values())).shape[0]
def project(self, cols):
products = {col: self.products[col] for col in cols}
domain = self.domain.project(cols)
return MixtureOfProducts(products, domain, self.total)
def datavector(self, flatten=True):
letters = 'bcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'[:len(self.domain)]
formula = ((','.join([('a%s' % l) for l in letters]) + '->') + ''.join(letters))
components = [self.products[col] for col in self.domain]
ans = ((np.einsum(formula, *components) * self.total) / self.num_components)
return (ans.flatten() if flatten else ans)
def synthetic_data(self, rows=None):
total = (rows or int(self.total))
subtotal = ((total // self.num_components) + 1)
dfs = []
for i in range(self.num_components):
df = pd.DataFrame()
for col in self.products:
counts = self.products[col][i]
df[col] = synthetic_col(counts, subtotal)
dfs.append(df)
df = pd.concat(dfs).sample(frac=1).reset_index(drop=True)[:total]
return Dataset(df, self.domain)
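# Sketch of the einsum used in datavector (illustrative): for two attributes
# the formula is 'ab,ac->bc', a sum over mixture components of the outer
# product of the per-attribute distributions.
if __name__ == '__main__':
    k, n1, n2 = 2, 3, 4
    p1, p2 = np.random.rand(k, n1), np.random.rand(k, n2)
    direct = sum(np.outer(p1[i], p2[i]) for i in range(k))
    print(np.allclose(np.einsum('ab,ac->bc', p1, p2), direct))  # True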
|
import numpy as np
import jax.numpy as jnp
from jax import vjp
from jax.nn import softmax as jax_softmax  # binding inferred from usage


class MixtureInference():
def __init__(self, domain, components=10, metric='L2', iters=2500, warm_start=False):
"""
:param domain: A Domain object
:param components: The number of mixture components
:param metric: The metric to use for the loss function (can be callable)
"""
self.domain = domain
self.components = components
self.metric = metric
self.iters = iters
self.warm_start = warm_start
self.params = np.random.normal(loc=0, scale=0.25, size=(sum(domain.shape) * components))
def estimate(self, measurements, total=None, alpha=0.1):
if total is None:
total = estimate_total(measurements)
self.measurements = measurements
cliques = [M[(- 1)] for M in measurements]
letters = 'bcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_products(params):
products = {}
idx = 0
for col in self.domain:
n = self.domain[col]
k = self.components
products[col] = jax_softmax(params[idx:(idx + (k * n))].reshape(k, n), axis=1)
idx += (k * n)
return products
def marginals_from_params(params):
products = get_products(params)
mu = {}
for cl in cliques:
let = letters[:len(cl)]
formula = ((','.join([('a%s' % l) for l in let]) + '->') + ''.join(let))
components = [products[col] for col in cl]
ans = ((jnp.einsum(formula, *components) * total) / self.components)
mu[cl] = ans.flatten()
return mu
def loss_and_grad(params):
params = jnp.array(params)
(mu, backprop) = vjp(marginals_from_params, params)
mu = {cl: np.array(mu[cl]) for cl in cliques}
(loss, dL) = self._marginal_loss(mu)
dL = {cl: jnp.array(dL[cl]) for cl in cliques}
dparams = backprop(dL)
return (loss, np.array(dparams[0]))
if (not self.warm_start):
self.params = np.random.normal(loc=0, scale=0.25, size=(sum(self.domain.shape) * self.components))
self.params = adam(loss_and_grad, self.params, iters=self.iters)
products = get_products(self.params)
return MixtureOfProducts(products, self.domain, total)
def _marginal_loss(self, marginals, metric=None):
"""Compute the loss and gradient for a given dictionary of marginals

:param marginals: A dictionary with keys as projections and values as flattened numpy arrays
:return loss: the loss value
:return grad: A dictionary with the gradient for each marginal
"""
if (metric is None):
metric = self.metric
loss = 0.0
gradient = {cl: np.zeros_like(marginals[cl]) for cl in marginals}
for (Q, y, noise, cl) in self.measurements:
x = marginals[cl]
c = (1.0 / noise)
diff = (c * ((Q @ x) - y))
if (metric == 'L1'):
loss += abs(diff).sum()
sign = (diff.sign() if hasattr(diff, 'sign') else np.sign(diff))
grad = (c * (Q.T @ sign))
else:
loss += (0.5 * (diff @ diff))
grad = (c * (Q.T @ diff))
gradient[cl] += grad
return (float(loss), gradient)
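# Usage sketch (illustrative): fit a two-component mixture of products to a
# single marginal. Assumes Domain from mbi and a working jax install; sizes,
# iteration count, and values are made up.
if __name__ == '__main__':
    from mbi import Domain
    dom = Domain(['a', 'b'], [2, 3])
    y = np.array([10.0, 20.0, 30.0, 15.0, 15.0, 10.0])
    measurements = [(np.eye(6), y, 1.0, ('a', 'b'))]
    engine = MixtureInference(dom, components=2, iters=200)
    model = engine.estimate(measurements, total=100)
    print(model.datavector().round(2))  # approaches y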
|
import numpy as np
from scipy.special import logsumexp


def entropic_mirror_descent(loss_and_grad, x0, total, iters=250):
logP = ((np.log((x0 + np.nextafter(0, 1))) + np.log(total)) - np.log(x0.sum()))
P = ((x0 * total) / x0.sum())
(loss, dL) = loss_and_grad(P)
alpha = 1.0
begun = False
for _ in range(iters):
logQ = (logP - (alpha * dL))
logQ += (np.log(total) - logsumexp(logQ))
Q = np.exp(logQ)
(new_loss, new_dL) = loss_and_grad(Q)
if ((loss - new_loss) >= ((0.5 * alpha) * dL.dot((P - Q)))):
logP = logQ
(loss, dL) = (new_loss, new_dL)
if (not begun):
alpha *= 2
else:
alpha *= 0.5
begun = True
return np.exp(logP)
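# Usage sketch (illustrative): minimize an L2 loss over the scaled
# probability simplex (entries positive, summing to total) starting from a
# uniform histogram; the target y is feasible, so P approaches it.
if __name__ == '__main__':
    y = np.array([30.0, 50.0, 20.0])
    loss_and_grad = lambda P: (0.5 * np.sum((P - y) ** 2), P - y)
    print(entropic_mirror_descent(loss_and_grad, np.ones(3), total=100).round(2))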
|
import numpy as np
from scipy.sparse.linalg import lsmr


def estimate_total(measurements):
variances = np.array([])
estimates = np.array([])
for (Q, y, noise, proj) in measurements:
o = np.ones(Q.shape[1])
v = lsmr(Q.T, o, atol=0, btol=0)[0]
if np.allclose(Q.T.dot(v), o):
variances = np.append(variances, ((noise ** 2) * np.dot(v, v)))
estimates = np.append(estimates, np.dot(v, y))
if (estimates.size == 0):
return 1
else:
variance = (1.0 / np.sum((1.0 / variances)))
estimate = (variance * np.sum((estimates / variances)))
return max(1, estimate)
|
import numpy as np

# imports below inferred from usage in this snippet
from mbi import CliqueVector, Dataset, Factor


class PublicInference():
def __init__(self, public_data, metric='L2'):
self.public_data = public_data
self.metric = metric
self.weights = np.ones(self.public_data.records)
def estimate(self, measurements, total=None):
if (total is None):
total = estimate_total(measurements)
self.measurements = measurements
cliques = [M[(- 1)] for M in measurements]
def loss_and_grad(weights):
est = Dataset(self.public_data.df, self.public_data.domain, weights)
mu = CliqueVector.from_data(est, cliques)
(loss, dL) = self._marginal_loss(mu)
dweights = np.zeros(weights.size)
for cl in dL:
idx = est.project(cl).df.values
dweights += dL[cl].values[tuple(idx.T)]
return (loss, dweights)
self.weights = entropic_mirror_descent(loss_and_grad, self.weights, total)
return Dataset(self.public_data.df, self.public_data.domain, self.weights)
def _marginal_loss(self, marginals, metric=None):
"""Compute the loss and gradient for a given dictionary of marginals

:param marginals: A dictionary with keys as projections and values as Factors
:return loss: the loss value
:return grad: A dictionary with the gradient for each marginal
"""
if (metric is None):
metric = self.metric
if callable(metric):
return metric(marginals)
loss = 0.0
gradient = {cl: Factor.zeros(marginals[cl].domain) for cl in marginals}
for (Q, y, noise, cl) in self.measurements:
mu = marginals[cl]
c = (1.0 / noise)
x = mu.datavector()
diff = (c * ((Q @ x) - y))
if (metric == 'L1'):
loss += abs(diff).sum()
sign = (diff.sign() if hasattr(diff, 'sign') else np.sign(diff))
grad = (c * (Q.T @ sign))
else:
loss += (0.5 * (diff @ diff))
grad = (c * (Q.T @ diff))
gradient[cl] += Factor(mu.domain, grad)
return (float(loss), CliqueVector(gradient))
|
import itertools
from collections import defaultdict

import networkx as nx
import numpy as np
from disjoint_set import DisjointSet  # from the disjoint-set package; inferred from usage

from mbi import CliqueVector, Factor  # inferred from usage


class RegionGraph():
def __init__(self, domain, cliques, total=1.0, minimal=True, convex=True, iters=25, convergence=0.001, damping=0.5):
self.domain = domain
self.cliques = cliques
if (not convex):
self.cliques = []
for r in cliques:
if (not any(((set(r) < set(s)) for s in cliques))):
self.cliques.append(r)
self.total = total
self.minimal = minimal
self.convex = convex
self.iters = iters
self.convergence = convergence
self.damping = damping
if convex:
self.belief_propagation = self.hazan_peng_shashua
else:
self.belief_propagation = self.generalized_belief_propagation
self.build_graph()
self.cliques = sorted(self.regions, key=len)
self.potentials = CliqueVector.zeros(domain, self.cliques)
self.marginals = (CliqueVector.uniform(domain, self.cliques) * total)
def show(self):
import matplotlib.pyplot as plt
labels = {r: ''.join(r) for r in self.regions}
pos = {}
xloc = defaultdict((lambda : 0))
for r in sorted(self.regions):
y = len(r)
pos[r] = ((xloc[y] + (0.5 * (y % 2))), y)
xloc[y] += 1
colormap = {r: ('red' if (r in self.cliques) else 'blue') for r in self.regions}
nx.draw(self.G, pos=pos, node_color='orange', node_size=1000)
nx.draw(self.G, pos=pos, nodelist=self.cliques, node_color='green', node_size=1000)
nx.draw_networkx_labels(self.G, pos=pos, labels=labels)
plt.show()
def project(self, attrs, maxiter=100, alpha=None):
if (type(attrs) is list):
attrs = tuple(attrs)
for cl in self.cliques:
if (set(attrs) <= set(cl)):
return self.marginals[cl].project(attrs)
intersections = [(set(cl) & set(attrs)) for cl in self.cliques]
target_cliques = [tuple(t) for t in intersections if (not any(((t < s) for s in intersections)))]
target_cliques = list(set(target_cliques))
target_mu = CliqueVector.from_data(self, target_cliques)
if (len(target_cliques) == 0):
return (Factor.uniform(self.domain.project(attrs)) * self.total)
P = estimate_kikuchi_marginal(self.domain.project(attrs), self.total, target_mu)
if (alpha is None):
alpha = (1.0 / (self.total * len(target_cliques)))
curr_mu = CliqueVector.from_data(P, target_cliques)
diff = (curr_mu - target_mu)
(curr_loss, dL) = (diff.dot(diff), sum(diff.values()).expand(P.domain))
begun = False
for _ in range(maxiter):
if (curr_loss <= 1e-08):
return P
Q = (P * ((- alpha) * dL).exp())
Q *= (self.total / Q.sum())
curr_mu = CliqueVector.from_data(Q, target_cliques)
diff = (curr_mu - target_mu)
loss = diff.dot(diff)
if ((curr_loss - loss) >= ((0.5 * alpha) * dL.dot((P - Q)))):
P = Q
curr_loss = loss
dL = sum(diff.values()).expand(P.domain)
if (not begun):
alpha *= 2
else:
alpha *= 0.5
begun = True
return P
def primal_feasibility(self, mu):
ans = 0
count = 0
for r in self.cliques:
for s in self.children[r]:
x = mu[r].project(s).datavector()
y = mu[s].datavector()
err = np.linalg.norm((x - y), 1)
ans += err
count += 1
return (0 if (count == 0) else (ans / count))
def is_converged(self, mu):
return (self.primal_feasibility(mu) <= self.convergence)
def build_graph(self):
regions = set(self.cliques)
size = 0
while (len(regions) > size):
size = len(regions)
for (r1, r2) in itertools.combinations(regions, 2):
z = tuple(sorted((set(r1) & set(r2))))
if ((len(z) > 0) and (not (z in regions))):
regions.update({z})
G = nx.DiGraph()
G.add_nodes_from(regions)
for r1 in regions:
for r2 in regions:
if ((set(r2) < set(r1)) and (not any((((set(r2) < set(r3)) and (set(r3) < set(r1))) for r3 in regions)))):
G.add_edge(r1, r2)
H = G.reverse()
(G1, H1) = (nx.transitive_closure(G), nx.transitive_closure(H))
self.children = {r: list(G.neighbors(r)) for r in regions}
self.parents = {r: list(H.neighbors(r)) for r in regions}
self.descendants = {r: list(G1.neighbors(r)) for r in regions}
self.ancestors = {r: list(H1.neighbors(r)) for r in regions}
self.forebears = {r: set(([r] + self.ancestors[r])) for r in regions}
self.downp = {r: set(([r] + self.descendants[r])) for r in regions}
if self.minimal:
min_edges = []
for r in regions:
ds = DisjointSet()
for u in self.parents[r]:
ds.find(u)
for (u, v) in itertools.combinations(self.parents[r], 2):
uv = (set(self.ancestors[u]) & set(self.ancestors[v]))
if (len(uv) > 0):
ds.union(u, v)
canonical = set()
for u in self.parents[r]:
canonical.update({ds.find(u)})
min_edges.extend([(u, r) for u in canonical])
G = nx.DiGraph()
G.add_nodes_from(regions)
G.add_edges_from(min_edges)
H = G.reverse()
(G1, H1) = (nx.transitive_closure(G), nx.transitive_closure(H))
self.children = {r: list(G.neighbors(r)) for r in regions}
self.parents = {r: list(H.neighbors(r)) for r in regions}
self.G = G
self.regions = regions
if self.convex:
self.counting_numbers = {r: 1.0 for r in regions}
else:
moebius = {}
def get_counting_number(r):
if (not (r in moebius)):
moebius[r] = (1 - sum((get_counting_number(s) for s in self.ancestors[r])))
return moebius[r]
for r in regions:
get_counting_number(r)
self.counting_numbers = moebius
if self.minimal:
(N, D, B) = ({}, {}, {})
for r in regions:
B[r] = set()
for p in self.parents[r]:
B[r].add((p, r))
for d in self.descendants[r]:
for p in ((set(self.parents[d]) - {r}) - set(self.descendants[r])):
B[r].add((p, d))
for p in self.regions:
for r in self.children[p]:
(N[(p, r)], D[(p, r)]) = (set(), set())
for s in self.parents[p]:
N[(p, r)].add((s, p))
for d in self.descendants[p]:
for s in ((set(self.parents[d]) - {p}) - set(self.descendants[p])):
N[(p, r)].add((s, d))
for s in (set(self.parents[r]) - {p}):
D[(p, r)].add((s, r))
for d in self.descendants[r]:
for p1 in ((set(self.parents[d]) - {r}) - set(self.descendants[r])):
D[(p, r)].add((p1, d))
cancel = (N[(p, r)] & D[(p, r)])
N[(p, r)] = (N[(p, r)] - cancel)
D[(p, r)] = (D[(p, r)] - cancel)
(self.N, self.D, self.B) = (N, D, B)
else:
(N, D, B) = ({}, {}, {})
for r in regions:
B[r] = [(ru, r) for ru in self.parents[r]]
for rd in self.descendants[r]:
for ru in (set(self.parents[rd]) - self.downp[r]):
B[r].append((ru, rd))
for ru in regions:
for rd in self.children[ru]:
(fu, fd) = (self.downp[ru], self.downp[rd])
cond = (lambda r: ((not (r[0] in fu)) and (r[1] in (fu - fd))))
N[(ru, rd)] = [e for e in G.edges if cond(e)]
cond = (lambda r: ((r[0] in (fu - fd)) and (r[1] in fd) and (r != (ru, rd))))
D[(ru, rd)] = [e for e in G.edges if cond(e)]
(self.N, self.D, self.B) = (N, D, B)
self.messages = {}
self.message_order = []
for ru in sorted(regions, key=len):
for rd in self.children[ru]:
self.message_order.append((ru, rd))
self.messages[(ru, rd)] = Factor.zeros(self.domain.project(rd))
self.messages[(rd, ru)] = Factor.zeros(self.domain.project(rd))
def generalized_belief_propagation(self, potentials, callback=None):
pot = {}
for r in self.regions:
if (r in self.cliques):
pot[r] = potentials[r]
else:
pot[r] = Factor.zeros(self.domain.project(r))
for _ in range(self.iters):
new = {}
for (ru, rd) in self.message_order:
num = pot[ru]
num = (num + sum((self.messages[(r1, r2)] for (r1, r2) in self.N[(ru, rd)])))
denom = sum((new[(r1, r2)] for (r1, r2) in self.D[(ru, rd)]))
diff = tuple((set(ru) - set(rd)))
new[(ru, rd)] = (num.logsumexp(diff) - denom)
new[(ru, rd)] -= new[(ru, rd)].logsumexp()
for (ru, rd) in self.message_order:
self.messages[(ru, rd)] = ((0.5 * self.messages[(ru, rd)]) + (0.5 * new[(ru, rd)]))
marginals = {}
for r in self.cliques:
belief = (potentials[r] + sum((self.messages[(r1, r2)] for (r1, r2) in self.B[r])))
belief += (np.log(self.total) - belief.logsumexp())
marginals[r] = belief.exp()
return CliqueVector(marginals)
def hazan_peng_shashua(self, potentials, callback=None):
c0 = self.counting_numbers
pot = {}
for r in self.regions:
if (r in self.cliques):
pot[r] = potentials[r]
else:
pot[r] = Factor.zeros(self.domain.project(r))
messages = self.messages
cc = {}
for r in self.regions:
for p in self.parents[r]:
cc[(p, r)] = (c0[p] / (c0[r] + sum((c0[p1] for p1 in self.parents[r]))))
for _ in range(self.iters):
new = {}
for r in self.regions:
for p in self.parents[r]:
new[(p, r)] = (((pot[p] + sum((messages[(c, p)] for c in self.children[p] if (c != r)))) - sum((messages[(p, p1)] for p1 in self.parents[p]))) / c0[p])
new[(p, r)] = (c0[p] * new[(p, r)].logsumexp(tuple((set(p) - set(r)))))
new[(p, r)] -= new[(p, r)].logsumexp()
for r in self.regions:
for p in self.parents[r]:
new[(r, p)] = ((cc[(p, r)] * ((pot[r] + sum((messages[(c, r)] for c in self.children[r]))) + sum((messages[(p1, r)] for p1 in self.parents[r])))) - messages[(p, r)])
new[(r, p)] -= new[(r, p)].logsumexp()
rho = self.damping
for p in self.regions:
for r in self.children[p]:
messages[(p, r)] = ((rho * messages[(p, r)]) + ((1.0 - rho) * new[(p, r)]))
messages[(r, p)] = ((rho * messages[(r, p)]) + ((1.0 - rho) * new[(r, p)]))
mu = {}
for r in self.regions:
belief = (((pot[r] + sum((messages[(c, r)] for c in self.children[r]))) - sum((messages[(r, p)] for p in self.parents[r]))) / c0[r])
belief += (np.log(self.total) - belief.logsumexp())
mu[r] = belief.exp()
if (callback is not None):
callback(mu)
if self.is_converged(mu):
self.messages = messages
return CliqueVector(mu)
self.messages = messages
return CliqueVector(mu)
def wiegerinck(self, potentials, callback=None):
c = self.counting_numbers
m = {}
for delta in self.regions:
m[delta] = 0
for alpha in self.ancestors[delta]:
m[delta] += c[alpha]
Q = {}
for r in self.regions:
if (r in self.cliques):
Q[r] = (potentials[r] / c[r])
else:
Q[r] = Factor.zeros(self.domain.project(r))
inner = [r for r in self.regions if (len(self.parents[r]) > 0)]
diff = (lambda r, s: tuple((set(r) - set(s))))
for _ in range(self.iters):
for r in inner:
A = (c[r] / (m[r] + c[r]))
B = (m[r] / (m[r] + c[r]))
Qbar = (sum(((c[s] * Q[s].logsumexp(diff(s, r))) for s in self.ancestors[r])) / m[r])
Q[r] = ((Q[r] * A) + (Qbar * B))
Q[r] -= Q[r].logsumexp()
for s in self.ancestors[r]:
Q[s] = ((Q[s] + Q[r]) - Q[s].logsumexp(diff(s, r)))
Q[s] -= Q[s].logsumexp()
marginals = {}
for r in self.regions:
marginals[r] = ((Q[r] + np.log(self.total)) - Q[r].logsumexp()).exp()
if (callback is not None):
callback(marginals)
return CliqueVector(marginals)
def loh_wibisono(self, potentials, callback=None):
pot = {}
for r in self.regions:
if (r in self.cliques):
pot[r] = potentials[r]
else:
pot[r] = Factor.zeros(self.domain.project(r))
rho = self.counting_numbers
for _ in range(self.iters):
new = {}
for (s, r) in self.message_order:
diff = tuple((set(s) - set(r)))
num = (pot[s] / rho[s])
for v in self.parents[s]:
num += ((self.messages[(v, s)] * rho[v]) / rho[s])
for w in self.children[s]:
if (w != r):
num -= self.messages[(s, w)]
num = num.logsumexp(diff)
denom = (pot[r] / rho[r])
for u in self.parents[r]:
if (u != s):
denom += ((self.messages[(u, r)] * rho[u]) / rho[r])
for t in self.children[r]:
denom -= self.messages[(r, t)]
new[(s, r)] = ((rho[r] / (rho[r] + rho[s])) * (num - denom))
new[(s, r)] -= new[(s, r)].logsumexp()
for (ru, rd) in self.message_order:
self.messages[(ru, rd)] = ((0.5 * self.messages[(ru, rd)]) + (0.5 * new[(ru, rd)]))
marginals = {}
for r in self.regions:
belief = (pot[r] / rho[r])
for s in self.parents[r]:
belief += ((self.messages[(s, r)] * rho[s]) / rho[r])
for t in self.children[r]:
belief -= self.messages[(r, t)]
belief += (np.log(self.total) - belief.logsumexp())
marginals[r] = belief.exp()
if (callback is not None):
callback(marginals)
return CliqueVector(marginals)
def kikuchi_entropy(self, marginals):
"""Return the Kikuchi entropy and the gradient with respect to the marginals"""
weights = self.counting_numbers
entropy = 0
dmarginals = {}
for cl in self.regions:
mu = (marginals[cl] / self.total)
entropy += (weights[cl] * (mu * mu.log()).sum())
dmarginals[cl] = ((weights[cl] * (1 + mu.log())) / self.total)
return ((- entropy), ((- 1) * CliqueVector(dmarginals)))
def mle(self, mu):
return ((- 1) * self.kikuchi_entropy(mu)[1])
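# Usage sketch (illustrative, not part of the original file): build a convex
# region graph over two overlapping cliques and run belief propagation from
# zero potentials, which yields uniform pseudo-marginals.
if __name__ == '__main__':
    from mbi import Domain
    dom = Domain(['a', 'b', 'c'], [2, 3, 2])
    rg = RegionGraph(dom, [('a', 'b'), ('b', 'c')], total=1.0, iters=10)
    mu = rg.belief_propagation(rg.potentials)
    print(mu[('a', 'b')].datavector())  # uniform: every entry 1/6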
|
import itertools

import networkx as nx
import numpy as np

from mbi import Factor  # inferred from usage


def estimate_kikuchi_marginal(domain, total, marginals):
marginals = dict(marginals)
regions = set(marginals.keys())
size = 0
while (len(regions) > size):
size = len(regions)
for (r1, r2) in itertools.combinations(regions, 2):
z = tuple(sorted((set(r1) & set(r2))))
if ((len(z) > 0) and (not (z in regions))):
marginals[z] = marginals[r1].project(z)
regions.update({z})
G = nx.DiGraph()
G.add_nodes_from(regions)
for r1 in regions:
for r2 in regions:
if ((set(r2) < set(r1)) and (not any((((set(r2) < set(r3)) and (set(r3) < set(r1))) for r3 in regions)))):
G.add_edge(r1, r2)
H1 = nx.transitive_closure(G.reverse())
ancestors = {r: list(H1.neighbors(r)) for r in regions}
moebius = {}
def get_counting_number(r):
if (not (r in moebius)):
moebius[r] = (1 - sum((get_counting_number(s) for s in ancestors[r])))
return moebius[r]
logP = Factor.zeros(domain)
for r in regions:
kr = get_counting_number(r)
logP += (kr * marginals[r].log())
logP += (np.log(total) - logP.logsumexp())
return logP.exp()
|
import numpy as np
import torch


class Factor():
device = ('cuda' if torch.cuda.is_available() else 'cpu')
def __init__(self, domain, values):
"""Initialize a factor over the given domain

:param domain: the domain of the factor
:param values: the ndarray or tensor of factor values (for each element of the domain)

Note: values may be a flattened 1d array or an ndarray with the same shape as the domain
"""
if (type(values) == np.ndarray):
values = torch.tensor(values, dtype=torch.float32, device=Factor.device)
assert (domain.size() == values.nelement()), 'domain size does not match values size'
assert ((len(values.shape) == 1) or (values.shape == domain.shape)), 'invalid shape for values array'
self.domain = domain
self.values = values.reshape(domain.shape).to(Factor.device)
@staticmethod
def zeros(domain):
return Factor(domain, torch.zeros(domain.shape, device=Factor.device))
@staticmethod
def ones(domain):
return Factor(domain, torch.ones(domain.shape, device=Factor.device))
@staticmethod
def random(domain):
return Factor(domain, torch.rand(domain.shape, device=Factor.device))
@staticmethod
def uniform(domain):
return (Factor.ones(domain) / domain.size())
@staticmethod
def active(domain, structural_zeros):
" create a factor that is 0 everywhere except in positions present in \n 'structural_zeros', where it is -infinity\n\n :param: domain: the domain of this factor\n :param: structural_zeros: a list of values that are not possible\n "
idx = tuple(np.array(structural_zeros).T)
vals = torch.zeros(domain.shape, device=Factor.device)
vals[idx] = (- np.inf)
return Factor(domain, vals)
def expand(self, domain):
assert domain.contains(self.domain), 'expanded domain must contain current domain'
dims = (len(domain) - len(self.domain))
values = self.values.view((self.values.size() + tuple(([1] * dims))))
ax = domain.axes(self.domain.attrs)
ax = (ax + tuple((i for i in range(len(domain)) if (not (i in ax)))))
ax = tuple(np.argsort(ax))
values = values.permute(ax)
values = values.expand(domain.shape)
return Factor(domain, values)
def transpose(self, attrs):
assert (set(attrs) == set(self.domain.attrs)), 'attrs must be same as domain attributes'
newdom = self.domain.project(attrs)
ax = newdom.axes(self.domain.attrs)
ax = tuple(np.argsort(ax))
values = self.values.permute(ax)
return Factor(newdom, values)
def project(self, attrs, agg='sum'):
"""Project the factor onto a list of attributes (in order),
using either sum or logsumexp to aggregate along other attributes
"""
assert (agg in ['sum', 'logsumexp']), 'agg must be sum or logsumexp'
marginalized = self.domain.marginalize(attrs)
if (agg == 'sum'):
ans = self.sum(marginalized.attrs)
elif (agg == 'logsumexp'):
ans = self.logsumexp(marginalized.attrs)
return ans.transpose(attrs)
def sum(self, attrs=None):
if (attrs is None):
return float(self.values.sum())
elif (attrs == tuple()):
return self
axes = self.domain.axes(attrs)
values = self.values.sum(dim=axes)
newdom = self.domain.marginalize(attrs)
return Factor(newdom, values)
def logsumexp(self, attrs=None):
if (attrs is None):
return float(self.values.logsumexp(dim=tuple(range(len(self.values.shape)))))
elif (attrs == tuple()):
return self
axes = self.domain.axes(attrs)
values = self.values.logsumexp(dim=axes)
newdom = self.domain.marginalize(attrs)
return Factor(newdom, values)
def logaddexp(self, other):
raise NotImplementedError
def max(self, attrs=None):
if (attrs is None):
return float(self.values.max())
raise NotImplementedError
def condition(self, evidence):
"""evidence is a dictionary where
keys are attributes, and
values are elements of the domain for that attribute
"""
slices = [(evidence[a] if (a in evidence) else slice(None)) for a in self.domain]
newdom = self.domain.marginalize(evidence.keys())
values = self.values[tuple(slices)]
return Factor(newdom, values)
def copy(self, out=None):
if (out is None):
return Factor(self.domain, self.values.clone())
np.copyto(out.values, self.values)
return out
def __mul__(self, other):
if np.isscalar(other):
return Factor(self.domain, (other * self.values))
newdom = self.domain.merge(other.domain)
factor1 = self.expand(newdom)
factor2 = other.expand(newdom)
return Factor(newdom, (factor1.values * factor2.values))
def __add__(self, other):
if np.isscalar(other):
return Factor(self.domain, (other + self.values))
newdom = self.domain.merge(other.domain)
factor1 = self.expand(newdom)
factor2 = other.expand(newdom)
return Factor(newdom, (factor1.values + factor2.values))
def __iadd__(self, other):
if np.isscalar(other):
self.values += other
return self
factor2 = other.expand(self.domain)
self.values += factor2.values
return self
def __imul__(self, other):
if np.isscalar(other):
self.values *= other
return self
factor2 = other.expand(self.domain)
self.values *= factor2.values
return self
def __radd__(self, other):
return self.__add__(other)
def __rmul__(self, other):
return self.__mul__(other)
def __sub__(self, other):
if np.isscalar(other):
return Factor(self.domain, (self.values - other))
zero = torch.tensor(0.0, device=Factor.device)
inf = torch.tensor(np.inf, device=Factor.device)
values = torch.where((other.values == (- inf)), zero, (- other.values))
other = Factor(other.domain, values)
return (self + other)
def __truediv__(self, other):
if np.isscalar(other):
return (self * (1.0 / other))
tmp = other.expand(self.domain)
vals = torch.div(self.values, tmp.values)
vals[(tmp.values <= 0)] = 0.0
return Factor(self.domain, vals)
def exp(self, out=None):
if (out is None):
return Factor(self.domain, self.values.exp())
torch.exp(self.values, out=out.values)
return out
def log(self, out=None):
if (out is None):
return Factor(self.domain, torch.log((self.values + 1e-100)))
torch.log(self.values, out=out.values)
return out
def datavector(self, flatten=True):
' Materialize the data vector as a numpy array '
ans = self.values.to('cpu').numpy()
return (ans.flatten() if flatten else ans)
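# Usage sketch (illustrative): basic Factor algebra on a toy domain. Assumes
# the Domain class from mbi; shapes are made up.
if __name__ == '__main__':
    from mbi import Domain
    dom = Domain(['a', 'b'], [2, 3])
    f = Factor.random(dom)
    g = Factor.uniform(dom)
    h = (f * g) + 1.0                   # elementwise over the merged domain
    print(h.project(('a',)).datavector())
    print(f.logsumexp())                # scalar log-sum-exp over all axes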
|
import unittest

from mbi import Dataset, Domain  # inferred from usage


class TestDataset(unittest.TestCase):
def setUp(self):
attrs = ['a', 'b', 'c', 'd']
shape = [3, 4, 5, 6]
domain = Domain(attrs, shape)
self.data = Dataset.synthetic(domain, 100)
def test_project(self):
proj = self.data.project(['a', 'b'])
ans = Domain(['a', 'b'], [3, 4])
self.assertEqual(proj.domain, ans)
proj = self.data.project(('a', 'b'))
self.assertEqual(proj.domain, ans)
proj = self.data.project('c')
self.assertEqual(proj.domain, Domain(['c'], [5]))
def test_datavector(self):
vec = self.data.datavector()
self.assertEqual(vec.size, (((3 * 4) * 5) * 6))
|
import unittest

from mbi import Domain  # inferred from usage


class TestDomain(unittest.TestCase):
def setUp(self):
attrs = ['a', 'b', 'c', 'd']
shape = [10, 20, 30, 40]
self.domain = Domain(attrs, shape)
def test_eq(self):
attrs = ['a', 'b', 'c', 'd']
shape = [10, 20, 30, 40]
ans = Domain(attrs, shape)
self.assertEqual(self.domain, ans)
attrs = ['b', 'a', 'c', 'd']
ans = Domain(attrs, shape)
self.assertNotEqual(self.domain, ans)
def test_project(self):
ans = Domain(['a', 'b'], [10, 20])
res = self.domain.project(['a', 'b'])
self.assertEqual(ans, res)
ans = Domain(['c', 'b'], [30, 20])
res = self.domain.project(['c', 'b'])
self.assertEqual(ans, res)
def test_marginalize(self):
ans = Domain(['a', 'b'], [10, 20])
res = self.domain.marginalize(['c', 'd'])
self.assertEqual(ans, res)
res = self.domain.marginalize(['c', 'd', 'e'])
self.assertEqual(ans, res)
def test_axes(self):
ans = (1, 3)
res = self.domain.axes(['b', 'd'])
self.assertEqual(ans, res)
def test_transpose(self):
ans = Domain(['b', 'd', 'a', 'c'], [20, 40, 10, 30])
res = self.domain.transpose(['b', 'd', 'a', 'c'])
self.assertEqual(ans, res)
def test_merge(self):
ans = Domain(['a', 'b', 'c', 'd', 'e', 'f'], [10, 20, 30, 40, 50, 60])
new = Domain(['b', 'd', 'e', 'f'], [20, 40, 50, 60])
res = self.domain.merge(new)
self.assertEqual(ans, res)
def test_contains(self):
new = Domain(['b', 'd'], [20, 40])
self.assertTrue(self.domain.contains(new))
new = Domain(['b', 'e'], [20, 50])
self.assertFalse(self.domain.contains(new))
def test_iter(self):
self.assertEqual(len(self.domain), 4)
for (a, b, c) in zip(self.domain, ['a', 'b', 'c', 'd'], [10, 20, 30, 40]):
self.assertEqual(a, b)
self.assertEqual(self.domain[a], c)
|
import unittest

import numpy as np

from mbi import Domain, Factor  # inferred from usage


class TestFactor(unittest.TestCase):
def setUp(self):
attrs = ['a', 'b', 'c']
shape = [2, 3, 4]
domain = Domain(attrs, shape)
values = np.random.rand(*shape)
self.factor = Factor(domain, values)
def test_expand(self):
domain = Domain(['a', 'b', 'c', 'd'], [2, 3, 4, 5])
res = self.factor.expand(domain)
self.assertEqual(res.domain, domain)
self.assertEqual(res.values.shape, domain.shape)
res = (res.sum(['d']) * 0.2)
self.assertTrue(np.allclose(res.values, self.factor.values))
def test_transpose(self):
attrs = ['b', 'c', 'a']
tr = self.factor.transpose(attrs)
ans = Domain(attrs, [3, 4, 2])
self.assertEqual(tr.domain, ans)
def test_project(self):
res = self.factor.project(['c', 'a'], agg='sum')
ans = Domain(['c', 'a'], [4, 2])
self.assertEqual(res.domain, ans)
self.assertEqual(res.values.shape, (4, 2))
res = self.factor.project(['c', 'a'], agg='logsumexp')
self.assertEqual(res.domain, ans)
self.assertEqual(res.values.shape, (4, 2))
self.factor.project('a')
def test_sum(self):
res = self.factor.sum(['a', 'b'])
self.assertEqual(res.domain, Domain(['c'], [4]))
self.assertTrue(np.allclose(res.values, self.factor.values.sum(axis=(0, 1))))
def test_logsumexp(self):
res = self.factor.logsumexp(['a', 'c'])
values = self.factor.values
ans = np.log(np.sum(np.exp(values), axis=(0, 2)))
self.assertEqual(res.domain, Domain(['b'], [3]))
self.assertTrue(np.allclose(res.values, ans))
def test_binary(self):
dom = Domain(['b', 'd', 'e'], [3, 5, 6])
vals = np.random.rand(3, 5, 6)
factor = Factor(dom, vals)
res = (self.factor * factor)
ans = Domain(['a', 'b', 'c', 'd', 'e'], [2, 3, 4, 5, 6])
self.assertEqual(res.domain, ans)
res = (self.factor + factor)
self.assertEqual(res.domain, ans)
res = (self.factor * 2.0)
self.assertEqual(res.domain, self.factor.domain)
res = (self.factor + 2.0)
self.assertEqual(res.domain, self.factor.domain)
res = (self.factor - 2.0)
self.assertEqual(res.domain, self.factor.domain)
res = self.factor.exp().log()
self.assertEqual(res.domain, self.factor.domain)
self.assertTrue(np.allclose(res.values, self.factor.values))
|
import unittest

import numpy as np

from mbi import CliqueVector, Domain, Factor, GraphicalModel  # inferred from usage


class TestGraphicalModel(unittest.TestCase):
def setUp(self):
attrs = ['a', 'b', 'c', 'd']
shape = [2, 3, 4, 5]
domain = Domain(attrs, shape)
cliques = [('a', 'b'), ('b', 'c'), ('c', 'd')]
self.model = GraphicalModel(domain, cliques)
zeros = {cl: Factor.zeros(domain.project(cl)) for cl in self.model.cliques}
self.model.potentials = CliqueVector(zeros)
def test_datavector(self):
x = self.model.datavector()
ans = (np.ones((((2 * 3) * 4) * 5)) / (((2 * 3) * 4) * 5))
self.assertTrue(np.allclose(x, ans))
def test_project(self):
model = self.model.project(['d', 'a'])
x = model.datavector()
ans = (np.ones((2 * 5)) / 10.0)
self.assertEqual(x.size, 10)
self.assertTrue(np.allclose(x, ans))
model = self.model
pot = {cl: Factor.random(model.domain.project(cl)) for cl in model.cliques}
model.potentials = CliqueVector(pot)
x = model.datavector(flatten=False)
y0 = x.sum(axis=(2, 3)).flatten()
y1 = model.project(['a', 'b']).datavector()
self.assertEqual(y0.size, y1.size)
self.assertTrue(np.allclose(y0, y1))
x = model.project('a').datavector()
def test_krondot(self):
model = self.model
pot = {cl: Factor.random(model.domain.project(cl)) for cl in model.cliques}
model.potentials = CliqueVector(pot)
A = np.ones((1, 2))
B = np.eye(3)
C = np.ones((1, 4))
D = np.eye(5)
res = model.krondot([A, B, C, D])
x = model.datavector(flatten=False)
ans = x.sum(axis=(0, 2), keepdims=True)
self.assertEqual(res.shape, ans.shape)
self.assertTrue(np.allclose(res, ans))
def test_calculate_many_marginals(self):
proj = [[], ['a'], ['b'], ['c'], ['d'], ['a', 'b'], ['a', 'c'], ['a', 'd'], ['b', 'c'], ['b', 'd'], ['c', 'd'], ['a', 'b', 'c'], ['a', 'b', 'd'], ['a', 'c', 'd'], ['b', 'c', 'd'], ['a', 'b', 'c', 'd']]
proj = [tuple(p) for p in proj]
model = self.model
model.total = 10.0
pot = {cl: Factor.random(model.domain.project(cl)) for cl in model.cliques}
model.potentials = CliqueVector(pot)
results = model.calculate_many_marginals(proj)
for pr in proj:
ans = model.project(pr).values
close = np.allclose(results[pr].values, ans)
print(pr, close, results[pr].values, ans)
self.assertTrue(close)
def test_belief_prop(self):
pot = self.model.potentials
self.model.total = 10
mu = self.model.belief_propagation(pot)
for key in mu:
ans = (self.model.total / np.prod(mu[key].domain.shape))
self.assertTrue(np.allclose(mu[key].values, ans))
pot = {cl: Factor.random(pot[cl].domain) for cl in pot}
mu = self.model.belief_propagation(pot)
logp = sum(pot.values())
logp -= logp.logsumexp()
dist = (logp.exp() * self.model.total)
for key in mu:
ans = dist.project(key).values
res = mu[key].values
self.assertTrue(np.allclose(ans, res))
def test_synthetic_data(self):
model = self.model
sy = model.synthetic_data()
self.assertTrue(True)
|
import unittest

import numpy as np

from mbi import CliqueVector, Domain, FactoredInference  # inferred from usage


class TestInference(unittest.TestCase):
def setUp(self):
attrs = ['a', 'b', 'c', 'd', 'e']
shape = [2, 3, 4, 5, 6]
self.domain = Domain(attrs, shape)
self.measurements = []
for i in range(4):
I = np.eye(shape[i])
y = np.random.rand(shape[i])
y /= y.sum()
self.measurements.append((I, y, 1.0, attrs[i]))
self.engine = FactoredInference(self.domain, backend='numpy', log=True, iters=100, warm_start=True)
def test_estimate(self):
self.engine.estimate(self.measurements, 1.0)
self.assertEqual(self.engine.model.total, 1.0)
def test_mirror_descent(self):
loss = self.engine.mirror_descent(self.measurements, 1.0)
self.assertEqual(self.engine.model.total, 1.0)
self.assertTrue((loss <= 0.0001))
def test_dual_averaging(self):
loss = self.engine.dual_averaging(self.measurements, 1.0)
self.assertEqual(self.engine.model.total, 1.0)
def test_interior_gradient(self):
loss = self.engine.interior_gradient(self.measurements, 1.0)
self.assertEqual(self.engine.model.total, 1.0)
def test_warm_start(self):
self.engine.estimate(self.measurements, 1.0)
new = (np.eye((2 * 3)), np.random.rand(6), 1.0, ('a', 'b'))
self.engine.estimate((self.measurements + [new]), 1.0)
def test_lipschitz(self):
self.engine._setup(self.measurements, None)
lip = self.engine._lipschitz(self.measurements)
def rand():
ans = {}
for cl in self.engine.model.cliques:
ans[cl] = self.engine.Factor.random(self.engine.domain.project(cl))
return CliqueVector(ans)
for _ in range(100):
x = rand()
y = rand()
(_, gx) = self.engine._marginal_loss(x)
(_, gy) = self.engine._marginal_loss(y)
A = (gx - gy).dot((gx - gy))
B = (x - y).dot((x - y))
ratio = np.sqrt((A / B))
self.assertTrue((ratio <= lip))
|
import unittest

from mbi import Domain  # inferred from usage
from mbi.junction_tree import JunctionTree  # module path inferred from usage


class TestJunctionTree(unittest.TestCase):
def setUp(self):
attrs = ['a', 'b', 'c', 'd']
shape = [10, 20, 30, 40]
domain = Domain(attrs, shape)
cliques = [('a', 'b'), ('b', 'c'), ('c', 'd')]
self.tree = JunctionTree(domain, cliques)
def test_maximal_cliques(self):
ans = [set(x) for x in [('a', 'b'), ('b', 'c'), ('c', 'd')]]
res = self.tree.maximal_cliques()
for cl in res:
self.assertTrue((set(cl) in ans))
def test_mp_order(self):
order = self.tree.mp_order()
self.assertEqual(len(order), 4)
print(order)
def test_separator_axes(self):
res = self.tree.separator_axes()
ans = {'b', 'c'}
res = set.union(*map(set, res.values()))
self.assertEqual(res, ans)
def test_neighbors(self):
res = self.tree.neighbors()
|
import unittest

import numpy as np

from mbi import Domain  # inferred from usage

# the skip flag is referenced below but its definition was elided; the
# standard pattern (skip when PyTorch is unavailable) is reconstructed here
try:
    import torch
    from mbi.torch_factor import Factor
    skip = False
except ImportError:
    skip = True


class TestFactor(unittest.TestCase):
def setUp(self):
if skip:
raise unittest.SkipTest('PyTorch not installed')
attrs = ['a', 'b', 'c']
shape = [2, 3, 4]
domain = Domain(attrs, shape)
values = torch.rand(*shape)
self.factor = Factor(domain, values)
def test_expand(self):
domain = Domain(['a', 'b', 'c', 'd'], [2, 3, 4, 5])
res = self.factor.expand(domain)
self.assertEqual(res.domain, domain)
self.assertEqual(res.values.shape, domain.shape)
res = (res.sum(['d']) * 0.2)
self.assertTrue(torch.allclose(res.values, self.factor.values))
def test_transpose(self):
attrs = ['b', 'c', 'a']
tr = self.factor.transpose(attrs)
ans = Domain(attrs, [3, 4, 2])
self.assertEqual(tr.domain, ans)
def test_project(self):
res = self.factor.project(['c', 'a'], agg='sum')
ans = Domain(['c', 'a'], [4, 2])
self.assertEqual(res.domain, ans)
self.assertEqual(res.values.shape, (4, 2))
res = self.factor.project(['c', 'a'], agg='logsumexp')
self.assertEqual(res.domain, ans)
self.assertEqual(res.values.shape, (4, 2))
def test_sum(self):
res = self.factor.sum(['a', 'b'])
self.assertEqual(res.domain, Domain(['c'], [4]))
self.assertTrue(torch.allclose(res.values, self.factor.values.sum(dim=(0, 1))))
def test_logsumexp(self):
res = self.factor.logsumexp(['a', 'c'])
values = self.factor.values
ans = torch.log(torch.sum(torch.exp(values), dim=(0, 2)))
self.assertEqual(res.domain, Domain(['b'], [3]))
self.assertTrue(torch.allclose(res.values, ans))
def test_binary(self):
dom = Domain(['b', 'd', 'e'], [3, 5, 6])
vals = torch.rand(3, 5, 6)
factor = Factor(dom, vals)
res = (self.factor * factor)
ans = Domain(['a', 'b', 'c', 'd', 'e'], [2, 3, 4, 5, 6])
self.assertEqual(res.domain, ans)
res = (self.factor + factor)
self.assertEqual(res.domain, ans)
res = (self.factor * 2.0)
self.assertEqual(res.domain, self.factor.domain)
res = (self.factor + 2.0)
self.assertEqual(res.domain, self.factor.domain)
res = (self.factor - 2.0)
self.assertEqual(res.domain, self.factor.domain)
res = self.factor.exp().log()
self.assertEqual(res.domain, self.factor.domain)
self.assertTrue(np.allclose(res.datavector(), self.factor.datavector()))
|
import unittest

import test_inference  # inferred from usage
from mbi import FactoredInference  # inferred from usage

# the skip flag is referenced below but its definition was elided;
# reconstructed as: skip when PyTorch is unavailable
try:
    import torch
    skip = False
except ImportError:
    skip = True


class TestTorch(test_inference.TestInference):
def setUp(self):
if skip:
raise unittest.SkipTest('PyTorch not installed')
test_inference.TestInference.setUp(self)
self.engine = FactoredInference(self.domain, backend='torch', log=True)
|
import torch
import torch.nn as nn
import torch.nn.functional as F


class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if ((stride != 1) or (in_planes != (self.expansion * planes))):
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, (self.expansion * planes), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((self.expansion * planes)))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
|
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, (self.expansion * planes), kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d((self.expansion * planes))
self.shortcut = nn.Sequential()
if ((stride != 1) or (in_planes != (self.expansion * planes))):
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, (self.expansion * planes), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((self.expansion * planes)))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
|
class ResNet(nn.Module):
def __init__(self, block, num_blocks, feature_dim=512):
super(ResNet, self).__init__()
self.in_planes = 64
self.feature_dim = feature_dim
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.reshape = torch.nn.Sequential(nn.Linear((512 * block.expansion), 512, bias=False), nn.BatchNorm1d(512), nn.ReLU(inplace=True), nn.Linear(512, feature_dim, bias=True))
def _make_layer(self, block, planes, num_blocks, stride):
strides = ([stride] + ([1] * (num_blocks - 1)))
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = (planes * block.expansion)
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), (- 1))
out = self.reshape(out)
return F.normalize(out)
|
class ResNetControl(nn.Module):
def __init__(self, block, num_blocks, feature_dim=512):
super(ResNetControl, self).__init__()
self.in_planes = 64
self.feature_dim = feature_dim
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, feature_dim, num_blocks[3], stride=2)
def _make_layer(self, block, planes, num_blocks, stride):
strides = ([stride] + ([1] * (num_blocks - 1)))
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = (planes * block.expansion)
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), (- 1))
return F.normalize(out)
|
def ResNet18(feature_dim=512):
return ResNet(BasicBlock, [2, 2, 2, 2], feature_dim)
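# Shape check (illustrative): ResNet18 maps a batch of 32x32 RGB images to
# unit-norm embeddings of size feature_dim.
if __name__ == '__main__':
    net = ResNet18(feature_dim=128)
    z = net(torch.randn(8, 3, 32, 32))
    print(z.shape)        # torch.Size([8, 128])
    print(z.norm(dim=1))  # all ones, since forward ends with F.normalize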
|
def ResNet18Control(feature_dim=512):
return ResNetControl(BasicBlock, [2, 2, 2, 2], feature_dim)
|
import torch
import torch.nn as nn
import torch.nn.functional as F


class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if ((stride != 1) or (in_planes != (self.expansion * planes))):
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, (self.expansion * planes), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((self.expansion * planes)))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
|
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, (self.expansion * planes), kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d((self.expansion * planes))
self.shortcut = nn.Sequential()
if ((stride != 1) or (in_planes != (self.expansion * planes))):
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, (self.expansion * planes), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((self.expansion * planes)))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
|
class ResNetMNIST(nn.Module):
def __init__(self, block, num_blocks, feature_dim=512):
super(ResNetMNIST, self).__init__()
self.in_planes = 64
self.feature_dim = feature_dim
self.conv1 = nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, feature_dim, num_blocks[3], stride=2)
def _make_layer(self, block, planes, num_blocks, stride):
strides = ([stride] + ([1] * (num_blocks - 1)))
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = (planes * block.expansion)
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), (- 1))
return F.normalize(out)
|
def ResNet10MNIST(feature_dim=512):
return ResNetMNIST(BasicBlock, [1, 1, 1, 1], feature_dim)
|
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if ((stride != 1) or (in_planes != (self.expansion * planes))):
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, (self.expansion * planes), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((self.expansion * planes)))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
|
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, (self.expansion * planes), kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d((self.expansion * planes))
self.shortcut = nn.Sequential()
if ((stride != 1) or (in_planes != (self.expansion * planes))):
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, (self.expansion * planes), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((self.expansion * planes)))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
|
class ResNetSTL(nn.Module):
def __init__(self, block, num_blocks, feature_dim=512):
super(ResNetSTL, self).__init__()
self.in_planes = 32
self.feature_dim = feature_dim
self.conv1 = nn.Conv2d(3, 32, kernel_size=5, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 32, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, feature_dim, num_blocks[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
def _make_layer(self, block, planes, num_blocks, stride):
strides = ([stride] + ([1] * (num_blocks - 1)))
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = (planes * block.expansion)
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = F.relu(out)
out = self.maxpool(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.avgpool(out)
out = out.view(out.size(0), (- 1))
return F.normalize(out)
|
def ResNet18STL(feature_dim=128):
return ResNetSTL(BasicBlock, [2, 2, 2, 2], feature_dim)
|
class Block(nn.Module):
'Grouped convolution block.'
expansion = 2
def __init__(self, in_planes, cardinality=32, bottleneck_width=4, stride=1):
super(Block, self).__init__()
group_width = (cardinality * bottleneck_width)
self.conv1 = nn.Conv2d(in_planes, group_width, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(group_width)
self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
self.bn2 = nn.BatchNorm2d(group_width)
self.conv3 = nn.Conv2d(group_width, (self.expansion * group_width), kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d((self.expansion * group_width))
self.shortcut = nn.Sequential()
if ((stride != 1) or (in_planes != (self.expansion * group_width))):
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, (self.expansion * group_width), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((self.expansion * group_width)))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
|
class ResNeXt(nn.Module):
def __init__(self, num_blocks, cardinality, bottleneck_width, feature_dim=128):
super(ResNeXt, self).__init__()
self.cardinality = cardinality
self.bottleneck_width = bottleneck_width
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(num_blocks[0], 1)
self.layer2 = self._make_layer(num_blocks[1], 2)
self.layer3 = self._make_layer(num_blocks[2], 2)
self.reshape = torch.nn.Sequential(nn.Linear(((cardinality * bottleneck_width) * 8), 512, bias=False), nn.BatchNorm1d(512), nn.ReLU(inplace=True), nn.Linear(512, feature_dim, bias=True))
def _make_layer(self, num_blocks, stride):
strides = ([stride] + ([1] * (num_blocks - 1)))
layers = []
for stride in strides:
layers.append(Block(self.in_planes, self.cardinality, self.bottleneck_width, stride))
self.in_planes = ((Block.expansion * self.cardinality) * self.bottleneck_width)
self.bottleneck_width *= 2
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), (- 1))
out = self.reshape(out)
return F.normalize(out)
|
def ResNeXt29_2x64d(feature_dim=128):
return ResNeXt(num_blocks=[3, 3, 3], cardinality=2, bottleneck_width=64, feature_dim=feature_dim)
|
def ResNeXt29_4x64d(feature_dim=128):
return ResNeXt(num_blocks=[3, 3, 3], cardinality=4, bottleneck_width=64, feature_dim=feature_dim)
|
def ResNeXt29_8x64d(feature_dim=128):
return ResNeXt(num_blocks=[3, 3, 3], cardinality=8, bottleneck_width=64, feature_dim=feature_dim)
|
def ResNeXt29_32x4d(feature_dim=128):
return ResNeXt(num_blocks=[3, 3, 3], cardinality=32, bottleneck_width=4, feature_dim=feature_dim)
|
class VGG(nn.Module):
def __init__(self, vgg_name='VGG11', feature_dim=128):
super(VGG, self).__init__()
self.features = self._make_layers(cfg[vgg_name])
self.classifier = nn.Linear(512, 10)
self.reshape = torch.nn.Sequential(nn.Linear(512, 512, bias=False), nn.BatchNorm1d(512), nn.ReLU(inplace=True), nn.Linear(512, feature_dim, bias=True))
def forward(self, x):
out = self.features(x)
out = out.view(out.size(0), (- 1))
out = self.reshape(out)
return F.normalize(out)
def _make_layers(self, cfg):
layers = []
in_channels = 3
for x in cfg:
if (x == 'M'):
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1), nn.BatchNorm2d(x), nn.ReLU(inplace=True)]
in_channels = x
layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
return nn.Sequential(*layers)
|
def VGG11(feature_dim=128):
return VGG('VGG11', feature_dim=feature_dim)
|
class AugmentLoader():
"Dataloader that includes augmentation functionality.\n \n Parameters:\n dataset (torch.data.dataset): trainset or testset PyTorch object\n batch_size (int): the size of each batch, including augmentations\n sampler (str): choice of sampler ('balance' or 'random')\n - 'balance': samples data such that each class has the same number of samples\n - 'random': samples data randomly\n transforms (torchvision.transforms): Transformations applied to each augmentation\n num_aug (int): number of augmentation for each image in a batch\n shuffle (bool): shuffle data\n \n Attributes:\n dataset (torch.data.dataset): trainset or testset PyTorch object\n batch_size (int): the size of each batch, including augmentations\n transforms (torchvision.transforms): Transformations applied to each augmentation\n num_aug (int): number of augmentation for each image in a batch\n shuffle (bool): shuffle data\n size (int): number of samples in dataset\n sample_indices (np.ndarray): indices for sampling\n\n Notes:\n - number of augmetations and batch size are used to calculate the number of original \n images used in a batch\n - if num_aug = 0, then this dataloader is the same as an PyTorch dataloader, with \n the number of original images equal to the batch size, and each image is transformed \n using transforms from object argument.\n - Auygmentloder first samples from the dataset num_img of images, then apply augmentation \n to all images. The first augmentation is always the identity transform. \n\n "
def __init__(self, dataset, batch_size, sampler='random', transforms=transforms.ToTensor(), num_aug=0, shuffle=False):
self.dataset = dataset
self.batch_size = batch_size
self.transforms = transforms
self.sampler = sampler
self.num_aug = num_aug
self.shuffle = shuffle
def __iter__(self):
if (self.sampler == 'balance'):
sampler = BalanceSampler(self.dataset)
num_img = (self.batch_size // self.num_aug) if self.num_aug else self.batch_size
return _Iter(self, sampler, num_img, self.num_aug)
elif (self.sampler == 'random'):
size = ((len(self.dataset.targets) // self.batch_size) * self.batch_size)
sampler = RandomSampler(self.dataset, size, shuffle=self.shuffle)
num_img = (self.batch_size // self.num_aug) if self.num_aug else self.batch_size
return _Iter(self, sampler, num_img, self.num_aug)
else:
raise NameError(f'sampler {self.sampler} not found.')
def update_labels(self, targets):
self.dataset.targets = targets
def apply_augments(self, sample):
if not self.num_aug:  # num_aug of 0 or None: a single transformed view per image
return self.transforms(sample).unsqueeze(0)
batch_imgs = [transforms.ToTensor()(sample).unsqueeze(0)]
for _ in range((self.num_aug - 1)):
transformed = self.transforms(sample)
batch_imgs.append(transformed.unsqueeze(0))
return torch.cat(batch_imgs, axis=0)
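# Hedged usage sketch: pairing AugmentLoader with CIFAR-10. The augmentation
# pipeline below is an illustrative assumption, not the repo's exact recipe.
import torchvision
from torchvision import transforms

aug = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])
trainset = torchvision.datasets.CIFAR10('./data', train=True, download=True)
loader = AugmentLoader(trainset, batch_size=40, sampler='random',
                       transforms=aug, num_aug=4, shuffle=True)
batch_imgs, batch_lbls, batch_idx = next(iter(loader))
# 40 // 4 = 10 source images, each expanded into 4 views (first view is identity)
assert batch_imgs.shape == (40, 3, 32, 32)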
|
class _Iter():
def __init__(self, loader, sampler, num_img, num_aug, size=None):
self.loader = loader
self.sampler = sampler
self.num_img = num_img
self.num_aug = num_aug
self.size = size
def __next__(self):
if self.sampler.stop():
raise StopIteration
batch_imgs = []
batch_lbls = []
batch_idx = []
(sampled_imgs, sampled_lbls) = self.sampler.sample(self.num_img)
for i in range(self.num_img):
img_augments = self.loader.apply_augments(sampled_imgs[i])
batch_imgs.append(img_augments)
batch_lbls.append(np.repeat(sampled_lbls[i], self.num_aug))
batch_idx.append(np.repeat(i, self.num_aug))
batch_imgs = torch.cat(batch_imgs, axis=0).float()
batch_lbls = torch.from_numpy(np.hstack(batch_lbls))
batch_idx = torch.from_numpy(np.hstack(batch_idx))
return (batch_imgs, batch_lbls, batch_idx)
|
class BalanceSampler():
'Samples data such that each class has the same number of samples. Performs sampling \n by first sorting the data by class, then sampling uniformly from each class with replacement.'
def __init__(self, dataset):
self.dataset = dataset
self.size = len(self.dataset.targets)
self.num_classes = (np.max(self.dataset.targets) + 1)
self.num_sampled = 0
self.sort()
def sort(self):
sorted_data = [[] for _ in range(self.num_classes)]
for (i, lbl) in enumerate(self.dataset.targets):
sorted_data[lbl].append(self.dataset[i][0])
self.sorted_data = sorted_data
self.sorted_labels = [np.repeat(i, len(sorted_data[i])) for i in range(self.num_classes)]
def sample(self, num_imgs):
num_imgs_per_class = (num_imgs // self.num_classes)
assert ((num_imgs_per_class * self.num_classes) == num_imgs), 'cannot sample uniformly'
(batch_imgs, batch_lbls) = ([], [])
for c in range(self.num_classes):
(img_c, lbl_c) = (self.sorted_data[c], self.sorted_labels[c])
sample_indices = np.random.choice(len(img_c), num_imgs_per_class)
for i in sample_indices:
batch_imgs.append(img_c[i])
batch_lbls.append(lbl_c[i])
self.increment_step(num_imgs)
return (batch_imgs, batch_lbls)
def increment_step(self, num_imgs):
self.num_sampled += num_imgs
def stop(self):
return (self.num_sampled >= self.size)
|
class RandomSampler():
'Samples data randomly. Sampler initializes sample indices when Sampler is instantiated.\n Sample indices are shuffled if shuffle option is True. Performs sampling by popping off \n first index each time.'
def __init__(self, dataset, size, shuffle=False):
self.dataset = dataset
self.size = size
self.shuffle = shuffle
self.num_sampled = 0
self.sample_indices = self.reset_index()
def reset_index(self):
if self.shuffle:
return np.random.choice(len(self.dataset.targets), self.size, replace=False).tolist()
else:
return np.arange(self.size).tolist()
def sample(self, num_img):
indices = [self.sample_indices.pop(0) for _ in range(num_img)]
(batch_imgs, batch_lbls) = ([], [])
for i in indices:
(img, lbl) = self.dataset[i]
batch_imgs.append(img)
batch_lbls.append(lbl)
self.increment_step(num_img)
return (batch_imgs, batch_lbls)
def increment_step(self, num_img):
self.num_sampled += num_img
def stop(self):
return (self.num_sampled >= self.size)
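# Sketch of the sampler contract that _Iter relies on, using a tiny stand-in
# dataset (the _ToyDataset class is hypothetical, only for illustration):
import numpy as np

class _ToyDataset:
    targets = list(range(20))
    def __getitem__(self, i):
        return (np.zeros((3, 4, 4)), self.targets[i])

sampler = RandomSampler(_ToyDataset(), size=20, shuffle=False)
imgs, lbls = sampler.sample(5)          # pops the first five indices in order
assert lbls == [0, 1, 2, 3, 4] and not sampler.stop()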
|
def svm(args, train_features, train_labels, test_features, test_labels):
svm = LinearSVC(verbose=0, random_state=10)
svm.fit(train_features, train_labels)
acc_train = svm.score(train_features, train_labels)
acc_test = svm.score(test_features, test_labels)
print('SVM: {}'.format(acc_test))
return (acc_train, acc_test)
|
def knn(args, train_features, train_labels, test_features, test_labels):
'Perform k-Nearest Neighbor classification using cosine similarity as metric.\n\n Options:\n k (int): top k features for kNN\n \n '
sim_mat = (train_features @ test_features.T)
topk = sim_mat.topk(k=args.k, dim=0)
topk_pred = train_labels[topk.indices]
test_pred = topk_pred.mode(0).values.detach()
acc = utils.compute_accuracy(test_pred.numpy(), test_labels.numpy())
print('kNN: {}'.format(acc))
return acc
|
def nearsub(args, train_features, train_labels, test_features, test_labels):
'Perform nearest subspace classification.\n \n Options:\n n_comp (int): number of components for PCA or SVD\n \n '
scores_pca = []
scores_svd = []
num_classes = (train_labels.numpy().max() + 1)
(features_sort, _) = utils.sort_dataset(train_features.numpy(), train_labels.numpy(), num_classes=num_classes, stack=False)
fd = features_sort[0].shape[1]
for j in range(num_classes):
pca = PCA(n_components=args.n_comp).fit(features_sort[j])
pca_subspace = pca.components_.T
mean = np.mean(features_sort[j], axis=0)
pca_j = ((np.eye(fd) - (pca_subspace @ pca_subspace.T)) @ (test_features.numpy() - mean).T)
score_pca_j = np.linalg.norm(pca_j, ord=2, axis=0)
svd = TruncatedSVD(n_components=args.n_comp).fit(features_sort[j])
svd_subspace = svd.components_.T
svd_j = ((np.eye(fd) - (svd_subspace @ svd_subspace.T)) @ test_features.numpy().T)
score_svd_j = np.linalg.norm(svd_j, ord=2, axis=0)
scores_pca.append(score_pca_j)
scores_svd.append(score_svd_j)
test_predict_pca = np.argmin(scores_pca, axis=0)
test_predict_svd = np.argmin(scores_svd, axis=0)
acc_pca = utils.compute_accuracy(test_predict_pca, test_labels.numpy())
acc_svd = utils.compute_accuracy(test_predict_svd, test_labels.numpy())
print('PCA: {}'.format(acc_pca))
print('SVD: {}'.format(acc_svd))
return acc_svd
|
def kmeans(args, train_features, train_labels):
'Perform KMeans clustering. \n \n Options:\n n (int): number of clusters used in KMeans.\n\n '
return cluster.kmeans(args, train_features, train_labels)
|
def ensc(args, train_features, train_labels):
'Perform Elastic Net Subspace Clustering.\n \n Options:\n gam (float): gamma parameter in EnSC\n tau (float): tau parameter in EnSC\n\n '
return cluster.ensc(args, train_features, train_labels)
|
def make_tarfile(output_filename, source_dir):
with tarfile.open(output_filename, 'w:gz') as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
|
def gen_testloss(args):
params = utils.load_params(args.model_dir)
ckpt_dir = os.path.join(args.model_dir, 'checkpoints')
ckpt_paths = [int(e[11:(- 3)]) for e in os.listdir(ckpt_dir) if (e[(- 3):] == '.pt')]
ckpt_paths = np.sort(ckpt_paths)
headers = ['epoch', 'step', 'loss', 'discrimn_loss_e', 'compress_loss_e', 'discrimn_loss_t', 'compress_loss_t']
csv_path = utils.create_csv(args.model_dir, 'losses_test.csv', headers)
print('writing to:', csv_path)
test_transforms = tf.load_transforms('test')
testset = tf.load_trainset(params['data'], test_transforms, train=False)
testloader = DataLoader(testset, batch_size=params['bs'], shuffle=False, num_workers=4)
criterion = MaximalCodingRateReduction(gam1=params['gam1'], gam2=params['gam2'], eps=params['eps'])
for (epoch, ckpt_path) in enumerate(ckpt_paths):
(net, epoch) = tf.load_checkpoint(args.model_dir, epoch=ckpt_path, eval_=True)
for (step, (batch_imgs, batch_lbls)) in enumerate(testloader):
features = net(batch_imgs.cuda())
(loss, loss_empi, loss_theo) = criterion(features, batch_lbls, num_classes=testset.num_classes)
utils.save_state(args.model_dir, epoch, step, loss.item(), *loss_empi, *loss_theo, filename='losses_test.csv')
print('Finished generating test loss.')
|
def gen_training_accuracy(args):
params = utils.load_params(args.model_dir)
ckpt_dir = os.path.join(args.model_dir, 'checkpoints')
ckpt_paths = [int(e[11:(- 3)]) for e in os.listdir(ckpt_dir) if (e[(- 3):] == '.pt')]
ckpt_paths = np.sort(ckpt_paths)
headers = ['epoch', 'acc_train', 'acc_test']
csv_path = utils.create_csv(args.model_dir, 'accuracy.csv', headers)
for (epoch, ckpt_path) in enumerate(ckpt_paths):
if ((epoch % 5) != 0):
continue
(net, epoch) = tf.load_checkpoint(args.model_dir, epoch=ckpt_path, eval_=True)
train_transforms = tf.load_transforms('test')
trainset = tf.load_trainset(params['data'], train_transforms, train=True)
trainloader = DataLoader(trainset, batch_size=500, num_workers=4)
(train_features, train_labels) = tf.get_features(net, trainloader, verbose=False)
test_transforms = tf.load_transforms('test')
testset = tf.load_trainset(params['data'], test_transforms, train=False)
testloader = DataLoader(testset, batch_size=500, num_workers=4)
(test_features, test_labels) = tf.get_features(net, testloader, verbose=False)
(acc_train, acc_test) = svm(args, train_features, train_labels, test_features, test_labels)
utils.save_state(args.model_dir, epoch, acc_train, acc_test, filename='accuracy.csv')
print('Finished generating accuracy.')
|
class MaximalCodingRateReduction(torch.nn.Module):
def __init__(self, gam1=1.0, gam2=1.0, eps=0.01):
super(MaximalCodingRateReduction, self).__init__()
self.gam1 = gam1
self.gam2 = gam2
self.eps = eps
def compute_discrimn_loss_empirical(self, W):
'Empirical Discriminative Loss.'
(p, m) = W.shape
I = torch.eye(p).cuda()
scalar = (p / (m * self.eps))
logdet = torch.logdet((I + ((self.gam1 * scalar) * W.matmul(W.T))))
return (logdet / 2.0)
def compute_compress_loss_empirical(self, W, Pi):
'Empirical Compressive Loss.'
(p, m) = W.shape
(k, _, _) = Pi.shape
I = torch.eye(p).cuda()
compress_loss = 0.0
for j in range(k):
trPi = (torch.trace(Pi[j]) + 1e-08)
scalar = (p / (trPi * self.eps))
log_det = torch.logdet((I + (scalar * W.matmul(Pi[j]).matmul(W.T))))
compress_loss += ((log_det * trPi) / m)
return (compress_loss / 2.0)
def compute_discrimn_loss_theoretical(self, W):
'Theoretical Discriminative Loss.'
(p, m) = W.shape
I = torch.eye(p).cuda()
scalar = (p / (m * self.eps))
logdet = torch.logdet((I + (scalar * W.matmul(W.T))))
return (logdet / 2.0)
def compute_compress_loss_theoretical(self, W, Pi):
'Theoretical Compressive Loss.'
(p, m) = W.shape
(k, _, _) = Pi.shape
I = torch.eye(p).cuda()
compress_loss = 0.0
for j in range(k):
trPi = (torch.trace(Pi[j]) + 1e-08)
scalar = (p / (trPi * self.eps))
log_det = torch.logdet((I + (scalar * W.matmul(Pi[j]).matmul(W.T))))
compress_loss += ((trPi / (2 * m)) * log_det)
return compress_loss
def forward(self, X, Y, num_classes=None):
if (num_classes is None):
num_classes = (Y.max() + 1)
W = X.T
Pi = tf.label_to_membership(Y.numpy(), num_classes)
Pi = torch.tensor(Pi, dtype=torch.float32).cuda()
discrimn_loss_empi = self.compute_discrimn_loss_empirical(W)
compress_loss_empi = self.compute_compress_loss_empirical(W, Pi)
discrimn_loss_theo = self.compute_discrimn_loss_theoretical(W)
compress_loss_theo = self.compute_compress_loss_theoretical(W, Pi)
total_loss_empi = ((self.gam2 * (- discrimn_loss_empi)) + compress_loss_empi)
return (total_loss_empi, [discrimn_loss_empi.item(), compress_loss_empi.item()], [discrimn_loss_theo.item(), compress_loss_theo.item()])
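# Usage sketch for the MCR^2 objective above. The loss internals call .cuda()
# and the repo helper tf.label_to_membership, so a GPU and that helper are assumed.
import torch
import torch.nn.functional as F

criterion = MaximalCodingRateReduction(gam1=1.0, gam2=1.0, eps=0.5)
Z = F.normalize(torch.randn(128, 32)).cuda().requires_grad_()   # (samples, feature_dim)
y = torch.randint(0, 10, (128,))                                # labels stay on CPU for .numpy()
loss, empi, theo = criterion(Z, y, num_classes=10)
loss.backward()                                                 # gradients flow through the empirical terms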
|
def sort_dataset(data, labels, num_classes=10, stack=False):
'Sort dataset based on classes.\n \n Parameters:\n data (np.ndarray): data array\n labels (np.ndarray): one dimensional array of class labels\n num_classes (int): number of classes\n stack (bool): combine sorted data into one numpy array\n \n Return:\n sorted data (np.ndarray), sorted_labels (np.ndarray)\n\n '
sorted_data = [[] for _ in range(num_classes)]
for (i, lbl) in enumerate(labels):
sorted_data[lbl].append(data[i])
sorted_data = [np.stack(class_data) for class_data in sorted_data]
sorted_labels = [np.repeat(i, len(sorted_data[i])) for i in range(num_classes)]
if stack:
sorted_data = np.vstack(sorted_data)
sorted_labels = np.hstack(sorted_labels)
return (sorted_data, sorted_labels)
|
def init_pipeline(model_dir, headers=None):
'Initialize folder and .csv logger.'
os.makedirs(model_dir)
os.makedirs(os.path.join(model_dir, 'checkpoints'))
os.makedirs(os.path.join(model_dir, 'figures'))
os.makedirs(os.path.join(model_dir, 'plabels'))
if (headers is None):
headers = ['epoch', 'step', 'loss', 'discrimn_loss_e', 'compress_loss_e', 'discrimn_loss_t', 'compress_loss_t']
create_csv(model_dir, 'losses.csv', headers)
print('project dir: {}'.format(model_dir))
|
def create_csv(model_dir, filename, headers):
'Create .csv file with filename in model_dir, with headers as the first line \n of the csv. '
csv_path = os.path.join(model_dir, filename)
if os.path.exists(csv_path):
os.remove(csv_path)
with open(csv_path, 'w+') as f:
f.write(','.join(map(str, headers)))
return csv_path
|
def save_params(model_dir, params):
'Save params to a .json file. Params is a dictionary of parameters.'
path = os.path.join(model_dir, 'params.json')
with open(path, 'w') as f:
json.dump(params, f, indent=2, sort_keys=True)
|
def update_params(model_dir, pretrain_dir):
'Updates architecture and feature dimension from pretrain directory \n to new directory. '
params = load_params(model_dir)
old_params = load_params(pretrain_dir)
params['arch'] = old_params['arch']
params['fd'] = old_params['fd']
save_params(model_dir, params)
|
def load_params(model_dir):
'Load params.json file in model directory and return dictionary.'
_path = os.path.join(model_dir, 'params.json')
with open(_path, 'r') as f:
_dict = json.load(f)
return _dict
|
def save_state(model_dir, *entries, filename='losses.csv'):
'Save entries to csv. Entries is list of numbers. '
csv_path = os.path.join(model_dir, filename)
assert os.path.exists(csv_path), 'CSV file is missing in project directory.'
with open(csv_path, 'a') as f:
f.write(('\n' + ','.join(map(str, entries))))
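# Round-trip sketch for the CSV helpers above (the temp directory is illustrative):
import tempfile

model_dir = tempfile.mkdtemp()
create_csv(model_dir, 'losses.csv', ['epoch', 'step', 'loss'])
save_state(model_dir, 0, 10, 0.731)    # appends "0,10,0.731" as a new row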
|
def save_ckpt(model_dir, net, epoch):
'Save PyTorch checkpoint to ./checkpoints/ directory in model directory. '
torch.save(net.state_dict(), os.path.join(model_dir, 'checkpoints', 'model-epoch{}.pt'.format(epoch)))
|
def save_labels(model_dir, labels, epoch):
'Save labels of a certain epoch to directory. '
path = os.path.join(model_dir, 'plabels', f'epoch{epoch}.npy')
np.save(path, labels)
|
def compute_accuracy(y_pred, y_true):
'Compute accuracy by counting correct classification. '
assert (y_pred.shape == y_true.shape)
return (1 - (np.count_nonzero((y_pred - y_true)) / y_true.size))
|
def clustering_accuracy(labels_true, labels_pred):
'Compute clustering accuracy.'
# note: the old sklearn.metrics.cluster.supervised module is private in recent
# scikit-learn; contingency_matrix is the public equivalent of its helpers
from sklearn.metrics.cluster import contingency_matrix
from scipy.optimize import linear_sum_assignment
value = contingency_matrix(labels_true, labels_pred)
(r, c) = linear_sum_assignment((- value))
return (value[(r, c)].sum() / len(labels_true))
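# Worked example: the Hungarian matching above makes accuracy invariant to a
# relabeling of the predicted clusters.
import numpy as np

labels_true = np.array([0, 0, 1, 1, 2, 2])
labels_pred = np.array([2, 2, 0, 0, 1, 1])   # same partition, permuted labels
assert clustering_accuracy(labels_true, labels_pred) == 1.0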
|
def svm(train_features, train_labels, test_features, test_labels):
svm = LinearSVC(verbose=0, random_state=10)
svm.fit(train_features, train_labels)
acc_train = svm.score(train_features, train_labels)
acc_test = svm.score(test_features, test_labels)
print('SVM: {}'.format(acc_test))
return (acc_train, acc_test)
|
def knn(train_features, train_labels, test_features, test_labels, k=5):
'Perform k-Nearest Neighbor classification using cosine similarity as metric.\n Options:\n k (int): top k features for kNN\n \n '
sim_mat = (train_features @ test_features.T)
topk = torch.from_numpy(sim_mat).topk(k=k, dim=0)
topk_pred = train_labels[topk.indices]
test_pred = torch.tensor(topk_pred).mode(0).values.detach()
acc = compute_accuracy(test_pred.numpy(), test_labels)
print('kNN: {}'.format(acc))
return acc
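# Toy check of the cosine-kNN routine above: with unit-norm features the inner
# product is cosine similarity, so each test point votes for its duplicate.
import numpy as np
import torch

train_f = np.eye(4, dtype=np.float32)            # four orthonormal feature vectors
train_y = torch.tensor([0, 1, 2, 3])
knn(train_f, train_y, train_f.copy(), np.array([0, 1, 2, 3]), k=1)   # prints kNN: 1.0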
|
def nearsub(train_features, train_labels, test_features, test_labels, n_comp=10):
'Perform nearest subspace classification.\n \n Options:\n n_comp (int): number of components for PCA or SVD\n \n '
scores_svd = []
classes = np.unique(test_labels)
(features_sort, _) = utils.sort_dataset(train_features, train_labels, classes=classes, stack=False)
fd = features_sort[0].shape[1]
if (n_comp >= fd):
n_comp = (fd - 1)
for j in np.arange(len(classes)):
svd = TruncatedSVD(n_components=n_comp).fit(features_sort[j])
svd_subspace = svd.components_.T
svd_j = ((np.eye(fd) - (svd_subspace @ svd_subspace.T)) @ test_features.T)
score_svd_j = np.linalg.norm(svd_j, ord=2, axis=0)
scores_svd.append(score_svd_j)
test_predict_svd = np.argmin(scores_svd, axis=0)
acc_svd = compute_accuracy(classes[test_predict_svd], test_labels)
print('SVD: {}'.format(acc_svd))
return acc_svd
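# Hedged demo of the subspace-residual scoring in nearsub: features generated
# from two random low-dimensional subspaces should be separated almost perfectly.
# Assumes utils.sort_dataset is the classes-based variant defined later in this file.
import numpy as np

rng = np.random.RandomState(0)
B0, B1 = rng.randn(16, 3), rng.randn(16, 3)                  # two 3-dim subspaces in R^16
train_f = np.vstack([rng.randn(50, 3) @ B0.T, rng.randn(50, 3) @ B1.T])
train_y = np.repeat([0, 1], 50)
test_f = np.vstack([rng.randn(20, 3) @ B0.T, rng.randn(20, 3) @ B1.T])
test_y = np.repeat([0, 1], 20)
nearsub(train_f, train_y, test_f, test_y, n_comp=3)          # prints SVD accuracy near 1.0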
|
def nearsub_pca(train_features, train_labels, test_features, test_labels, n_comp=10):
'Perform nearest subspace classification.\n \n Options:\n n_comp (int): number of components for PCA or SVD\n \n '
scores_pca = []
classes = np.unique(test_labels)
(features_sort, _) = utils.sort_dataset(train_features, train_labels, classes=classes, stack=False)
fd = features_sort[0].shape[1]
if (n_comp >= fd):
n_comp = (fd - 1)
for j in np.arange(len(classes)):
pca = PCA(n_components=n_comp).fit(features_sort[j])
pca_subspace = pca.components_.T
mean = np.mean(features_sort[j], axis=0)
pca_j = ((np.eye(fd) - (pca_subspace @ pca_subspace.T)) @ (test_features - mean).T)
score_pca_j = np.linalg.norm(pca_j, ord=2, axis=0)
scores_pca.append(score_pca_j)
test_predict_pca = np.argmin(scores_pca, axis=0)
acc_pca = compute_accuracy(classes[test_predict_pca], test_labels)
print('PCA: {}'.format(acc_pca))
return acc_pca
|
def compute_accuracy(y_pred, y_true):
'Compute accuracy by counting correct classification. '
assert (y_pred.shape == y_true.shape)
return (1 - (np.count_nonzero((y_pred - y_true)) / y_true.size))
|
def baseline(train_features, train_labels, test_features, test_labels):
test_models = {'log_l2': SGDClassifier(loss='log', max_iter=10000, random_state=42), 'SVM_linear': LinearSVC(max_iter=10000, random_state=42), 'SVM_RBF': SVC(kernel='rbf', random_state=42), 'DecisionTree': DecisionTreeClassifier(), 'RandomForest': RandomForestClassifier()}
for model_name in test_models:
test_model = test_models[model_name]
test_model.fit(train_features, train_labels)
score = test_model.score(test_features, test_labels)
print(f'{model_name}: {score}')
|
class Architecture():
def __init__(self, blocks, model_dir, num_classes, batch_size=100):
self.blocks = blocks
self.model_dir = model_dir
self.num_classes = num_classes
self.batch_size = batch_size
def __call__(self, Z, y=None):
for (b, block) in enumerate(self.blocks):
block.load_arch(self, b)
self.init_loss()
Z = block.preprocess(Z)
Z = block(Z, y)
Z = block.postprocess(Z)
return Z
def __getitem__(self, i):
return self.blocks[i]
def init_loss(self):
self.loss_dict = {'loss_total': [], 'loss_expd': [], 'loss_comp': []}
def update_loss(self, layer, loss_total, loss_expd, loss_comp):
self.loss_dict['loss_total'].append(loss_total)
self.loss_dict['loss_expd'].append(loss_expd)
self.loss_dict['loss_comp'].append(loss_comp)
print(f'layer: {layer} | loss_total: {loss_total:.5f} | loss_expd: {loss_expd:.5f} | loss_comp: {loss_comp:.5f}')
|
class Lift():
def __init__(self, kernels, stride=1, relu=True):
self.kernels = kernels
self.stride = stride
self.relu = relu
def load_arch(self, arch, block_id):
pass
def init(self, Z, y):
return self(Z)
def init_zero(self, Z):
return Z
def preprocess(self, X):
return X
def postprocess(self, X):
return X
|
class Lift1D(Lift):
def __init__(self, kernels, stride=1, relu=True):
assert (len(kernels.shape) == 3), 'kernel should have dimensions (out_channel, in_channel, kernel_size)'
super(Lift1D, self).__init__(kernels, stride, relu)
def __call__(self, Z, y=None, sgd=False):
ksize = self.kernels.shape[2]
_Z = F.pad(torch.tensor(Z).float(), (0, (ksize - 1)), 'circular')
_kernels = torch.tensor(self.kernels).float()
out = F.conv1d(_Z, _kernels, stride=self.stride)
if self.relu:
out = F.relu(out)
return out.numpy()
|
class Lift2D(Lift):
def __init__(self, kernels, stride=1, relu=True):
assert (len(kernels.shape) == 4), 'kernel should have dimensions (out_channel, in_channel, kernel_height, kernel_width)'
super(Lift2D, self).__init__(kernels, stride, relu)
def __call__(self, Z, y=None, sgd=False):
ksize = self.kernels.shape[2]
_Z = F.pad(torch.tensor(Z).float(), (0, (ksize - 1), 0, (ksize - 1)), 'circular')
_kernels = torch.tensor(self.kernels).float()
out = F.conv2d(_Z, _kernels, stride=self.stride)
if self.relu:
out = F.relu(out)
return out.numpy()
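# Sketch: applying the 2-D lift with circular padding to a batch of single-channel
# images (the random kernels are illustrative; real ones come from the repo).
import numpy as np

kernels = np.random.randn(8, 1, 3, 3)          # (out_ch, in_ch, kh, kw)
lift = Lift2D(kernels, stride=1, relu=True)
out = lift(np.random.randn(4, 1, 28, 28))
assert out.shape == (4, 8, 28, 28)             # circular padding preserves spatial size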
|
def sort_dataset(data, labels, classes, stack=False):
'Sort dataset based on classes.\n \n Parameters:\n data (np.ndarray): data array\n labels (np.ndarray): one dimensional array of class labels\n classes (int or array-like): number of classes, or explicit class labels to sort by\n stack (bool): combine sorted data into one numpy array\n \n Return:\n sorted data (np.ndarray), sorted_labels (np.ndarray)\n\n '
if isinstance(classes, int):
classes = np.arange(classes)
sorted_data = []
sorted_labels = []
for c in classes:
idx = (labels == c)
data_c = data[idx]
labels_c = labels[idx]
sorted_data.append(data_c)
sorted_labels.append(labels_c)
if stack:
sorted_data = np.vstack(sorted_data)
sorted_labels = np.hstack(sorted_labels)
return (sorted_data, sorted_labels)
|
def save_params(model_dir, params, name='params.json'):
'Save params to a .json file. Params is a dictionary of parameters.'
path = os.path.join(model_dir, name)
with open(path, 'w') as f:
json.dump(params, f, indent=2, sort_keys=True)
|
def load_params(model_dir):
'Load params.json file in model directory and return dictionary.'
_path = os.path.join(model_dir, 'params.json')
with open(_path, 'r') as f:
_dict = json.load(f)
return _dict
|
def create_csv(model_dir, filename, headers):
'Create .csv file with filename in model_dir, with headers as the first line \n of the csv. '
csv_path = os.path.join(model_dir, filename)
if os.path.exists(csv_path):
os.remove(csv_path)
with open(csv_path, 'w+') as f:
f.write(','.join(map(str, headers)))
return csv_path
|
def save_loss(loss_dict, model_dir, name):
save_dir = os.path.join(model_dir, 'loss')
os.makedirs(save_dir, exist_ok=True)
file_path = os.path.join(save_dir, '{}.csv'.format(name))
pd.DataFrame(loss_dict).to_csv(file_path)
|
def save_features(model_dir, name, features, labels, layer=None):
save_dir = os.path.join(model_dir, 'features')
os.makedirs(save_dir, exist_ok=True)
np.save(os.path.join(save_dir, f'{name}_features.npy'), features)
np.save(os.path.join(save_dir, f'{name}_labels.npy'), labels)
|
def flatten(layers, num_classes):
net = ReduNet(*[Vector(eta=0.5, eps=0.1, lmbda=500, num_classes=num_classes, dimensions=784) for _ in range(layers)])
return net
|
def lift2d(channels, layers, num_classes, seed=0):
net = ReduNet(Lift2D(1, channels, 9, seed=seed), *[Fourier2D(eta=0.5, eps=0.1, lmbda=500, num_classes=num_classes, dimensions=(channels, 28, 28)) for _ in range(layers)])
return net
|
def mnist2d_10class(data_dir):
transform = transforms.Compose([transforms.ToTensor()])
trainset = datasets.MNIST(data_dir, train=True, transform=transform, download=True)
testset = datasets.MNIST(data_dir, train=False, transform=transform, download=True)
num_classes = 10
return (trainset, testset, num_classes)
|
def mnist2d_5class(data_dir):
transform = transforms.Compose([transforms.ToTensor()])
trainset = datasets.MNIST(data_dir, train=True, transform=transform, download=True)
testset = datasets.MNIST(data_dir, train=False, transform=transform, download=True)
(trainset, num_classes) = filter_class(trainset, [0, 1, 2, 3, 4])
(testset, _) = filter_class(testset, [0, 1, 2, 3, 4])
num_classes = 5
return (trainset, testset, num_classes)
|
def mnist2d_2class(data_dir):
transform = transforms.Compose([transforms.ToTensor()])
trainset = datasets.MNIST(data_dir, train=True, transform=transform, download=True)
testset = datasets.MNIST(data_dir, train=False, transform=transform, download=True)
(trainset, num_classes) = filter_class(trainset, [0, 1])
(testset, _) = filter_class(testset, [0, 1])
return (trainset, testset, num_classes)
|
def mnistvector_10class(data_dir):
transform = transforms.Compose([transforms.ToTensor(), transforms.Lambda((lambda x: x.flatten()))])
trainset = datasets.MNIST(data_dir, train=True, transform=transform, download=True)
testset = datasets.MNIST(data_dir, train=False, transform=transform, download=True)
num_classes = 10
return (trainset, testset, num_classes)
|
def mnistvector_5class(data_dir):
transform = transforms.Compose([transforms.ToTensor(), transforms.Lambda((lambda x: x.flatten()))])
trainset = datasets.MNIST(data_dir, train=True, transform=transform, download=True)
testset = datasets.MNIST(data_dir, train=False, transform=transform, download=True)
(trainset, num_classes) = filter_class(trainset, [0, 1, 2, 3, 4])
(testset, _) = filter_class(testset, [0, 1, 2, 3, 4])
return (trainset, testset, num_classes)
|
def mnistvector_2class(data_dir):
transform = transforms.Compose([transforms.ToTensor(), transforms.Lambda((lambda x: x.flatten()))])
trainset = datasets.MNIST(data_dir, train=True, transform=transform, download=True)
testset = datasets.MNIST(data_dir, train=False, transform=transform, download=True)
(trainset, num_classes) = filter_class(trainset, [0, 1])
(testset, _) = filter_class(testset, [0, 1])
return (trainset, testset, num_classes)
|
def filter_class(dataset, classes):
(data, labels) = (dataset.data, dataset.targets)
if (type(labels) == list):
labels = torch.tensor(labels)
data_filter = []
labels_filter = []
for _class in classes:
idx = (labels == _class)
data_filter.append(data[idx])
labels_filter.append(labels[idx])
if (type(dataset.data) == np.ndarray):
dataset.data = np.vstack(data_filter)
dataset.targets = np.hstack(labels_filter)
elif (type(dataset.data) == torch.Tensor):
dataset.data = torch.cat(data_filter)
dataset.targets = torch.cat(labels_filter)
else:
raise TypeError('dataset.data type neither np.ndarray nor torch.Tensor')
return (dataset, len(classes))
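# Usage sketch: restrict MNIST to the digits {0, 1} (download path illustrative).
from torchvision import datasets, transforms

testset = datasets.MNIST('./data', train=False, transform=transforms.ToTensor(), download=True)
testset, num_classes = filter_class(testset, [0, 1])
assert num_classes == 2 and set(testset.targets.tolist()) == {0, 1}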
|
def load_architecture(data, arch, seed=0):
if (data == 'mnist2d'):
if (arch == 'lift2d_channels35_layers5'):
from architectures.mnist.lift2d import lift2d
return lift2d(channels=35, layers=5, num_classes=10, seed=seed)
if (arch == 'lift2d_channels35_layers10'):
from architectures.mnist.lift2d import lift2d
return lift2d(channels=35, layers=10, num_classes=10, seed=seed)
if (arch == 'lift2d_channels35_layers20'):
from architectures.mnist.lift2d import lift2d
return lift2d(channels=35, layers=20, num_classes=10, seed=seed)
if (arch == 'lift2d_channels55_layers5'):
from architectures.mnist.lift2d import lift2d
return lift2d(channels=55, layers=5, num_classes=10, seed=seed)
if (arch == 'lift2d_channels55_layers10'):
from architectures.mnist.lift2d import lift2d
return lift2d(channels=55, layers=10, num_classes=10, seed=seed)
if (arch == 'lift2d_channels55_layers20'):
from architectures.mnist.lift2d import lift2d
return lift2d(channels=55, layers=20, num_classes=10, seed=seed)
if (data == 'mnist2d+2class'):
if (arch == 'lift2d_channels35_layers5'):
from architectures.mnist.lift2d import lift2d
return lift2d(channels=35, layers=5, num_classes=2, seed=seed)
if (arch == 'lift2d_channels35_layers10'):
from architectures.mnist.lift2d import lift2d
return lift2d(channels=35, layers=10, num_classes=2, seed=seed)
if (arch == 'lift2d_channels35_layers20'):
from architectures.mnist.lift2d import lift2d
return lift2d(channels=35, layers=20, num_classes=2, seed=seed)
if (arch == 'lift2d_channels55_layers5'):
from architectures.mnist.lift2d import lift2d
return lift2d(channels=55, layers=5, num_classes=2, seed=seed)
if (arch == 'lift2d_channels55_layers10'):
from architectures.mnist.lift2d import lift2d
return lift2d(channels=55, layers=10, num_classes=2, seed=seed)
if (arch == 'lift2d_channels55_layers20'):
from architectures.mnist.lift2d import lift2d
return lift2d(channels=55, layers=20, num_classes=2, seed=seed)
if (data == 'mnistvector'):
if (arch == 'layers50'):
from architectures.mnist.flatten import flatten
return flatten(layers=50, num_classes=10)
if (arch == 'layers20'):
from architectures.mnist.flatten import flatten
return flatten(layers=20, num_classes=10)
if (arch == 'layers10'):
from architectures.mnist.flatten import flatten
return flatten(layers=10, num_classes=10)
if (arch == 'layers5'):
from architectures.mnist.flatten import flatten
return flatten(layers=5, num_classes=10)
if (data == 'mnistvector_2class'):
if (arch == 'layers50'):
from architectures.mnist.flatten import flatten
return flatten(layers=50, num_classes=2)
if (arch == 'layers20'):
from architectures.mnist.flatten import flatten
return flatten(layers=20, num_classes=2)
if (arch == 'layers10'):
from architectures.mnist.flatten import flatten
return flatten(layers=10, num_classes=2)
if (arch == 'layers5'):
from architectures.mnist.flatten import flatten
return flatten(layers=5, num_classes=2)
raise NameError(f'Cannot find architecture: {arch}.')
|
def load_dataset(choice, data_dir='./data/'):
if (choice == 'mnist2d'):
from datasets.mnist import mnist2d_10class
return mnist2d_10class(data_dir)
if (choice == 'mnist2d_2class'):
from datasets.mnist import mnist2d_2class
return mnist2d_2class(data_dir)
if (choice == 'mnistvector'):
from datasets.mnist import mnistvector_10class
return mnistvector_10class(data_dir)
raise NameError(f'Dataset {choice} not found.')
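# End-to-end selection sketch using the two loaders above; assumes the repo's
# datasets/ and architectures/ packages are on the import path.
trainset, testset, num_classes = load_dataset('mnist2d', data_dir='./data/')
net = load_architecture('mnist2d', 'lift2d_channels35_layers5')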
|
def plot_loss_mcr(model_dir, name):
file_dir = os.path.join(model_dir, 'loss', f'{name}.csv')
data = pd.read_csv(file_dir)
loss_total = data['loss_total'].ravel()
loss_expd = data['loss_expd'].ravel()
loss_comp = data['loss_comp'].ravel()
num_iter = np.arange(len(loss_total))
(fig, ax) = plt.subplots(1, 1, figsize=(7, 5), sharey=True, sharex=True)
ax.plot(num_iter, loss_total, label='$\\Delta R$', color='green', linewidth=1.0, alpha=0.8)
ax.plot(num_iter, loss_expd, label='$R$', color='royalblue', linewidth=1.0, alpha=0.8)
ax.plot(num_iter, loss_comp, label='$R^c$', color='coral', linewidth=1.0, alpha=0.8)
ax.set_ylabel('Loss', fontsize=10)
ax.set_xlabel('Number of iterations', fontsize=10)
ax.legend(loc='lower right', prop={'size': 15}, ncol=3, framealpha=0.5)
fig.tight_layout()
loss_dir = os.path.join(model_dir, 'figures', 'loss_mcr')
os.makedirs(loss_dir, exist_ok=True)
file_name = os.path.join(loss_dir, f'{name}.png')
plt.savefig(file_name, dpi=400)
plt.close()
print('Plot saved to: {}'.format(file_name))
|
def plot_loss(model_dir):
'Plot cross entropy loss. '
file_dir = os.path.join(model_dir, 'losses.csv')
data = pd.read_csv(file_dir)
epochs = data['epoch'].ravel()
loss = data['loss'].ravel()
(fig, ax) = plt.subplots(1, 1, figsize=(7, 5), sharey=True, sharex=True, dpi=400)
ax.plot(epochs, loss, label='loss', color='green', linewidth=1.0, alpha=0.8)
ax.set_ylabel('Loss', fontsize=10)
ax.set_xlabel('Number of iterations', fontsize=10)
ax.legend(loc='lower right', prop={'size': 15}, ncol=3, framealpha=0.5)
ax.set_title('Loss')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
loss_dir = os.path.join(model_dir, 'figures', 'loss')
os.makedirs(loss_dir, exist_ok=True)
file_name = os.path.join(loss_dir, 'loss.png')
plt.savefig(file_name, dpi=400)
print('Plot saved to: {}'.format(file_name))
file_name = os.path.join(loss_dir, 'loss.pdf')
plt.savefig(file_name, dpi=400)
plt.close()
print('Plot saved to: {}'.format(file_name))
|
def plot_csv(model_dir, filename):
df = pd.read_csv(os.path.join(model_dir, f'{filename}.csv'))
colnames = df.columns
(fig, ax) = plt.subplots(1, 1, figsize=(7, 5))
for colname in colnames[1:]:
ax.plot(df[colnames[0]], df[colname], marker='x', label=colname)
ax.set_xlabel(colnames[0])
ax.set_ylabel(filename)
ax.legend()
csv_dir = os.path.join(model_dir, 'figures', 'csv')
os.makedirs(csv_dir, exist_ok=True)
savepath = os.path.join(csv_dir, f'{filename}.png')
fig.savefig(savepath)
print('Plot saved to: {}'.format(savepath))
|
def plot_loss_ce(model_dir, filename='loss_ce'):
df = pd.read_csv(os.path.join(model_dir, f'{filename}.csv'))
colnames = df.columns
(fig, ax) = plt.subplots(1, 1, figsize=(7, 5))
for colname in colnames[1:]:
ax.plot(np.arange(df.shape[0]), df[colname], label=colname)
ax.set_xlabel(colnames[0])
ax.set_ylabel(filename)
ax.legend()
csv_dir = os.path.join(model_dir, 'figures', 'loss_ce')
os.makedirs(csv_dir, exist_ok=True)
savepath = os.path.join(csv_dir, f'{filename}.png')
fig.savefig(savepath)
print('Plot saved to: {}'.format(savepath))
|
def plot_acc(model_dir):
'Plot training and testing accuracy'
file_dir = os.path.join(model_dir, 'acc.csv')
data = pd.read_csv(file_dir)
epochs = data['epoch'].ravel()
acc_train = data['acc_train'].ravel()
acc_test = data['acc_test'].ravel()
(fig, ax) = plt.subplots(1, 1, figsize=(7, 5), sharey=True, sharex=True, dpi=400)
ax.plot(epochs, acc_train, label='train', color='green', alpha=0.8)
ax.plot(epochs, acc_test, label='test', color='red', alpha=0.8)
ax.set_ylabel('Accuracy', fontsize=10)
ax.set_xlabel('Epoch', fontsize=10)
ax.legend(loc='lower right', prop={'size': 15}, ncol=3, framealpha=0.5)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.tight_layout()
acc_dir = os.path.join(model_dir, 'figures', 'acc')
os.makedirs(acc_dir, exist_ok=True)
file_name = os.path.join(acc_dir, 'accuracy.png')
plt.savefig(file_name, dpi=400)
print('Plot saved to: {}'.format(file_name))
file_name = os.path.join(acc_dir, 'accuracy.pdf')
plt.savefig(file_name, dpi=400)
plt.close()
print('Plot saved to: {}'.format(file_name))
|