code stringlengths 17 6.64M |
|---|
def plot_heatmap(model_dir, name, features, labels, num_classes):
    """Plot a heatmap of pairwise cosine similarity for all features.

    Features are grouped by class before plotting so same-class pairs appear
    as blocks along the diagonal. Saved to model_dir/figures/heatmaps/<name>.png.
    """
    features_sort, _ = utils.sort_dataset(features, labels, classes=num_classes, stack=False)
    features_sort_ = np.vstack(features_sort)
    # assumes features are unit-normalized so the Gram matrix is cosine similarity — TODO confirm
    sim_mat = np.abs(features_sort_ @ features_sort_.T)
    fig, ax = plt.subplots(figsize=(7, 5), sharey=True, sharex=True)
    im = ax.imshow(sim_mat, cmap='Blues')
    fig.colorbar(im, pad=0.02, drawedges=0, ticks=[0, 0.5, 1])
    ax.set_xticks(np.linspace(0, len(labels), num_classes + 1))
    ax.set_yticks(np.linspace(0, len(labels), num_classes + 1))
    # `tick.label` was deprecated and removed in matplotlib 3.8; tick_params is the stable API
    ax.tick_params(axis='both', labelsize=10)
    fig.tight_layout()
    save_dir = os.path.join(model_dir, 'figures', 'heatmaps')
    os.makedirs(save_dir, exist_ok=True)
    file_name = os.path.join(save_dir, f'{name}.png')
    fig.savefig(file_name)
    print('Plot saved to: {}'.format(file_name))
    plt.close()
|
def plot_transform(model_dir, inputs, outputs, name):
    """Plot an input/output image pair side by side and save it.

    `inputs`/`outputs` are CHW tensors; outputs are min-max rescaled for
    display. Saved to model_dir/figures/images/<name>.png.
    """
    fig, axes = plt.subplots(ncols=2)
    inputs = inputs.permute(1, 2, 0)
    outputs = outputs.permute(1, 2, 0)
    # rescale outputs into [0, 1] so imshow renders them sensibly
    outputs = (outputs - outputs.min()) / (outputs.max() - outputs.min())
    for axis, img, title in zip(axes, (inputs, outputs), ('inputs', 'outputs')):
        axis.imshow(img)
        axis.set_title(title)
    save_dir = os.path.join(model_dir, 'figures', 'images')
    os.makedirs(save_dir, exist_ok=True)
    file_name = os.path.join(save_dir, f'{name}.png')
    fig.savefig(file_name)
    print('Plot saved to: {}'.format(file_name))
    plt.close()
|
def plot_channel_image(model_dir, features, name):
    """Render a single-channel feature map as a grayscale image and save it."""
    def _to_unit_range(x):
        # shift so the minimum is 0, then scale so the maximum is 1
        shifted = x - x.min()
        return shifted / (shifted.max() - shifted.min())
    fig, ax = plt.subplots()
    ax.imshow(_to_unit_range(features), cmap='gray')
    save_dir = os.path.join(model_dir, 'figures', 'images')
    os.makedirs(save_dir, exist_ok=True)
    file_name = os.path.join(save_dir, f'{name}.png')
    fig.savefig(file_name)
    print('Plot saved to: {}'.format(file_name))
    plt.close()
|
def plot_nearest_image(model_dir, image, nearest_images, values, name, grid_size=(4, 4)):
    """Plot a query image (top-left, red border) with its nearest neighbors in a grid.

    `values[k]` is shown as the title of the (k+1)-th neighbor. Saved to
    model_dir/figures/nearest_image/<name>.png.
    """
    fig, ax = plt.subplots(*grid_size, figsize=(10, 10))
    idx = 1
    for row in range(grid_size[0]):
        for col in range(grid_size[1]):
            cell = ax[(row, col)]
            if row == 0 and col == 0:
                # top-left cell holds the query image itself
                cell.imshow(image)
            else:
                cell.set_title(values[idx - 1])
                cell.imshow(nearest_images[idx - 1])
                idx += 1
            cell.set_xticks([])
            cell.set_yticks([])
    # highlight the query cell with a red border
    plt.setp(ax[(0, 0)].spines.values(), color='red', linewidth=2)
    fig.tight_layout()
    save_dir = os.path.join(model_dir, 'figures', 'nearest_image')
    os.makedirs(save_dir, exist_ok=True)
    save_path = os.path.join(save_dir, f'{name}.png')
    fig.savefig(save_path)
    print(f'Plot saved to: {save_path}')
    plt.close()
|
def plot_image(model_dir, image, name):
    """Save a single HWC image without axis ticks (grayscale for 1-channel input)."""
    fig, ax = plt.subplots(1, 1, figsize=(10, 10))
    # single-channel inputs get a gray colormap; otherwise matplotlib's default
    cmap = 'gray' if image.shape[2] == 1 else None
    ax.imshow(image, cmap=cmap)
    ax.set_xticks([])
    ax.set_yticks([])
    fig.tight_layout()
    save_dir = os.path.join(model_dir, 'figures', 'image')
    os.makedirs(save_dir, exist_ok=True)
    save_path = os.path.join(save_dir, f'{name}.png')
    fig.savefig(save_path)
    print(f'Plot saved to: {save_path}')
    plt.close()
|
def save_image(image, save_path):
    """Save a single HWC image directly to `save_path` (grayscale for 1-channel input)."""
    fig, ax = plt.subplots(1, 1, figsize=(10, 10))
    cmap = 'gray' if image.shape[2] == 1 else None
    ax.imshow(image, cmap=cmap)
    ax.set_xticks([])
    ax.set_yticks([])
    fig.tight_layout()
    fig.savefig(save_path)
    print(f'Plot saved to: {save_path}')
    plt.close()
|
class ReduLayer(nn.Module):
    """Base class for ReduNet layers.

    Subclasses own an expansion operator ``E`` and one compression operator
    ``Cs[j]`` per class; this base provides the shared weight
    initialization/update machinery over those operators.
    """
    def __init__(self):
        super(ReduLayer, self).__init__()
    def __name__(self):
        return 'ReduNet'
    def forward(self, Z):
        # implemented by concrete subclasses (Vector / Fourier variants)
        raise NotImplementedError
    def zero(self):
        """Zero out E and every per-class operator Cj via the state dict."""
        state_dict = self.state_dict()
        state_dict['E.weight'] = torch.zeros_like(self.E.weight)
        for j in range(self.num_classes):
            state_dict[f'Cs.{j}.weight'] = torch.zeros_like(self.Cs[j].weight)
        self.load_state_dict(state_dict)
    def init(self, X, y):
        """Set E, Cs and gam directly from the statistics of (X, y)."""
        gam = self.compute_gam(X, y)
        E = self.compute_E(X)
        Cs = self.compute_Cs(X, y)
        self.set_params(E, Cs, gam)
    def update_old(self, X, y, tau):
        """Legacy update: move weights toward freshly computed E/Cs by step tau."""
        E = self.compute_E(X).to(X.device)
        Cs = self.compute_Cs(X, y).to(X.device)
        state_dict = self.state_dict()
        ref_E = self.E.weight
        ref_Cs = [self.Cs[j].weight for j in range(self.num_classes)]
        # convex combination: ref + tau * (new - ref)
        new_E = (ref_E + (tau * (E - ref_E)))
        new_Cs = [(ref_Cs[j] + (tau * (Cs[j] - ref_Cs[j]))) for j in range(self.num_classes)]
        state_dict['E.weight'] = new_E
        for j in range(self.num_classes):
            state_dict[f'Cs.{j}.weight'] = new_Cs[j]
        self.load_state_dict(state_dict)
    def update(self, X, y, tau):
        """Take a tau-sized step from current weights toward recomputed E/Cs."""
        (E_ref, Cs_ref) = self.get_params()
        E_new = self.compute_E(X).to(X.device)
        Cs_new = self.compute_Cs(X, y).to(X.device)
        E_update = (E_ref + (tau * (E_new - E_ref)))
        Cs_update = [(Cs_ref[j] + (tau * (Cs_new[j] - Cs_ref[j]))) for j in range(self.num_classes)]
        self.set_params(E_update, Cs_update)
    def set_params(self, E, Cs, gam=None):
        """Load E, the list Cs, and optionally gam into the state dict.

        Shapes must match the existing parameters exactly; asserts otherwise.
        """
        state_dict = self.state_dict()
        assert (self.E.weight.shape == E.shape), f'E shape does not match: {self.E.weight.shape} and {E.shape}'
        state_dict['E.weight'] = E
        for j in range(self.num_classes):
            assert (self.Cs[j].weight.shape == Cs[j].shape), f'Cj shape does not match'
            state_dict[f'Cs.{j}.weight'] = Cs[j]
        if (gam is not None):
            assert (self.gam.shape == gam.shape), 'gam shape does not match'
            state_dict['gam'] = gam
        self.load_state_dict(state_dict)
    def get_params(self):
        """Return (E weight tensor, list of per-class C weight tensors)."""
        E = self.E.weight
        Cs = [self.Cs[j].weight for j in range(self.num_classes)]
        return (E, Cs)
|
def ReduNetVector(num_classes, num_layers, d, eta, eps, lmbda):
    """Build a ReduNet of `num_layers` Vector layers over d-dimensional features."""
    layers = [Vector(eta, eps, lmbda, num_classes, d) for _ in range(num_layers)]
    return ReduNet(*layers)
|
def ReduNet1D(num_classes, num_layers, channels, timesteps, eta, eps, lmbda):
    """Build a ReduNet of `num_layers` Fourier1D layers for (channels, timesteps) signals."""
    shape = (channels, timesteps)
    layers = [Fourier1D(eta, eps, lmbda, num_classes, shape) for _ in range(num_layers)]
    return ReduNet(*layers)
|
def ReduNet2D(num_classes, num_layers, channels, height, width, eta, eps, lmbda):
    """Build a ReduNet of `num_layers` Fourier2D layers for (channels, height, width) images."""
    shape = (channels, height, width)
    layers = [Fourier2D(eta, eps, lmbda, num_classes, shape) for _ in range(num_layers)]
    return ReduNet(*layers)
|
class MultichannelWeight(nn.Module):
    """Holds a (C, C, *dims) complex weight applied channel-wise via einsum."""
    def __init__(self, channels, *dimension, dtype=torch.complex64):
        super(MultichannelWeight, self).__init__()
        init = torch.randn(channels, channels, *dimension, dtype=dtype)
        self.weight = nn.Parameter(init)
        self.shape = self.weight.shape
        self.dtype = dtype

    def __getitem__(self, item):
        # direct slicing into the underlying weight tensor
        return self.weight[item]

    def forward(self, V):
        # contract the input channel axis against the conjugated weight
        return contract('bi...,ih...->bh...', V.type(self.dtype), self.weight.conj())
|
class Lift(nn.Module):
    """Base class for random lifting layers.

    Stores configuration and samples a fixed (optionally trainable) random
    convolution kernel from one of a few named distributions.
    """
    def __init__(self, in_channel, out_channel, kernel_size, init_mode='gaussian1.0', stride=1, trainable=False, relu=True, seed=0):
        super(Lift, self).__init__()
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.kernel_size = kernel_size
        self.init_mode = init_mode
        self.stride = stride
        self.trainable = trainable
        self.relu = relu
        self.seed = seed

    def set_weight(self, init_mode, size, trainable):
        """Sample `self.kernel` of shape `size` from the distribution named by init_mode."""
        # seed first so kernels are reproducible per layer
        torch.manual_seed(self.seed)
        available = {
            'gaussian0.1': distributions.normal.Normal(loc=0, scale=0.1),
            'gaussian1.0': distributions.normal.Normal(loc=0, scale=1.0),
            'gaussian5.0': distributions.normal.Normal(loc=0, scale=5.0),
            'uniform0.1': distributions.uniform.Uniform(-0.1, 0.1),
            'uniform0.5': distributions.uniform.Uniform(-0.5, 0.5),
        }
        if init_mode not in available:
            raise NameError(f'No such kernel: {init_mode}')
        sampled = available[init_mode].sample(size).type(torch.float)
        self.kernel = nn.Parameter(sampled, requires_grad=trainable)
|
class Lift1D(Lift):
    """1D random lifting: circular padding followed by conv1d, optional ReLU."""
    def __init__(self, in_channel, out_channel, kernel_size, init_mode='gaussian1.0', stride=1, trainable=False, relu=True, seed=0):
        super(Lift1D, self).__init__(in_channel, out_channel, kernel_size, init_mode, stride, trainable, relu, seed)
        # conv1d kernel layout: (out_channels, in_channels, kernel_size)
        self.size = (out_channel, in_channel, kernel_size)
        self.set_weight(init_mode, self.size, trainable)
    def forward(self, Z):
        # Move the kernel to the input's device, matching Lift2D.forward;
        # without this, CUDA inputs fail against a CPU-resident kernel.
        kernel = self.kernel.to(Z.device)
        # circular padding makes the convolution cyclic along the time axis
        Z = F.pad(Z, (0, self.kernel_size - 1), 'circular')
        out = F.conv1d(Z, kernel, stride=self.stride)
        if self.relu:
            return F.relu(out)
        return out
|
class Lift2D(Lift):
    """2D random lifting: circular padding followed by conv2d, optional ReLU."""
    def __init__(self, in_channel, out_channel, kernel_size, init_mode='gaussian1.0', stride=1, trainable=False, relu=True, seed=0):
        super(Lift2D, self).__init__(in_channel, out_channel, kernel_size, init_mode, stride, trainable, relu, seed)
        # conv2d kernel layout: (out_channels, in_channels, kH, kW)
        self.size = (out_channel, in_channel, kernel_size, kernel_size)
        self.set_weight(init_mode, self.size, trainable)

    def forward(self, Z):
        kernel = self.kernel.to(Z.device)
        pad = self.kernel_size - 1
        # circular padding keeps the convolution cyclic along both spatial axes
        Z = F.pad(Z, (0, pad, 0, pad), 'circular')
        out = F.conv2d(Z, kernel, stride=self.stride)
        return F.relu(out) if self.relu else out
|
class ReduNet(nn.Sequential):
    """Sequential container mixing ReduLayers with ordinary modules.

    Consecutive ReduLayers form a "redu block": inputs pass through the first
    layer's `preprocess` on entry and the last layer's `postprocess` on exit.
    Per-layer MCR^2 losses are recorded when requested.
    """
    def __init__(self, *modules):
        super(ReduNet, self).__init__(*modules)
        self._init_loss()
    def init(self, inputs, labels):
        # initialize every ReduLayer from data, recording losses; no autograd
        with torch.no_grad():
            return self.forward(inputs, labels, init=True, loss=True)
    def update(self, inputs, labels, tau=0.1):
        # take a tau-sized update step on every ReduLayer, recording losses
        with torch.no_grad():
            return self.forward(inputs, labels, tau=tau, update=True, loss=True)
    def zero(self):
        # zero the weights of every ReduLayer; other modules untouched
        with torch.no_grad():
            for module in self:
                if isinstance(module, ReduLayer):
                    module.zero()
            return self
    def batch_forward(self, inputs, batch_size=1000, loss=False, cuda=True, device=None):
        """Run forward in minibatches, gathering outputs on CPU.

        An explicit `device` takes precedence over the `cuda` flag.
        """
        outputs = []
        for i in range(0, inputs.shape[0], batch_size):
            print('batch:', i, end='\r')
            batch_inputs = inputs[i:(i + batch_size)]
            if (device is not None):
                batch_inputs = batch_inputs.to(device)
            elif cuda:
                batch_inputs = batch_inputs.cuda()
            batch_outputs = self.forward(batch_inputs, loss=loss)
            outputs.append(batch_outputs.cpu())
        return torch.cat(outputs)
    def forward(self, inputs, labels=None, tau=0.1, init=False, update=False, loss=False):
        """Forward pass with optional per-layer init/update and loss logging.

        `labels` must be provided when init/update/loss is requested.
        """
        self._init_loss()
        self._inReduBlock = False
        for (layer_i, module) in enumerate(self):
            # entering a run of ReduLayers: apply the block's preprocessing
            if self._isEnterReduBlock(layer_i, module):
                inputs = module.preprocess(inputs)
                self._inReduBlock = True
            if (init and self._isReduLayer(module)):
                module.init(inputs, labels)
            if (update and self._isReduLayer(module)):
                module.update(inputs, labels, tau)
            if self._isReduLayer(module):
                # ReduLayers also return class predictions used for the loss
                (inputs, preds) = module(inputs, return_y=True)
            else:
                inputs = module(inputs)
            if (loss and isinstance(module, ReduLayer)):
                losses = compute_mcr2(inputs, preds, module.eps)
                self._append_loss(layer_i, *losses)
            # leaving the run of ReduLayers: undo the preprocessing transform
            if self._isExitReduBlock(layer_i, module):
                inputs = module.postprocess(inputs)
                self._inReduBlock = False
        return inputs
    def get_loss(self):
        # loss history of the most recent forward pass
        return self.losses
    def _init_loss(self):
        # reset loss history (called at the start of every forward pass)
        self.losses = {'layer': [], 'loss_total': [], 'loss_expd': [], 'loss_comp': []}
    def _append_loss(self, layer_i, loss_total, loss_expd, loss_comp):
        self.losses['layer'].append(layer_i)
        self.losses['loss_total'].append(loss_total)
        self.losses['loss_expd'].append(loss_expd)
        self.losses['loss_comp'].append(loss_comp)
        print(f'{layer_i} | {loss_total:.6f} {loss_expd:.6f} {loss_comp:.6f}')
    def _isReduLayer(self, module):
        return isinstance(module, ReduLayer)
    def _isEnterReduBlock(self, _, module):
        # first ReduLayer encountered while not already inside a block
        if ((not self._inReduBlock) and self._isReduLayer(module)):
            return True
        return False
    def _isExitReduBlock(self, layer_i, _):
        # exit when this is the last module overall, or the next module
        # is not a ReduLayer
        if (((len(self) - 1) == layer_i) and self._inReduBlock):
            return True
        if (self._inReduBlock and (not self._isReduLayer(self[(layer_i + 1)]))):
            return True
        return False
|
def sort_dataset(data, labels, classes, stack=False):
    """Sort a dataset by class label.

    Parameters:
        data (np.ndarray or torch.Tensor): data array, first axis is samples
        labels (np.ndarray or torch.Tensor): 1D array of class labels
        classes (int or iterable): number of classes, or an explicit iterable
            of class ids to select
        stack (bool): combine the per-class groups into single arrays

    Returns:
        (sorted_data, sorted_labels): lists of per-class arrays, or stacked
        arrays when `stack` is True.
    """
    # isinstance instead of `type(...) == int` so int subclasses work too
    if isinstance(classes, int):
        classes = np.arange(classes)
    sorted_data = []
    sorted_labels = []
    for c in classes:
        idx = (labels == c)
        sorted_data.append(data[idx])
        sorted_labels.append(labels[idx])
    if stack:
        if isinstance(data, np.ndarray):
            sorted_data = np.vstack(sorted_data)
            sorted_labels = np.hstack(sorted_labels)
        else:
            # torch path; note torch.stack requires equal-sized class groups
            sorted_data = torch.stack(sorted_data)
            sorted_labels = torch.cat(sorted_labels)
    return (sorted_data, sorted_labels)
|
def save_params(model_dir, params, name='params', name_prefix=None):
    """Serialize a parameter dict to `<model_dir>[/name_prefix]/<name>.json`."""
    target_dir = os.path.join(model_dir, name_prefix) if name_prefix else model_dir
    os.makedirs(target_dir, exist_ok=True)
    with open(os.path.join(target_dir, f'{name}.json'), 'w') as f:
        json.dump(params, f, indent=2, sort_keys=True)
|
def load_params(model_dir):
    """Read `<model_dir>/params.json` and return its contents as a dict."""
    path = os.path.join(model_dir, 'params.json')
    with open(path, 'r') as f:
        return json.load(f)
|
def update_params(model_dir, dict_):
    """Merge `dict_` into the stored params.json, persist, and return the result."""
    params = load_params(model_dir)
    params.update(dict_)
    save_params(model_dir, params)
    return params
|
def create_csv(model_dir, filename, headers):
    """Create `<model_dir>/<filename>.csv` with `headers` as its first line.

    Any existing file at that path is replaced. Returns the csv path.
    """
    # Bug fix: the path interpolated a garbled literal instead of `filename`,
    # so every csv collided at the same '(unknown).csv' path.
    csv_path = os.path.join(model_dir, f'{filename}.csv')
    if os.path.exists(csv_path):
        os.remove(csv_path)
    with open(csv_path, 'w+') as f:
        f.write(','.join(map(str, headers)))
    return csv_path
|
def append_csv(model_dir, filename, entries):
    """Append `entries` as a new comma-separated line to `<model_dir>/<filename>.csv`.

    The csv must already exist (see `create_csv`).
    """
    # Bug fix: interpolate the actual filename instead of the garbled
    # '(unknown)' literal, matching create_csv.
    csv_path = os.path.join(model_dir, f'{filename}.csv')
    assert os.path.exists(csv_path), 'CSV file is missing in project directory.'
    with open(csv_path, 'a') as f:
        f.write('\n' + ','.join(map(str, entries)))
|
def save_loss(model_dir, name, loss_dict):
    """Write a loss-history dict to `<model_dir>/loss/<name>.csv` via pandas."""
    save_dir = os.path.join(model_dir, 'loss')
    os.makedirs(save_dir, exist_ok=True)
    target = os.path.join(save_dir, '{}.csv'.format(name))
    pd.DataFrame(loss_dict).to_csv(target)
|
def save_features(model_dir, name, features, labels, layer=None):
    """Save feature and label arrays as .npy files under `<model_dir>/features/`.

    `layer` is accepted for interface compatibility but is not used here.
    """
    save_dir = os.path.join(model_dir, 'features')
    os.makedirs(save_dir, exist_ok=True)
    feature_path = os.path.join(save_dir, f'{name}_features.npy')
    label_path = os.path.join(save_dir, f'{name}_labels.npy')
    np.save(feature_path, features)
    np.save(label_path, labels)
|
def save_ckpt(model_dir, name, net):
    """Save `net`'s state_dict to `<model_dir>/checkpoints/<name>.pt`."""
    ckpt_dir = os.path.join(model_dir, 'checkpoints')
    os.makedirs(ckpt_dir, exist_ok=True)
    torch.save(net.state_dict(), os.path.join(ckpt_dir, '{}.pt'.format(name)))
|
def load_ckpt(model_dir, name, net, eval_=True):
    """Load `<model_dir>/checkpoints/<name>.pt` into `net` and return it.

    Puts the network in eval mode unless `eval_` is False.
    """
    ckpt_path = os.path.join(model_dir, 'checkpoints', f'{name}.pt')
    print('Loading checkpoint: {}'.format(ckpt_path))
    state_dict = torch.load(ckpt_path)
    net.load_state_dict(state_dict)
    # free the loaded dict immediately; the weights now live in `net`
    del state_dict
    if eval_:
        net.eval()
    return net
|
def sample_trunc_beta(a, b, lower, upper):
    """Sample from a Beta(a, b) distribution truncated to [lower, upper].

    Parameters
    ----------
    a, b: float
        Canonical parameters of the beta distribution
    lower, upper: float
        Lower and upper truncation bounds

    Returns
    -------
    s: float
        Sampled value, or None when the interval is empty (upper < lower)
    """
    if (upper < lower):
        # NOTE(review): empty interval returns None implicitly — callers must handle this
        return
    if ((a == 1) and (b == 1)):
        # Beta(1, 1) is uniform, so the truncated draw is exactly uniform
        s = np.random.uniform(low=lower, high=upper)
        return s
    # mode of the beta density (valid when a + b > 2)
    peak = ((a - 1) / ((a + b) - 2))
    if ((peak < lower) or (peak > upper)):
        # Mode outside the interval: rejection-sample against a uniform
        # proposal g on [lower, upper]; the density maximum over the interval
        # sits at an endpoint, which bounds M.
        s = np.random.uniform(low=lower, high=upper)
        log_f_s = beta.logpdf(s, a, b)
        log_g_s = ((- 1) * np.log((upper - lower)))
        log_M = (max(beta.logpdf(lower, a, b), beta.logpdf(upper, a, b)) + np.log((upper - lower)))
        # accept with probability f(s) / (M * g(s)), computed in log space
        while (np.log(np.random.random()) > (log_f_s - (log_M + log_g_s))):
            s = np.random.uniform(low=lower, high=upper)
            log_f_s = beta.logpdf(s, a, b)
    else:
        # Mode inside the interval: naive resampling until the draw lands in range
        s = beta.rvs(a, b)
        while ((s < lower) or (s > upper)):
            s = beta.rvs(a, b)
    return s
|
def check_MH_criterion(log_labels_given_ps_prev, log_labels_given_ps_new, log_update, log_revert):
    """Metropolis-Hastings accept/reject decision for a proposed MCMC move.

    Combines the change in log P(theta | p, A) with the proposal-asymmetry
    correction (log_revert - log_update), then accepts with probability
    min(1, exp(log_p_update)).

    Returns
    -------
    acc: int
        1 when the move is accepted, 0 otherwise
    """
    posterior_gain = log_labels_given_ps_new - log_labels_given_ps_prev
    log_p_update = posterior_gain + (log_revert - log_update)
    log_p_accept = min(0, log_p_update)
    return 1 if np.log(np.random.random()) < log_p_accept else 0
|
def get_log_posterior_layered(N_nodes, layer_ms, layer_Ms, layer_ns, layer_ps):
    """Log posterior of the layered core-periphery model.

    Sum of the Bernoulli likelihood, the ordered prior on layer densities,
    and the label prior.
    """
    n_layers = len(layer_ps)
    log_post = log_likelihood_layered(layer_ms, layer_Ms, layer_ps)
    log_post = log_post + log_ps_prior_layered(layer_ps)
    log_post = log_post + log_labels_prior_layered(N_nodes, layer_ns, n_layers)
    return log_post
|
def get_log_posterior_hubspoke(N_nodes, block_ms, block_Ms, block_ns, block_ps):
    """Log posterior of the hub-and-spoke core-periphery model.

    Sum of the Bernoulli likelihood, the ordered prior on block densities,
    and the label prior.
    """
    log_post = log_likelihood_hubspoke(block_ms, block_Ms, block_ps)
    log_post = log_post + log_ps_prior_hubspoke(block_ps)
    log_post = log_post + log_labels_prior_hubspoke(N_nodes, block_ns)
    return log_post
|
def xlogy(x, y):
    """Compute x * log(y) with the convention 0 * log(0) == 0.

    Returns -inf when y == 0 with x != 0, and None for negative y
    (log undefined; callers are expected to avoid that case).
    """
    if y == 0:
        return 0 if x == 0 else (-np.inf)
    if y < 0:
        return None
    return x * np.log(y)
|
def log_likelihood_layered(layer_ms, layer_Ms, layer_ps):
    """Bernoulli log likelihood of the layered core-periphery model.

    Each layer contributes m*log(p) + (M - m)*log(1 - p), using the
    0*log(0) == 0 convention from `xlogy`.
    """
    total = 0
    for s, p_s in enumerate(layer_ps):
        total += xlogy(layer_ms[s], p_s)
        total += xlogy(layer_Ms[s] - layer_ms[s], 1 - p_s)
    return total
|
def log_likelihood_hubspoke(block_ms, block_Ms, block_ps):
    """Bernoulli log likelihood of the hub-and-spoke core-periphery model.

    Iterates the lower triangle of the 2x2 block matrix; each entry
    contributes m*log(p) + (M - m)*log(1 - p) via `xlogy`.
    """
    total = 0
    for r in range(2):
        for s in range(r + 1):
            m = block_ms[(r, s)]
            M = block_Ms[(r, s)]
            p = block_ps[(r, s)]
            total += xlogy(m, p) + xlogy(M - m, 1 - p)
    return total
|
def log_labels_prior_layered(N_nodes, block_ns, n_layers):
    """Log prior on node labels for the layered model.

    Combines multinomial label-assignment terms with a uniform prior over
    compositions of N_nodes into n_layers non-empty layers.
    """
    c_blocks = np.sum(loggamma(block_ns + 1))
    c_total = loggamma(N_nodes + 1)
    c_nodes = loggamma(N_nodes)
    c_layers = loggamma(n_layers)
    c_rest = loggamma(N_nodes - n_layers - 1)
    c_norm = np.log(N_nodes)
    # keep the original accumulation order for bit-identical float results
    return ((((c_blocks - c_total) - c_nodes) + c_layers) + c_rest) - c_norm
|
def log_labels_prior_hubspoke(N_nodes, block_ns):
    """Log prior on node labels for the hub-and-spoke model.

    Same form as the layered prior, specialized to exactly two blocks
    (core and periphery).
    """
    c_blocks = np.sum(loggamma(block_ns + 1))
    c_total = loggamma(N_nodes + 1)
    c_nodes = loggamma(N_nodes)
    c_two = loggamma(2)
    c_rest = loggamma(N_nodes - 2 - 1)
    c_norm = np.log(N_nodes)
    # keep the original accumulation order for bit-identical float results
    return ((((c_blocks - c_total) - c_nodes) + c_two) + c_rest) - c_norm
|
def log_ps_prior_layered(layer_ps):
    """Log prior on layer densities.

    Densities must be non-increasing (core denser than periphery); ordered
    vectors get the loggamma(k) ordering correction, others get -inf.
    """
    is_ordered = np.all(layer_ps[:-1] >= layer_ps[1:])
    if not is_ordered:
        return -np.inf
    return loggamma(len(layer_ps))
|
def log_ps_prior_hubspoke(block_ps):
    """Log prior on block densities for the hub-and-spoke model.

    Requires core >= cross >= periphery density; valid orderings get
    log(6) (the number of orderings of 3 densities), otherwise -inf.
    """
    p_core = block_ps[(0, 0)]
    p_cross = block_ps[(0, 1)]
    p_periph = block_ps[(1, 1)]
    if p_core >= p_cross and p_cross >= p_periph:
        return np.log(6)
    return -np.inf
|
def log_labels_given_ps_layered(N_nodes, block_ns, layer_ms, layer_Ms, layer_ps):
    """Compute log P(theta | A, p) for the layered model.

    Likelihood plus label prior; the prior on p cancels when comparing
    label configurations at fixed densities.
    """
    n_layers = len(layer_ps)
    likelihood = log_likelihood_layered(layer_ms, layer_Ms, layer_ps)
    label_prior = log_labels_prior_layered(N_nodes, block_ns, n_layers)
    return likelihood + label_prior
|
def log_labels_given_ps_hubspoke(N_nodes, block_ns, block_ms, block_Ms, block_ps):
    """Compute log P(theta | A, p) for the hub-and-spoke model.

    Likelihood plus label prior; the prior on p cancels when comparing
    label configurations at fixed densities.
    """
    likelihood = log_likelihood_hubspoke(block_ms, block_Ms, block_ps)
    label_prior = log_labels_prior_hubspoke(N_nodes, block_ns)
    return likelihood + label_prior
|
def get_max_edges(block_r, block_s, block_ns):
    """Maximum possible number of edges between blocks block_r and block_s.

    Within a block (r == s) this is n-choose-2; across blocks it is
    n_r * n_s.

    Returns
    -------
    M_rs: int
        The edge-count bound. Integer (floor) division replaces the original
        true division, which produced a float for a documented int; since
        n*(n-1) is always even the value is unchanged, and exact integer
        arithmetic avoids precision loss for very large n.
    """
    if block_r == block_s:
        n = block_ns[block_r]
        M_rs = (n * (n - 1)) // 2
    else:
        M_rs = block_ns[block_r] * block_ns[block_s]
    return M_rs
|
def get_ordered_block_stats(G, node_labels, n_blocks=None):
    """Compute block statistics for G, then reorder blocks by within-block density.

    Densest block becomes index 0; see `reorder_blocks` for the returned tuple.
    """
    stats = get_block_stats(G, node_labels, n_blocks=n_blocks)
    return reorder_blocks(node_labels, *stats)
|
def get_block_stats(G, node_labels, n_blocks=None):
    """Compute fundamental block-level statistics of a network.

    Parameters
    ----------
    G: NetworkX graph
        The graph for which to get block statistics
    node_labels: 1D array
        Block label of each node, indexable by node id (implicitly assumed
        to be sorted consistently with sorted(G))
    n_blocks: int
        Number of blocks; defaults to max(node_labels) + 1

    Returns
    -------
    block_ns: 1D array
        Number of nodes counted in each block.
        NOTE(review): nodes are only counted on their first appearance in an
        edge, so isolated nodes never enter block_ns — confirm callers only
        pass graphs without isolated nodes.
    block_ms: 2D array
        Number of edges between each pair of blocks (symmetric; the diagonal
        counts within-block edges once)
    block_Ms: 2D array
        Maximum possible number of edges between each pair of blocks
    """
    if (n_blocks is None):
        n_blocks = (max(node_labels) + 1)
    seen_nodes = set()
    block_ns = np.zeros(n_blocks, dtype=int)
    block_ms = np.zeros((n_blocks, n_blocks), dtype=np.int64)
    for (i, j) in G.edges():
        i_block = node_labels[i]
        j_block = node_labels[j]
        block_ms[(i_block, j_block)] += 1
        # mirror off-diagonal counts so block_ms stays symmetric
        if (i_block != j_block):
            block_ms[(j_block, i_block)] += 1
        # count each node once, on first appearance in an edge
        if (i not in seen_nodes):
            block_ns[i_block] += 1
            seen_nodes.add(i)
        if (j not in seen_nodes):
            block_ns[j_block] += 1
            seen_nodes.add(j)
    block_Ms = np.zeros((n_blocks, n_blocks), dtype=np.int64)
    # fill the lower triangle with edge-count bounds, mirroring upward
    for r in range(n_blocks):
        for s in range((r + 1)):
            M_rs = get_max_edges(r, s, block_ns)
            block_Ms[(r, s)] = M_rs
            if (r != s):
                block_Ms[(s, r)] = M_rs
    return (block_ns, block_ms, block_Ms)
|
def get_layered_stats(block_ns, block_ms):
    """Collapse block-level edge counts into per-layer counts.

    Layer r aggregates the lower-triangle row r of the block matrix, i.e.
    edges from block r to blocks 0..r.

    Returns
    -------
    layer_ms: 1D array
        Number of edges connecting to each layer
    layer_Ms: 1D array
        Maximum possible number of edges connecting to each layer
    """
    n_blocks = len(block_ns)
    layer_ms = np.zeros(n_blocks, dtype=np.int64)
    layer_Ms = np.zeros(n_blocks, dtype=np.int64)
    for r in range(n_blocks):
        layer_ms[r] = sum(block_ms[(r, s)] for s in range(r + 1))
        layer_Ms[r] = sum(get_max_edges(r, s, block_ns) for s in range(r + 1))
    return (layer_ms, layer_Ms)
|
def get_on_diagonal_densities(block_ms, block_ns):
    """Within-block densities: 2m / (n(n-1)) per block, 0 for blocks with < 2 nodes.

    Returns
    -------
    ps: list
        Density within each block, in block order.
    """
    densities = []
    for idx, n in enumerate(block_ns):
        m = block_ms[(idx, idx)]
        if n == 0 or n == 1:
            # fewer than 2 nodes: no possible internal edge, density defined as 0
            densities.append(0)
        else:
            densities.append((2 * m) / (n * (n - 1)))
    return densities
|
def reorder_blocks(node_labels, block_ns, block_ms, block_Ms):
    """Reorder block statistics so the densest block gets index 0.

    Sorts blocks by descending within-block density, relabels nodes
    accordingly, and permutes the block matrices consistently.

    Returns
    -------
    The same four quantities, sorted by within-block density.
    """
    densities = get_on_diagonal_densities(block_ms, block_ns)
    ordered_blocks = np.argsort(densities)[::-1]
    # map each old block id to its rank in the new ordering
    relabel = {old: new for new, old in enumerate(ordered_blocks)}
    node_labels = [relabel[b] for b in node_labels]
    block_ns = [block_ns[b] for b in ordered_blocks]
    perm = np.ix_(ordered_blocks, ordered_blocks)
    block_ms = block_ms[perm]
    block_Ms = block_Ms[perm]
    return (node_labels, block_ns, block_ms, block_Ms)
|
def adam(lr, tparams, grads, inp, cost):
    """Build Theano functions implementing the Adam optimizer.

    Follows the original Adam release's conventions: lr0/b1/b2 here
    correspond to alpha and (1 - beta1), (1 - beta2) in the paper's notation.

    Parameters
    ----------
    lr: symbolic scalar learning rate (unused numerically; kept for interface)
    tparams: OrderedDict of name -> Theano shared parameter
    grads: list of symbolic gradients aligned with tparams
    inp: list of symbolic inputs to the cost
    cost: symbolic cost expression

    Returns
    -------
    (f_grad_shared, f_update): compute-gradients and apply-update functions
    """
    gshared = [theano.shared(p.get_value() * 0.0, name='%s_grad' % k)
               for k, p in tparams.items()]  # .iteritems() is Python 2 only
    gsup = [(gs, g) for gs, g in zip(gshared, grads)]
    f_grad_shared = theano.function(inp, cost, updates=gsup, profile=False)
    lr0 = 0.0002
    b1 = 0.1
    b2 = 0.001
    e = 1e-08
    updates = []
    i = theano.shared(numpy.float32(0.0))
    i_t = i + 1.0
    # bias-correction factors for the first and second moment estimates
    fix1 = 1.0 - (b1 ** i_t)
    fix2 = 1.0 - (b2 ** i_t)
    lr_t = lr0 * (tensor.sqrt(fix2) / fix1)
    for p, g in zip(tparams.values(), gshared):
        m = theano.shared(p.get_value() * 0.0)
        v = theano.shared(p.get_value() * 0.0)
        m_t = (b1 * g) + ((1.0 - b1) * m)
        v_t = (b2 * tensor.sqr(g)) + ((1.0 - b2) * v)
        g_t = m_t / (tensor.sqrt(v_t) + e)
        p_t = p - (lr_t * g_t)
        updates.append((m, m_t))
        updates.append((v, v_t))
        updates.append((p, p_t))
    # Bug fix: the step-counter update was appended inside the loop above,
    # creating one duplicate (i, i_t) pair per parameter; append it exactly once.
    updates.append((i, i_t))
    f_update = theano.function([lr], [], updates=updates, on_unused_input='ignore', profile=False)
    return (f_grad_shared, f_update)
|
def zipp(params, tparams):
    """Push numpy parameter values into the corresponding Theano shared variables.

    `params` maps name -> value; `tparams` maps name -> shared variable.
    """
    for kk, vv in params.items():  # .iteritems() does not exist in Python 3
        tparams[kk].set_value(vv)
|
def unzip(zipped):
    """Pull current values out of Theano shared variables into a plain OrderedDict."""
    new_params = OrderedDict()
    for kk, vv in zipped.items():  # .iteritems() does not exist in Python 3
        new_params[kk] = vv.get_value()
    return new_params
|
def itemlist(tparams):
    """Return the parameter variables of `tparams` in insertion order.

    `tparams` must be an OrderedDict for the order to be meaningful.
    """
    return list(tparams.values())  # .iteritems() does not exist in Python 3
|
def _p(pp, name):
'\n Make prefix-appended name\n '
return ('%s_%s' % (pp, name))
|
def init_tparams(params):
    """Wrap each initial numpy parameter in a named Theano shared variable.

    Preserves insertion order by returning an OrderedDict keyed like `params`.
    """
    tparams = OrderedDict()
    for kk, pp in params.items():  # .iteritems() does not exist in Python 3
        tparams[kk] = theano.shared(pp, name=kk)
    return tparams
|
def load_params(path, params):
    """Load saved parameter values from a numpy archive into `params`.

    Keys present in `params` but missing from the archive are left untouched
    and produce a warning. Returns the (mutated) `params` dict.
    """
    archive = numpy.load(path)
    for kk in params:  # .iteritems() does not exist in Python 3; only keys needed
        if kk not in archive:
            warnings.warn('%s is not in the archive' % kk)
            continue
        params[kk] = archive[kk]
    return params
|
def ortho_weight(ndim):
    """Random orthogonal (ndim, ndim) matrix for recurrent weight init.

    Taken as the left singular vectors of a standard Gaussian random matrix.
    """
    gaussian = numpy.random.randn(ndim, ndim)
    u, _, _ = numpy.linalg.svd(gaussian)
    return u.astype('float32')
|
def norm_weight(nin, nout=None, scale=0.1, ortho=True):
    """Uniform initialization in [-scale, scale].

    When the matrix is square and `ortho` is True, an orthogonal
    initialization is used instead. Returns a float32 array.
    """
    if nout is None:  # identity check instead of `== None` (PEP 8)
        nout = nin
    if nout == nin and ortho:
        W = ortho_weight(nin)
    else:
        W = numpy.random.uniform(low=-scale, high=scale, size=(nin, nout))
    return W.astype('float32')
|
def tanh(x):
    """Elementwise tanh activation (Theano symbolic expression)."""
    return tensor.tanh(x)
|
def relu(x):
    """Elementwise ReLU activation, expressed as x * (x > 0) (Theano symbolic)."""
    return (x * (x > 0))
|
def linear(x):
    """Identity activation: returns its input unchanged."""
    return x
|
def concatenate(tensor_list, axis=0):
    """Alternative implementation of `theano.tensor.concatenate`.

    Builds a zero tensor of the concatenated shape and copies each input into
    its slice with `set_subtensor`.

    Parameters
    ----------
    tensor_list: list of symbolic tensors, identical shapes except along `axis`
    axis: int
        Axis along which to concatenate

    Returns
    -------
    Symbolic tensor holding the concatenation.
    """
    concat_size = sum((tt.shape[axis] for tt in tensor_list))
    # output shape: same as the inputs except the concat axis, which is the sum
    output_shape = ()
    for k in range(axis):
        output_shape += (tensor_list[0].shape[k],)
    output_shape += (concat_size,)
    for k in range((axis + 1), tensor_list[0].ndim):
        output_shape += (tensor_list[0].shape[k],)
    out = tensor.zeros(output_shape)
    offset = 0
    for tt in tensor_list:
        # index tuple selecting this tensor's span along `axis`, full elsewhere
        indices = ()
        for k in range(axis):
            indices += (slice(None),)
        indices += (slice(offset, (offset + tt.shape[axis])),)
        for k in range((axis + 1), tensor_list[0].ndim):
            indices += (slice(None),)
        out = tensor.set_subtensor(out[indices], tt)
        offset += tt.shape[axis]
    return out
|
def build_dictionary(text):
    """
    Build a word -> index dictionary from pre-tokenized sentences.

    Words are ranked by decreasing frequency; indices start at 2
    (0/1 appear reserved for special tokens -- confirm with callers).

    Returns (worddict, wordcount).
    """
    wordcount = OrderedDict()
    for cc in text:
        for w in cc.split():
            if w not in wordcount:
                wordcount[w] = 0
            wordcount[w] += 1
    # Materialize as lists: dict views are not indexable in Python 3, so the
    # original `words[sidx]` would raise TypeError.
    words = list(wordcount.keys())
    freqs = list(wordcount.values())
    sorted_idx = numpy.argsort(freqs)[::-1]
    worddict = OrderedDict()
    for idx, sidx in enumerate(sorted_idx):
        worddict[words[sidx]] = idx + 2
    return (worddict, wordcount)
|
def load_dictionary(loc='/ais/gobi3/u/rkiros/bookgen/book_dictionary_large.pkl'):
    """Unpickle and return the word dictionary stored at `loc`."""
    with open(loc, 'rb') as handle:
        return pkl.load(handle)
|
def save_dictionary(worddict, wordcount, loc):
    """Pickle `worddict` then `wordcount` (in that order) into the file `loc`."""
    with open(loc, 'wb') as handle:
        for obj in (worddict, wordcount):
            pkl.dump(obj, handle)
|
def tokenize(sentence, grams):
    """Return all n-grams of `sentence` for each n in `grams`, joined by '_*_'."""
    words = sentence.split()
    return ['_*_'.join(words[start:start + gram])
            for gram in grams
            for start in range(len(words) - gram + 1)]
|
def build_dict(X, grams):
    """Count n-gram occurrences over every sentence in X (returns a Counter)."""
    counts = Counter()
    for sentence in X:
        counts.update(tokenize(sentence, grams))
    return counts
|
def compute_ratio(poscounts, negcounts, alpha=1):
    """
    Compute NBSVM-style log-count ratios.

    poscounts/negcounts map token -> count (Counter-like: missing keys must
    yield 0).  alpha is the additive smoothing constant.

    Returns (dic, r) where dic maps token -> column index and r[j] is the
    log ratio of smoothed, normalized positive vs negative frequency.
    """
    # list(...) on both views: dict views cannot be added in Python 3.
    alltokens = list(set(list(poscounts.keys()) + list(negcounts.keys())))
    dic = {t: i for (i, t) in enumerate(alltokens)}
    d = len(dic)
    p = np.ones(d) * alpha
    q = np.ones(d) * alpha
    for t in alltokens:
        p[dic[t]] += poscounts[t]
        q[dic[t]] += negcounts[t]
    p /= abs(p).sum()
    q /= abs(q).sum()
    r = np.log(p / q)
    return (dic, r)
|
def process_text(text, dic, r, grams):
    """
    Return a sparse (len(text) x len(dic)) feature matrix.

    For every sentence, the columns of its known n-grams are set to the
    corresponding ratio values r[j]; unknown n-grams are skipped.
    """
    X = lil_matrix((len(text), len(dic)))
    for row, sentence in enumerate(text):
        known_cols = {dic[t] for t in tokenize(sentence, grams) if t in dic}
        for col in sorted(known_cols):
            X[row, col] = r[col]
    return csr_matrix(X)
|
def init_params(options):
    """
    Initialize all model parameters into an OrderedDict of numpy arrays.

    `options` supplies n_words, dim_word, dim, and the encoder/decoder
    layer names resolved through get_layer.
    """
    params = OrderedDict()
    # Word embedding table: vocabulary size x embedding dimension.
    params['Wemb'] = norm_weight(options['n_words'], options['dim_word'])
    # get_layer(name)[0] is the layer's parameter-initializer function.
    params = get_layer(options['encoder'])[0](options, params, prefix='encoder', nin=options['dim_word'], dim=options['dim'])
    # Two decoders: '_f' and '_b' prefixes (forward and backward targets).
    params = get_layer(options['decoder'])[0](options, params, prefix='decoder_f', nin=options['dim_word'], dim=options['dim'])
    params = get_layer(options['decoder'])[0](options, params, prefix='decoder_b', nin=options['dim_word'], dim=options['dim'])
    # Output projection from decoder state to vocabulary logits, shared by both decoders.
    params = get_layer('ff')[0](options, params, prefix='ff_logit', nin=options['dim'], nout=options['n_words'])
    return params
|
def build_model(tparams, options):
    """
    Build the full training computation graph.

    Encodes sentence x, then decodes two target sentences y and z with the
    '_f' and '_b' decoders and sums their masked cross-entropy costs.
    """
    opt_ret = dict()
    trng = RandomStreams(1234)
    # x/y/z: (n_timesteps, n_samples) int64 word indices; each has a float32
    # mask of the same shape marking real tokens vs padding.
    x = tensor.matrix('x', dtype='int64')
    x_mask = tensor.matrix('x_mask', dtype='float32')
    y = tensor.matrix('y', dtype='int64')
    y_mask = tensor.matrix('y_mask', dtype='float32')
    z = tensor.matrix('z', dtype='int64')
    z_mask = tensor.matrix('z_mask', dtype='float32')
    n_timesteps = x.shape[0]
    n_timesteps_f = y.shape[0]
    n_timesteps_b = z.shape[0]
    n_samples = x.shape[1]
    # Embed and encode x; the encoder's final hidden state is the context
    # vector fed to both decoders.
    emb = tparams['Wemb'][x.flatten()].reshape([n_timesteps, n_samples, options['dim_word']])
    proj = get_layer(options['encoder'])[1](tparams, emb, None, options, prefix='encoder', mask=x_mask)
    ctx = proj[0][(- 1)]
    dec_ctx = ctx
    # Decoder inputs are the target embeddings shifted right by one step
    # (teacher forcing: position t sees only words < t).
    embf = tparams['Wemb'][y.flatten()].reshape([n_timesteps_f, n_samples, options['dim_word']])
    embf_shifted = tensor.zeros_like(embf)
    embf_shifted = tensor.set_subtensor(embf_shifted[1:], embf[:(- 1)])
    embf = embf_shifted
    embb = tparams['Wemb'][z.flatten()].reshape([n_timesteps_b, n_samples, options['dim_word']])
    embb_shifted = tensor.zeros_like(embb)
    embb_shifted = tensor.set_subtensor(embb_shifted[1:], embb[:(- 1)])
    embb = embb_shifted
    projf = get_layer(options['decoder'])[1](tparams, embf, dec_ctx, options, prefix='decoder_f', mask=y_mask)
    projb = get_layer(options['decoder'])[1](tparams, embb, dec_ctx, options, prefix='decoder_b', mask=z_mask)
    # Forward-decoder cost: masked word-level negative log-likelihood of y.
    # The 'ff_logit' output layer is shared between both decoders.
    logit = get_layer('ff')[1](tparams, projf[0], options, prefix='ff_logit', activ='linear')
    logit_shp = logit.shape
    probs = tensor.nnet.softmax(logit.reshape([(logit_shp[0] * logit_shp[1]), logit_shp[2]]))
    y_flat = y.flatten()
    # Flat index of each target word inside the flattened softmax output.
    y_flat_idx = ((tensor.arange(y_flat.shape[0]) * options['n_words']) + y_flat)
    costf = (- tensor.log((probs.flatten()[y_flat_idx] + 1e-08)))
    costf = costf.reshape([y.shape[0], y.shape[1]])
    costf = (costf * y_mask).sum(0)
    costf = costf.sum()
    # Backward-decoder cost: same construction against z.
    logit = get_layer('ff')[1](tparams, projb[0], options, prefix='ff_logit', activ='linear')
    logit_shp = logit.shape
    probs = tensor.nnet.softmax(logit.reshape([(logit_shp[0] * logit_shp[1]), logit_shp[2]]))
    z_flat = z.flatten()
    z_flat_idx = ((tensor.arange(z_flat.shape[0]) * options['n_words']) + z_flat)
    costb = (- tensor.log((probs.flatten()[z_flat_idx] + 1e-08)))
    costb = costb.reshape([z.shape[0], z.shape[1]])
    costb = (costb * z_mask).sum(0)
    costb = costb.sum()
    cost = (costf + costb)
    return (trng, x, x_mask, y, y_mask, z, z_mask, opt_ret, cost)
|
def build_encoder(tparams, options):
    """
    Build the encoder-only computation graph (for extracting sentence vectors).
    """
    opt_ret = dict()
    trng = RandomStreams(1234)
    # x: (n_timesteps, n_samples) int64 word indices plus float32 padding mask.
    x = tensor.matrix('x', dtype='int64')
    x_mask = tensor.matrix('x_mask', dtype='float32')
    n_timesteps = x.shape[0]
    n_samples = x.shape[1]
    # Look up embeddings and run the recurrent encoder; the sentence vector
    # `ctx` is the hidden state at the final timestep.
    emb = tparams['Wemb'][x.flatten()].reshape([n_timesteps, n_samples, options['dim_word']])
    proj = get_layer(options['encoder'])[1](tparams, emb, None, options, prefix='encoder', mask=x_mask)
    ctx = proj[0][(- 1)]
    return (trng, x, x_mask, ctx, emb)
|
def build_encoder_w2v(tparams, options):
    """
    Build the encoder graph taking pre-computed word embeddings as input
    (a float32 tensor3) instead of word indices.
    """
    opt_ret = dict()
    trng = RandomStreams(1234)
    embedding = tensor.tensor3('embedding', dtype='float32')
    x_mask = tensor.matrix('x_mask', dtype='float32')
    # Sentence vector is the encoder's hidden state at the final timestep.
    proj = get_layer(options['encoder'])[1](tparams, embedding, None, options, prefix='encoder', mask=x_mask)
    ctx = proj[0][(- 1)]
    return (trng, embedding, x_mask, ctx)
|
def adam(lr, tparams, grads, inp, cost):
    """
    Build Adam update functions.

    f_grad_shared(*inp) computes the cost and stores gradients in shared
    variables; f_update(lr) then applies one Adam step.

    NOTE: the `lr` argument of f_update is accepted for API compatibility but
    ignored -- this variant uses the hard-coded rate lr0.
    """
    # .items() instead of Python-2-only .iteritems()
    gshared = [theano.shared(p.get_value() * 0.0, name='%s_grad' % k)
               for (k, p) in tparams.items()]
    gsup = [(gs, g) for (gs, g) in zip(gshared, grads)]
    f_grad_shared = theano.function(inp, cost, updates=gsup, profile=False)
    lr0 = 0.0002
    b1 = 0.1
    b2 = 0.001
    e = 1e-08
    updates = []
    i = theano.shared(numpy.float32(0.0))
    i_t = (i + 1.0)
    # Bias-correction terms for the first and second moment estimates.
    fix1 = (1.0 - (b1 ** i_t))
    fix2 = (1.0 - (b2 ** i_t))
    lr_t = (lr0 * (tensor.sqrt(fix2) / fix1))
    for (p, g) in zip(tparams.values(), gshared):
        m = theano.shared(p.get_value() * 0.0)
        v = theano.shared(p.get_value() * 0.0)
        m_t = ((b1 * g) + ((1.0 - b1) * m))
        v_t = ((b2 * tensor.sqr(g)) + ((1.0 - b2) * v))
        g_t = (m_t / (tensor.sqrt(v_t) + e))
        p_t = (p - (lr_t * g_t))
        updates.append((m, m_t))
        updates.append((v, v_t))
        updates.append((p, p_t))
    # Bug fix: register the step-counter update exactly once, OUTSIDE the
    # loop.  Appending it per-parameter produced duplicate update expressions
    # for the shared variable `i`, which theano.function rejects.
    updates.append((i, i_t))
    f_update = theano.function([lr], [], updates=updates, on_unused_input='ignore', profile=False)
    return (f_grad_shared, f_update)
|
def zipp(params, tparams):
    """Push numpy parameter values into their Theano shared variables."""
    # .items() instead of Python-2-only .iteritems()
    for kk, vv in params.items():
        tparams[kk].set_value(vv)
|
def unzip(zipped):
    """Pull current values out of Theano shared variables into an OrderedDict."""
    new_params = OrderedDict()
    # .items() instead of Python-2-only .iteritems()
    for kk, vv in zipped.items():
        new_params[kk] = vv.get_value()
    return new_params
|
def itemlist(tparams):
    """
    Get the list of parameter values, in insertion order.
    Note that tparams must be an OrderedDict.
    """
    # list(values()) instead of iterating Python-2-only .iteritems()
    return list(tparams.values())
|
def _p(pp, name):
'\n Make prefix-appended name\n '
return ('%s_%s' % (pp, name))
|
def init_tparams(params):
    """
    Wrap each initial numpy parameter in a theano.shared variable.

    Returns an OrderedDict keyed identically to `params`.
    """
    tparams = OrderedDict()
    # .items() instead of Python-2-only .iteritems()
    for kk, pp in params.items():
        tparams[kk] = theano.shared(params[kk], name=kk)
    return tparams
|
def load_params(path, params):
    """
    Overwrite entries of `params` with arrays from the numpy archive at `path`.

    Missing keys only trigger a warning.  Returns `params`.
    """
    pp = numpy.load(path)
    # .items() instead of Python-2-only .iteritems()
    for kk, vv in params.items():
        if kk not in pp:
            warnings.warn('%s is not in the archive' % kk)
            continue
        params[kk] = pp[kk]
    return params
|
def ortho_weight(ndim):
    """Random orthogonal square float32 matrix, from the SVD of a Gaussian."""
    (u, s, v) = numpy.linalg.svd(numpy.random.randn(ndim, ndim))
    return u.astype('float32')
|
def norm_weight(nin, nout=None, scale=0.1, ortho=True):
    """
    Uniform float32 initialization in [-scale, scale]; nout defaults to nin.
    Square matrices use an orthogonal init when ortho=True.
    """
    if nout is None:  # 'is None', not '== None'
        nout = nin
    if nout == nin and ortho:
        W = ortho_weight(nin)
    else:
        W = numpy.random.uniform(low=-scale, high=scale, size=(nin, nout))
    return W.astype('float32')
|
def tanh(x):
    """Elementwise tanh activation (thin wrapper over theano tensor.tanh)."""
    return tensor.tanh(x)
|
def linear(x):
    """Identity activation."""
    return x
|
def concatenate(tensor_list, axis=0):
    """
    Alternative implementation of `theano.tensor.concatenate`.

    Joins symbolic tensors along `axis`; other dimensions must match.
    """
    # Total symbolic extent along the concatenation axis.
    concat_size = sum((tt.shape[axis] for tt in tensor_list))
    # Output shape: first tensor's shape with the axis extent replaced.
    output_shape = ()
    for k in range(axis):
        output_shape += (tensor_list[0].shape[k],)
    output_shape += (concat_size,)
    for k in range((axis + 1), tensor_list[0].ndim):
        output_shape += (tensor_list[0].shape[k],)
    # Allocate zeros, then copy each input into its slice along `axis`.
    out = tensor.zeros(output_shape)
    offset = 0
    for tt in tensor_list:
        indices = ()
        for k in range(axis):
            indices += (slice(None),)
        indices += (slice(offset, (offset + tt.shape[axis])),)
        for k in range((axis + 1), tensor_list[0].ndim):
            indices += (slice(None),)
        out = tensor.set_subtensor(out[indices], tt)
        offset += tt.shape[axis]
    return out
|
def build_dictionary(text):
    """
    Build a frequency-ranked word -> index dictionary (indices start at 2).
    Returns (worddict, wordcount).
    """
    wordcount = OrderedDict()
    for cc in text:
        for w in cc.split():
            if w not in wordcount:
                wordcount[w] = 0
            wordcount[w] += 1
    # list(...) -- dict views are not indexable in Python 3.
    words = list(wordcount.keys())
    freqs = list(wordcount.values())
    sorted_idx = numpy.argsort(freqs)[::-1]
    worddict = OrderedDict()
    for idx, sidx in enumerate(sorted_idx):
        worddict[words[sidx]] = idx + 2
    return (worddict, wordcount)
|
def load_dictionary(loc='/ais/gobi3/u/rkiros/bookgen/book_dictionary_large.pkl'):
    """Load and return the pickled word dictionary at `loc`."""
    with open(loc, 'rb') as handle:
        worddict = pkl.load(handle)
    return worddict
|
def save_dictionary(worddict, wordcount, loc):
    """Write `worddict` followed by `wordcount` as two pickles into `loc`."""
    with open(loc, 'wb') as handle:
        pkl.dump(worddict, handle)
        pkl.dump(wordcount, handle)
|
def load_dataset(name='f8k', load_train=True):
    """
    Load captions and image features for one of: f8k, f30k, coco.

    Reads from the module-level `path_to_data` directory.  Returns
    ((train_caps, train_ims), (dev_caps, dev_ims), (test_caps, test_ims));
    the train entries are None when load_train is False.
    """
    loc = path_to_data + name + '/'

    def _read_caps(path):
        # Files are opened in binary mode, so captions are bytes objects.
        with open(path, 'rb') as f:
            return [line.strip() for line in f]

    train_caps = _read_caps(loc + name + '_train_caps.txt') if load_train else None
    dev_caps = _read_caps(loc + name + '_dev_caps.txt')
    test_caps = _read_caps(loc + name + '_test_caps.txt')
    train_ims = numpy.load(loc + name + '_train_ims.npy') if load_train else None
    dev_ims = numpy.load(loc + name + '_dev_ims.npy')
    test_ims = numpy.load(loc + name + '_test_ims.npy')
    return ((train_caps, train_ims), (dev_caps, dev_ims), (test_caps, test_ims))
|
def adam(lr, tparams, grads, inp, cost):
    """
    Build Adam update functions.

    f_grad_shared(*inp) computes the cost and stores gradients in shared
    variables; f_update(lr) applies one Adam step at learning rate `lr`.
    """
    # .items() instead of Python-2-only .iteritems()
    gshared = [theano.shared(p.get_value() * 0.0, name='%s_grad' % k)
               for (k, p) in tparams.items()]
    gsup = [(gs, g) for (gs, g) in zip(gshared, grads)]
    f_grad_shared = theano.function(inp, cost, updates=gsup, profile=False)
    b1 = 0.1
    b2 = 0.001
    e = 1e-08
    updates = []
    i = theano.shared(numpy.float32(0.0))
    i_t = (i + 1.0)
    # Bias-correction terms for the moment estimates.
    fix1 = (1.0 - (b1 ** i_t))
    fix2 = (1.0 - (b2 ** i_t))
    lr_t = (lr * (tensor.sqrt(fix2) / fix1))
    for (p, g) in zip(tparams.values(), gshared):
        m = theano.shared(p.get_value() * 0.0)
        v = theano.shared(p.get_value() * 0.0)
        m_t = ((b1 * g) + ((1.0 - b1) * m))
        v_t = ((b2 * tensor.sqr(g)) + ((1.0 - b2) * v))
        g_t = (m_t / (tensor.sqrt(v_t) + e))
        p_t = (p - (lr_t * g_t))
        updates.append((m, m_t))
        updates.append((v, v_t))
        updates.append((p, p_t))
    # Bug fix: register the step-counter update exactly once, OUTSIDE the
    # loop.  Appending it per-parameter created duplicate update expressions
    # for the shared variable `i`, which theano.function rejects.
    updates.append((i, i_t))
    f_update = theano.function([lr], [], updates=updates, on_unused_input='ignore', profile=False)
    return (f_grad_shared, f_update)
|
def build_dictionary(text):
    """
    Build a frequency-ranked word -> index dictionary from tokenized
    sentences (indices start at 2).  Returns (worddict, wordcount).
    """
    wordcount = OrderedDict()
    for cc in text:
        for w in cc.split():
            if w not in wordcount:
                wordcount[w] = 0
            wordcount[w] += 1
    # list(...) -- dict views are not indexable in Python 3, so the original
    # words[sidx] would raise TypeError.
    words = list(wordcount.keys())
    freqs = list(wordcount.values())
    sorted_idx = numpy.argsort(freqs)[::-1]
    worddict = OrderedDict()
    for idx, sidx in enumerate(sorted_idx):
        worddict[words[sidx]] = idx + 2
    return (worddict, wordcount)
|
def load_dictionary(loc='/ais/gobi3/u/rkiros/bookgen/book_dictionary_large.pkl'):
    """Return the word dictionary unpickled from `loc`."""
    with open(loc, 'rb') as fh:
        return pkl.load(fh)
|
def save_dictionary(worddict, wordcount, loc):
    """Serialize `worddict` then `wordcount` into the file at `loc`."""
    with open(loc, 'wb') as fh:
        for payload in (worddict, wordcount):
            pkl.dump(payload, fh)
|
def yuv_import(filename, dims, numfrm, startfrm):
    """
    Read `numfrm` frames of planar YUV 4:2:0 video, starting at `startfrm`.

    dims is (height, width) of the luma plane; the chroma planes are half
    size in each dimension.  Returns uint8 arrays (Y, U, V) of shapes
    (numfrm, H, W), (numfrm, H//2, W//2), (numfrm, H//2, W//2).
    """
    # Integer frame size in bytes: H*W luma + 2 * (H//2 * W//2) chroma.
    # // keeps the seek offset an int (the original '/ 2' produced a float).
    blk_size = (int(np.prod(dims)) * 3) // 2
    d00 = dims[0] // 2
    d01 = dims[1] // 2
    Y = np.zeros((numfrm, dims[0], dims[1]), np.uint8, 'C')
    U = np.zeros((numfrm, d00, d01), np.uint8, 'C')
    V = np.zeros((numfrm, d00, d01), np.uint8, 'C')
    # 'with' guarantees the file handle is closed even on error.
    with open(filename, 'rb') as fp:
        fp.seek(blk_size * startfrm, 0)
        for i in range(numfrm):
            # Bulk plane reads replace the original one-byte ord(fp.read(1))
            # loops: identical values, orders of magnitude faster.
            Y[i] = np.frombuffer(fp.read(dims[0] * dims[1]), np.uint8).reshape(dims[0], dims[1])
            U[i] = np.frombuffer(fp.read(d00 * d01), np.uint8).reshape(d00, d01)
            V[i] = np.frombuffer(fp.read(d00 * d01), np.uint8).reshape(d00, d01)
    return (Y, U, V)
|
def yuv2rgb(Y, U, V, height, width):
    """
    Convert one YUV frame to (r, g, b) uint8 planes of shape (height, width).

    U and V are upsampled to full resolution with bilinear interpolation.
    NOTE(review): relies on a module-level `imresize` (scipy.misc.imresize
    was removed in SciPy 1.3) -- confirm what provides it at import time.
    """
    U = imresize(U, [height, width], 'bilinear', mode='F')
    V = imresize(V, [height, width], 'bilinear', mode='F')
    # YCbCr -> RGB conversion with the coefficients used upstream.
    rf = Y + (1.4075 * (V - 128.0))
    gf = (Y - (0.3455 * (U - 128.0))) - (0.7169 * (V - 128.0))
    bf = Y + (1.779 * (U - 128.0))
    # np.clip replaces the original per-pixel Python clamping loops (same
    # result, vectorized).  The original bare `uint8` name only resolved via
    # a star import; np.uint8 is explicit.
    r = np.clip(rf, 0, 255).astype(np.uint8)
    g = np.clip(gf, 0, 255).astype(np.uint8)
    b = np.clip(bf, 0, 255).astype(np.uint8)
    return (r, g, b)
|
class ConvLSTMCell_orig(tf.nn.rnn_cell.RNNCell):
    """A LSTM cell with convolutions instead of multiplications.
    Reference:
      Xingjian, S. H. I., et al. "Convolutional LSTM network: A machine learning
      approach for precipitation nowcasting." Advances in Neural Information
      Processing Systems. 2015.
    """
    def __init__(self, shape, filters, kernel, forget_bias=1.0, activation=tf.tanh, normalize=False, peephole=False, data_format='channels_last', reuse=None):
        # shape: spatial dims of the feature map; filters: output channels;
        # kernel: convolution kernel size (list of ints).
        super(ConvLSTMCell_orig, self).__init__(_reuse=reuse)
        self._kernel = kernel
        self._filters = filters
        self._forget_bias = forget_bias
        self._activation = activation
        self._normalize = normalize
        self._peephole = peephole
        if (data_format == 'channels_last'):
            self._size = tf.TensorShape((shape + [self._filters]))
            self._feature_axis = self._size.ndims
            self._data_format = None
        elif (data_format == 'channels_first'):
            self._size = tf.TensorShape(([self._filters] + shape))
            self._feature_axis = 0
            self._data_format = 'NC'
        else:
            raise ValueError('Unknown data_format')
    @property
    def state_size(self):
        # State is the (cell, hidden) pair; both are full feature maps.
        return tf.nn.rnn_cell.LSTMStateTuple(self._size, self._size)
    @property
    def output_size(self):
        return self._size
    def call(self, x, state):
        (c, h) = state
        # Concatenate input and previous hidden state along the channel axis,
        # then compute all four gates with a single convolution.
        x = tf.concat([x, h], axis=self._feature_axis)
        n = x.shape[(- 1)].value
        m = ((4 * self._filters) if (self._filters > 1) else 4)
        W = tf.get_variable('kernel', (self._kernel + [n, m]))
        y = tf.nn.convolution(x, W, 'SAME', data_format=self._data_format)
        if (not self._normalize):
            # Bias is redundant when layer norm follows.
            y += tf.get_variable('bias', [m], initializer=tf.zeros_initializer())
        # j: candidate values; i/f/o: input, forget and output gates.
        (j, i, f, o) = tf.split(y, 4, axis=self._feature_axis)
        if self._peephole:
            # Peephole connections let input/forget gates see the cell state.
            i += (tf.get_variable('W_ci', c.shape[1:]) * c)
            f += (tf.get_variable('W_cf', c.shape[1:]) * c)
        if self._normalize:
            j = tf.contrib.layers.layer_norm(j)
            i = tf.contrib.layers.layer_norm(i)
            f = tf.contrib.layers.layer_norm(f)
        f = tf.sigmoid((f + self._forget_bias))
        i = tf.sigmoid(i)
        c = ((c * f) + (i * self._activation(j)))
        if self._peephole:
            o += (tf.get_variable('W_co', c.shape[1:]) * c)
        if self._normalize:
            o = tf.contrib.layers.layer_norm(o)
            c = tf.contrib.layers.layer_norm(c)
        o = tf.sigmoid(o)
        h = (o * self._activation(c))
        state = tf.nn.rnn_cell.LSTMStateTuple(c, h)
        return (h, state)
|
class QGConvLSTMCell(tf.nn.rnn_cell.RNNCell):
    """A LSTM cell with convolutions instead of multiplications, whose forget
    and update gates are supplied externally through the input tensor.
    Reference:
      Xingjian, S. H. I., et al. "Convolutional LSTM network: A machine learning
      approach for precipitation nowcasting." Advances in Neural Information
      Processing Systems. 2015.
    """
    def __init__(self, shape, filters, kernel, forget_bias=1.0, activation=tf.tanh, normalize=False, peephole=False, data_format='channels_last', reuse=None):
        super(QGConvLSTMCell, self).__init__(_reuse=reuse, name='conv_lstm_cell')
        self._kernel = kernel
        self._filters = filters
        self._forget_bias = forget_bias
        self._activation = activation
        self._normalize = normalize
        self._peephole = peephole
        if (data_format == 'channels_last'):
            self._size = tf.TensorShape((shape + [self._filters]))
            self._feature_axis = self._size.ndims
            self._data_format = None
        elif (data_format == 'channels_first'):
            self._size = tf.TensorShape(([self._filters] + shape))
            self._feature_axis = 0
            self._data_format = 'NC'
        else:
            raise ValueError('Unknown data_format')
    @property
    def state_size(self):
        return tf.nn.rnn_cell.LSTMStateTuple(self._size, self._size)
    @property
    def output_size(self):
        return self._size
    def call(self, x, state):
        (c, h) = state
        # Input carries two extra channels: precomputed forget (f) and update
        # (u) gate maps, split off before the convolution.
        (x, f, u) = tf.split(x, [self._filters, 1, 1], axis=(- 1))
        x = tf.concat([x, h], axis=self._feature_axis)
        n = x.shape[(- 1)].value
        # Only two gate maps are produced here (j candidate, o output);
        # f and u come from the input above.
        m = ((2 * self._filters) if (self._filters > 1) else 2)
        W = tf.get_variable('kernel', (self._kernel + [n, m]))
        y = tf.nn.convolution(x, W, 'SAME', data_format=self._data_format)
        if (not self._normalize):
            y += tf.get_variable('bias', [m], initializer=tf.zeros_initializer())
        (j, o) = tf.split(y, 2, axis=self._feature_axis)
        if self._normalize:
            j = tf.contrib.layers.layer_norm(j)
        c = ((c * f) + (u * self._activation(j)))
        if self._peephole:
            o += (tf.get_variable('W_co', c.shape[1:]) * c)
        if self._normalize:
            o = tf.contrib.layers.layer_norm(o)
            c = tf.contrib.layers.layer_norm(c)
        o = tf.sigmoid(o)
        h = (o * self._activation(c))
        state = tf.nn.rnn_cell.LSTMStateTuple(c, h)
        return (h, state)
|
class ConvGRUCell(tf.nn.rnn_cell.RNNCell):
    """A GRU cell with convolutions instead of multiplications."""
    def __init__(self, shape, filters, kernel, activation=tf.tanh, normalize=True, data_format='channels_last', reuse=None):
        super(ConvGRUCell, self).__init__(_reuse=reuse)
        self._filters = filters
        self._kernel = kernel
        self._activation = activation
        self._normalize = normalize
        if (data_format == 'channels_last'):
            self._size = tf.TensorShape((shape + [self._filters]))
            self._feature_axis = self._size.ndims
            self._data_format = None
        elif (data_format == 'channels_first'):
            self._size = tf.TensorShape(([self._filters] + shape))
            self._feature_axis = 0
            self._data_format = 'NC'
        else:
            raise ValueError('Unknown data_format')
    @property
    def state_size(self):
        # GRU state is a single hidden feature map.
        return self._size
    @property
    def output_size(self):
        return self._size
    def call(self, x, h):
        channels = x.shape[self._feature_axis].value
        with tf.variable_scope('gates'):
            # One convolution produces both the reset (r) and update (u) gates.
            inputs = tf.concat([x, h], axis=self._feature_axis)
            n = (channels + self._filters)
            m = ((2 * self._filters) if (self._filters > 1) else 2)
            W = tf.get_variable('kernel', (self._kernel + [n, m]))
            y = tf.nn.convolution(inputs, W, 'SAME', data_format=self._data_format)
            if self._normalize:
                (r, u) = tf.split(y, 2, axis=self._feature_axis)
                r = tf.contrib.layers.layer_norm(r)
                u = tf.contrib.layers.layer_norm(u)
            else:
                # Gate bias initialized to ones so gates start mostly open.
                y += tf.get_variable('bias', [m], initializer=tf.ones_initializer())
                (r, u) = tf.split(y, 2, axis=self._feature_axis)
            (r, u) = (tf.sigmoid(r), tf.sigmoid(u))
        with tf.variable_scope('candidate'):
            # Candidate state from the input and the reset-gated hidden state.
            inputs = tf.concat([x, (r * h)], axis=self._feature_axis)
            n = (channels + self._filters)
            m = self._filters
            W = tf.get_variable('kernel', (self._kernel + [n, m]))
            y = tf.nn.convolution(inputs, W, 'SAME', data_format=self._data_format)
            if self._normalize:
                y = tf.contrib.layers.layer_norm(y)
            else:
                y += tf.get_variable('bias', [m], initializer=tf.zeros_initializer())
            # Convex blend of old state and candidate, weighted by the update gate.
            h = ((u * h) + ((1 - u) * self._activation(y)))
        return (h, h)
|
def yuv_import(filename, dims, numfrm, startfrm):
    """
    Read `numfrm` planar YUV 4:2:0 frames from `filename`, starting at
    frame `startfrm`.  dims is (height, width) of the luma plane.
    Returns uint8 arrays (Y, U, V).
    """
    # `np.int` was removed in NumPy 1.24 -- use plain int();
    # // keeps the byte offset an integer instead of a float.
    blk_size = (int(np.prod(dims)) * 3) // 2
    d00 = dims[0] // 2
    d01 = dims[1] // 2
    Y = np.zeros((numfrm, dims[0], dims[1]), np.uint8, 'C')
    U = np.zeros((numfrm, d00, d01), np.uint8, 'C')
    V = np.zeros((numfrm, d00, d01), np.uint8, 'C')
    # 'with' guarantees the file handle is closed even on error.
    with open(filename, 'rb') as fp:
        fp.seek(blk_size * startfrm, 0)
        for i in range(numfrm):
            for m in range(dims[0]):
                for n in range(dims[1]):
                    Y[i, m, n] = ord(fp.read(1))
            for m in range(d00):
                for n in range(d01):
                    U[i, m, n] = ord(fp.read(1))
            for m in range(d00):
                for n in range(d01):
                    V[i, m, n] = ord(fp.read(1))
    return (Y, U, V)
|
def validate_parts(parts):
    """
    Validate a --parts CLI value of the form "start:end" (end excluded).

    Exits the program with a descriptive message on any malformed input;
    otherwise returns the (start_part, end_part) integer pair.
    """
    single_value_msg = "Single value is not accepted for --parts as it's ambiguous between wanting only that exact part or that number of parts starting from 0. Please use a range instead like 0:2"
    if type(parts) is int:
        sys.exit(single_value_msg)
    try:
        bounds = [int(part) for part in parts.split(':')]
    except Exception:
        sys.exit(f'The parts pattern "{parts}" is not valid. It should be a valid range such as "0:2" or "14:42"')
    if len(bounds) == 0:
        sys.exit('The --parts argument cannot be empty')
    if len(bounds) == 1:
        sys.exit(single_value_msg)
    if len(bounds) > 2:
        sys.exit('Ranges with more than 2 parts, such as 0:2:14 are not valid for --parts. Please limit yourself with simple ranges such as 0:14')
    start_part, end_part = bounds
    if min(start_part, end_part) < 0:
        sys.exit('Only positive integers are allowed for --parts, such as 0:14')
    if end_part <= start_part:
        sys.exit('The --parts argument must be of the shape "s:e" with s < e, such as "0:1" or "14:42". The "e" bound is excluded.')
    return (start_part, end_part)
|
def validate_part_format(pattern):
    """Exit unless `pattern` contains exactly one format variable named 'part'."""
    fields = [field for (_, field, _, _) in string.Formatter().parse(pattern)
              if field is not None]
    if fields != ['part']:
        sys.exit(f'Your pattern "{pattern}" is not valid as it should contain the "part" variable such as "{{part:04d}}.npy".')
|
def main():
    """Main entry point: expose the download/compress/index subcommands via fire."""
    fire.Fire({'download': snip_download, 'compress': snip_compress, 'index': snip_index})
|
def snip_download(outfolder='data/downloaded', start=0, end=2313, dl_dedup_set=True):
    """Download and deduplicate a dataset.

    Parameters
    ----------
    outfolder : str, optional
        Where to put the downloaded metadata
    start : int, optional
        Start index of the metadata
    end : int, optional
        End index of the metadata
    dl_dedup_set : bool, optional
        Indicate whether you'll download the dedup set again (2GB)
    """
    metadata_dir = os.path.join(outfolder, 'metadata')
    dedup_set_path = os.path.join(outfolder, 'is_dup_mlp_1024_128_gelu_snn_2layer_notext.npy')
    os.makedirs(metadata_dir, exist_ok=True)
    if dl_dedup_set:
        print('downloading dedup set...')
        url = 'https://huggingface.co/datasets/fraisdufour/snip-dedup/resolve/main/is_dup_mlp_1024_128_gelu_snn_2layer_notext.npy'
        response = requests.get(url)
        # 'with' closes the handle deterministically (the original leaked it).
        with open(dedup_set_path, 'wb') as f:
            f.write(response.content)
    is_dup_all = np.load(dedup_set_path).ravel()
    # Absolute row offset of the current metadata chunk inside is_dup_all.
    # NOTE(review): with start > 0 this still begins at 0 -- confirm the dedup
    # mask is meant to be indexed per-download rather than per-global-part.
    abs_ind = 0
    for n in range(start, end):
        print(f'downloading metadata file {n}/{end}')
        url = f'https://huggingface.co/datasets/laion/laion2b-en-vit-h-14-embeddings/resolve/main/metadata/metadata_{n:04d}.parquet'
        response = requests.get(url)
        parquet_path = os.path.join(metadata_dir, f'metadata_{n:04d}.parquet')
        with open(parquet_path, 'wb') as f:
            f.write(response.content)
        md = pd.read_parquet(parquet_path)
        chunk_len = len(md.index)  # size of the RAW chunk, before filtering
        non_dup_chunk = is_dup_all[abs_ind:(abs_ind + chunk_len)]
        non_dup_chunk = np.logical_not(non_dup_chunk)
        # NOTE(review): row 0 is always kept -- presumably to avoid writing an
        # empty parquet; confirm.
        non_dup_chunk[0] = True
        md = md[non_dup_chunk]
        md.to_parquet(parquet_path)
        # Bug fix: advance by the ORIGINAL chunk length.  The original added
        # len(md.index) AFTER filtering, so the mask drifted out of alignment
        # as soon as one file contained duplicates.
        abs_ind += chunk_len
|
def snip_index(parts='0:2', snip_feats='snip_feats/{part:04d}.npy', snip_base_index_path='snip_models/snip_vitl14_deep_IVFPQ_M4_base.index', index_outdir='snip_index', shard_size=1):
    """Build a sharded index from SNIP compressed features

    Parameters
    ----------
    parts : str
        Parts to index, using a slice notation, such as 0:2 or 14:42
    snip_feats : str
        Pattern referencing the path to the SNIP features parts.
        You are expected to use the "part" variable with formatting options,
        such as "{part:03d}.npy" which will be replaced by "001.npy" when part==1.
    snip_base_index_path : str
        Path to the base index, might be something like:
        snip_models/snip_vitl14_deep_IVFPQ_M4_base.index
    index_outdir : str
        Directory where the computed index parts will be saved.
    shard_size : int
        Number of SNIP parts to group per index shard.
        Since the index is much smaller than the features, we can pack many
        feature parts in a single index shard.
    """
    if not os.path.isfile(snip_base_index_path):
        sys.exit(f'The base index file "{snip_base_index_path}" does not exist or is not readable.')
    (start_part, end_part) = _cli_helper.validate_parts(parts)
    _cli_helper.validate_part_format(snip_feats)
    if (start_part % shard_size) != 0:
        sys.exit(f'WARNING: your starting SNIP part ({start_part}) is not a multiple of your packing argument ({shard_size}). You might be doing a mistake so please double check.')
    res = faiss.StandardGpuResources()
    os.makedirs(index_outdir, exist_ok=True)
    parts_range = list(range(start_part, end_part))
    grouped_parts = [parts_range[i:(i + shard_size)] for i in range(0, len(parts_range), shard_size)]
    base_index = faiss.read_index(snip_base_index_path)
    # `shard_parts` / `part_id`: the original reused the names `parts`
    # (shadowing the function argument) and `id` (shadowing the builtin).
    for shard_parts in grouped_parts:
        # Fresh GPU copy of the base index for every shard.
        index = faiss.index_cpu_to_gpu(res, 0, base_index)
        for snip_part_id in shard_parts:
            print(f'Indexing SNIP part {snip_part_id} ...')
            snip_part = np.load(snip_feats.format(part=snip_part_id))
            index.add(snip_part)
        group_str = '_'.join([f'{part_id:04d}' for part_id in shard_parts])
        print(f'Writing index for parts {shard_parts} ...')
        faiss.write_index(faiss.index_gpu_to_cpu(index), os.path.join(index_outdir, f'{group_str}.index'))
|
def test():
    """Run the bundled test suite with verbose output."""
    import unittest
    from tests import testsuite as _testsuite
    # Dropped the unused `from hypothesis import Settings, Verbosity`:
    # neither name was referenced, and those capitalized names no longer
    # exist in modern hypothesis, so the import itself would fail.
    unittest.TextTestRunner(verbosity=2).run(_testsuite)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.