| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
| stringlengths 2–99 | stringlengths 13–225 | stringlengths 0–18.3M | int64 0–18.3M | float64 0–1.36M | int64 0–4.26M | stringclasses 1 value |
|---|---|---|---|---|---|---|
| spyn-repr | spyn-repr-master/dataset.py |
import numpy
import csv
import re
DATA_PATH = "data/"
DATA_FULL_PATH = DATA_PATH + 'full/'
DATASET_NAMES = ['accidents',
'ad',
'baudio',
'bbc',
'bnetflix',
'book',
'c20ng',
'cr52',
'cwebkb',
'dna',
'jester',
'kdd',
'msnbc',
'msweb',
'nltcs',
'plants',
'pumsb_star',
'tmovie',
'tretail']
import os
from spn import RND_SEED
from spn import MARG_IND
def csv_2_numpy(file, path=DATA_PATH, sep=',', type='int8'):
    """
    Load a csv file from path into a numpy array of the given dtype
    """
    file_path = os.path.join(path, file)
    with open(file_path, "r") as csv_file:
        reader = csv.reader(csv_file, delimiter=sep)
        x = list(reader)
    dataset = numpy.array(x).astype(type)
    return dataset
def load_train_val_test_csvs(dataset,
path=DATA_PATH,
sep=',',
type='int32',
suffixes=['.ts.data',
'.valid.data',
'.test.data']):
"""
WRITEME
"""
csv_files = [dataset + ext for ext in suffixes]
return [csv_2_numpy(file, path, sep, type) for file in csv_files]
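#
# A minimal usage sketch (assuming the standard split files,
# e.g. data/nltcs.ts.data, are present under DATA_PATH):
# train, valid, test = load_train_val_test_csvs('nltcs')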
def load_dataset_splits(path=DATA_PATH,
filter_regex=['\.ts\.data',
'\.valid\.data',
'\.test\.data'],
sep=',',
type='int32',):
dataset_paths = []
for pattern in filter_regex:
for f in os.listdir(path):
if os.path.isfile(os.path.join(path, f)) and pattern in f:
dataset_paths.append(f)
break
return [csv_2_numpy(file_path, path, sep, type) for file_path in dataset_paths]
def save_splits_to_csv(dataset_name,
output_path,
dataset_splits,
splits_names=['train',
'valid',
'test'],
ext='data'):
assert len(splits_names) == len(dataset_splits)
n_features = dataset_splits[0].shape[1]
os.makedirs(output_path, exist_ok=True)
for split, name in zip(dataset_splits, splits_names):
assert split.shape[1] == n_features
print('\t{0} shape: {1}'.format(name, split.shape))
split_file_name = '.'.join([dataset_name, name, ext])
split_out_path = os.path.join(output_path, split_file_name)
numpy.savetxt(split_out_path, split, delimiter=',', fmt='%d')
print('\t\tSaved split to {}'.format(split_out_path))
def sample_indexes(indexes, perc, replace=False, rand_gen=None):
"""
index sampling
"""
n_indices = indexes.shape[0]
sample_size = int(n_indices * perc)
if rand_gen is None:
rand_gen = numpy.random.RandomState(RND_SEED)
sampled_indices = rand_gen.choice( # n_indices,
indexes,
size=sample_size,
replace=replace)
return sampled_indices
def sample_instances(dataset, perc, replace=False, rndState=None):
"""
Little utility to sample instances (rows) from
a dataset (2d numpy array)
"""
n_instances = dataset.shape[0]
sample_size = int(n_instances * perc)
if rndState is None:
row_indexes = numpy.random.choice(n_instances,
sample_size,
replace)
else:
row_indexes = rndState.choice(n_instances,
sample_size,
replace)
# print(row_indexes)
return dataset[row_indexes, :]
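#
# Hedged usage sketch: _demo_sample_instances is a hypothetical helper, not part
# of the original module, showing that a 50% sample halves the number of rows.
def _demo_sample_instances():
    data = numpy.arange(20).reshape(10, 2)
    sample = sample_instances(data, perc=0.5,
                              rndState=numpy.random.RandomState(0))
    assert sample.shape == (5, 2)
    return sample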
def sample_sets(datasets, perc, replace=False, rndState=None):
"""
WRITEME
"""
sampled_datasets = [sample_instances(dataset, perc, replace, rndState)
for dataset in datasets]
return sampled_datasets
def dataset_to_instances_set(dataset):
#
# from numpy arrays to python tuples
instances = [tuple(x) for x in dataset]
#
# removing duplicates
instances = set(instances)
return instances
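#
# Hedged usage sketch: _demo_dataset_to_instances_set is a hypothetical helper,
# not part of the original module, showing that duplicate rows collapse.
def _demo_dataset_to_instances_set():
    data = numpy.array([[0, 1], [0, 1], [1, 1]])
    instances = dataset_to_instances_set(data)
    assert len(instances) == 2
    return instances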
from time import perf_counter
def one_hot_encoding(data, feature_values=None, n_features=None, dtype=numpy.float32):
    # comparing against None explicitly: the truth value of a numpy array is ambiguous
    if feature_values is not None and n_features is not None:
        assert len(feature_values) == n_features
    #
    # if values are not specified, assuming all of them to be binary
    if feature_values is None and n_features is not None:
        feature_values = numpy.array([2 for i in range(n_features)])
    if feature_values is not None and n_features is None:
        n_features = len(feature_values)
    if feature_values is None and n_features is None:
        raise ValueError('Specify feature values or n_features')
#
# computing the new number of features
n_features_ohe = numpy.sum(feature_values)
n_instances = data.shape[0]
transformed_data = numpy.zeros((n_instances, n_features_ohe), dtype=dtype)
enc_start_t = perf_counter()
for i in range(n_instances):
for j in range(n_features):
value = data[i, j]
if value != MARG_IND:
ohe_feature_id = int(numpy.sum(feature_values[:j]) + data[i, j])
transformed_data[i, ohe_feature_id] = 1
else:
ohe_feature_id = int(numpy.sum(feature_values[:j]))
# print(ohe_feature_id, ohe_feature_id + feature_values[j])
ohe_feature_ids = [i for i in range(ohe_feature_id,
ohe_feature_id + feature_values[j])]
transformed_data[i, ohe_feature_ids] = 1
enc_end_t = perf_counter()
print('New dataset ({0} x {1}) encoded in {2}'.format(transformed_data.shape[0],
transformed_data.shape[1],
enc_end_t - enc_start_t))
return transformed_data
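#
# Hedged usage sketch: _demo_one_hot_encoding is a hypothetical helper, not part
# of the original module, showing that two binary features expand to four
# indicator columns (assuming MARG_IND is not a valid feature value like 0 or 1).
def _demo_one_hot_encoding():
    toy = numpy.array([[0, 1], [1, 0]])
    # passing only n_features triggers the all-binary default above
    ohe = one_hot_encoding(toy, n_features=2)
    assert ohe.shape == (2, 4)
    return ohe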
def data_2_freqs(dataset):
"""
WRITEME
"""
freqs = []
features = []
for j, col in enumerate(dataset.T):
freq_dict = {'var': j}
# transforming into a set to get the feature value
    # this assumes there are no missing values
# feature_values = max(2, len(set(col)))
feature_values = max(2, max(set(col)) + 1)
features.append(feature_values)
# create a list whose length is the number of feature values
freq_list = [0 for i in range(feature_values)]
# populate it with the seen values
for val in col:
freq_list[val] += 1
# update the dictionary and the resulting list
freq_dict['freqs'] = freq_list
freqs.append(freq_dict)
return freqs, features
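#
# Hedged usage sketch: _demo_data_2_freqs is a hypothetical helper, not part of
# the original module, showing the counts for a binary and a ternary feature.
def _demo_data_2_freqs():
    data = numpy.array([[0, 2], [1, 0], [1, 1]])
    freqs, features = data_2_freqs(data)
    assert features == [2, 3]
    assert freqs[1]['freqs'] == [1, 1, 1]
    return freqs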
def update_feature_count(old_freqs, new_freqs):
if not old_freqs:
return new_freqs
else:
        for i in range(len(old_freqs)):
old_freqs[i] = max(old_freqs[i], new_freqs[i])
return old_freqs
def data_clust_freqs(dataset,
n_clusters,
rand_state=None):
"""
WRITEME
"""
freqs = []
features = []
n_instances = dataset.shape[0]
# assign clusters randomly to instances
if rand_state is None:
rand_state = numpy.random.RandomState(RND_SEED)
# inst_2_clusters = numpy.random.randint(0, n_clusters, n_instances)
# getting the indices for each cluster
# this all stuff could be done with a single loop
clusters = [[] for i in range(n_clusters)]
# for instance in range(n_instances):
# rand_cluster = rand_state.randint(0, n_clusters)
# clusters[rand_cluster].append(instance)
instance_ids = numpy.arange(n_instances)
rand_state.shuffle(instance_ids)
print(instance_ids)
for i in range(n_instances):
clusters[i % n_clusters].append(instance_ids[i])
# now we can operate cluster-wise
for cluster_ids in clusters:
# collecting all the data for the cluster
cluster_data = dataset[cluster_ids, :]
# count the frequencies for the var values
cluster_freqs, cluster_features = data_2_freqs(cluster_data)
# updating stats
features = update_feature_count(features, cluster_features)
freqs.extend(cluster_freqs)
return freqs, features
def merge_datasets(dataset_name,
shuffle=True,
path=DATA_PATH,
sep=',',
type='int32',
suffixes=['.ts.data',
'.valid.data',
'.test.data'],
savetxt=True,
out_path=DATA_FULL_PATH,
output_suffix='.all.data',
rand_gen=None):
"""
Merging portions of a dataset
Loading them from file and optionally writing them to file
"""
dataset_parts = load_train_val_test_csvs(dataset_name,
path,
sep,
type,
suffixes)
print('Loaded dataset parts for', dataset_name)
#
# checking features
assert len(dataset_parts) > 0
first_dataset = dataset_parts[0]
n_features = first_dataset.shape[1]
for dataset_p in dataset_parts:
assert dataset_p.shape[1] == n_features
    print('\tFeature counts are consistent')
#
# storing instances
n_instances = [dataset_p.shape[0]
for dataset_p in dataset_parts]
#
# merging
merged_dataset = numpy.concatenate(dataset_parts)
print('\tParts merged')
#
# shuffling
if shuffle:
if rand_gen is None:
rand_gen = numpy.random.RandomState(RND_SEED)
rand_gen.shuffle(merged_dataset)
print('\tShuffled')
#
#
tot_n_instances = sum(n_instances)
assert merged_dataset.shape[0] == tot_n_instances
#
# writing out
if savetxt:
out_path = out_path + dataset_name + output_suffix
if not os.path.exists(os.path.dirname(out_path)):
os.makedirs(os.path.dirname(out_path))
fmt = '%.8e'
if 'int' in type:
fmt = '%d'
numpy.savetxt(out_path, merged_dataset, delimiter=sep, fmt=fmt)
print('\tMerged Dataset saved to', out_path)
return merged_dataset
def shuffle_columns(data, rand_gen=None):
if rand_gen is None:
rand_gen = numpy.random.RandomState(RND_SEED)
data = numpy.array(data)
n_features = data.shape[1]
for i in range(n_features):
rand_gen.shuffle(data[:, i])
return data
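#
# Hedged usage sketch: _demo_shuffle_columns is a hypothetical helper, not part
# of the original module, checking that each column keeps its own values while
# rows get decoupled.
def _demo_shuffle_columns():
    data = numpy.arange(12).reshape(4, 3)
    shuffled = shuffle_columns(data, numpy.random.RandomState(0))
    for j in range(3):
        assert set(shuffled[:, j]) == set(data[:, j])
    return shuffled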
def random_binary_dataset(n_instances, n_features, perc=0.5, rand_gen=None):
if rand_gen is None:
rand_gen = numpy.random.RandomState(RND_SEED)
data = rand_gen.binomial(1, p=perc, size=(n_instances, n_features))
return data
def split_into_folds(dataset,
n_folds=10,
percentages=[0.81, 0.09, 0.1]):
"""
Splitting a dataset into N folds (e.g. for cv)
and optionally each fold into train-valid-test
"""
| 11,393 | 28.594805 | 88 | py |
| spyn-repr | spyn-repr-master/caltech101.py |
import numpy
import matplotlib
import matplotlib.pyplot as pyplot
import pickle
import os
from scipy.io import loadmat
RANDOM_SEED = 1337
def load_caltech101_from_mat(data_path,
split_names=['train',
'val',
'test'],
data_suffix='_data',
label_suffix='_labels',
class_names='classnames'):
data_dict = loadmat(data_path)
data_splits = [(data_dict[split + data_suffix], data_dict[split + label_suffix])
for split in split_names]
#
    # unraveling the y
data_splits = [(split_x, split_y.flatten()) for split_x, split_y in data_splits]
return data_splits
def save_caltech101_pickle(data_splits, output_path):
with open(output_path, 'wb') as data_file:
pickle.dump(data_splits, data_file)
def load_caltech101_pickle(data_path):
data_splits = None
with open(data_path, 'rb') as data_file:
data_splits = pickle.load(data_file)
return data_splits
def plot_m_by_n_images(images,
m, n,
fig_size=(12, 12),
cmap=matplotlib.cm.binary):
fig = pyplot.figure(figsize=fig_size)
    for x in range(m):
        for y in range(n):
            # row-major tiling: image (x, y) goes into subplot n * x + y + 1
            # (the original n * y + x indexing overflows the grid when m != n)
            ax = fig.add_subplot(m, n, n * x + y + 1)
            ax.matshow(images[n * x + y], cmap=cmap)
pyplot.xticks(numpy.array([]))
pyplot.yticks(numpy.array([]))
pyplot.show()
def array_2_mat(array, n_rows=28):
return array.reshape(n_rows, -1)
def plot_caltech101_silhouettes(image_arrays,
m, n,
fig_size=(12, 12),
invert=True,
cmap=matplotlib.cm.binary):
image_matrixes = None
if invert:
image_matrixes = [array_2_mat(1 - img).T for img in image_arrays]
else:
image_matrixes = [array_2_mat(img).T for img in image_arrays]
plot_m_by_n_images(image_matrixes, m, n, fig_size, cmap)
| 2,154 | 28.121622 | 84 | py |
| spyn-repr | spyn-repr-master/visualize.py |
from spn import MARG_IND
import numpy
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as pyplot
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.gridspec as gridspec
from matplotlib.colors import LogNorm
# from spn.utils import get_best_value_from_frame
import seaborn
from collections import defaultdict
from collections import Counter
color_bi_list = [(1., 1., 1.), (0., 0., 0.)]
binary_cmap = matplotlib.colors.ListedColormap(color_bi_list)
inv_binary_cmap = matplotlib.colors.ListedColormap(list(reversed(color_bi_list)))
color_tri_list = [(1., 1., 1.), (0., 0., 0.), (1., 0., 0.), ]
ternary_cmap = matplotlib.colors.ListedColormap([(1., 0., 0.), (1., 1., 1.), (0., 0., 0.)])
inv_ternary_cmap = matplotlib.colors.ListedColormap([(1., 0., 0.), (0., 0., 0.), (1., 1., 1.)])
#
# changing font size
# seaborn.set_context("poster", font_scale=1.7, rc={'font.size': 32,
# # 'axes.labelsize': fontSize,
# # 'xtick.labelsize': fontSize,
# # 'ytick.labelsize': fontSize,
# # 'legend.fontsize': fontSize,
# 'text.usetex': True
# })
# matplotlib.rcParams.update({'font.size': 22})
def beautify_with_seaborn():
#
seaborn.set_style('white')
seaborn.despine(trim=True)
seaborn.set_context('poster')
def visualize_curves(curves,
output=None,
labels=None,
lines=None,
linestyles=None,
linewidths=None,
palette='husl',
markers=None,
loc=None,
colors=None,
fig_size=(10, 8)):
"""
WRITEME
"""
seaborn.set_style('white')
seaborn.set_context(rc={'lines.markeredgewidth': 0.1})
seaborn.set_context('poster')
n_curves = len(curves)
    n_lines = len(lines) if lines is not None else 0
    #
    # default legend location: 3 is 'lower left'
    if loc is None:
        loc = 3
#
# setting the palette
# seaborn.set_palette(palette, n_colors=(n_curves + n_lines))
if colors is None:
colors = [seaborn.color_palette("husl", 12)[1],
seaborn.color_palette("husl", 12)[4],
seaborn.color_palette("husl")[0],
seaborn.color_palette("husl", 12)[8],
seaborn.color_palette("husl", 12)[9]]
print(colors)
#
# default linestyle
default_linestyle = '-'
if linestyles is None:
linestyles = [default_linestyle for i in range(n_curves)]
default_width = 6
if linewidths is None:
linewidths = [default_width for i in range(n_curves)]
if markers is None:
# markers = ['s', 'D', '2', '3', '1']
markers = ['3', '1', '2', '3', 's']
fig, ax = pyplot.subplots(figsize=fig_size)
for i, curve in enumerate(curves):
curve_x, curve_y = curve
if labels is not None:
label = labels[i]
line = ax.plot(curve_x, curve_y,
label=label,
linestyle=linestyles[i],
linewidth=linewidths[i],
marker=markers[i],
mew=0.1,
color=colors[i],
markeredgecolor='none'
)
else:
line = ax.plot(curve_x, curve_y,
linestyle=linestyles[i],
linewidth=linewidths[i],
marker=markers[i],
mew=0.1,
color=colors[i],
markeredgecolor='none'
)
#
# lastly plotting straight lines, if present
if lines is not None:
default_linestyles = ['--', '-.', ':']
for i, line_y in enumerate(lines):
#
# this feels a little bit hackish, assuming all share the same axis
prototypical_x_axis = curves[0][0]
start_x = prototypical_x_axis[0]
end_x = prototypical_x_axis[-1]
ax.plot([start_x, end_x],
[line_y, line_y],
linestyle=default_linestyles[i],
color=colors[i + len(curves)],
linewidth=default_width) # linestyles[i + n_curves])
#
# setting up the legend
if labels is not None:
legend = ax.legend(labels, loc=loc)
seaborn.despine()
pyplot.xlabel('# components')
pyplot.ylabel('test ll')
if output is not None:
# fig = pyplot.gcf()
# fig_width = 18.5
# fig_height = 10.5
# dpi = 150
# fig.set_size_inches(fig_width, fig_height)
# fig.savefig(output,
# # additional_artists=[legend],
# dpi=dpi,
# bbox_inches='tight')
# pyplot.close(fig)
pp = PdfPages(output + '.pdf')
pp.savefig(fig)
pp.close()
else:
#
# shall this be mutually exclusive with file saving?
pyplot.show()
DATASET_LIST = ['nltcs', 'msnbc', 'kdd',
'plants', 'baudio', 'jester', 'bnetflix',
'accidents', 'tretail', 'pumsb_star',
'dna', 'kosarek', 'msweb',
'book', 'tmovie', 'cwebkb',
'cr52', 'c20ng', 'bbc', 'ad']
def visualize_histograms(histograms,
output=None,
labels=DATASET_LIST,
linestyles=None,
rotation=90,
legend=None,
y_log=False,
colors=['seagreen', 'orange', 'cornflowerblue']):
"""
Plotting histograms one near the other
"""
n_histograms = len(histograms)
#
    # assuming homogeneous data lengths
# TODO: better error checking
n_ticks = len(histograms[0])
bin_width = 1 / (n_histograms + 1)
bins = [[i + j * bin_width for i in range(n_ticks)]
for j in range(1, n_histograms + 1)]
#
# setting up seaborn
seaborn.set_style("white")
seaborn.set_context("poster")
# seaborn.set_palette(palette, n_colors=n_histograms)
fig, ax = pyplot.subplots()
if legend is not None:
_legend = pyplot.legend(legend)
#
# setting labels
    middle_histogram = min(n_histograms // 2 + 1, n_histograms - 1)
    pyplot.xticks(bins[middle_histogram], labels)
    if rotation is not None:
        locs, labels = pyplot.xticks()
        pyplot.setp(labels, rotation=rotation)
#
# actual plotting
print(histograms)
for i, histogram in enumerate(histograms):
ax.bar(bins[i], histogram, width=bin_width,
facecolor=colors[i], edgecolor="none",
log=y_log)
seaborn.despine()
if output is not None:
pp = PdfPages(output)
pp.savefig(fig)
pp.close()
def jitter(arr, std=.02):
stdev = std * (max(arr) - min(arr))
return arr + numpy.random.randn(len(arr)) * stdev
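#
# Hedged usage sketch: _demo_jitter is a hypothetical helper, not part of the
# original module, showing that jitter perturbs values proportionally to the
# data range without changing the array shape.
def _demo_jitter():
    arr = numpy.array([0., 1., 2.])
    jittered = jitter(arr, std=.02)
    assert jittered.shape == arr.shape
    return jittered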
def plot_depth_vs_size(frame_list,
labels,
depth_col_label='n_levels:',
size_col_label='n_edges:',
fig_size=(9, 8),
save_path=None,
pdf=False,
colors=None,
markers=None,
jitter_points=(.04, .04),
marker_size=80):
if not colors:
colors = seaborn.color_palette("husl")
if not markers:
markers = ['o' for _frame in frame_list]
fig = pyplot.figure(figsize=fig_size)
ax1 = fig.add_subplot(111)
# fig.suptitle('nltcs')
pyplot.xlabel('depth')
pyplot.ylabel('# edges')
for i, frame in enumerate(frame_list):
ax1.scatter(x=jitter(frame[depth_col_label].values, jitter_points[0]),
y=jitter(frame[size_col_label].values, jitter_points[1]),
c=colors[i],
edgecolor='none',
marker=markers[i],
label=labels[i],
s=marker_size)
seaborn.despine()
pyplot.legend(loc='upper right')
if save_path:
fig.savefig(save_path + '.svg')
if pdf:
pp = PdfPages(save_path + '.pdf')
pp.savefig(fig)
pp.close()
pyplot.show()
return ax1
def get_best_value_from_frame(frame,
best_col,
attribute=None):
if not attribute:
attribute = best_col
best_values = frame[frame[best_col] == frame[best_col].max()][attribute].values
assert len(best_values) == 1
return best_values[0]
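#
# Hedged usage sketch (assumes pandas is available): _demo_get_best_value_from_frame
# is a hypothetical helper, not part of the original module, picking the attribute
# of the single row maximizing best_col.
def _demo_get_best_value_from_frame():
    import pandas
    frame = pandas.DataFrame({'valid_ll': [-2.0, -1.5],
                              'n_components': [5, 10]})
    assert get_best_value_from_frame(frame, 'valid_ll', 'n_components') == 10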
def plot_comparative_histograms(frame_lists,
col_name,
best_col_name,
x_labels,
y_label,
save_path=None,
linestyles=None,
rotation=90,
legend=None,
fig_size=(10, 8),
y_log=False,
colors=None,
pdf=False):
"""
Plotting bars one near the other
"""
if not colors:
colors = seaborn.color_palette("husl")
#
# extracting histograms from frames
histograms = [[] for _list in frame_lists]
for i, f_list in enumerate(frame_lists):
for frame in f_list:
param_value = get_best_value_from_frame(frame, best_col_name, col_name)
histograms[i].append(param_value)
n_histograms = len(histograms)
#
# assuming homogeneous data lengths
# TODO: better error checking
n_ticks = len(histograms[0])
bin_width = 1 / (n_histograms + 1)
bins = [[i + j * bin_width for i in range(n_ticks)]
for j in range(1, n_histograms + 1)]
fig = pyplot.figure(figsize=fig_size)
ax = fig.add_subplot(111)
# fig.suptitle('nltcs')
pyplot.xlabel('datasets')
pyplot.ylabel(y_label)
if legend is not None:
_legend = pyplot.legend(legend)
#
# setting labels
    middle_histogram = min(n_histograms // 2 + 1, n_histograms - 1)
    pyplot.xticks(bins[middle_histogram], x_labels)
    if rotation is not None:
        locs, labels = pyplot.xticks()
        pyplot.setp(labels, rotation=rotation)
#
# actual plotting
print(histograms)
for i, histogram in enumerate(histograms):
ax.bar(bins[i], histogram, width=bin_width,
facecolor=colors[i], edgecolor="none",
log=y_log)
seaborn.despine()
pyplot.legend(loc='upper right')
if save_path:
fig.savefig(save_path + '.svg')
if pdf:
pp = PdfPages(save_path + '.pdf')
pp.savefig(fig)
pp.close()
pyplot.show()
return ax
def plot_m_by_n_images(images,
m, n,
fig_size=(12, 12),
cmap=matplotlib.cm.binary,
w_space=0.1,
h_space=0.1,
dpi=900,
save_path=None,
pdf=False):
"""
Plot images in a mxn tiling
"""
print(w_space, h_space)
gs1 = gridspec.GridSpec(m, n)
gs1.update(wspace=w_space, hspace=h_space)
print(len(images))
fig = pyplot.figure(figsize=fig_size, dpi=dpi)
for x in range(m):
for y in range(n):
id = n * x + y
if id < len(images):
ax = fig.add_subplot(gs1[id])
ax.matshow(images[id], cmap=cmap)
pyplot.xticks(numpy.array([]))
pyplot.yticks(numpy.array([]))
# pyplot.tight_layout()
pyplot.subplots_adjust(left=None, right=None, wspace=w_space, hspace=h_space)
if save_path:
fig.savefig(save_path + '.svg')
if pdf:
pp = PdfPages(save_path + '.pdf')
pp.savefig(fig)
pp.close()
pyplot.show()
def plot_m_by_n_by_p_by_q_images(images_lists,
m, n, p, q,
fig_size=(16, 16),
cmap=matplotlib.cm.binary,
save_path=None,
pdf=False):
"""
Plot images in a (mxp + p - 1) x (nxq + q - 1) tiling
"""
fig = pyplot.figure(figsize=fig_size)
tot_rows = m * p + p - 1
tot_cols = n * q + q - 1
for i, images in enumerate(images_lists):
i_row = i // q
i_col = i - q * i_row
# print('i', i, i_row, i_col)
for j, img in enumerate(images):
j_row = j // n
j_col = j - n * j_row
if j < m * n:
# print('j', j, j_row, j_col)
t_row = i_row * m + i_row + j_row
t_col = i_col * n + i_col + j_col
id = tot_cols * t_row + t_col
# print('id', id)
ax = fig.add_subplot(tot_rows,
tot_cols, id + 1)
ax.matshow(img, cmap=cmap)
pyplot.xticks(numpy.array([]))
pyplot.yticks(numpy.array([]))
pyplot.tight_layout()
if save_path:
fig.savefig(save_path + '.svg')
if pdf:
pp = PdfPages(save_path + '.pdf')
pp.savefig(fig)
pp.close()
pyplot.show()
#
# axes utils
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_m_by_n_heatmaps(images,
min_max_list,
m, n,
cmaps,
fig_size=(12, 12),
w_space=0.1,
h_space=0.1,
colorbars=False,
dpi=900,
save_path=None,
pdf=False):
"""
Plot images in a mxn tiling
"""
seaborn.set_style('white')
seaborn.set_context('poster')
assert len(min_max_list) == len(images)
print(len(images))
gs1 = gridspec.GridSpec(m, n)
gs1.update(wspace=w_space, hspace=h_space)
fig = pyplot.figure(figsize=fig_size, dpi=dpi)
for x in range(m):
for y in range(n):
id = n * x + y
if id < len(images):
# ax = fig.add_subplot(m, n, id + 1)
ax = fig.add_subplot(gs1[id])
if id > 0:
norm = None # LogNorm(vmin=min_act, vmax=max_act)
print('min max', min_max_list[id])
img = ax.matshow(images[id],
cmap=cmaps[id],
vmin=min_max_list[id][0],
vmax=min_max_list[id][1],
norm=norm)
pyplot.xticks(numpy.array([]))
pyplot.yticks(numpy.array([]))
pyplot.axis('off')
if colorbars:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
pyplot.colorbar(img, cax=cax)
else:
img = ax.matshow(images[id], cmap=cmaps[id])
pyplot.xticks(numpy.array([]))
pyplot.yticks(numpy.array([]))
pyplot.axis('off')
# pyplot.tight_layout()
if save_path:
fig.savefig(save_path + '.svg')
if pdf:
pp = PdfPages(save_path + '.pdf')
pp.savefig(fig)
pp.close()
pyplot.show()
def array_2_mat(array, n_rows, n_cols):
array = numpy.array(array, copy=True)
return array.reshape(n_rows, n_cols)
def tiling_sizes(n_images, n_cols=None):
n_rows = None
if n_cols is None:
n_rows = int(numpy.sqrt(n_images))
n_cols = n_rows
else:
n_rows = max(n_images // n_cols, 1)
rem_tiles = n_images - n_rows * n_cols
if rem_tiles > 0:
n_rem_rows, n_rem_cols = tiling_sizes(rem_tiles, n_cols)
return n_rows + n_rem_rows, n_cols
return n_rows, n_cols
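#
# Hedged usage sketch: _demo_tiling_sizes is a hypothetical helper, not part of
# the original module, illustrating the grids picked for a number of images.
def _demo_tiling_sizes():
    assert tiling_sizes(16) == (4, 4)  # square grid when n_cols is None
    assert tiling_sizes(10, n_cols=4) == (3, 4)  # 2 full rows plus 1 for the rest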
def scope_histogram(spn,
fig_size=(12, 4),
dpi=900,
ylim=None,
xlim=None,
save_path=None,
pdf=False):
seaborn.set_style('white')
# seaborn.despine(trim=True)
seaborn.set_context('poster', font_scale=1.8)
scope_dict = defaultdict(list)
for node in spn.top_down_nodes():
scope = None
if hasattr(node, 'var_scope'):
scope = node.var_scope
elif hasattr(node, 'var'):
scope = frozenset(node.var)
scope_dict[len(scope)].append(node)
max_scope_len = max(scope_dict.keys())
scope_list = [0] * max_scope_len
for scope_len, nodes in scope_dict.items():
scope_list[scope_len - 1] = len(nodes)
print(scope_list)
fig, ax = pyplot.subplots(figsize=fig_size)
# ax.bar(numpy.arange(max_scope_len),
# scope_list,
# log=True)
# width = 3e-3
width = 0.1
for i in range(0, len(scope_list)):
# x_pos = [10 ** (numpy.log10(i) - width),
# 10 ** (numpy.log10(i) - width),
# 10 ** (numpy.log10(i) + width),
# 10 ** (numpy.log10(i) + width)]
x_pos = [i - width + 1, i - width + 1, i + width + 1, i + width + 1]
y_pos = [0,
scope_list[i],
scope_list[i],
0]
ax.fill(x_pos,
y_pos, 'black')
ax.set_yscale('log')
# ax.set_xscale('log')
if xlim:
ax.set_xlim([xlim[0], xlim[1]])
else:
ax.set_xlim([-1, len(scope_list) + 1])
if ylim:
ax.set_ylim([0.1, ylim])
pyplot.xlabel('scope length')
pyplot.ylabel('# nodes')
pyplot.tight_layout()
if save_path:
fig.savefig(save_path + '.svg')
if pdf:
pp = PdfPages(save_path + '.pdf')
pp.savefig(fig)
pp.close()
pyplot.show()
# rects1 = ax.bar(ind, menMeans, width, color='r', yerr=menStd)
return scope_list
def scope_maps(spns,
height=20,
fig_size=(12, 4),
dpi=900,
cmap=matplotlib.cm.jet,
min_val=None,
max_val=None,
xlim=None,
w_space=0.0,
h_space=1.1,
save_path=None,
pdf=False):
seaborn.set_style('white')
seaborn.despine(trim=True)
seaborn.set_context('poster', font_scale=1.8)
scope_lists = []
for spn in spns:
scope_dict = defaultdict(list)
for node in spn.top_down_nodes():
scope = None
if hasattr(node, 'var_scope'):
scope = node.var_scope
elif hasattr(node, 'var'):
scope = frozenset(node.var)
scope_dict[len(scope)].append(node)
max_scope_len = max(scope_dict.keys())
scope_list = [0] * max_scope_len
for scope_len, nodes in scope_dict.items():
scope_list[scope_len - 1] = len(nodes)
print(scope_list)
scope_lists.append(scope_list)
m = len(spns)
n = 1
gs1 = gridspec.GridSpec(m, n)
gs1.update(wspace=w_space, hspace=h_space)
if xlim is None:
xlim = len(scope_lists[0])
height_val = xlim * (fig_size[1] / len(spns)) / fig_size[0]
step = 99 if xlim // 10 > 10 else 29
fig = pyplot.figure(figsize=fig_size, dpi=dpi)
for i in range(m):
norm = None
ax = fig.add_subplot(gs1[i])
matrix_map = numpy.log10(numpy.array(scope_lists[i][:xlim]) + 1).reshape(1, xlim)
matrix_map = numpy.lib.pad(matrix_map, ((0, 0), (5, 5)), 'constant')
matrix_map = numpy.repeat(matrix_map, height_val, axis=0)
print(matrix_map)
img = ax.matshow(matrix_map,
cmap=cmap,
vmin=min_val,
vmax=max_val,
norm=norm)
pyplot.xticks(numpy.array([]))
pyplot.yticks(numpy.array([]))
# pyplot.axis('off')
ax.set_xticks(numpy.arange(1, xlim, step))
ax.set_xlabel('scope length')
# pyplot.ylabel('# nodes')
# pyplot.tight_layout()
if save_path:
fig.savefig(save_path + '.svg')
if pdf:
pp = PdfPages(save_path + '.pdf')
pp.savefig(fig)
pp.close()
pyplot.show()
# rects1 = ax.bar(ind, menMeans, width, color='r', yerr=menStd)
return scope_list
def scope_map_layerwise(spn,
fig_size=(20, 1),
dpi=900,
cmap=matplotlib.cm.jet,
xlim=None,
w_space=-100.0,
h_space=-100.0,
save_path=None,
pdf=False):
seaborn.set_style('white')
seaborn.despine(trim=True)
seaborn.set_context('poster', font_scale=1.)
max_scope_len = max([len(node.var_scope) for node in spn.top_down_nodes()
if hasattr(node, 'var_scope')])
scope_lists = []
for layer in spn.bottom_up_layers():
scope_dict = defaultdict(list)
for node in layer.nodes():
scope = None
if hasattr(node, 'var_scope'):
scope = node.var_scope
elif hasattr(node, 'var'):
scope = frozenset(node.var)
scope_dict[len(scope)].append(node)
# max_scope_len = max(scope_dict.keys())
scope_list = [0] * max_scope_len
for scope_len, nodes in scope_dict.items():
scope_list[scope_len - 1] = len(nodes)
print(scope_list)
scope_lists.append(scope_list)
m = len(scope_lists)
n = 1
gs1 = gridspec.GridSpec(m, n)
gs1.update(wspace=w_space, hspace=h_space)
fig = pyplot.figure(figsize=fig_size, dpi=dpi)
maps = []
    min_val = numpy.inf
    max_val = -numpy.inf
norm = None
if xlim is None:
xlim = len(scope_lists[0])
height_val = max(1, xlim * (fig_size[1] / len(scope_lists)) / fig_size[0])
step = 99 if xlim // 10 > 10 else 29
print('height:', height_val)
for i in range(m):
# print(scope_lists[i])
# matrix_map = numpy.log10(numpy.array(scope_lists[i][:xlim])).reshape(1, xlim)
matrix_map = numpy.zeros(xlim)
matrix_map[numpy.array(scope_lists[i][:xlim]) > 0] = 1
matrix_map = matrix_map.reshape(1, xlim)
matrix_map = numpy.lib.pad(matrix_map, ((0, 0), (5, 5)), 'constant')
matrix_map = numpy.repeat(matrix_map, height_val, axis=0)
# print(matrix_map)
maps.append(matrix_map)
min_val = min([matrix_map.min(), min_val])
max_val = max([matrix_map.max(), max_val])
print('minmax', min_val, max_val)
for i in range(len(maps)):
ax = fig.add_subplot(gs1[i], frameon=False)
if i > 0:
step = 100 if xlim // 10 > 10 else 30
# matrix_map = numpy.repeat(maps[i], height_val, axis=0)
img = ax.matshow(maps[i],
cmap=cmap,
vmin=min_val,
vmax=max_val,
norm=norm)
pyplot.xticks(numpy.array([]))
pyplot.yticks(numpy.array([]), rotation=90)
# ax.yaxis.set_rotate_label(False)
ax.set_ylabel(str(i), rotation=0, ha='right', va='center')
# pyplot.axis('off')
if i == 0:
ax.set_xticks(numpy.arange(1, xlim, step))
# pyplot.ylabel(None, rotation=90)
# ax.set_xticks(numpy.arange(1, xlim, step))
ax.set_xlabel('scope length')
# pyplot.ylabel('# nodes')
# pyplot.tight_layout()
# fig.subplots_adjust(wspace=0, hspace=0)
if save_path:
fig.savefig(save_path + '.svg')
if pdf:
pp = PdfPages(save_path + '.pdf')
pp.savefig(fig)
pp.close()
pyplot.show()
# rects1 = ax.bar(ind, menMeans, width, color='r', yerr=menStd)
return scope_list
def multiple_scope_histogram(spns,
fig_size=(14, 5),
dpi=900,
save_path=None,
y_log=True,
colors=None,
pdf=False):
seaborn.set_style('white')
# seaborn.despine(trim=True)
seaborn.set_context('poster')
if not colors:
# colors = seaborn.color_palette("husl")
colors = ['red', 'green', 'black']
n_ticks = 0
spn_scope_lists = []
for spn in spns:
scope_dict = defaultdict(list)
for node in spn.top_down_nodes():
scope = None
if hasattr(node, 'var_scope'):
scope = node.var_scope
elif hasattr(node, 'var'):
scope = frozenset(node.var)
scope_dict[len(scope)].append(node)
#
# assuming all spns to have the same scope
max_scope_len = max(scope_dict.keys())
n_ticks = max(max_scope_len, 0)
scope_list = [0] * max_scope_len
for scope_len, nodes in scope_dict.items():
scope_list[scope_len - 1] = len(nodes)
print(scope_list)
spn_scope_lists.append(scope_list)
n_histograms = len(spns)
bin_width = 1 / (n_histograms + 1)
bins = [[i + j * bin_width for i in range(n_ticks)]
for j in range(1, n_histograms + 1)]
fig, ax = pyplot.subplots(figsize=fig_size)
width = 0.1
for i, histogram in enumerate(spn_scope_lists):
# ax.bar(bins[i], spn_scope_lists[i], width=bin_width,
# facecolor=colors[i], edgecolor="none",
# log=y_log)
# seaborn.despine()
        for j in range(0, len(histogram)):
# x_pos = [10 ** (numpy.log10(i) - width),
# 10 ** (numpy.log10(i) - width),
# 10 ** (numpy.log10(i) + width),
# 10 ** (numpy.log10(i) + width)]
x_pos = [bins[i][j] - width, bins[i][j] - width,
bins[i][j] + width, bins[i][j] + width]
y_pos = [0,
spn_scope_lists[i][j],
spn_scope_lists[i][j],
0]
ax.fill(x_pos,
y_pos, colors[i])
if y_log:
ax.set_yscale('log')
# ax.set_xscale('log')
    ax.set_xlim([0, n_ticks + 1])
pyplot.xlabel('scope length')
pyplot.ylabel('# nodes')
pyplot.tight_layout()
if save_path:
fig.savefig(save_path + '.svg')
if pdf:
pp = PdfPages(save_path + '.pdf')
pp.savefig(fig)
pp.close()
pyplot.show()
def layer_scope_histogram(spn,
m, n,
fig_size=(16, 16),
save_path=None,
pdf=False):
fig = pyplot.figure(figsize=fig_size)
scope_dict = defaultdict(lambda: defaultdict(list))
layer_list = []
for layer in spn.bottom_up_layers():
layer_list.append(layer)
for node in layer.nodes():
scope = None
if hasattr(node, 'var_scope'):
scope = node.var_scope
elif hasattr(node, 'var'):
scope = frozenset(node.var)
scope_dict[layer][len(scope)].append(node)
max_scope_len = max([max(scope_dict[l].keys()) for l in scope_dict])
    max_val = max([max([len(nodes) for nodes in scope_dict[l].values()]) for l in scope_dict])
for i, layer in enumerate(layer_list):
scope_list = [0] * max_scope_len
for scope_len, nodes in scope_dict[layer].items():
scope_list[scope_len - 1] = len(nodes)
print(scope_list)
if i < m * n:
ax = fig.add_subplot(m, n, i + 1)
ax.bar(numpy.arange(max_scope_len),
scope_list,
log=True)
ax.set_ylim(top=max_val)
if save_path:
fig.savefig(save_path + '.svg')
if pdf:
pp = PdfPages(save_path + '.pdf')
pp.savefig(fig)
pp.close()
pyplot.show()
# rects1 = ax.bar(ind, menMeans, width, color='r', yerr=menStd)
def visualize_node_activations_for_instance(spn,
nodes,
instance,
marg_mask=None,
mean=False,
hard=False,
fig_size=(10, 10),
n_rows=28, n_cols=28,
dtype=float,
cmap=matplotlib.cm.spectral,
save_path=None,
pdf=False):
"""
Given an SPN and an instance, return a same shape instance
containing the activations of all nodes, summed by scopes
"""
seaborn.set_style('white')
seaborn.despine(trim=True)
assert instance.ndim == 1
n_features = len(instance)
activations = numpy.zeros(n_features, dtype=dtype)
var_counter = Counter()
#
# marginalizing?
if marg_mask is not None:
instance = numpy.array(instance, copy=True)
instance[numpy.logical_not(marg_mask)] = MARG_IND
# print(instance, type(instance))
# instance = instance.astype(numpy.int32)
# print(instance, type(instance), instance.shape)
#
# evaluate it bottom, up
res, = spn.single_eval(instance)
#
# then gather the node activation vals
for node in nodes:
val = numpy.exp(node.log_val)
scope = None
if hasattr(node, 'var_scope'):
scope = node.var_scope
elif hasattr(node, 'var'):
scope = [node.var]
#
# accumulating scope
for var in scope:
var_counter[var] += 1
if hard:
activations[var] += 1
else:
# activations[var] += (val * len(scope))
activations[var] += (val / len(scope))
# if instance[var] == 1:
# activations[var] += val
# else:
# activations[var] += (1 - val)
if mean:
for i in range(n_features):
activations[i] /= var_counter[i]
fig, ax = pyplot.subplots(figsize=fig_size)
activation_image = array_2_mat(activations,
n_rows=n_rows,
n_cols=n_cols)
cax = ax.matshow(activation_image, cmap=cmap)
fig.colorbar(cax)
if save_path:
fig.savefig(save_path + '.svg')
if pdf:
pp = PdfPages(save_path + '.pdf')
pp.savefig(fig)
pp.close()
pyplot.show()
return activations
def visualize_marginalizations_for_instance(spn,
instance,
all_ones=False,
exp=False,
fig_size=(10, 10),
n_rows=28, n_cols=28,
dtype=float,
cmap=matplotlib.cm.spectral,
save_path=None,
pdf=False):
"""
Given an SPN and an instance, return a same shape instance
containing the activations of all nodes, summed by scopes
"""
seaborn.set_style('white')
seaborn.despine(trim=True)
assert instance.ndim == 1
n_features = len(instance)
marg_data = numpy.zeros(n_features, dtype=instance.dtype)
marginalizations = numpy.zeros(n_features, dtype=dtype)
for i in range(n_features):
marg_data.fill(MARG_IND)
if all_ones:
marg_data[i] = 1
else:
marg_data[i] = instance[i]
#
# evaluate it bottom, up
res, = spn.single_eval(marg_data)
if exp:
res = numpy.exp(res)
marginalizations[i] = res
fig, ax = pyplot.subplots(figsize=fig_size)
activation_image = array_2_mat(marginalizations,
n_rows=n_rows,
n_cols=n_cols)
cax = ax.matshow(activation_image, cmap=cmap)
fig.colorbar(cax)
if save_path:
fig.savefig(save_path + '.svg')
if pdf:
pp = PdfPages(save_path + '.pdf')
pp.savefig(fig)
pp.close()
pyplot.show()
return marginalizations
if __name__ == '__main__':
labels = [i for i in range(-10, 10)]
points = [numpy.exp(i) for i in labels]
visualize_curves([(labels, points)], labels=['a', 'b'])
| 33,780 | 29.765938 | 95 | py |
| spyn-repr | spyn-repr-master/mnist.py |
from visualize import array_2_mat
from visualize import plot_m_by_n_images
import numpy
import matplotlib
import matplotlib.pyplot as pyplot
import pickle
import os
RANDOM_SEED = 1337
def load_mnist_data_split_from_txt(data_path):
data = numpy.loadtxt(data_path, delimiter=' ')
x, y = data[:, :-1], data[:, -1].astype(numpy.int32)
print('Loaded dataset:\n\tx: {}\ty: {}'.format(x.shape, y.shape))
assert x.shape[0] == y.shape[0]
assert y.ndim == 1
assert x.shape[1] == 784
return x, y
def load_mnist_from_txt(data_dir, split_names=['mnist_train.txt',
'mnist_valid.txt',
'mnist_test.txt']):
split_paths = [os.path.join(data_dir, file_name)
for file_name in split_names]
data_splits = [load_mnist_data_split_from_txt(path) for path in split_paths]
return data_splits
def save_mnist_pickle(data_splits, output_path):
with open(output_path, 'wb') as data_file:
pickle.dump(data_splits, data_file)
def load_mnist_pickle(data_path):
data_splits = None
with open(data_path, 'rb') as data_file:
data_splits = pickle.load(data_file)
return data_splits
def plot_mnist_digits(image_arrays,
m, n,
fig_size=(12, 12),
invert=True,
cmap=matplotlib.cm.binary,
save_path=None,
pdf=False):
image_matrixes = None
if invert:
image_matrixes = [array_2_mat(1 - img, 28, 28) for img in image_arrays]
else:
image_matrixes = [array_2_mat(img, 28, 28) for img in image_arrays]
    # passing keyword args: the imported plot_m_by_n_images takes w_space,
    # h_space and dpi between cmap and save_path
    plot_m_by_n_images(image_matrixes, m, n, fig_size=fig_size, cmap=cmap,
                       save_path=save_path, pdf=pdf)
def binarize_image(image_array, rand_gen, dtype=numpy.int32):
assert image_array.ndim == 1
bin_image_array = numpy.zeros(image_array.shape, dtype=dtype)
n_features = image_array.shape[0]
for i in range(n_features):
bin_image_array[i] = rand_gen.choice(2, p=[1 - image_array[i],
image_array[i]])
return bin_image_array
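#
# Hedged usage sketch: _demo_binarize_image is a hypothetical helper, not part
# of the original module, treating grey levels in [0, 1] as pixel-on probabilities.
def _demo_binarize_image():
    rand_gen = numpy.random.RandomState(RANDOM_SEED)
    image = numpy.array([0.0, 1.0, 0.5])
    bin_image = binarize_image(image, rand_gen)
    assert bin_image[0] == 0 and bin_image[1] == 1
    return bin_image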
def binarize_mnist_data_split(images, rand_gen, dtype=numpy.int32):
n_images = len(images)
bin_images = [binarize_image(images[i], rand_gen, dtype) for i in range(n_images)]
return numpy.array(bin_images)
def binarize_mnist(data_splits, rand_gen=None, dtype=numpy.int32):
if rand_gen is None:
rand_gen = numpy.random.RandomState(RANDOM_SEED)
binarized_splits = [(binarize_mnist_data_split(x, rand_gen, dtype), y) for x, y in data_splits]
return binarized_splits
| 2,696 | 27.691489 | 99 | py |
| spyn-repr | spyn-repr-master/bin/eval_spn.py |
import argparse
try:
from time import perf_counter
except ImportError:
from time import time
perf_counter = time
import dataset
import numpy
import datetime
import os
import logging
from spn.utils import stats_format
from spn.linked.spn import evaluate_on_dataset
from spn.theanok.spn import evaluate_on_dataset_batch
import pickle
PREDS_EXT = 'lls'
TRAIN_PREDS_EXT = 'train.{}'.format(PREDS_EXT)
VALID_PREDS_EXT = 'valid.{}'.format(PREDS_EXT)
TEST_PREDS_EXT = 'test.{}'.format(PREDS_EXT)
# def evaluate_on_dataset(spn, data):
# n_instances = data.shape[0]
# pred_lls = numpy.zeros(n_instances)
# for i, instance in enumerate(data):
# (pred_ll, ) = spn.single_eval(instance)
# pred_lls[i] = pred_ll
# return pred_lls
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("dataset", type=str, nargs=1,
                        help='Specify a dataset name from data/ (e.g. nltcs)')
parser.add_argument('--model', type=str,
help='Spn model file path')
parser.add_argument('--seed', type=int, nargs='?',
default=1337,
help='Seed for the random generator')
parser.add_argument('-o', '--output', type=str, nargs='?',
default='./exp/learnspn-b/',
help='Output dir path')
parser.add_argument('--exp-name', type=str, nargs='?',
default=None,
help='Experiment name, if not present a date will be used')
parser.add_argument('-v', '--verbose', type=int, nargs='?',
default=1,
help='Verbosity level')
#
# parsing the args
args = parser.parse_args()
#
# setting verbosity level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbose == 2:
logging.basicConfig(level=logging.DEBUG)
logging.info("Starting with arguments:\n%s", args)
#
# loading dataset splits
logging.info('Loading datasets: %s', args.dataset)
(dataset_name,) = args.dataset
train, valid, test = dataset.load_train_val_test_csvs(dataset_name)
n_instances = train.shape[0]
n_test_instances = test.shape[0]
logging.info('\ttrain: {}\n\tvalid: {}\n\ttest: {}'.format(train.shape,
valid.shape,
test.shape))
if args.exp_name:
out_path = os.path.join(args.output, dataset_name + '_' + args.exp_name)
else:
date_string = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
out_path = os.path.join(args.output, dataset_name + '_' + date_string)
out_log_path = os.path.join(out_path, 'exp.log')
os.makedirs(out_path, exist_ok=True)
logging.info('Opening log file {}...'.format(out_log_path))
preamble = ("""train_ll\tvalid_ll\ttest_ll\n""")
with open(out_log_path, 'w') as out_log:
out_log.write("parameters:\n{0}\n\n".format(args))
out_log.write(preamble)
out_log.flush()
logging.info('\nLoading spn model from: {}'.format(args.model))
spn = None
with open(args.model, 'rb') as model_file:
load_start_t = perf_counter()
spn = pickle.load(model_file)
load_end_t = perf_counter()
logging.info('done in {}'.format(load_end_t - load_start_t))
logging.info('\nEvaluating on training set')
train_preds = evaluate_on_dataset(spn, train)
assert train_preds.shape[0] == train.shape[0]
train_avg_ll = numpy.mean(train_preds)
logging.info('\t{}'.format(train_avg_ll))
logging.info('Evaluating on validation set')
valid_preds = evaluate_on_dataset(spn, valid)
assert valid_preds.shape[0] == valid.shape[0]
valid_avg_ll = numpy.mean(valid_preds)
logging.info('\t{}'.format(valid_avg_ll))
logging.info('Evaluating on test set')
test_preds = evaluate_on_dataset(spn, test)
assert test_preds.shape[0] == test.shape[0]
test_avg_ll = numpy.mean(test_preds)
logging.info('\t{}'.format(test_avg_ll))
#
# writing to file
stats = stats_format([train_avg_ll,
valid_avg_ll,
test_avg_ll],
'\t',
digits=5)
out_log.write(stats + '\n')
out_log.flush()
#
# also serializing the split predictions
train_lls_path = os.path.join(out_path, TRAIN_PREDS_EXT)
numpy.savetxt(train_lls_path, train_preds, delimiter='\n')
valid_lls_path = os.path.join(out_path, VALID_PREDS_EXT)
numpy.savetxt(valid_lls_path, valid_preds, delimiter='\n')
test_lls_path = os.path.join(out_path, TEST_PREDS_EXT)
numpy.savetxt(test_lls_path, test_preds, delimiter='\n')
logging.info('Saved predictions to disk')
| 5,069 | 31.292994 | 83 | py |
| spyn-repr | spyn-repr-master/bin/mtlearn_exp.py |
import subprocess
import numpy
import os
import argparse
import logging
import datetime
import re
try:
from time import perf_counter
except ImportError:
from time import time as perf_counter
MTLEARN_EXEC = './mtlearn'
MSCORE_EXEC = './mscore'
SPN2AC_EXEC = './spn2ac'
SPN_EXT = '.spn'
AC_EXT = '.ac'
DATA_DIR = 'data/'
TRAIN_EXT = '.ts.data'
VALID_EXT = '.valid.data'
TEST_EXT = '.test.data'
def ll_array_from_model_score(score_output):
"""
Quick and dirty parsing
"""
#
# split strings by newlines
lines = score_output.split('\n')
#
# remove all the lines that are not numbers
lls = []
for ll in lines:
try:
lls.append(float(ll))
except ValueError:
pass
#
# convert to numpy array
return numpy.array(lls)
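#
# Hedged usage sketch: _demo_ll_array_from_model_score is a hypothetical helper,
# not part of the original script, parsing a fake mscore dump.
def _demo_ll_array_from_model_score():
    fake_output = 'header line\n-6.02\n-5.98\ndone\n'
    lls = ll_array_from_model_score(fake_output)
    assert lls.shape == (2,)
    return lls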
def model_score(model, dataset, exec_path=MSCORE_EXEC, single_instances=False):
"""
Computing the LL of the model on a dataset using mscore
"""
process = None
if single_instances:
process = subprocess.Popen([exec_path,
'-m', model,
'-i', dataset,
'-v'],
stdout=subprocess.PIPE)
else:
process = subprocess.Popen([exec_path,
'-m', model,
'-i', dataset],
stdout=subprocess.PIPE)
proc_out, proc_err = process.communicate()
#
# TODO manage errors
# print(proc_out)
if proc_err is not None:
print('Mscore Errors:')
print(proc_err)
# else:
# print(proc_out)
#
# parsing the output
if single_instances:
return ll_array_from_model_score(proc_out.decode("utf-8")) # this shall be completed
else:
        # raw bytes pattern avoids the invalid-escape warning on \d
        avg_ll, std_ll = re.findall(rb"[-+]?\d*\.\d+|\d+", proc_out)
return avg_ll, std_ll
def adding_bins_to_path(bin_dir):
"""
"""
path = os.getenv('PATH')
print(path)
os.environ["PATH"] = path + ':' + bin_dir
print(os.getenv('PATH'))
def convert_spn_to_ac(spn_model_path, spn_2_ac_exec_path=SPN2AC_EXEC):
"""
Converting an spn model (.spn folder) into ACs with spn2ac
"""
#
# computing the output file path
ac_model_path = spn_model_path.replace(SPN_EXT, AC_EXT)
process = subprocess.Popen([spn_2_ac_exec_path,
'-m', spn_model_path,
'-o', ac_model_path],
stdout=subprocess.PIPE)
proc_out, proc_err = process.communicate()
print(proc_out)
print(proc_err)
#
# TODO: manage errors
return ac_model_path, (proc_out, proc_err)
def stats_format(stats_list, separator, digits=5):
formatted = []
float_format = '{0:.' + str(digits) + 'f}'
for stat in stats_list:
# if isinstance(stat, int):
# formatted.append(str(stat))
# el
if isinstance(stat, float):
formatted.append(float_format.format(stat))
else:
formatted.append(str(stat))
# concatenation
return separator.join(formatted)
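#
# Hedged usage sketch: _demo_stats_format is a hypothetical helper, not part of
# the original script, checking the rounding and joining behaviour above.
def _demo_stats_format():
    line = stats_format([3, 0.123456, 'nltcs'], '\t', digits=5)
    assert line == '3\t0.12346\tnltcs'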
def mtlearn_wrapper(model_path,
dataset_path,
n_components,
seed,
exec_path=MTLEARN_EXEC,
spn_2_ac_exec_path=SPN2AC_EXEC):
"""
Wrapping mtlearn executable in python
"""
mtlearn_start_t = perf_counter()
#
# opening a pipe to execute mtlearn
process = subprocess.Popen([exec_path,
'-i', dataset_path,
'-o', model_path,
'-k', str(n_components),
'-seed', str(seed)],
stdout=subprocess.PIPE)
proc_out, proc_err = process.communicate()
mtlearn_end_t = perf_counter()
logging.info('Model learned in %f secs', (mtlearn_end_t - mtlearn_start_t))
#
    # checking for errors
if proc_err is not None:
logging.info('ERRORS: %s', proc_err)
#
    # mtlearn outputs .spn files, so we need to convert them
ac_path, conv_out = convert_spn_to_ac(model_path, spn_2_ac_exec_path)
logging.info('Converted model to AC!\n%s', conv_out[0])
return ac_path
#
# the main script with argparse
if __name__ == '__main__':
# bin_dir = '/home/valerio/Petto Redigi/libra-tk-1.0.1/bin'
# bin_dir = '/root/Desktop/libra_exp/bin/'
#########################################
# creating the opt parser
parser = argparse.ArgumentParser()
parser.add_argument("dataset", type=str, nargs=1,
                        help='Specify a dataset name from data/ (e.g. nltcs)')
parser.add_argument('-n', '--n-components', type=int, nargs='+',
default=[2, 50, 2],
help='min max inc')
parser.add_argument('--seed', type=int, nargs='?',
default=1337,
help='Seed for the random generator')
parser.add_argument('-e', '--exec-path', type=str, nargs='?',
default='/home/valerio/Petto Redigi/libra-tk-1.0.1/bin',
help='Output dir path')
parser.add_argument('-o', '--output', type=str, nargs='?',
default='exp/mtlearn/',
help='Output dir path')
parser.add_argument('-i', '--n-iters', type=int, nargs='?',
default=5,
help='Number of trials')
parser.add_argument('-v', '--verbose', type=int, nargs='?',
default=1,
help='Verbosity level')
#
# parsing the args
args = parser.parse_args()
#
# adding to the path
adding_bins_to_path(args.exec_path)
#
# setting verbosity level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbose == 2:
logging.basicConfig(level=logging.DEBUG)
logging.info("Starting with arguments:\n%s", args)
seed = args.seed
MAX_RAND_SEED = 99999999 # sys.maxsize
numpy_rand_gen = numpy.random.RandomState(seed)
logging.info('Opening log file...')
(dataset_name,) = args.dataset
date_string = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
out_path = args.output + dataset_name + '_' + date_string
print('OUTPATH', out_path)
out_log_path = out_path + '/exp.log'
test_lls_log_path = out_path + '/test.lls'
model_subdir_path = out_path + '/models/'
mean_lls_path = out_path + '/mean.lls'
best_lls_path = out_path + '/best.lls'
#
# getting the paths
train_path = DATA_DIR + dataset_name + TRAIN_EXT
valid_path = DATA_DIR + dataset_name + VALID_EXT
test_path = DATA_DIR + dataset_name + TEST_EXT
#
# parsing the components
min_components = 1
max_components = None
increment = 1
    if len(args.n_components) > 3 or len(args.n_components) < 1:
        raise ValueError('Expected one to three values for components')
elif len(args.n_components) == 3:
min_components = args.n_components[0]
max_components = args.n_components[1]
increment = args.n_components[2]
elif len(args.n_components) == 2:
min_components = args.n_components[0]
max_components = args.n_components[1]
elif len(args.n_components) == 1:
max_components = args.n_components[0]
n_components = (max_components - min_components) // increment + 1
logging.info('N Components: %d', n_components)
#
    # creating dirs if non-existent
if not os.path.exists(os.path.dirname(out_log_path)):
os.makedirs(os.path.dirname(out_log_path))
if not os.path.exists(os.path.dirname(model_subdir_path)):
os.makedirs(os.path.dirname(model_subdir_path))
#
# keeping track of results
best_state = {}
best_state['valid_ll'] = -numpy.Inf
mean_state = {}
best_train_lls = numpy.zeros(n_components)
best_valid_lls = numpy.zeros(n_components)
best_test_lls = numpy.zeros(n_components)
best_train_lls.fill(-numpy.Inf)
best_valid_lls.fill(-numpy.Inf)
best_test_lls.fill(-numpy.Inf)
mean_train_lls = numpy.zeros(n_components)
mean_valid_lls = numpy.zeros(n_components)
mean_test_lls = numpy.zeros(n_components)
preamble = ("""n-compo:\t#trial:\tseed:""" +
"""\ttrain_ll\tvalid_ll:\ttest_ll\n""")
mtlearn_exec_path = os.path.join(args.exec_path, 'mtlearn')
mscore_exec_path = os.path.join(args.exec_path, 'mscore')
spn2ac_exec_path = os.path.join(args.exec_path, 'spn2ac')
with open(out_log_path, 'w') as out_log:
out_log.write("parameters:\n{0}\n\n".format(args))
out_log.write(preamble)
out_log.flush()
#
# the main cycle here is on the component
for j, m in enumerate(range(min_components - 1,
max_components,
increment)):
seeds = numpy_rand_gen.randint(MAX_RAND_SEED, size=args.n_iters)
mean_state[m] = {}
#
# then we repeat it for a number of trials
for i in range(args.n_iters):
logging.info('\n## Repeating trial %d/%d##', i + 1, args.n_iters)
#
# compositing the paths
model_path = model_subdir_path + dataset_name + '_' + \
str(m) + '_' + str(i) + SPN_EXT
print(train_path)
print(model_path)
#
# learning the component
mt = mtlearn_wrapper(model_path,
train_path,
n_components=m + 1,
exec_path=mtlearn_exec_path,
spn_2_ac_exec_path=spn2ac_exec_path,
seed=seeds[i])
#
# evaluating it
train_avg_ll, _train_std_ll = model_score(mt, train_path,
exec_path=mscore_exec_path)
logging.info('TRAIN SET: %s', train_avg_ll)
valid_avg_ll, _valid_std_ll = model_score(mt, valid_path,
exec_path=mscore_exec_path)
logging.info('VALID SET: %s', valid_avg_ll)
test_lls = model_score(mt, test_path, single_instances=True,
exec_path=mscore_exec_path)
test_avg_ll = test_lls.mean()
logging.info('TEST SET: %f', test_avg_ll)
train_avg_ll = float(train_avg_ll)
valid_avg_ll = float(valid_avg_ll)
if valid_avg_ll > best_state['valid_ll']:
best_state['valid_ll'] = valid_avg_ll
best_state['train_ll'] = train_avg_ll
best_state['test_ll'] = test_avg_ll
best_state['n_mix'] = m + 1
#
# saving to file
numpy.savetxt(test_lls_log_path, test_lls, delimiter='\n')
#
# updating the best stats
if train_avg_ll > best_train_lls[j]:
best_train_lls[j] = float(train_avg_ll)
if valid_avg_ll > best_valid_lls[j]:
best_valid_lls[j] = valid_avg_ll
if test_avg_ll > best_test_lls[j]:
best_test_lls[j] = test_avg_ll
#
# and the mean ones
mean_train_lls[j] += train_avg_ll
mean_valid_lls[j] += valid_avg_ll
mean_test_lls[j] += test_avg_ll
#
# saving to general log file
stats = stats_format([m,
i,
seeds[i],
train_avg_ll, valid_avg_ll, test_avg_ll],
'\t', digits=5)
out_log.write(stats + '\n')
out_log.flush()
#
# writing as last line the best params
out_log.write("{0}".format(best_state))
out_log.flush()
#
    # saving aggregated stats
numpy.savetxt(mean_lls_path,
numpy.vstack((mean_train_lls / args.n_iters,
mean_valid_lls / args.n_iters,
mean_test_lls / args.n_iters)),
delimiter=',',
fmt='%.8e')
numpy.savetxt(best_lls_path,
numpy.vstack((best_train_lls,
best_valid_lls,
best_test_lls)),
delimiter=',',
fmt='%.8e')
logging.info('Exp search ended.')
logging.info('Best params:\n\t%s', best_state)
| 13,099 | 31.26601 | 93 | py |
| spyn-repr | spyn-repr-master/bin/classify_repr_exp.py |
import argparse
try:
from time import perf_counter
except ImportError:
from time import time
perf_counter = time
import dataset
import numpy
import datetime
import os
import logging
from spn.utils import stats_format
from sklearn.preprocessing import StandardScaler
from sklearn import linear_model
from sklearn import neighbors
from sklearn import decomposition
from sklearn import manifold
from mnist import load_mnist_pickle
from caltech101 import load_caltech101_pickle
from newsgroups import load_20newsgroups_pickle
import pickle
import matplotlib
import matplotlib.pyplot as pyplot
MAX_N_INSTANCES = 10000
PICKLE_SPLIT_EXT = 'pickle'
BMNIST_PATH = 'data/mnist/binary_mnist_splits.pickle'
CALTECH101_PATH = 'data/caltech101/caltech101_silhouettes.pickle'
NEWSGROUPS_PATH = 'data/20newsgroups/20newsgroups_5000.pickle'
OCR_LETTERS_PATH = 'data/ocr_letters/ocr_letters.pickle'
PREPROCESS_DICT = {
'std-scl': StandardScaler
}
LOGISTIC_MOD_DICT_PARAMS = {
'l2-ovr-bal': {
'penalty': 'l2',
'tol': 0.0001,
'fit_intercept': True,
'class_weight': 'balanced',
'solver': 'liblinear',
'multi_class': 'ovr',
},
'l2-ovr-bal-lbfgs': {
'penalty': 'l2',
'tol': 0.0001,
'fit_intercept': True,
'class_weight': 'balanced',
'solver': 'lbfgs',
'multi_class': 'ovr',
},
'l2-ovr-bal-sag': {
'penalty': 'l2',
'tol': 0.0001,
'fit_intercept': True,
'class_weight': 'balanced',
'solver': 'sag',
'multi_class': 'ovr',
},
'l2-mul-bal': {
'penalty': 'l2',
'tol': 0.0001,
'fit_intercept': True,
'class_weight': 'balanced',
'solver': 'liblinear',
'multi_class': 'multinomial',
}}
KNN_DICT_PARAMS = {
'uni': {'weights': 'uniform',
# 'algorithm' : 'auto',
'leaf_size': 30,
'p': 2,
'metric': 'minkowski',
'metric_params': None,
}, }
VIS_METHODS_DICT = {
'tsne': manifold.TSNE,
'pca': decomposition.PCA}
# VIS_METHODS_PARAMS_DICT = {
# 'tsne': {},
# 'pca':}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("dataset", type=str,
                        help='Specify a dataset name (e.g. nltcs)')
parser.add_argument('-r', '--repr-data', type=str,
default=None,
help='Learned feature dataset name')
parser.add_argument('--repr-dir', type=str, nargs='?',
default='data/repr/bmnist/',
help='Learned feature dir')
parser.add_argument('--dtype', type=str, nargs='?',
default='int32',
help='Loaded dataset type')
parser.add_argument('-s', '--splits', type=str, nargs='+',
default=None,
help='Splits names')
parser.add_argument('--seed', type=int, nargs='?',
default=1337,
help='Seed for the random generator')
parser.add_argument('-o', '--output', type=str, nargs='?',
default='./exp/learnspn-b/',
help='Output dir path')
# parser.add_argument('-a', '--alpha', type=float, nargs='+',
# default=[0.1],
# help='Smoothing factor for leaf probability estimation')
parser.add_argument('--visualize', type=str, nargs='+',
default=[],
                        help='Algorithms for visualizing the features (e.g. tsne|pca)')
parser.add_argument('--preprocess', type=str, nargs='+',
default=[],
help='Algorithms to preprocess data')
parser.add_argument('--logistic', type=str, nargs='?',
default=None,
help='parametrized version of the logistic regression')
parser.add_argument('--log-c', type=float, nargs='+',
default=[0.1, 0.2, 0.5, 1.0, 1.5, 2.0, 5.0, 10.0],
                        help='C values for the logistic regression')
parser.add_argument('--knn', type=str, nargs='?',
default=None,
                        help='Parametrized version of the k-NN classifier')
parser.add_argument('--knn-k', type=int, nargs='+',
default=[1, 3, 5, 11, 25, 51],
help='k-NN k values')
# parser.add_argument('--model', type=str,
# help='Spn model file path')
parser.add_argument('--feature-inc', type=int, nargs='+',
default=None,
help='Considering features in batches')
parser.add_argument('--exp-name', type=str, nargs='?',
default=None,
help='Experiment name, if not present a date will be used')
parser.add_argument('--concat', action='store_true',
help='Whether to concatenate the new representation to the old dataset')
parser.add_argument('--save-model', action='store_true',
help='Whether to store the model file as a pickle file')
parser.add_argument('--eval-only-orig', action='store_true',
help='Whether to evaluate only the original data')
parser.add_argument('-v', '--verbose', type=int, nargs='?',
default=1,
help='Verbosity level')
#
# parsing the args
args = parser.parse_args()
#
# setting verbosity level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbose == 2:
logging.basicConfig(level=logging.DEBUG)
logging.info("Starting with arguments:\n%s", args)
#
# loading the dataset splits
#
logging.info('Loading datasets: %s', args.dataset)
dataset_name = args.dataset
dataset_splits = None
if dataset_name == 'bmnist':
logging.info('Loading bmnist from pickle')
dataset_splits = load_mnist_pickle(BMNIST_PATH)
elif dataset_name == 'caltech101':
logging.info('Loading caltech101-silhouettes from pickle')
        dataset_splits = load_caltech101_pickle(CALTECH101_PATH)
elif dataset_name == '20newsgroups':
logging.info('Loading 20newsgroups from pickle')
dataset_splits = load_20newsgroups_pickle(NEWSGROUPS_PATH)
elif dataset_name == 'ocr_letters':
logging.info('Loading ocr letters from pickle')
        # NOTE: reusing the 20newsgroups loader, assuming the ocr_letters pickle
        # shares the same split layout
        dataset_splits = load_20newsgroups_pickle(OCR_LETTERS_PATH)
else:
dataset_splits = dataset.load_train_val_test_csvs(dataset_name,
type=args.dtype,
suffixes=args.splits)
for i, split in enumerate(dataset_splits):
logging.info('\tsplit {}, shape {}, labels {}'.format(i,
split[0].shape,
split[1].shape))
#
# loading the learned representations
#
logging.info('Loading repr splits from {}'.format(args.repr_data))
repr_splits = None
pickle_split_path = os.path.join(args.repr_dir, '{}.{}'.format(args.repr_data,
PICKLE_SPLIT_EXT))
#
# Opening the file for test prediction
#
if args.exp_name:
out_path = os.path.join(args.output, dataset_name + '_' + args.exp_name)
else:
date_string = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
out_path = os.path.join(args.output, dataset_name + '_' + date_string)
out_log_path = os.path.join(out_path, 'exp.log')
os.makedirs(out_path, exist_ok=True)
logging.info('Opening log file {}...'.format(out_log_path))
#
# shall we concatenate them? or just adding the labels?
#
labelled_splits = None
if args.eval_only_orig:
logging.info('Classification only on original data')
labelled_splits = dataset_splits
else:
print('Looking for {}'.format(pickle_split_path))
if os.path.exists(pickle_split_path):
logging.info('Loading from pickle {}'.format(pickle_split_path))
with open(pickle_split_path, 'rb') as split_file:
repr_splits = pickle.load(split_file)
else:
repr_splits = []
for s in args.splits:
split_path = os.path.join(args.repr_dir, args.repr_data + s)
repr_splits.append(numpy.loadtxt(split_path, dtype=args.dtype, delimiter=','))
# repr_splits = dataset.load_train_val_test_csvs(args.repr_data,
# path=args.repr_dir,
# type=args.dtype,
# suffixes=args.splits)
for i, split in enumerate(repr_splits):
logging.info('\tsplit {}, shape {}'.format(i, split.shape))
labelled_splits = []
for repr_x, (split_x, split_y) in zip(repr_splits, dataset_splits):
if args.concat:
new_repr_x = numpy.concatenate((split_x, repr_x), axis=1)
assert new_repr_x.shape[0] == split_x.shape[0]
assert new_repr_x.shape[1] == split_x.shape[1] + repr_x.shape[1]
logging.info('Concatenated representations: {} -> {}'.format(repr_x.shape,
new_repr_x.shape))
labelled_splits.append([new_repr_x, split_y])
else:
labelled_splits.append([repr_x, split_y])
#
# preprocessing
if args.preprocess:
for prep in args.preprocess:
preprocessor = PREPROCESS_DICT[prep]()
logging.info('Preprocessing with {}:'.format(preprocessor))
#
# assuming the first split is the training set
preprocessor.fit(labelled_splits[0][0])
for i in range(len(labelled_splits)):
labelled_splits[i][0] = preprocessor.transform(labelled_splits[i][0])
with open(out_log_path, 'w') as out_log:
out_log.write("parameters:\n{0}\n\n".format(args))
out_log.flush()
train_x, train_y = labelled_splits[0]
#
# classification task: logistic
if args.logistic:
logging.info('Logistic regression')
if args.feature_inc:
min_feature = 0
max_feature = train_x.shape[1]
increment = None
if len(args.feature_inc) == 1:
increment = args.feature_inc[0]
elif len(args.feature_inc) == 2:
min_feature = args.feature_inc[0]
increment = args.feature_inc[1]
elif len(args.feature_inc) == 3:
min_feature = args.feature_inc[0]
max_feature = args.feature_inc[1]
increment = args.feature_inc[2]
else:
raise ValueError('More than three values specified for --feature-inc')
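            #
            # --feature-inc semantics: INC | MIN INC | MIN MAX INC; e.g.
            # (illustrative values) `--feature-inc 0 500 100` evaluates the
            # feature slices 0:100, 0:200, ..., 0:500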
for m in range(min_feature + increment, max_feature + 1, increment):
#
# selecting subset features
logging.info('Considering features {}:{}'.format(min_feature, m))
sel_labelled_splits = []
for i in range(len(labelled_splits)):
sel_labelled_splits.append((labelled_splits[i][0][:, min_feature:m],
labelled_splits[i][1]))
for i in range(len(labelled_splits)):
logging.info('shapes {} {}'.format(sel_labelled_splits[i][0].shape,
sel_labelled_splits[i][1].shape))
#
# reselecting train
train_x, train_y = sel_labelled_splits[0]
for c in args.log_c:
log_res = linear_model.LogisticRegression(C=c,
**LOGISTIC_MOD_DICT_PARAMS[args.logistic])
#
# fitting
fit_s_t = perf_counter()
log_res.fit(train_x, train_y)
fit_e_t = perf_counter()
logging.info('\tC: {} ({})'.format(c, fit_e_t - fit_s_t))
#
# scoring
accs = []
for split_x, split_y in sel_labelled_splits:
split_s_t = perf_counter()
split_acc = log_res.score(split_x, split_y)
split_e_t = perf_counter()
accs.append(split_acc)
logging.info('\t\tacc: {} ({})'.format(split_acc,
split_e_t - split_s_t))
#
# saving to file
out_log.write('{0}\t{1}\t{2}\n'.format(m,
c,
'\t'.join(str(a) for a in accs)))
out_log.flush()
else:
for c in args.log_c:
log_res = linear_model.LogisticRegression(C=c,
**LOGISTIC_MOD_DICT_PARAMS[args.logistic])
#
# fitting
fit_s_t = perf_counter()
log_res.fit(train_x, train_y)
fit_e_t = perf_counter()
logging.info('C: {} ({})'.format(c, fit_e_t - fit_s_t))
#
# scoring
accs = []
for split_x, split_y in labelled_splits:
split_s_t = perf_counter()
split_acc = log_res.score(split_x, split_y)
split_e_t = perf_counter()
accs.append(split_acc)
logging.info('\tacc: {} ({})'.format(split_acc, split_e_t - split_s_t))
#
# saving to file
out_log.write('{0}\t{1}\n'.format(c, '\t'.join(str(a) for a in accs)))
out_log.flush()
#
# classification task: k-nn
if args.knn:
logging.info('k-Nearest Neighbors')
for k in args.knn_k:
knn = neighbors.KNeighborsClassifier(n_neighbors=k,
**KNN_DICT_PARAMS[args.knn])
#
# fitting
fit_s_t = perf_counter()
knn.fit(train_x, train_y)
fit_e_t = perf_counter()
logging.info('k: {} ({})'.format(k, fit_e_t - fit_s_t))
#
# scoring
accs = []
for split_x, split_y in labelled_splits:
split_s_t = perf_counter()
split_acc = knn.score(split_x, split_y)
split_e_t = perf_counter()
accs.append(split_acc)
logging.info('\tacc: {} ({})'.format(split_acc, split_e_t - split_s_t))
#
# saving to file
out_log.write('{0}\t{1}\n'.format(k, '\t'.join(str(a) for a in accs)))
out_log.flush()
#
# visualizing?
if args.visualize:
n_components = 2
vis_x = None
vis_y = None
#
    # if the training set is too large, visualize the test split instead
if train_x.shape[0] > MAX_N_INSTANCES:
logging.info('Visualizing test')
vis_x, vis_y = labelled_splits[-1]
else:
logging.info('Visualizing training')
vis_x = train_x
vis_y = train_y
for decomp in args.visualize:
#
# decomposing in 2D
t0 = perf_counter()
        decomposer = VIS_METHODS_DICT[decomp](n_components=n_components)
        dec_vis_x = decomposer.fit_transform(vis_x)
t1 = perf_counter()
logging.info("Applied decomposition {} in {} sec".format(decomp,
t1 - t0))
#
# visualize the new components
fig = pyplot.figure(figsize=(16, 14))
pyplot.scatter(
dec_vis_x[:, 0], dec_vis_x[:, 1], c=vis_y, cmap=matplotlib.cm.Spectral)
pyplot.show()
| 17,148
| 35.721627
| 108
|
py
|
spyn-repr
|
spyn-repr-master/bin/filter_feature_repr.py
|
import argparse
try:
from time import perf_counter
except ImportError:
from time import time
perf_counter = time
import dataset
import numpy
import datetime
import os
import logging
from spn.utils import stats_format
from spn import MARG_IND
from spn.linked.nodes import SumNode
from spn.linked.nodes import ProductNode
from spn.linked.representation import load_feature_info
from spn.linked.representation import store_feature_info
from spn.linked.representation import filter_features_by_layer
from spn.linked.representation import filter_features_by_scope_length
from spn.linked.representation import feature_mask_from_info
from spn.linked.representation import filter_features_by_node_type
from spn.linked.representation import load_features_from_file
from spn.linked.representation import FeatureInfo
from spn.factory import build_theanok_spn_from_block_linked
import pickle
PREDS_EXT = 'lls'
TRAIN_PREDS_EXT = 'train.{}'.format(PREDS_EXT)
VALID_PREDS_EXT = 'valid.{}'.format(PREDS_EXT)
TEST_PREDS_EXT = 'test.{}'.format(PREDS_EXT)
DATA_EXT = 'data'
TRAIN_DATA_EXT = 'train.{}'.format(DATA_EXT)
VALID_DATA_EXT = 'valid.{}'.format(DATA_EXT)
TEST_DATA_EXT = 'test.{}'.format(DATA_EXT)
PICKLE_SPLIT_EXT = 'pickle'
FEATURE_FILE_EXT = 'features'
INFO_FILE_EXT = 'features.info'
SCOPE_FILE_EXT = 'scopes'
FMT_DICT = {
'int': '%d',
'float': '%.18e',
'float.8': '%.8e',
}
NODE_TYPE_DICT = {
'sum': SumNode.__name__,
'prod': ProductNode.__name__,
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("dataset", type=str,
help='Dataset dir')
parser.add_argument('-r', '--repr-data', type=str,
default=None,
help='Learned feature dataset name')
parser.add_argument('--train-ext', type=str,
help='Training set name regex')
parser.add_argument('--valid-ext', type=str,
help='Validation set name regex')
parser.add_argument('--test-ext', type=str,
help='Test set name regex')
parser.add_argument('--info', type=str,
help='Path to feature info file')
parser.add_argument('--seed', type=int, nargs='?',
default=1337,
help='Seed for the random generator')
parser.add_argument('-o', '--output', type=str, nargs='?',
default='./data/repr/',
help='Output dir path')
parser.add_argument('--suffix', type=str,
help='Dataset output suffix')
parser.add_argument('--fmt', type=str, nargs='?',
default='float',
help='Dataset output number formatter')
parser.add_argument('--sep', type=str, nargs='?',
default=',',
help='Dataset output separator')
parser.add_argument('--dtype', type=str, nargs='?',
default='float',
help='Loaded dataset type')
parser.add_argument('--layers', type=int, nargs='+',
default=None,
help='Layer ids to extract')
parser.add_argument('--scopes', type=int, nargs='+',
default=None,
help='Scope lengths to extract')
parser.add_argument('--nodes', type=str, nargs='+',
default=None,
help='Node types to extract (sum|prod)')
parser.add_argument('--no-ext', action='store_true',
                        help='Do not concatenate the new representation to the original dataset')
parser.add_argument('--save-text', action='store_true',
help='Saving the filtered text to text as well')
parser.add_argument('-v', '--verbose', type=int, nargs='?',
default=1,
help='Verbosity level')
#
# parsing the args
args = parser.parse_args()
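    #
    # illustrative invocation (paths and names are placeholders):
    # python3 bin/filter_feature_repr.py data/repr/ -r bmnist_500 \
    #     --train-ext train.data --valid-ext valid.data --test-ext test.data \
    #     --info data/repr/bmnist_500.features.info \
    #     --layers 1 3 --nodes sum --suffix bmnist_500.filtered --save-text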
#
# fixing a seed
rand_gen = numpy.random.RandomState(args.seed)
os.makedirs(args.output, exist_ok=True)
#
# setting verbosity level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbose == 2:
logging.basicConfig(level=logging.DEBUG)
logging.info("Starting with arguments:\n%s", args)
#
# loading dataset splits
logging.info('Loading datasets: %s', args.dataset)
dataset_path = args.dataset
train = None
valid = None
test = None
pickle_split_path = os.path.join(dataset_path, '{}.{}'.format(args.repr_data,
PICKLE_SPLIT_EXT))
    logging.info('Looking for {}'.format(pickle_split_path))
if os.path.exists(pickle_split_path):
logging.info('Loading from pickle {}'.format(pickle_split_path))
with open(pickle_split_path, 'rb') as split_file:
train, valid, test = pickle.load(split_file)
else:
splits = []
for s in [args.train_ext,
args.valid_ext,
args.test_ext]:
split_path = os.path.join(dataset_path, '{}.{}'.format(args.repr_data,
s))
splits.append(numpy.loadtxt(split_path, dtype=args.dtype, delimiter=','))
train, valid, test = splits
# train, valid, test = dataset.load_dataset_splits(dataset_path,
# filter_regex=[args.train_ext,
# args.valid_ext,
# args.test_ext])
# dataset_name = args.train_ext.split('.')[0]
dataset_name = args.repr_data
n_instances = train.shape[0]
n_features = train.shape[1]
assert train.shape[1] == valid.shape[1]
assert valid.shape[1] == test.shape[1]
logging.info('\ttrain: {}\n\tvalid: {}\n\ttest: {}'.format(train.shape,
valid.shape,
test.shape))
logging.info('Loading feature info from {}'.format(args.info))
feat_s_t = perf_counter()
feature_info = load_feature_info(args.info)
feat_e_t = perf_counter()
logging.info('\tdone in {} secs'.format(feat_e_t - feat_s_t))
assert len(feature_info) == n_features
filter_prefix = "filtered"
#
# filtering by layer?
if args.layers:
        layer_range = None
        if len(args.layers) == 1:
            layer_range = range(0, args.layers[0])
        elif len(args.layers) == 2:
            layer_range = range(args.layers[0], args.layers[1])
        else:
            raise ValueError('More than two values specified for --layers')
filter_prefix += ".l_{}.".format(args.layers)
logging.info('Filtering by layer (range {})'.format(layer_range))
filtered_info = []
for layer in layer_range:
logging.info('\tgetting layer {}'.format(layer))
filtered_info.extend(filter_features_by_layer(feature_info, layer))
feature_info = filtered_info
#
# filtering by scope length?
if args.scopes:
        scope_range = None
        if len(args.scopes) == 1:
            scope_range = range(1, args.scopes[0])
        elif len(args.scopes) == 2:
            scope_range = range(args.scopes[0], args.scopes[1])
        else:
            raise ValueError('More than two values specified for --scopes')
filter_prefix += ".s_{}.".format(args.scopes)
logging.info('Filtering by scope length {} (range {})'.format(args.scopes, scope_range))
filtered_info = []
for scope_length in scope_range:
logging.info('\tgetting scope of length {}'.format(scope_length))
filtered_info.extend(filter_features_by_scope_length(feature_info, scope_length))
feature_info = filtered_info
#
# filtering by node type
if args.nodes:
filter_prefix += ".n_{}.".format(args.scopes)
logging.info('Filtering by node types ({})'.format(args.nodes))
filtered_info = []
for node_type in args.nodes:
logging.info('\tgetting nodes of type {} ({})'.format(node_type,
NODE_TYPE_DICT[node_type]))
filtered_info.extend(filter_features_by_node_type(feature_info,
NODE_TYPE_DICT[node_type]))
feature_info = filtered_info
logging.info('\n')
logging.info('Remaining features {} -> {}\n'.format(n_features, len(feature_info)))
#
# saving to file filtered info
feature_info_output_path = os.path.join(args.output, '{}.filtered.{}'.format(args.suffix,
INFO_FILE_EXT))
logging.info('Saving filtered feature info to {}'.format(feature_info_output_path))
store_feature_info(feature_info, feature_info_output_path)
#
# generating the mask
feature_mask = feature_mask_from_info(feature_info, n_features)
#
# applying the mask to the data
filt_train = train[:, feature_mask]
filt_valid = valid[:, feature_mask]
filt_test = test[:, feature_mask]
assert filt_train.shape[0] == train.shape[0]
assert filt_valid.shape[0] == valid.shape[0]
assert filt_test.shape[0] == test.shape[0]
assert filt_train.shape[1] == filt_valid.shape[1]
assert filt_valid.shape[1] == filt_test.shape[1]
logging.info('New shapes:\n\ttrain: {}\n\tvalid: {}\n\ttest: {}'.format(filt_train.shape,
filt_valid.shape,
filt_test.shape))
#
# remapping to new feature order ids, starting from 0
ordered_feature_info = [FeatureInfo(i,
info.node_id,
info.layer_id,
info.node_type,
info.node_scope)
for i, info in enumerate(sorted(feature_info,
key=lambda x: x.feature_id))]
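    #
    # note: feature ids are reassigned contiguously from 0, preserving the
    # relative order of the surviving features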
feature_info_output_path = os.path.join(args.output, '{}.{}'.format(args.suffix,
INFO_FILE_EXT))
logging.info('Saving remapped feature info to {}'.format(feature_info_output_path))
store_feature_info(ordered_feature_info, feature_info_output_path)
#
# storing them
if args.save_text:
train_out_path = os.path.join(args.output, '{}.{}'.format(args.suffix,
args.train_ext))
valid_out_path = os.path.join(args.output, '{}.{}'.format(args.suffix,
args.valid_ext))
test_out_path = os.path.join(args.output, '{}.{}'.format(args.suffix,
args.test_ext))
logging.info('\nSaving training set to: {}'.format(train_out_path))
numpy.savetxt(train_out_path, filt_train, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
logging.info('Saving validation set to: {}'.format(valid_out_path))
numpy.savetxt(valid_out_path, filt_valid, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
logging.info('Saving test set to: {}'.format(test_out_path))
numpy.savetxt(test_out_path, filt_test, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
#
# saving to pickle
split_file_path = os.path.join(args.output, '{}.{}'.format(args.suffix,
PICKLE_SPLIT_EXT))
logging.info('Saving pickle data splits to: {}'.format(split_file_path))
with open(split_file_path, 'wb') as split_file:
pickle.dump((filt_train, filt_valid, filt_test), split_file, protocol=4)
| 12,095
| 35
| 96
|
py
|
spyn-repr
|
spyn-repr-master/bin/rbm_repr_data.py
|
import argparse
try:
from time import perf_counter
except ImportError:
from time import time
perf_counter = time
import dataset
import numpy
import datetime
import os
import logging
from sklearn import neural_network
from spn.utils import stats_format
from spn import MARG_IND
import pickle
MODEL_EXT = 'model'
PREDS_EXT = 'lls'
TRAIN_PREDS_EXT = 'train.{}'.format(PREDS_EXT)
VALID_PREDS_EXT = 'valid.{}'.format(PREDS_EXT)
TEST_PREDS_EXT = 'test.{}'.format(PREDS_EXT)
DATA_EXT = 'data'
TRAIN_DATA_EXT = 'train.{}'.format(DATA_EXT)
VALID_DATA_EXT = 'valid.{}'.format(DATA_EXT)
TEST_DATA_EXT = 'test.{}'.format(DATA_EXT)
PICKLE_SPLIT_EXT = 'pickle'
FEATURE_FILE_EXT = 'features'
SCOPE_FILE_EXT = 'scopes'
FMT_DICT = {'int': '%d',
'float': '%.18e',
'float.8': '%.8e',
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("dataset", type=str,
help='Dataset dir')
parser.add_argument('--train-ext', type=str,
help='Training set name regex')
parser.add_argument('--valid-ext', type=str,
help='Validation set name regex')
parser.add_argument('--test-ext', type=str,
help='Test set name regex')
parser.add_argument('--seed', type=int, nargs='?',
default=1337,
help='Seed for the random generator')
parser.add_argument('-o', '--output', type=str, nargs='?',
default='./exp/rbm/',
help='Output dir path')
parser.add_argument('--suffix', type=str,
help='Dataset output suffix')
parser.add_argument('--sep', type=str, nargs='?',
default=',',
help='Dataset output separator')
parser.add_argument('--fmt', type=str, nargs='?',
default='int',
help='Dataset output number formatter')
parser.add_argument('--n-hidden', type=int, nargs='+',
default=[500],
help='Number of hidden units')
parser.add_argument('--l-rate', type=float, nargs='+',
default=[0.1],
help='Learning rate for training')
parser.add_argument('--batch-size', type=int, nargs='+',
default=[10],
help='Batch size during learning')
parser.add_argument('--n-iters', type=int, nargs='+',
default=[10],
help='Number of epochs')
parser.add_argument('--no-ext', action='store_true',
                        help='Do not concatenate the new representation to the original dataset')
parser.add_argument('--log', action='store_true',
help='Transforming the repr data with log')
parser.add_argument('--save-model', action='store_true',
help='Whether to store the model file as a pickle file')
parser.add_argument('-v', '--verbose', type=int, nargs='?',
default=1,
help='Verbosity level')
#
# parsing the args
args = parser.parse_args()
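    #
    # illustrative invocation (paths and values are placeholders; the lists
    # passed to --n-hidden/--l-rate/--batch-size/--n-iters define the grid):
    # python3 bin/rbm_repr_data.py data/bmnist/ \
    #     --train-ext bmnist.train.data --valid-ext bmnist.valid.data \
    #     --test-ext bmnist.test.data \
    #     --n-hidden 500 1000 --l-rate 0.1 0.01 --batch-size 20 \
    #     --n-iters 10 --suffix rbm --save-model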
#
# fixing a seed
rand_gen = numpy.random.RandomState(args.seed)
os.makedirs(args.output, exist_ok=True)
#
# setting verbosity level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbose == 2:
logging.basicConfig(level=logging.DEBUG)
logging.info("Starting with arguments:\n%s", args)
#
# loading dataset splits
logging.info('Loading datasets: %s', args.dataset)
dataset_path = args.dataset
train, valid, test = dataset.load_dataset_splits(dataset_path,
filter_regex=[args.train_ext,
args.valid_ext,
args.test_ext])
dataset_name = args.train_ext.split('.')[0]
n_instances = train.shape[0]
n_test_instances = test.shape[0]
logging.info('\ttrain: {}\n\tvalid: {}\n\ttest: {}'.format(train.shape,
valid.shape,
test.shape))
freqs, feature_vals = dataset.data_2_freqs(train)
logging.info('Opening log file...')
date_string = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    out_path = os.path.join(args.output, dataset_name + '_' + date_string)
    out_log_path = os.path.join(out_path, 'exp.log')
    test_lls_path = os.path.join(out_path, 'test.lls')
os.makedirs(out_path, exist_ok=True)
repr_train = None
repr_valid = None
repr_test = None
#
#
# performing a grid search along the hyperparameter space
best_valid_avg_pll = -numpy.inf
best_params = {}
best_model = None
n_hidden_values = args.n_hidden
learning_rate_values = args.l_rate
batch_size_values = args.batch_size
n_iter_values = args.n_iters
preamble = ("""n-hidden:\tlearning-rate:\tbatch-size:\tn-iters:""" +
"""\ttrain_pll\tvalid_pll:\ttest_pll\n""")
with open(out_log_path, 'w') as out_log:
out_log.write("parameters:\n{0}\n\n".format(args))
out_log.write(preamble)
out_log.flush()
#
# looping over all parameters combinations
for n_hidden in n_hidden_values:
for l_rate in learning_rate_values:
for batch_size in batch_size_values:
for n_iters in n_iter_values:
logging.info('Learning RBM for {} {} {} {}'.format(n_hidden,
l_rate,
batch_size,
n_iters))
#
# learning
rbm = neural_network.BernoulliRBM(n_components=n_hidden,
learning_rate=l_rate,
batch_size=batch_size,
n_iter=n_iters,
verbose=args.verbose - 1,
random_state=rand_gen)
fit_s_t = perf_counter()
rbm.fit(train)
fit_e_t = perf_counter()
logging.info('Trained in {} secs'.format(fit_e_t - fit_s_t))
#
# evaluating training
eval_s_t = perf_counter()
train_plls = rbm.score_samples(train)
eval_e_t = perf_counter()
train_avg_pll = numpy.mean(train_plls)
logging.info('\tTrain avg PLL: {} ({})'.format(train_avg_pll,
eval_e_t - eval_s_t))
#
# evaluating validation
eval_s_t = perf_counter()
valid_plls = rbm.score_samples(valid)
eval_e_t = perf_counter()
valid_avg_pll = numpy.mean(valid_plls)
logging.info('\tValid avg PLL: {} ({})'.format(valid_avg_pll,
eval_e_t - eval_s_t))
#
# evaluating test
eval_s_t = perf_counter()
test_plls = rbm.score_samples(test)
eval_e_t = perf_counter()
test_avg_pll = numpy.mean(test_plls)
logging.info('\tTest avg PLL: {} ({})'.format(test_avg_pll,
eval_e_t - eval_s_t))
#
# checking for improvements on validation
if valid_avg_pll > best_valid_avg_pll:
best_valid_avg_pll = valid_avg_pll
best_model = rbm
best_params['n-hidden'] = n_hidden
best_params['learning-rate'] = l_rate
best_params['batch-size'] = batch_size
best_params['n-iters'] = n_iters
best_test_plls = test_plls
#
# saving the model
if args.save_model:
prefix_str = stats_format([n_hidden,
l_rate,
batch_size,
n_iters],
'_',
digits=5)
model_path = os.path.join(out_path,
'best.{0}.{1}'.format(dataset_name,
MODEL_EXT))
with open(model_path, 'wb') as model_file:
pickle.dump(rbm, model_file)
logging.info('Dumped RBM to {}'.format(model_path))
#
# writing to file a line for the grid
stats = stats_format([n_hidden,
l_rate,
batch_size,
n_iters,
train_avg_pll,
valid_avg_pll,
test_avg_pll],
'\t',
digits=5)
out_log.write(stats + '\n')
out_log.flush()
#
# writing as last line the best params
out_log.write("{0}".format(best_params))
out_log.flush()
#
# saving the best test_lls
numpy.savetxt(test_lls_path, best_test_plls, delimiter='\n')
logging.info('Grid search ended.')
logging.info('Best params:\n\t%s', best_params)
#
# now creating the new datasets from best model
logging.info('\nConverting training set')
feat_s_t = perf_counter()
repr_train = best_model.transform(train)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
logging.info('Converting validation set')
feat_s_t = perf_counter()
repr_valid = best_model.transform(valid)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
logging.info('Converting test set')
feat_s_t = perf_counter()
repr_test = best_model.transform(test)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
assert train.shape[0] == repr_train.shape[0]
assert valid.shape[0] == repr_valid.shape[0]
assert test.shape[0] == repr_test.shape[0]
logging.info('New shapes {0} {1} {2}'.format(repr_train.shape,
repr_valid.shape,
repr_test.shape))
assert repr_train.shape[1] == repr_valid.shape[1]
assert repr_valid.shape[1] == repr_test.shape[1]
#
# log transform as well?
if args.log:
log_repr_train = numpy.log(repr_train)
log_repr_valid = numpy.log(repr_valid)
log_repr_test = numpy.log(repr_test)
# extending the original dataset
ext_train = None
ext_valid = None
ext_test = None
log_ext_train = None
log_ext_valid = None
log_ext_test = None
if args.no_ext:
ext_train = repr_train
ext_valid = repr_valid
ext_test = repr_test
if args.log:
log_ext_train = log_repr_train
log_ext_valid = log_repr_valid
log_ext_test = log_repr_test
else:
logging.info('\nConcatenating datasets')
ext_train = numpy.concatenate((train, repr_train), axis=1)
ext_valid = numpy.concatenate((valid, repr_valid), axis=1)
ext_test = numpy.concatenate((test, repr_test), axis=1)
assert train.shape[0] == ext_train.shape[0]
assert valid.shape[0] == ext_valid.shape[0]
assert test.shape[0] == ext_test.shape[0]
assert ext_train.shape[1] == train.shape[1] + repr_train.shape[1]
assert ext_valid.shape[1] == valid.shape[1] + repr_valid.shape[1]
assert ext_test.shape[1] == test.shape[1] + repr_test.shape[1]
if args.log:
log_ext_train = numpy.concatenate((train, log_repr_train), axis=1)
log_ext_valid = numpy.concatenate((valid, log_repr_valid), axis=1)
log_ext_test = numpy.concatenate((test, log_repr_test), axis=1)
assert train.shape[0] == log_ext_train.shape[0]
assert valid.shape[0] == log_ext_valid.shape[0]
assert test.shape[0] == log_ext_test.shape[0]
            assert log_ext_train.shape[1] == train.shape[1] + log_repr_train.shape[1]
            assert log_ext_valid.shape[1] == valid.shape[1] + log_repr_valid.shape[1]
            assert log_ext_test.shape[1] == test.shape[1] + log_repr_test.shape[1]
logging.info('New shapes {0} {1} {2}'.format(ext_train.shape,
ext_valid.shape,
ext_test.shape))
#
# storing them
train_out_path = os.path.join(out_path, '{}.{}'.format(args.suffix, args.train_ext))
valid_out_path = os.path.join(out_path, '{}.{}'.format(args.suffix, args.valid_ext))
test_out_path = os.path.join(out_path, '{}.{}'.format(args.suffix, args.test_ext))
logging.info('\nSaving training set to: {}'.format(train_out_path))
numpy.savetxt(train_out_path, ext_train, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
logging.info('Saving validation set to: {}'.format(valid_out_path))
numpy.savetxt(valid_out_path, ext_valid, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
logging.info('Saving test set to: {}'.format(test_out_path))
numpy.savetxt(test_out_path, ext_test, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
split_file_path = os.path.join(out_path, '{}.{}.{}'.format(args.suffix,
dataset_name,
PICKLE_SPLIT_EXT))
logging.info('Saving pickle data splits to: {}'.format(split_file_path))
with open(split_file_path, 'wb') as split_file:
pickle.dump((ext_train, ext_valid, ext_test), split_file)
if args.log:
# storing them
log_train_out_path = os.path.join(out_path,
'log.{}.{}'.format(args.suffix, args.train_ext))
log_valid_out_path = os.path.join(out_path,
'log.{}.{}'.format(args.suffix, args.valid_ext))
log_test_out_path = os.path.join(out_path,
'log.{}.{}'.format(args.suffix, args.test_ext))
logging.info('\nSaving log training set to: {}'.format(log_train_out_path))
numpy.savetxt(log_train_out_path,
log_ext_train, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
logging.info('Saving log validation set to: {}'.format(log_valid_out_path))
numpy.savetxt(log_valid_out_path,
log_ext_valid, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
logging.info('Saving log test set to: {}'.format(log_test_out_path))
numpy.savetxt(log_test_out_path,
log_ext_test, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
log_split_file_path = os.path.join(out_path, 'log.{}.{}.{}'.format(args.suffix,
dataset_name,
PICKLE_SPLIT_EXT))
logging.info('Saving pickle log data splits to: {}'.format(log_split_file_path))
with open(log_split_file_path, 'wb') as split_file:
pickle.dump((log_ext_train, log_ext_valid, log_ext_test), split_file)
| 16,883
| 39.684337
| 96
|
py
|
spyn-repr
|
spyn-repr-master/bin/theanok_benchmark.py
|
import sys
sys.setrecursionlimit(50000)
import dataset
import numpy
from numpy.testing import assert_array_almost_equal
import theano.misc.pkl_utils
import datetime
import os
import logging
from spn.utils import stats_format
from spn.linked.spn import evaluate_on_dataset
from spn.theanok.spn import *
from spn.theanok.layers import *
from spn.theanok.spn import evaluate_on_dataset_batch
from spn.factory import build_theanok_spn_from_block_linked
import pickle
from time import perf_counter
import argparse
THEANO_MODEL_EXT = 'theano_model'
#
# loading the spn (model and dataset are taken from the cli below)
parser = argparse.ArgumentParser()
parser.add_argument("dataset", type=str,
                    help='Specify a dataset name from data/ (e.g. caltech101)')
parser.add_argument('--seed', type=int, nargs='?',
default=1337,
help='Seed for the random generator')
parser.add_argument('-m', '--model', type=str,
default='models/caltech101/caltech101_spn_500/best.caltech101.model',
help='Model path to load')
parser.add_argument('-o', '--output', type=str, nargs='?',
default='./exp/learnspn-b/',
help='Output dir path')
parser.add_argument('-v', '--verbose', type=int, nargs='?',
default=1,
help='Verbosity level')
parser.add_argument('--max-nodes-layer', type=int,
default=None,
help='Max number of nodes per layer')
#
# parsing the args
args = parser.parse_args()
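#
# illustrative invocation (model path is a placeholder):
# python3 bin/theanok_benchmark.py caltech101 \
#     -m models/caltech101/caltech101_spn_500/best.caltech101.model \
#     --max-nodes-layer 1000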
#
# setting verbosity level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbose == 2:
logging.basicConfig(level=logging.DEBUG)
model_path = args.model
logging.info('\nLoading spn model from: {}'.format(model_path))
spn = None
with open(model_path, 'rb') as model_file:
load_start_t = perf_counter()
spn = pickle.load(model_file)
load_end_t = perf_counter()
logging.info('done in {}'.format(load_end_t - load_start_t))
#
# loading dataset
dataset_name = args.dataset
logging.info('Loading dataset {}'.format(dataset_name))
train, valid, test = dataset.load_train_val_test_csvs(dataset_name, path='data/')
logging.info('\nEvaluating on training set')
eval_s_t = perf_counter()
train_preds = evaluate_on_dataset(spn, train)
eval_e_t = perf_counter()
train_avg_ll = numpy.mean(train_preds)
logging.info('\t{}'.format(train_avg_ll))
logging.info('\tdone in {}'.format(eval_e_t - eval_s_t))
logging.info('Evaluating on validation set')
eval_s_t = perf_counter()
valid_preds = evaluate_on_dataset(spn, valid)
eval_e_t = perf_counter()
valid_avg_ll = numpy.mean(valid_preds)
logging.info('\t{}'.format(valid_avg_ll))
logging.info('\tdone in {}'.format(eval_e_t - eval_s_t))
logging.info('Evaluating on test set')
eval_s_t = perf_counter()
test_preds = evaluate_on_dataset(spn, test)
eval_e_t = perf_counter()
test_avg_ll = numpy.mean(test_preds)
logging.info('\t{}'.format(test_avg_ll))
logging.info('\tdone in {}'.format(eval_e_t - eval_s_t))
freqs, features = dataset.data_2_freqs(train)
logging.info('Encoding train')
ind_train = dataset.one_hot_encoding(train, feature_values=features)
logging.info('\t{}'.format(ind_train.shape))
logging.info('Encoding valid')
ind_valid = dataset.one_hot_encoding(valid, feature_values=features)
logging.info('\t{}'.format(ind_valid.shape))
logging.info('Encoding test')
ind_test = dataset.one_hot_encoding(test, feature_values=features)
logging.info('\t{}'.format(ind_test.shape))
#
# converting the spn
theano_model_path = '{}.{}'.format(args.model, THEANO_MODEL_EXT)
theano_spn = None
logging.info('Looking for theano spn model in {}'.format(theano_model_path))
if os.path.exists(theano_model_path):
logging.info('Loading theanok pickle model')
with open(theano_model_path, 'rb') as mfile:
# theano_spn = theano.misc.pkl_utils.load(mfile)
theano_spn = BlockLayeredSpn.load(mfile)
# theano_spn = pickle.load(mfile)
else:
logging.info('Creating model anew')
theano_spn = build_theanok_spn_from_block_linked(spn, ind_train.shape[1], features,
max_n_nodes_layer=args.max_nodes_layer)
logging.info('Saving model to {}'.format(theano_model_path))
with open(theano_model_path, 'wb') as mfile:
# theano.misc.pkl_utils.dump(theano_spn, mfile)
theano_spn.dump(mfile)
# pickle.dump(theano_spn, mfile)
logging.info('Theanok spn:\n{}'.format(theano_spn))
#
# evaluating theanok spn with mini batches
# batch_sizes = [100, 200, 500, 1000, None]
batch_sizes = [None]
for batch_size in batch_sizes:
logging.info('\n\n\tsize: {}'.format(batch_size))
logging.info('Evaluating on training set')
eval_s_t = perf_counter()
b_train_preds = evaluate_on_dataset_batch(theano_spn, ind_train, batch_size)
eval_e_t = perf_counter()
b_train_avg_ll = numpy.mean(b_train_preds)
logging.info('\t{}'.format(b_train_avg_ll))
logging.info('\tdone in {}'.format(eval_e_t - eval_s_t))
logging.info('Evaluating on validation set')
eval_s_t = perf_counter()
b_valid_preds = evaluate_on_dataset_batch(theano_spn, ind_valid, batch_size)
eval_e_t = perf_counter()
b_valid_avg_ll = numpy.mean(b_valid_preds)
logging.info('\t{}'.format(b_valid_avg_ll))
logging.info('\tdone in {}'.format(eval_e_t - eval_s_t))
logging.info('Evaluating on test set')
eval_s_t = perf_counter()
b_test_preds = evaluate_on_dataset_batch(theano_spn, ind_test, batch_size)
eval_e_t = perf_counter()
b_test_avg_ll = numpy.mean(b_test_preds)
logging.info('\t{}'.format(b_test_avg_ll))
logging.info('\tdone in {}'.format(eval_e_t - eval_s_t))
assert_array_almost_equal(b_train_preds, train_preds, decimal=5)
assert_array_almost_equal(b_valid_preds, valid_preds, decimal=5)
assert_array_almost_equal(b_test_preds, test_preds, decimal=5)
| 5,970
| 31.451087
| 92
|
py
|
spyn-repr
|
spyn-repr-master/bin/marg_feature_gen.py
|
import argparse
try:
from time import perf_counter
except ImportError:
from time import time
perf_counter = time
import dataset
import numpy
import datetime
import os
import logging
from spn import MARG_IND
from spn.linked.representation import extract_features_marginalization_rand
from spn.linked.representation import extract_features_marginalization_rectangles
PICKLE_SPLIT_EXT = 'pickle'
FEATURE_FILE_EXT = 'features'
SCOPE_FILE_EXT = 'scopes'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("dataset", type=str,
help='Dataset dir')
parser.add_argument('--seed', type=int, nargs='?',
default=1337,
help='Seed for the random generator')
parser.add_argument('-o', '--output', type=str, nargs='?',
default='./data/repr/',
help='Output dir path')
parser.add_argument('--suffix', type=str,
help='Dataset output suffix')
parser.add_argument('--rand-marg-rect', type=int, nargs='+',
default=None,
help='Generating features by marginalization over random rectangles')
parser.add_argument('--rand-marg', type=int, nargs='+',
default=None,
help='Generating features by marginalization over random subsets')
parser.add_argument('-v', '--verbose', type=int, nargs='?',
default=1,
help='Verbosity level')
#
# parsing the args
args = parser.parse_args()
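    #
    # illustrative invocations (values are placeholders):
    # python3 bin/marg_feature_gen.py bmnist --suffix marg \
    #     --rand-marg 50 100 100 100
    # python3 bin/marg_feature_gen.py bmnist --suffix marg-rect \
    #     --rand-marg-rect 100 4 4 10 10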
#
# fixing a seed
rand_gen = numpy.random.RandomState(args.seed)
os.makedirs(args.output, exist_ok=True)
#
# setting verbosity level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbose == 2:
logging.basicConfig(level=logging.DEBUG)
logging.info("Starting with arguments:\n%s", args)
#
# loading dataset splits
logging.info('Loading datasets: %s', args.dataset)
dataset_name = args.dataset
train, valid, test = dataset.load_train_val_test_csvs(dataset_name)
logging.info('train shape: {}\nvalid shape: {}\ntest shape: {}'.format(train.shape,
valid.shape,
test.shape))
n_instances = train.shape[0]
n_features = train.shape[1]
assert valid.shape[1] == n_features
assert test.shape[1] == n_features
feature_file_path = '{}.{}.{}'.format(args.suffix,
dataset_name,
FEATURE_FILE_EXT)
feature_file_path = os.path.join(args.output, feature_file_path)
logging.info('Saving features to {}'.format(feature_file_path))
if args.rand_marg:
logging.info('Rand mask feature generation')
n_configs = len(args.rand_marg) // 2
assert len(args.rand_marg) % 2 == 0
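        #
        # --rand-marg is parsed as (feature_size, n_rand) pairs, e.g.
        # (illustrative) `--rand-marg 50 100 100 100` yields two configs:
        # (50, 100) and (100, 100)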
feature_sizes = [args.rand_marg[i * 2] for i in range(n_configs)]
n_rand_sizes = [args.rand_marg[i * 2 + 1] for i in range(n_configs)]
logging.info('Features sizes {} rand sizes {}'.format(feature_sizes, n_rand_sizes))
feat_s_t = perf_counter()
feature_masks = extract_features_marginalization_rand(n_features,
feature_sizes,
n_rand_sizes,
feature_file_path=feature_file_path,
marg_value=MARG_IND,
rand_gen=rand_gen,
dtype=float)
feat_e_t = perf_counter()
logging.info('\tExtracted features in {}'.format(feat_e_t - feat_s_t))
elif args.rand_marg_rect:
logging.info('Rand rectangular mask feature generation')
n_configs = len(args.rand_marg_rect) // 5
assert len(args.rand_marg_rect) % 5 == 0
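        #
        # --rand-marg-rect is parsed as quintuples
        # (feature_size, min_rows, min_cols, max_rows, max_cols), one per config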
feature_sizes = [args.rand_marg_rect[i * 5] for i in range(n_configs)]
rect_min_sizes = [(args.rand_marg_rect[i * 5 + 1], args.rand_marg_rect[i * 5 + 2])
for i in range(n_configs)]
        rect_max_sizes = [(args.rand_marg_rect[i * 5 + 3], args.rand_marg_rect[i * 5 + 4])
for i in range(n_configs)]
logging.info('Features sizes {} min sizes {} max sizes {}'.format(feature_sizes,
rect_min_sizes,
rect_max_sizes))
#
# a very dirty way to get these parameters
if 'bmnist' in dataset_name:
n_rows = 28
n_cols = 28
elif 'caltech101' in dataset_name:
n_rows = 28
n_cols = 28
elif 'ocr_letters' in dataset_name:
n_rows = 16
n_cols = 8
else:
raise ValueError(
'Unrecognized dataset, cannot retrieve number of columns and rows')
logging.info('Rectangular stats: features {} rows {} cols {}'.format(n_features,
n_rows,
n_cols))
feat_s_t = perf_counter()
feature_masks = extract_features_marginalization_rectangles(n_features,
n_rows, n_cols,
feature_sizes,
rect_min_sizes,
rect_max_sizes,
feature_file_path=feature_file_path,
marg_value=MARG_IND,
rand_gen=rand_gen,
dtype=float)
feat_e_t = perf_counter()
logging.info('\tExtracted features in {}'.format(feat_e_t - feat_s_t))
| 6,508
| 38.448485
| 104
|
py
|
spyn-repr
|
spyn-repr-master/bin/visualize_spn.py
|
from spn import MARG_IND
from spn.utils import stats_format
from spn.utils import approx_scope_histo_quartiles
from spn.linked.spn import evaluate_on_dataset
from spn.linked.nodes import SumNode
from spn.linked.nodes import ProductNode
from spn.linked.representation import load_features_from_file
from spn.linked.representation import retrieve_all_nodes_mpe_instantiations
from spn.linked.representation import scope_stats
from spn.linked.representation import extract_features_all_marginals_spn
from spn.linked.representation import extract_features_all_marginals_ml
from spn.linked.representation import node_activations_for_instance
from spn.linked.representation import extract_features_marginalization_grid
from spn.linked.representation import extract_feature_marginalization_from_masks
from spn.linked.representation import instance_from_disjoint_feature_masks
from spn.linked.representation import get_nearest_neighbour
from spn.linked.representation import random_rectangular_feature_mask
from spn.linked.representation import extract_instances_groups
from visualize import array_2_mat
from visualize import plot_m_by_n_images, plot_m_by_n_by_p_by_q_images
from visualize import tiling_sizes
from visualize import binary_cmap, inv_binary_cmap
from visualize import ternary_cmap, inv_ternary_cmap
from visualize import scope_histogram, layer_scope_histogram, multiple_scope_histogram
from visualize import scope_maps, scope_map_layerwise
from visualize import plot_m_by_n_heatmaps
# from visualize import visualize_node_activations_for_instance
import argparse
try:
from time import perf_counter
except ImportError:
from time import time
perf_counter = time
import dataset
import numpy
import random
import datetime
import os
import logging
import matplotlib
import matplotlib.pyplot as pyplot
from collections import defaultdict
import pickle
PREDS_EXT = 'lls'
TRAIN_PREDS_EXT = 'train.{}'.format(PREDS_EXT)
VALID_PREDS_EXT = 'valid.{}'.format(PREDS_EXT)
TEST_PREDS_EXT = 'test.{}'.format(PREDS_EXT)
DATA_EXT = 'data'
TRAIN_DATA_EXT = 'train.{}'.format(DATA_EXT)
VALID_DATA_EXT = 'valid.{}'.format(DATA_EXT)
TEST_DATA_EXT = 'test.{}'.format(DATA_EXT)
PICKLE_SPLIT_EXT = 'pickle'
FEATURE_FILE_EXT = 'features'
INFO_FILE_EXT = 'features.info'
SCOPE_FILE_EXT = 'scopes'
SAMPLE_IMGS_EXT = 'samples'
MPE_VIS_EXT = 'mpe'
def filter_sum_nodes(spn):
return [node for node in spn.top_down_nodes() if isinstance(node, SumNode)]
def filter_product_nodes(spn):
return [node for node in spn.top_down_nodes() if isinstance(node, ProductNode)]
def filter_leaf_nodes(spn):
return [node for node in spn.top_down_nodes()
if not isinstance(node, ProductNode) and not isinstance(node, SumNode)]
def filter_nodes_by_layer(spn, layer_id):
return [node for i, layer in enumerate(spn.bottom_up_layers())
for node in layer.nodes() if layer_id == i]
def filter_nodes_by_scope_length(spn, min_scope_len, max_scope_len):
return [node for node in spn.top_down_nodes()
if ((hasattr(node, 'var_scope') and
len(node.var_scope) >= min_scope_len and
len(node.var_scope) < max_scope_len)
or
(hasattr(node, 'var') and
len(node.var) >= min_scope_len and
len(node.var) < max_scope_len))]
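# filter_nodes accepts a filter string:
# 'all' | 'sum' | 'prod' | 'leaves' | 'layer<id>' (e.g. 'layer2') |
# 'scope<min>,<max>' (e.g. 'scope4,16', half-open on scope length)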
def filter_nodes(spn, filter_str):
nodes = None
if filter_str == 'all':
nodes = list(spn.top_down_nodes())
elif filter_str == 'sum':
nodes = filter_sum_nodes(spn)
elif filter_str == 'prod':
nodes = filter_product_nodes(spn)
elif filter_str == 'leaves':
nodes = filter_leaf_nodes(spn)
elif 'layer' in filter_str:
layer_id = int(filter_str.replace('layer', ''))
nodes = filter_nodes_by_layer(spn, layer_id)
elif 'scope' in filter_str:
        scope_ids = filter_str.replace('scope', '')
min_scope, max_scope = scope_ids.split(',')
min_scope, max_scope = int(min_scope), int(max_scope)
nodes = filter_nodes_by_scope_length(spn, min_scope, max_scope)
return nodes
def retrieve_instance(splits, instance_id):
cum_id = 0
for split in splits:
n_instances = len(split)
cum_id += n_instances
if instance_id < cum_id:
return split[cum_id - n_instances + instance_id]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("dataset", type=str,
                        help='Dataset name (e.g. bmnist)')
parser.add_argument('--model', type=str, nargs='+',
default=[],
help='Spn model file path')
parser.add_argument('--seed', type=int, nargs='?',
default=1337,
help='Seed for the random generator')
parser.add_argument('--sample', type=int,
default=None,
help='Sampling N instances and visualize them')
parser.add_argument('--nn', action='store_true',
help='Displaying the images nns in the train')
parser.add_argument('--size', type=int, nargs='+',
default=(28, 28),
help='Image sample sizes (rows x cols) pixels')
parser.add_argument('--fig-size', type=int, nargs='+',
default=(12, 8),
help='Figure size')
parser.add_argument('--space', type=float, nargs='+',
default=(0.1, 0.1),
help='Space between tiles')
parser.add_argument('--n-cols', type=int,
default=10,
help='Number of columns in image tiling')
parser.add_argument('--n-cols-layer', type=int,
default=5,
help='Number of columns in layer mpe image tiling')
parser.add_argument('--n-cols-scope', type=int,
default=5,
help='Number of columns in scope mpe image tiling')
parser.add_argument('--tile-size', type=float, nargs='+',
default=(1.5, 1.5),
help='Size of a single image tile height')
parser.add_argument('--scope-range', type=int, nargs='+',
default=None,
help='Scope range for mpe instantiations')
parser.add_argument('--hid-groups', type=str, nargs='+',
default=None,
                        help='Data path and number of instance clusters after MPE descents')
parser.add_argument('--mask-sizes', type=int, nargs='+',
default=(10, 10, 10, 10),
help='Min max sizes for rect random mask')
parser.add_argument('--max-n-images', type=int,
default=9,
help='Max N of images to visualize at once')
parser.add_argument('--max-n-images-layers', type=int,
default=25,
help='Max N of mpe images to visualize at once')
parser.add_argument('--max-n-images-scopes', type=int,
default=25,
help='Max N of mpe images to visualize at once')
parser.add_argument('--dpi', type=int,
default=900,
help='Image dpi')
parser.add_argument('--ylim', type=int,
                        default=1000000,
help='Max limit y axis')
parser.add_argument('--xlim', type=int, nargs='+',
default=None,
help='Max limit x axis')
parser.add_argument('-o', '--output', type=str, nargs='?',
default='./data/repr/',
help='Output dir path')
parser.add_argument('--suffix', type=str,
help='Dataset output suffix')
parser.add_argument('--lines', type=str,
default=None,
help='Path to tsv containing line data')
parser.add_argument('--mpe', type=str, nargs='+',
default=None,
help='MPE node visualization type (sum|prod|layer|scope)')
parser.add_argument('--invert', action='store_true',
help='Inverting colormaps')
parser.add_argument('--scope', type=str, nargs='+',
default=None,
help='Showing scope length diagrams (hist|layer|map)')
parser.add_argument('--activations', type=str, nargs='+',
default=None,
                        help='Visualize one instance activations. '
                        'Params: instance-id [filters]')
parser.add_argument('--marg-activations', type=str, nargs='+',
default=None,
                        help='Visualize one instance marginal activations. '
                        'Params: instance-id [instance-id]*')
parser.add_argument('--all-maxes', action='store_true',
help='Getting all max children in during MPE traversal')
parser.add_argument('--save', type=str,
default=None,
help='Saving format')
parser.add_argument('-v', '--verbose', type=int, nargs='?',
default=1,
help='Verbosity level')
logging.basicConfig(level=logging.INFO)
#
# parsing the args
args = parser.parse_args()
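    #
    # illustrative invocation (model path is a placeholder):
    # python3 bin/visualize_spn.py bmnist --model exp/bmnist/best.bmnist.model \
    #     --sample 25 --nn --invert --save pdf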
assert len(args.size) == 2
n_rows, n_cols = args.size
assert len(args.tile_size) == 2
row_tile_size, col_tile_size = args.tile_size
logging.info('Images size {}, tile size {}'.format(args.size,
args.tile_size))
#
# fixing a seed
rand_gen = numpy.random.RandomState(args.seed)
random.seed(args.seed)
os.makedirs(args.output, exist_ok=True)
#
# setting verbosity level
# if args.verbose == 1:
# logging.basicConfig(level=logging.INFO)
# elif args.verbose == 2:
# logging.basicConfig(level=logging.DEBUG)
logging.info("Starting with arguments:\n%s", args)
train = None
valid = None
test = None
    dataset_name = args.dataset
    pickle_split_path = '{}.pickle'.format(args.dataset)
    logging.info('Looking for {}'.format(pickle_split_path))
if os.path.exists(pickle_split_path):
logging.info('Loading from pickle {}'.format(pickle_split_path))
with open(pickle_split_path, 'rb') as split_file:
train, valid, test = pickle.load(split_file)
else:
        logging.info('Loading datasets: %s', dataset_name)
        train, valid, test = dataset.load_train_val_test_csvs(dataset_name)
n_instances = train.shape[0]
n_features = train.shape[1]
n_test_instances = test.shape[0]
freqs, feature_vals = dataset.data_2_freqs(train)
logging.info('\ttrain: {}\n\tvalid: {}\n\ttest: {}'.format(train.shape,
valid.shape,
test.shape))
#
# loading models
# assert len(args.model) > 0
spns = []
for model_path in args.model:
logging.info('\nLoading spn model from: {}'.format(model_path))
spn = None
with open(model_path, 'rb') as model_file:
load_start_t = perf_counter()
spn = pickle.load(model_file)
load_end_t = perf_counter()
logging.info('done in {}'.format(load_end_t - load_start_t))
spns.append(spn)
logging.info('Spn stats:\n\tlayers\t{}\n\t'
'nodes\t{}\n\tweights\t{}\n\tleaves\t{}'.format(spn.n_layers(),
spn.n_nodes(),
spn.n_weights(),
spn.n_leaves()))
# NOTE
# from now on only the last model will be used by some options!
color_map = None
w_space, h_space = float(args.space[0]), float(args.space[1])
logging.info('Spaces w: {} h: {}'.format(w_space, h_space))
if args.sample is not None:
sample_save_path = None
pdf = False
if args.save:
sample_save_path = os.path.join(args.output, '{}.{}.{}'.format(args.sample,
dataset_name,
SAMPLE_IMGS_EXT))
pdf = True if args.save == 'pdf' else False
if args.invert:
color_map = inv_binary_cmap
else:
color_map = binary_cmap
logging.info('Sampling {} instances'.format(args.sample))
sample_s_t = perf_counter()
sampled_instances = spn.sample(args.sample, rand_gen=rand_gen)
sample_e_t = perf_counter()
logging.info('\tdone in {}'.format(sample_e_t - sample_s_t))
image_matrixes = [array_2_mat(img, n_rows, n_cols) for img in sampled_instances]
n_images = min(len(image_matrixes), args.max_n_images)
m, n = tiling_sizes(n_images, args.n_cols)
canvas_size = n * col_tile_size, m * row_tile_size
logging.info('Displaying {} x {} images on canvas {}'.format(m, n, canvas_size))
plot_m_by_n_images(image_matrixes[:n_images],
m, n,
fig_size=canvas_size,
cmap=color_map,
save_path=sample_save_path,
w_space=w_space,
h_space=h_space,
pdf=pdf)
if args.nn:
sample_save_path = None
pdf = False
if args.save:
sample_save_path = os.path.join(args.output, '{}.{}.{}.nn'.format(args.sample,
dataset_name,
SAMPLE_IMGS_EXT))
pdf = True if args.save == 'pdf' else False
#
# retrieving the nn
nns = get_nearest_neighbour(sampled_instances, train)
image_matrixes = [array_2_mat(img, n_rows, n_cols) for nn_id, img in nns]
logging.info('Displaying {} x {} images on canvas {}'.format(m, n, canvas_size))
plot_m_by_n_images(image_matrixes[:n_images],
m, n,
fig_size=canvas_size,
w_space=w_space,
h_space=h_space,
cmap=color_map,
save_path=sample_save_path,
pdf=pdf)
if args.mpe is not None:
only_first_max = False if args.all_maxes else True
if args.invert:
color_map = inv_ternary_cmap
else:
color_map = ternary_cmap
#
# getting nodes mpe instantiation
logging.info('Getting nodes MPE instantiations')
mpe_s_t = perf_counter()
node_stats = retrieve_all_nodes_mpe_instantiations(spn,
n_features,
only_first_max=only_first_max)
mpe_e_t = perf_counter()
logging.info('\tdone in {}'.format(mpe_e_t - mpe_s_t))
#
# now different kind of visualization depending on filters
if 'sum' in args.mpe:
logging.info('Visualizing only sum nodes')
pdf = False
sample_save_path = None
if args.save:
sample_save_path = os.path.join(args.output, '{}.{}.{}'.format('sum-mpe',
dataset_name,
MPE_VIS_EXT))
pdf = True if args.save == 'pdf' else False
sum_node_mpes = {node: info.mpes
for node, info in node_stats.items() if isinstance(node, SumNode)}
#
# retrieving images
mpe_insts_list = []
for node, mpe_insts in sum_node_mpes.items():
len_mpe = mpe_insts.shape[0] if mpe_insts.ndim > 1 else 1
if len_mpe > 1:
mpe_insts_list.append(mpe_insts[0])
else:
mpe_insts_list.append(mpe_insts)
image_matrixes = [array_2_mat(img, n_rows, n_cols) for img in mpe_insts_list]
n_images = min(len(image_matrixes), args.max_n_images)
m, n = tiling_sizes(n_images, args.n_cols)
            canvas_size = n * col_tile_size, m * row_tile_size
logging.info('Displaying {} x {} images on canvas {}'.format(m, n, canvas_size))
plot_m_by_n_images(image_matrixes[:n_images],
m, n,
fig_size=canvas_size,
cmap=color_map,
save_path=sample_save_path,
pdf=pdf)
elif 'prod' in args.mpe:
logging.info('Visualizing only product nodes')
pdf = False
sample_save_path = None
if args.save:
sample_save_path = os.path.join(args.output, '{}.{}.{}'.format('prod-mpe',
dataset_name,
MPE_VIS_EXT))
pdf = True if args.save == 'pdf' else False
prod_node_mpes = {node: info.mpes
for node, info in node_stats.items() if isinstance(node, ProductNode)}
#
# retrieving images
mpe_insts_list = []
for node, mpe_insts in prod_node_mpes.items():
len_mpe = mpe_insts.shape[0] if mpe_insts.ndim > 1 else 1
if len_mpe > 1:
mpe_insts_list.append(mpe_insts[0])
else:
mpe_insts_list.append(mpe_insts)
image_matrixes = [array_2_mat(img, n_rows, n_cols) for img in mpe_insts_list]
n_images = min(len(image_matrixes), args.max_n_images)
m, n = tiling_sizes(n_images, args.n_cols)
            canvas_size = n * col_tile_size, m * row_tile_size
logging.info('Displaying {} x {} images on canvas {}'.format(m, n, canvas_size))
plot_m_by_n_images(image_matrixes[:n_images],
m, n,
fig_size=canvas_size,
cmap=color_map,
save_path=sample_save_path,
pdf=pdf)
elif 'layer' in args.mpe:
logging.info('Visualizing by layers')
pdf = False
sample_save_path = None
if args.save:
sample_save_path = os.path.join(args.output, '{}.{}.{}'.format('layer-mpe',
dataset_name,
MPE_VIS_EXT))
pdf = True if args.save == 'pdf' else False
layer_node_mpes = defaultdict(list)
for node, info in node_stats.items():
layer_node_mpes[info.layer].append((node, info.mpes))
n_layers = len(layer_node_mpes)
m_layer, n_layer = tiling_sizes(n_layers, args.n_cols_layer)
layer_mpe_insts_list = []
for layer, node_mpes in layer_node_mpes.items():
mpe_insts_list = []
for node, mpe_insts in node_mpes:
len_mpe = mpe_insts.shape[0] if mpe_insts.ndim > 1 else 1
if len_mpe > 1:
mpe_insts_list.append(array_2_mat(mpe_insts[0], n_rows, n_cols))
else:
mpe_insts_list.append(array_2_mat(mpe_insts, n_rows, n_cols))
n_images = min(len(mpe_insts_list), args.max_n_images_layers)
layer_mpe_insts_list.append(mpe_insts_list[:n_images])
m, n = args.n_cols, args.n_cols
canvas_size = ((m * m_layer + m_layer - 1) * row_tile_size,
(n * n_layer + n_layer - 1) * col_tile_size)
logging.info('Displaying {} x {} images on canvas {}'.format(m, n, canvas_size))
plot_m_by_n_by_p_by_q_images(layer_mpe_insts_list,
m, n, m_layer, n_layer,
fig_size=canvas_size,
cmap=color_map,
save_path=sample_save_path,
pdf=pdf)
elif 'scope' in args.mpe:
print(args.mpe)
#
# ordering by scope
pdf = False
sample_save_path = None
if args.save:
sample_save_path = os.path.join(args.output, '{}.{}.{}'.format('scope-mpe',
dataset_name,
MPE_VIS_EXT))
pdf = True if args.save == 'pdf' else False
if args.scope_range:
assert len(args.scope_range) == 2
min_scope_len, max_scope_len = int(args.scope_range[0]), int(args.scope_range[1])
logging.info('Visualizing by scopes {} -> {}'.format(min_scope_len,
max_scope_len))
scope_nodes = [info.mpes for node, info in node_stats.items()
if len(info.scope) >= min_scope_len and
len(info.scope) < max_scope_len]
#
# shuffling them
random.shuffle(scope_nodes)
mpe_insts_list = []
for mpe_insts in scope_nodes:
len_mpe = mpe_insts.shape[0] if mpe_insts.ndim > 1 else 1
if len_mpe > 1:
mpe_insts_list.append(mpe_insts[0])
else:
# print(array_2_mat(mpe_insts, n_rows, n_cols))
mpe_insts_list.append(mpe_insts)
n_images = min(len(mpe_insts_list), args.max_n_images_scopes)
logging.info('Found {} instantiations, limiting to {}'.format(len(mpe_insts_list),
n_images))
m, n = tiling_sizes(n_images, args.n_cols)
canvas_size = n * col_tile_size, m * row_tile_size
logging.info('Displaying {} x {} images on canvas {}'.format(m, n, canvas_size))
mpe_insts_mat = [array_2_mat(img, n_rows, n_cols)
for img in mpe_insts_list[:n_images]]
plot_m_by_n_images(mpe_insts_mat,
m, n,
fig_size=canvas_size,
cmap=color_map,
save_path=sample_save_path,
dpi=args.dpi,
w_space=w_space,
h_space=h_space,
pdf=pdf)
if args.nn:
if args.invert:
color_map = inv_binary_cmap
else:
color_map = binary_cmap
sample_save_path = None
pdf = False
if args.save:
sample_save_path = os.path.join(args.output, '{}.{}.{}.nn'.format('scope-mpe',
dataset_name,
MPE_VIS_EXT))
pdf = True if args.save == 'pdf' else False
#
# retrieving the nn
nns = get_nearest_neighbour(mpe_insts_list[:n_images], train, masked=False)
image_matrixes = [array_2_mat(img, n_rows, n_cols) for nn_id, img in nns]
logging.info(
'Displaying {} x {} images on canvas {}'.format(m, n, canvas_size))
plot_m_by_n_images(image_matrixes[:n_images],
m, n,
fig_size=canvas_size,
w_space=w_space,
h_space=h_space,
cmap=color_map,
save_path=sample_save_path,
pdf=pdf)
else:
logging.info('Visualizing by scopes')
scope_node_mpes = defaultdict(list)
for node, info in node_stats.items():
scope_node_mpes[len(info.scope)].append((node, info.mpes))
n_scopes = len(scope_node_mpes)
m_scope, n_scope = tiling_sizes(n_scopes, args.n_cols_scope)
scope_mpe_insts_list = []
for scope_length in sorted(scope_node_mpes):
logging.info(scope_length)
node_mpes = scope_node_mpes[scope_length]
mpe_insts_list = []
for node, mpe_insts in node_mpes:
len_mpe = mpe_insts.shape[0] if mpe_insts.ndim > 1 else 1
if len_mpe > 1:
mpe_insts_list.append(array_2_mat(mpe_insts[0], n_rows, n_cols))
else:
mpe_insts_list.append(array_2_mat(mpe_insts, n_rows, n_cols))
n_images = min(len(mpe_insts_list), args.max_n_images_scopes)
scope_mpe_insts_list.append(mpe_insts_list[:n_images])
m, n = args.n_cols, args.n_cols
canvas_size = ((m * m_scope + m_scope - 1) * row_tile_size,
(n * n_scope + n_scope - 1) * col_tile_size)
logging.info('Displaying {} x {} images on canvas {}'.format(m, n, canvas_size))
plot_m_by_n_by_p_by_q_images(scope_mpe_insts_list,
m, n, m_scope, n_scope,
fig_size=canvas_size,
cmap=color_map,
save_path=sample_save_path,
pdf=pdf)
if args.scope:
if 'hist' in args.scope:
logging.info('Showing scope histogram')
pdf = False
sample_save_path = None
if args.save:
sample_save_path = os.path.join(args.output, '{}.{}'.format('scope-histogram',
dataset_name))
pdf = True if args.save == 'pdf' else False
xlims = None
if args.xlim:
xlims = int(args.xlim[0]), int(args.xlim[1])
#
# plotting histogram
scope_list = scope_histogram(spn,
# fig_size=args.size,
ylim=args.ylim,
xlim=xlims,
dpi=args.dpi,
save_path=sample_save_path,
pdf=pdf)
#
# computing scope quartiles
no_leaves = True
if no_leaves:
scope_list[0] = 0
quartiles = approx_scope_histo_quartiles(scope_list)
logging.info('Approx quartile scope lengths {}'.format(quartiles))
elif 'map' in args.scope:
logging.info('Showing scope maps')
pdf = False
sample_save_path = None
if args.save:
sample_save_path = os.path.join(args.output, '{}.{}'.format('scope-map',
dataset_name))
pdf = True if args.save == 'pdf' else False
xlim = None
if args.xlim:
xlim = int(args.xlim[0])
#
# retrieving scope lists
scope_lists = scope_maps(spns,
cmap=pyplot.get_cmap('Purples'),
min_val=None,
max_val=None,
xlim=xlim,
fig_size=args.fig_size,
w_space=w_space,
h_space=h_space,
dpi=args.dpi,
save_path=sample_save_path,
pdf=pdf)
elif 'lmap' in args.scope:
logging.info('Showing scope maps layerwise')
pdf = False
sample_save_path = None
if args.save:
sample_save_path = os.path.join(args.output, '{}.{}'.format('scope-map-layerwise',
dataset_name))
pdf = True if args.save == 'pdf' else False
xlim = None
if args.xlim:
xlim = int(args.xlim[0])
#
# retrieving scope lists
scope_lists = scope_map_layerwise(spn,
cmap=pyplot.get_cmap('Purples'),
xlim=xlim,
fig_size=args.fig_size,
w_space=w_space,
h_space=h_space,
dpi=args.dpi,
save_path=sample_save_path,
pdf=pdf)
elif 'comp-hist' in args.scope:
logging.info('Showing comparative scope histograms')
pdf = False
sample_save_path = None
if args.save:
sample_save_path = os.path.join(args.output, '{}.{}'.format('comp-scope-histogram',
dataset_name))
pdf = True if args.save == 'pdf' else False
multiple_scope_histogram(spns,
save_path=sample_save_path,
y_log=True,
pdf=pdf)
elif 'layer' in args.scope:
            logging.info('Showing scope histogram by layer')
pdf = False
sample_save_path = None
if args.save:
sample_save_path = os.path.join(args.output, '{}.{}'.format('scope-layer-histogram',
dataset_name))
pdf = True if args.save == 'pdf' else False
n_layers = spn.n_layers()
m_layer, n_layer = tiling_sizes(n_layers, args.n_cols_layer)
layer_scope_histogram(spn,
m_layer, n_layer,
save_path=sample_save_path,
pdf=pdf)
if args.activations:
assert len(args.activations) > 0
instance_id, filters = args.activations[0], args.activations[1:]
if len(args.activations) > 1:
filters_str = '-'.join(str(f) for f in filters)
sample_save_path = None
pdf = False
if args.save:
sample_save_path = os.path.join(args.output, '{}.{}.{}'.format(filters_str,
dataset_name,
SAMPLE_IMGS_EXT))
pdf = True if args.save == 'pdf' else False
if args.invert:
color_map = inv_binary_cmap
else:
color_map = binary_cmap
instance_id = int(instance_id)
instance = retrieve_instance((train, valid, test), instance_id)
#
# collecting one vis for each filter
visualizations = [array_2_mat(instance, n_rows, n_cols)]
cmaps = [color_map]
min_max_list = [(None, None)]
for node_filter in filters:
nodes = filter_nodes(spn, node_filter)
logging.info('Considering only {} nodes {}'.format(len(nodes), node_filter))
activations = node_activations_for_instance(spn, nodes, instance)
visualizations.append(array_2_mat(activations, n_rows, n_cols))
cmaps.append(matplotlib.cm.jet)
min_max_list.append((min(activations), max(activations)))
#
# marginals
# marg_instance = extract_features_all_marginals_spn(spn,
# instance.reshape(1, instance.shape[0]),
# feature_vals)
# marg_instance_ml = extract_features_all_marginals_ml(train,
# instance.reshape(1,
# instance.shape[0]),
# feature_vals)
# two_by_two_marg_masks = extract_features_marginalization_grid(n_rows, n_cols,
# 4, 4)
# two_by_two_marg_features = extract_feature_marginalization_from_masks(spn,
# instance.reshape(
# 1, instance.shape[0]),
# two_by_two_marg_masks)
# two_by_two_marg_instance = instance_from_disjoint_feature_masks(None,
# two_by_two_marg_masks,
# two_by_two_marg_features[0])
# print(two_by_two_marg_instance)
# print(two_by_two_marg_instance.shape)
# visualizations.append(array_2_mat(two_by_two_marg_instance, n_rows, n_cols))
# cmaps.append(matplotlib.cm.gray)
# marg_instance = marg_instance[0]
# visualizations.append(array_2_mat(marg_instance, n_rows, n_cols))
# cmaps.append(matplotlib.cm.gray)
# marg_instance_ml = marg_instance_ml[0]
# visualizations.append(array_2_mat(marg_instance_ml, n_rows, n_cols))
# cmaps.append(matplotlib.cm.gray)
else:
#
# default filters
sample_save_path = None
pdf = False
if args.save:
sample_save_path = os.path.join(args.output,
'{}.all.all-mean.sum.prod.marg-1-2.{}.{}'.format(instance_id,
dataset_name,
SAMPLE_IMGS_EXT))
pdf = True if args.save == 'pdf' else False
if args.invert:
color_map = inv_binary_cmap
else:
color_map = binary_cmap
norm = True
instance_id = int(instance_id)
instance = retrieve_instance((train, valid, test), instance_id)
#
# collecting one vis for each filter
visualizations = [array_2_mat(instance, n_rows, n_cols)]
cmaps = [color_map]
min_max_list = [(None, None)]
#
# all mean
nodes = filter_nodes(spn, 'all')
logging.info('Considering only all nodes {} (mean)'.format(len(nodes)))
activations = node_activations_for_instance(spn, nodes, instance, mean=True)
visualizations.append(array_2_mat(activations, n_rows, n_cols))
cmaps.append(matplotlib.cm.jet)
min_max_list.append((min(activations), max(activations)))
#
# all
nodes = filter_nodes(spn, 'all')
logging.info('Considering only all nodes {}'.format(len(nodes)))
activations = node_activations_for_instance(spn, nodes, instance)
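#
# the square root below (presumably) just compresses the dynamic range of the
# activation counts before plotting, so a few very active nodes do not
# dominate the color scale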
if norm:
activations = numpy.sqrt(activations)
visualizations.append(array_2_mat(activations, n_rows, n_cols))
cmaps.append(matplotlib.cm.jet)
min_all = min(activations)
max_all = max(activations)
min_max_list.append((min_all, max_all))
#
# sum
nodes = filter_nodes(spn, 'sum')
logging.info('Considering only sum nodes {}'.format(len(nodes)))
activations = node_activations_for_instance(spn, nodes, instance)
# if norm:
# activations = numpy.sqrt(activations)
visualizations.append(array_2_mat(activations, n_rows, n_cols))
cmaps.append(matplotlib.cm.jet)
min_sum = min(activations)
max_sum = max(activations)
#
# prod
nodes = filter_nodes(spn, 'prod')
logging.info('Considering only prod nodes {}'.format(len(nodes)))
activations = node_activations_for_instance(spn, nodes, instance)
visualizations.append(array_2_mat(activations, n_rows, n_cols))
cmaps.append(matplotlib.cm.jet)
min_prod = min(activations)
max_prod = max(activations)
#
# normalizing sum and prods
min_max_list.append((min(min_sum, min_prod), max(max_prod, max_sum)))
min_max_list.append((min(min_sum, min_prod), max(max_prod, max_sum)))
#
# marginal with mask
feature_mask = numpy.zeros(len(instance), dtype=bool)
feature_mask = random_rectangular_feature_mask(feature_mask,
n_rows, n_cols,
*args.mask_sizes)
inv_feature_mask = numpy.logical_not(feature_mask)
nodes = filter_nodes(spn, 'all')
logging.info('Considering only all nodes mask {}'.format(len(nodes)))
activations = node_activations_for_instance(spn,
nodes,
instance,
marg_mask=feature_mask)
if norm:
activations = numpy.sqrt(activations)
visualizations.append(array_2_mat(activations, n_rows, n_cols))
cmaps.append(matplotlib.cm.jet)
# min_mask = min(min(activations), min_mask)
# max_mask = max(max(activations), max_mask)
min_mask = min(activations)
max_mask = max(activations)
min_max_list.append((min_mask, max_mask))
nodes = filter_nodes(spn, 'all')
logging.info('Considering only all nodes inv mask {}'.format(len(nodes)))
activations = node_activations_for_instance(spn,
nodes,
instance,
marg_mask=inv_feature_mask)
if norm:
activations = numpy.sqrt(activations)
visualizations.append(array_2_mat(activations, n_rows, n_cols))
cmaps.append(matplotlib.cm.jet)
min_mask = min(min(activations), min_mask)
max_mask = max(max(activations), max_mask)
min_max_list.append((min_mask, max_mask))
nodes = filter_nodes(spn, 'all')
logging.info('Considering only all nodes all marg {}'.format(len(nodes)))
all_instance = numpy.zeros(len(instance), dtype=instance.dtype)
all_instance[:] = MARG_IND
activations = node_activations_for_instance(spn,
nodes,
all_instance,
marg_mask=feature_mask)
if norm:
activations = numpy.sqrt(activations)
visualizations.append(array_2_mat(activations, n_rows, n_cols))
cmaps.append(matplotlib.cm.jet)
min_mask = min(min(activations), min_mask)
max_mask = max(max(activations), max_mask)
min_max_list.append((min_mask, max_mask))
# nodes = filter_nodes(spn, 'all')
# logging.info('Considering only all nodes all marg {} (mean)'.format(len(nodes)))
# all_instance = numpy.zeros(len(instance), dtype=instance.dtype)
# all_instance[:] = MARG_IND
# activations = node_activations_for_instance(spn,
# nodes,
# all_instance,
# marg_mask=feature_mask,
# mean=True)
# # if norm:
# activations = numpy.sqrt(activations)
# visualizations.append(array_2_mat(activations, n_rows, n_cols))
# cmaps.append(matplotlib.cm.jet)
# # min_mask = min(min(activations), min_mask)
# # max_mask = max(max(activations), max_mask)
# print(min(activations), max(activations))
# min_max_list.append((None, None))
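#
# (presumed intent) rescale panel 2 -- the raw 'all' activations -- to share
# the color range of the masked panels computed above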
min_max_list[2] = (min_mask, max_mask)
m = 1
n = len(visualizations)
canvas_size = n * col_tile_size, m * row_tile_size
print(canvas_size)
plot_m_by_n_heatmaps(visualizations,
min_max_list,
m=m, n=n,
cmaps=cmaps,
colorbars=False,
fig_size=canvas_size,
w_space=w_space, h_space=h_space,
dpi=args.dpi,
save_path=sample_save_path,
pdf=pdf)
if args.marg_activations:
# instance_id, filters = args.marg_activations[0], args.marg_activations[1:]
instance_ids = args.marg_activations
sample_save_path = None
pdf = False
if args.save:
instance_ids_str = '-'.join(i for i in instance_ids)
sample_save_path = os.path.join(args.output, '{}.{}.marg'.format(instance_ids_str,
dataset_name))
pdf = True if args.save == 'pdf' else False
if args.invert:
color_map = inv_binary_cmap
else:
color_map = binary_cmap
visualizations = []
cmaps = []
min_max_list = []
min_list = []
max_list = []
# for spn in spns:
#
spn_1 = spns[0]
spn_2 = spns[1]
spn_3 = spns[2]
if 'ocr_letters' == dataset_name:
max_patch_res = 8
else:
max_patch_res = 7
for instance_id in instance_ids:
instance_id = int(instance_id)
instance = retrieve_instance((train, valid, test), instance_id)
# collecting one vis for each filter
visualizations.append(array_2_mat(instance, n_rows, n_cols))
cmaps.append(color_map)
min_max_list.append((None, None))
norm = True
# # all single marginals ml
# marg_instance_ml = extract_features_all_marginals_ml(train,
# instance.reshape(1,
# instance.shape[0]),
# feature_vals)
# marg_instance_ml = numpy.log(marg_instance_ml[0])
# visualizations.append(array_2_mat(marg_instance_ml, n_rows, n_cols))
# cmaps.append(matplotlib.cm.gray)
# min_marg_ml = min(marg_instance_ml)
# max_marg_ml = max(marg_instance_ml)
# print(min_marg_ml, max_marg_ml)
# all single marginals spn
marg_instance_spn = extract_features_all_marginals_spn(spn_1,
instance.reshape(
1, instance.shape[0]),
feature_vals)
marg_instance_spn = marg_instance_spn[0]
visualizations.append(array_2_mat(marg_instance_spn, n_rows, n_cols))
cmaps.append(matplotlib.cm.gray)
min_marg_spn = min(marg_instance_spn)
max_marg_spn = max(marg_instance_spn)
min_list.append(min_marg_spn)
max_list.append(max_marg_spn)
print(min_marg_spn, max_marg_spn)
marg_masks_2 = extract_features_marginalization_grid(n_rows, n_cols,
2, 2)
marg_features_2 = extract_feature_marginalization_from_masks(spn_1,
instance.reshape(1,
instance.shape[0]),
marg_masks_2)
marg_instance_2 = instance_from_disjoint_feature_masks(None,
marg_masks_2,
marg_features_2[0])
# print(two_by_two_marg_instance)
# print(two_by_two_marg_instance.shape)
visualizations.append(array_2_mat(marg_instance_2, n_rows, n_cols))
cmaps.append(matplotlib.cm.gray)
min_marg_2 = min(marg_instance_2)
max_marg_2 = max(marg_instance_2)
min_list.append(min_marg_2)
max_list.append(max_marg_2)
print(min_marg_2, max_marg_2)
marg_masks_4 = extract_features_marginalization_grid(n_rows, n_cols,
4, 4)
marg_features_4 = extract_feature_marginalization_from_masks(spn_1,
instance.reshape(1,
instance.shape[0]),
marg_masks_4)
marg_instance_4 = instance_from_disjoint_feature_masks(None,
marg_masks_4,
marg_features_4[0])
# print(two_by_two_marg_instance)
# print(two_by_two_marg_instance.shape)
visualizations.append(array_2_mat(marg_instance_4, n_rows, n_cols))
cmaps.append(matplotlib.cm.gray)
min_marg_4 = min(marg_instance_4)
max_marg_4 = max(marg_instance_4)
min_list.append(min_marg_4)
max_list.append(max_marg_4)
print(min_marg_4, max_marg_4)
marg_masks_7 = extract_features_marginalization_grid(n_rows, n_cols,
max_patch_res, max_patch_res)
marg_features_7 = extract_feature_marginalization_from_masks(spn_1,
instance.reshape(1,
instance.shape[0]),
marg_masks_7)
marg_instance_7 = instance_from_disjoint_feature_masks(None,
marg_masks_7,
marg_features_7[0])
# print(two_by_two_marg_instance)
# print(two_by_two_marg_instance.shape)
visualizations.append(array_2_mat(marg_instance_7, n_rows, n_cols))
cmaps.append(matplotlib.cm.gray)
min_marg_7 = min(marg_instance_7)
max_marg_7 = max(marg_instance_7)
min_list.append(min_marg_7)
max_list.append(max_marg_7)
print(min_marg_7, max_marg_7)
marg_masks_7 = extract_features_marginalization_grid(n_rows, n_cols,
max_patch_res, max_patch_res)
marg_features_7 = extract_feature_marginalization_from_masks(spn_2,
instance.reshape(1,
instance.shape[0]),
marg_masks_7)
marg_instance_7 = instance_from_disjoint_feature_masks(None,
marg_masks_7,
marg_features_7[0])
# print(two_by_two_marg_instance)
# print(two_by_two_marg_instance.shape)
visualizations.append(array_2_mat(marg_instance_7, n_rows, n_cols))
cmaps.append(matplotlib.cm.gray)
min_marg_7 = min(marg_instance_7)
max_marg_7 = max(marg_instance_7)
min_list.append(min_marg_7)
max_list.append(max_marg_7)
print(min_marg_7, max_marg_7)
marg_masks_7 = extract_features_marginalization_grid(n_rows, n_cols,
max_patch_res, max_patch_res)
marg_features_7 = extract_feature_marginalization_from_masks(spn_3,
instance.reshape(1,
instance.shape[0]),
marg_masks_7)
marg_instance_7 = instance_from_disjoint_feature_masks(None,
marg_masks_7,
marg_features_7[0])
# print(two_by_two_marg_instance)
# print(two_by_two_marg_instance.shape)
visualizations.append(array_2_mat(marg_instance_7, n_rows, n_cols))
cmaps.append(matplotlib.cm.gray)
min_marg_7 = min(marg_instance_7)
max_marg_7 = max(marg_instance_7)
min_list.append(min_marg_7)
max_list.append(max_marg_7)
print(min_marg_7, max_marg_7)
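#
# the three blocks above repeat the same grid-marginalization pattern for
# spn_1, spn_2 and spn_3; a hypothetical helper factoring it out could look
# like this (names are illustrative only, kept commented out on purpose):
#
# def grid_marg_panel(model, instance, res):
#     masks = extract_features_marginalization_grid(n_rows, n_cols, res, res)
#     feats = extract_feature_marginalization_from_masks(model,
#                                                        instance.reshape(1, -1),
#                                                        masks)
#     return instance_from_disjoint_feature_masks(None, masks, feats[0])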
min_all = min(min_list)
max_all = max(max_list)
min_max_list = [(min_all, max_all)] * (len(visualizations))
# n_models = len(spns)
# m = n_models
# n = len(visualizations) // n_models
m = len(instance_ids)
n = len(visualizations) // m
logging.info('Printing {} x {}'.format(m, n))
for i in range(len(instance_ids)):
min_max_list[i * n] = (None, None)
# for i in range(n_models):
# min_max_list[n * i] = (None, None)
# min_max_list[0] = (None, None)
canvas_size = n * col_tile_size, m * row_tile_size
print(canvas_size)
plot_m_by_n_heatmaps(visualizations,
min_max_list,
m=m, n=n,
cmaps=cmaps,
colorbars=False,
fig_size=canvas_size,
w_space=w_space, h_space=h_space,
dpi=args.dpi,
save_path=sample_save_path,
pdf=pdf)
if args.lines:
# import seaborn
from matplotlib.backends.backend_pdf import PdfPages
# pyplot.style.use('ggplot')
import seaborn
# seaborn.set_style('white')
# seaborn.set_context(rc={'lines.markeredgewidth': 0.1})
# seaborn.set_context('poster')
seaborn.set_style('white')
seaborn.set_context('poster', font_scale=1.8)
logging.info('Visualizing accuracy lines in {}'.format(args.lines))
#
# reading them from file
lines = numpy.loadtxt(args.lines, delimiter='\t')
n_series = lines.shape[1]
n_obs = lines.shape[0]
x_axis = numpy.arange(100, 1001, 100)
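#
# NOTE: the x axis is hardcoded to 10 points (100, 200, ..., 1000 features);
# this assumes the loaded file provides exactly n_obs = 10 rows per series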
names = ['SPN-I', 'SPN-II', 'SPN-III', 'MT-I', 'MT-II', 'MT-III']
colors = seaborn.color_palette("husl", 6)
# colors = ['red', 'green', 'blue', 'cyan', 'magenta', 'yellow']
line_styles = ['dotted', '--', '-', 'dotted', '--', '-']
markers = ['o', 'o', 'o', 'x', 'x', 'x']
markersizes = [8., 8., 8., 12., 12., 12.]
linewidths = [4., 2.5, 2.5, 4., 2.5, 2.5]
logging.info('There are {} series with {} obs'.format(n_series, n_obs))
sample_save_path = None
pdf = False
if args.save:
sample_save_path = os.path.join(args.output, 'lines.{}'.format(dataset_name))
pdf = True if args.save == 'pdf' else False
# matplotlib.rcParams.update({'font.size': 52})
fig_size = args.fig_size
fig, ax = pyplot.subplots(figsize=fig_size, dpi=args.dpi)
for i in range(n_series):
pyplot.plot(x_axis, lines[:, i],
label=names[i],
linestyle=line_styles[i],
linewidth=linewidths[i],
markersize=markersizes[i],
markeredgewidth=1,
markeredgecolor=colors[i],
marker=markers[i],
color=colors[i])
legend = ax.legend(names, loc='lower right')
pyplot.xlabel('# features')
pyplot.ylabel('test accuracy')
if sample_save_path:
fig.savefig(sample_save_path + '.svg')
if pdf:
pp = PdfPages(sample_save_path + '.pdf')
pp.savefig(fig)
pp.close()
pyplot.show()
if args.hid_groups:
sample_save_path = None
pdf = False
if args.save:
sample_save_path = os.path.join(args.output, 'lines.{}'.format(dataset_name))
pdf = True if args.save == 'pdf' else False
if args.invert:
color_map = inv_binary_cmap
else:
color_map = binary_cmap
assert len(args.hid_groups) > 1
repr_data_path, *group_ids = args.hid_groups
group_ids = [int(gid) for gid in group_ids]
logging.info('Visualizing groups from repr data: {}'.format(repr_data_path))
with open(repr_data_path, 'rb') as gfile:
repr_train, repr_valid, repr_test = pickle.load(gfile)
#
# freeing memory
repr_train = repr_train.astype(numpy.int8)
repr_valid = None
repr_test = None
logging.info('Visualizing instances from groups {}'.format(group_ids))
#
# processing the training set
ext_s_t = perf_counter()
group_train = extract_instances_groups(repr_train)
ext_e_t = perf_counter()
n_groups = group_train.shape[1]
logging.info('Found {} groups in {} secs'.format(n_groups,
ext_e_t - ext_s_t))
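#
# group_train is assumed to be an (n_instances x n_groups) membership matrix:
# column g flags the training instances falling into hidden group g (see how
# it is sliced below)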
repr_train = None
if len(group_ids) == 1 and group_ids[0] == -1:
group_ids = numpy.arange(n_groups)
#
# now extracting the groups
for gid in group_ids:
sample_save_path = None
pdf = False
if args.save:
sample_save_path = os.path.join(args.output, '{}.{}'.format(gid,
dataset_name))
pdf = True if args.save == 'pdf' else False
logging.info('Considering group {}'.format(gid))
instance_map = group_train[:, gid].astype(bool)
assert len(instance_map) == train.shape[0]
logging.info('There are {} images in group'.format(sum(instance_map)))
#
# getting instances
instances = train[instance_map, :]
print(instances.shape)
image_matrixes = [array_2_mat(img, n_rows, n_cols) for img in instances]
#
# shuffling?
random.shuffle(image_matrixes)
n_images = min(len(image_matrixes), args.max_n_images)
m, n = tiling_sizes(n_images, args.n_cols)
canvas_size = m * row_tile_size, n * col_tile_size
logging.info('Displaying {} x {} images on canvas {}'.format(m, n, canvas_size))
plot_m_by_n_images(image_matrixes[:n_images],
m, n,
fig_size=canvas_size,
cmap=color_map,
w_space=w_space, h_space=h_space,
dpi=args.dpi,
save_path=sample_save_path,
pdf=pdf)
| 59,334 | 41.748559 | 114 | py | spyn-repr | spyn-repr-master/bin/spn_repr_data.py |
import sys
sys.setrecursionlimit(1000000000)
import argparse
try:
from time import perf_counter
except ImportError:
from time import time
perf_counter = time
import dataset
import numpy
import datetime
import os
import logging
from spn.utils import stats_format
from spn import MARG_IND
from spn.linked.representation import extract_features_nodes_mpe
from spn.linked.representation import node_in_path_feature
from spn.linked.representation import acc_node_in_path_feature
from spn.linked.representation import filter_non_sum_nodes
from spn.linked.representation import max_child_id_feature
from spn.linked.representation import max_hidden_var_feature, filter_hidden_var_nodes
from spn.linked.representation import hidden_var_val, hidden_var_log_val
from spn.linked.representation import max_hidden_var_val, max_hidden_var_log_val
from spn.linked.representation import extract_features_nodes
from spn.linked.representation import child_var_val, child_var_log_val
from spn.linked.representation import var_val, var_log_val
from spn.linked.representation import filter_non_leaf_nodes
from spn.linked.representation import filter_all_nodes
from spn.linked.representation import extract_feature_marginalization_from_masks
from spn.linked.representation import extract_feature_marginalization_from_masks_theanok
from spn.linked.representation import extract_feature_marginalization_from_masks_opt_unique
from spn.linked.representation import extract_feature_marginalization_from_masks_theanok_opt_unique
from spn.linked.representation import extract_features_marginalization_rand
from spn.linked.representation import extract_features_marginalization_rectangles
from spn.linked.representation import extract_features_all_marginals_spn
from spn.linked.representation import extract_features_all_marginals_ml
from spn.linked.representation import all_single_marginals_ml
from spn.linked.representation import all_single_marginals_spn
from spn.linked.representation import extract_features_node_activations
from spn.linked.representation import load_features_from_file
from spn.factory import build_theanok_spn_from_block_linked
from spn.theanok.spn import BlockLayeredSpn
import pickle
PREDS_EXT = 'lls'
TRAIN_PREDS_EXT = 'train.{}'.format(PREDS_EXT)
VALID_PREDS_EXT = 'valid.{}'.format(PREDS_EXT)
TEST_PREDS_EXT = 'test.{}'.format(PREDS_EXT)
DATA_EXT = 'data'
TRAIN_DATA_EXT = 'train.{}'.format(DATA_EXT)
VALID_DATA_EXT = 'valid.{}'.format(DATA_EXT)
TEST_DATA_EXT = 'test.{}'.format(DATA_EXT)
THEANO_MODEL_EXT = 'theano_model'
PICKLE_SPLIT_EXT = 'pickle'
FEATURE_FILE_EXT = 'features'
INFO_FILE_EXT = 'features.info'
SCOPE_FILE_EXT = 'scopes'
RETRIEVE_FUNC_DICT = {
'in-path': node_in_path_feature,
'acc-path': acc_node_in_path_feature,
'max-var': max_hidden_var_feature,
'hid-val': hidden_var_val,
'hid-log-val': hidden_var_log_val,
'ch-val': child_var_val,
'ch-log-val': child_var_log_val,
'var-val': var_val,
'var-log-val': var_log_val
}
FILTER_FUNC_DICT = {
'non-lea': filter_non_leaf_nodes,
'non-sum': filter_non_sum_nodes,
'hid-var': filter_hidden_var_nodes,
'all': filter_all_nodes
}
DTYPE_DICT = {
'int': numpy.int32,
'float': numpy.float32,
'float.8': numpy.float32,
}
FMT_DICT = {
'int': '%d',
'float': '%.18e',
'float.8': '%.8e',
}
from spn.linked.nodes import SumNode
from spn.linked.nodes import ProductNode
def filter_sum_nodes(spn):
return [node for node in spn.top_down_nodes() if isinstance(node, SumNode)]
def filter_product_nodes(spn):
return [node for node in spn.top_down_nodes() if isinstance(node, ProductNode)]
def filter_leaf_nodes(spn):
return [node for node in spn.top_down_nodes()
if not isinstance(node, ProductNode) and not isinstance(node, SumNode)]
def filter_nodes_by_layer(spn, layer_id):
return [node for i, layer in enumerate(spn.bottom_up_layers())
for node in layer.nodes() if layer_id == i]
def filter_nodes_by_scope_length(spn, min_scope_len, max_scope_len):
return [node for node in spn.top_down_nodes()
if ((hasattr(node, 'var_scope') and
len(node.var_scope) >= min_scope_len and
len(node.var_scope) < max_scope_len)
or
(hasattr(node, 'var') and
len(node.var) >= min_scope_len and
len(node.var) < max_scope_len))]
def filter_nodes(spn, filter_str):
nodes = None
if filter_str == 'all':
nodes = list(spn.top_down_nodes())
elif filter_str == 'sum':
nodes = filter_sum_nodes(spn)
elif filter_str == 'prod':
nodes = filter_product_nodes(spn)
elif filter_str == 'leaves':
nodes = filter_leaf_nodes(spn)
elif 'layer' in filter_str:
layer_id = int(filter_str.replace('layer', ''))
nodes = filter_nodes_by_layer(spn, layer_id)
elif 'scope' in filter_str:
scope_ids = filter_str.replace('scope', '')
min_scope, max_scope = scope_ids.split(',')
min_scope, max_scope = int(min_scope), int(max_scope)
nodes = filter_nodes_by_scope_length(spn, min_scope, max_scope)
return nodes
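#
# usage sketch (illustrative values): filter_nodes(spn, 'sum') returns all sum
# nodes, filter_nodes(spn, 'layer2') the nodes in layer 2, and
# filter_nodes(spn, 'scope10,50') the nodes whose scope length lies in [10, 50)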
def evaluate_on_dataset(spn, data):
n_instances = data.shape[0]
pred_lls = numpy.zeros(n_instances)
for i, instance in enumerate(data):
(pred_ll, ) = spn.single_eval(instance)
pred_lls[i] = pred_ll
return pred_lls
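#
# a small convenience sketch built on evaluate_on_dataset: the average
# log-likelihood over a dataset (illustrative helper, not used below)
def evaluate_avg_ll(spn, data):
    # mean of the per-instance log-likelihoods
    return numpy.mean(evaluate_on_dataset(spn, data))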
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("dataset", type=str,
help='Dataset dir')
parser.add_argument('--train-ext', type=str,
help='Training set name regex')
parser.add_argument('--valid-ext', type=str,
help='Validation set name regex')
parser.add_argument('--test-ext', type=str,
help='Test set name regex')
parser.add_argument('--model', type=str,
help='Spn model file path')
parser.add_argument('--seed', type=int, nargs='?',
default=1337,
help='Seed for the random generator')
parser.add_argument('-o', '--output', type=str, nargs='?',
default='./data/repr/',
help='Output dir path')
parser.add_argument('--ret-func', type=str, nargs='?',
default='max-var',
help='Node value retrieve func in creating representations')
parser.add_argument('--filter-func', type=str, nargs='?',
default='hid-var',
help='Node filter func in creating representations')
parser.add_argument('--suffix', type=str,
help='Dataset output suffix')
parser.add_argument('--node-activations', type=str, nargs='+',
default=None,
help='Node activation features: a node filter string plus an optional mean flag (0/1)')
parser.add_argument('--sep', type=str, nargs='?',
default=',',
help='Dataset output separator')
parser.add_argument('--fmt', type=str, nargs='?',
default='int',
help='Dataset output number formatter')
parser.add_argument('--shuffle-ext', type=int, nargs='?',
default=None,
help='Number of times to shuffle the stacked feature columns')
parser.add_argument('--theano', type=int, nargs='?',
default=None,
help='Whether to use theano for marginal feature eval (batch size)')
parser.add_argument('--max-nodes-layer', type=int,
default=None,
help='Max number of nodes per layer in a theano representation')
# parser.add_argument('--rand-marg-rect', type=int, nargs='+',
# default=None,
# help='Generating features by marginalization over random rectangles')
# parser.add_argument('--rand-marg', type=int, nargs='+',
# default=None,
# help='Generating features by marginalization over random subsets')
parser.add_argument('--features', type=str, nargs='?',
default=None,
help='Loading feature masks from file')
parser.add_argument('--no-ext', action='store_true',
help='Whether to keep only the new representation, without concatenating it to the original dataset')
parser.add_argument('--save-features', action='store_true',
help='Saving the generated features')
parser.add_argument('--save-text', action='store_true',
help='Saving the repr data to text as well')
parser.add_argument('--rand-features', type=float, nargs='+',
default=None,
help='Using only random features, generated as a binomial with param p')
parser.add_argument('--no-mpe', action='store_true',
help='Whether not to use MPE inference in the upward pass')
parser.add_argument('--sing-marg', action='store_true',
help='Whether to evaluate all single marginals')
parser.add_argument('--sing-marg-ml', action='store_true',
help='Whether to evaluate all single marginals with ML estimator')
parser.add_argument('--alpha', type=float,
default=0.0,
help='Smoothing parameter')
parser.add_argument('--opt-unique', action='store_true',
help='Whether to activate the unique patches opt while computing marg features')
parser.add_argument('-v', '--verbose', type=int, nargs='?',
default=1,
help='Verbosity level')
#
# parsing the args
args = parser.parse_args()
#
# fixing a seed
rand_gen = numpy.random.RandomState(args.seed)
os.makedirs(args.output, exist_ok=True)
#
# setting verbosity level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbose == 2:
logging.basicConfig(level=logging.DEBUG)
logging.info("Starting with arguments:\n%s", args)
#
# loading dataset splits
logging.info('Loading datasets: %s', args.dataset)
dataset_path = args.dataset
train, valid, test = dataset.load_dataset_splits(dataset_path,
filter_regex=[args.train_ext,
args.valid_ext,
args.test_ext])
dataset_name = args.train_ext.split('.')[0]
n_instances = train.shape[0]
n_test_instances = test.shape[0]
logging.info('\ttrain: {}\n\tvalid: {}\n\ttest: {}'.format(train.shape,
valid.shape,
test.shape))
freqs, feature_vals = dataset.data_2_freqs(train)
dtype = DTYPE_DICT[args.fmt]
repr_train = None
repr_valid = None
repr_test = None
if args.features:
logging.info('\nLoading spn model from: {}'.format(args.model))
spn = None
with open(args.model, 'rb') as model_file:
load_start_t = perf_counter()
spn = pickle.load(model_file)
load_end_t = perf_counter()
logging.info('done in {}'.format(load_end_t - load_start_t))
#
# loading features from file
feature_file_path = args.features
feature_masks = load_features_from_file(feature_file_path)
logging.info('Loaded {} feature masks from {}'.format(len(feature_masks),
feature_file_path))
if args.theano is not None:
train_out_path = os.path.join(args.output, '{}.{}'.format(args.suffix, args.train_ext))
valid_out_path = os.path.join(args.output, '{}.{}'.format(args.suffix, args.valid_ext))
test_out_path = os.path.join(args.output, '{}.{}'.format(args.suffix, args.test_ext))
#
# if it is 0 then we set it to None to evaluate it in a single batch
batch_size = args.theano if args.theano > 0 else None
logging.info('Evaluation with theano')
feat_s_t = perf_counter()
ind_train = dataset.one_hot_encoding(train, feature_vals)
feat_e_t = perf_counter()
logging.info('Train one hot encoding done in {}'.format(feat_e_t - feat_s_t))
feat_s_t = perf_counter()
ind_valid = dataset.one_hot_encoding(valid, feature_vals)
feat_e_t = perf_counter()
logging.info('Valid one hot encoding done in {}'.format(feat_e_t - feat_s_t))
feat_s_t = perf_counter()
ind_test = dataset.one_hot_encoding(test, feature_vals)
feat_e_t = perf_counter()
logging.info('Test one hot encoding done in {}'.format(feat_e_t - feat_s_t))
theano_model_path = os.path.join(args.output,
'{}.{}.{}'.format(args.suffix,
dataset_name,
THEANO_MODEL_EXT))
theanok_spn = None
logging.info('Looking for theano spn model in {}'.format(theano_model_path))
if os.path.exists(theano_model_path):
logging.info('Loading theanok pickle model')
with open(theano_model_path, 'rb') as mfile:
pic_s_t = perf_counter()
theanok_spn = BlockLayeredSpn.load(mfile)
pic_e_t = perf_counter()
logging.info('\tLoaded in {} secs'.format(pic_e_t - pic_s_t))
else:
feat_s_t = perf_counter()
theanok_spn = build_theanok_spn_from_block_linked(spn,
ind_train.shape[1],
feature_vals,
max_n_nodes_layer=args.max_nodes_layer)
feat_e_t = perf_counter()
logging.info('Spn transformed in theano in {}'.format(feat_e_t - feat_s_t))
with open(theano_model_path, 'wb') as mfile:
pic_s_t = perf_counter()
print('rec lim', sys.getrecursionlimit())
theanok_spn.dump(mfile)
pic_e_t = perf_counter()
logging.info('Serialized into {}\n\tdone in {}'.format(theano_model_path,
pic_e_t - pic_s_t))
extract_feature_func = None
if args.opt_unique:
logging.info('Using unique opt')
extract_feature_func = extract_feature_marginalization_from_masks_theanok_opt_unique
else:
extract_feature_func = extract_feature_marginalization_from_masks_theanok
logging.info('\nConverting training set')
feat_s_t = perf_counter()
repr_train = extract_feature_func(theanok_spn,
ind_train,
feature_masks,
feature_vals=feature_vals,
batch_size=batch_size,
marg_value=MARG_IND,
# rand_gen=rand_gen,
dtype=float)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
#
# saving it to disk asap
logging.info('\nSaving training set to: {}'.format(train_out_path))
numpy.savetxt(train_out_path, repr_train, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
logging.info('\nConverting validation set')
feat_s_t = perf_counter()
repr_valid = extract_feature_func(theanok_spn,
ind_valid,
feature_masks,
feature_vals=feature_vals,
batch_size=batch_size,
marg_value=MARG_IND,
# rand_gen=rand_gen,
dtype=float)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
logging.info('Saving validation set to: {}'.format(valid_out_path))
numpy.savetxt(valid_out_path, repr_valid, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
logging.info('\nConverting test set')
feat_s_t = perf_counter()
repr_test = extract_feature_func(theanok_spn,
ind_test,
feature_masks,
feature_vals=feature_vals,
batch_size=batch_size,
marg_value=MARG_IND,
# rand_gen=rand_gen,
dtype=float)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
logging.info('Saving test set to: {}'.format(test_out_path))
numpy.savetxt(test_out_path, repr_test, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
else:
extract_feature_func = None
if args.opt_unique:
logging.info('Using unique opt')
extract_feature_func = extract_feature_marginalization_from_masks_opt_unique
else:
extract_feature_func = extract_feature_marginalization_from_masks
logging.info('\nConverting training set')
feat_s_t = perf_counter()
repr_train = extract_feature_func(spn,
train,
feature_masks,
marg_value=MARG_IND,
# rand_gen=rand_gen,
dtype=float)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
logging.info('Converting validation set')
feat_s_t = perf_counter()
repr_valid = extract_feature_func(spn,
valid,
feature_masks,
marg_value=MARG_IND,
# rand_gen=rand_gen,
dtype=float)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
logging.info('Converting test set')
feat_s_t = perf_counter()
repr_test = extract_feature_func(spn,
test,
feature_masks,
marg_value=MARG_IND,
# rand_gen=rand_gen,
dtype=float)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
elif args.rand_features is not None:
rand_n_features, rand_perc = args.rand_features
rand_n_features = int(rand_n_features)
logging.info('\nGenerating {0} random features (with perc {1})'.format(rand_n_features,
rand_perc))
#
# adding random features
repr_train = dataset.random_binary_dataset(train.shape[0],
rand_n_features,
perc=rand_perc,
rand_gen=rand_gen)
repr_valid = dataset.random_binary_dataset(valid.shape[0],
rand_n_features,
perc=rand_perc,
rand_gen=rand_gen)
repr_test = dataset.random_binary_dataset(test.shape[0],
rand_n_features,
perc=rand_perc,
rand_gen=rand_gen)
elif args.sing_marg:
logging.info('\nLoading spn model from: {}'.format(args.model))
spn = None
with open(args.model, 'rb') as model_file:
load_start_t = perf_counter()
spn = pickle.load(model_file)
load_end_t = perf_counter()
logging.info('done in {}'.format(load_end_t - load_start_t))
logging.info('Extracting single marginals')
all_marginals = all_single_marginals_spn(spn,
feature_vals,
dtype=numpy.int32)
logging.info('Converting train set')
feat_s_t = perf_counter()
repr_train = extract_features_all_marginals_spn(spn,
train,
feature_vals,
all_marginals,
dtype=numpy.int32)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
logging.info('Converting valid set')
feat_s_t = perf_counter()
repr_valid = extract_features_all_marginals_spn(spn,
valid,
feature_vals,
all_marginals,
dtype=numpy.int32)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
logging.info('Converting test set')
feat_s_t = perf_counter()
repr_test = extract_features_all_marginals_spn(spn,
test,
feature_vals,
all_marginals,
dtype=numpy.int32)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
elif args.sing_marg_ml:
logging.info('Extracting single marginals with an ML estimator')
alpha = args.alpha
all_marginals = all_single_marginals_ml(train,
feature_vals,
alpha=alpha)
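#
# alpha here is (presumably) a Laplace/additive smoothing term, i.e. each
# single marginal is estimated as (count + alpha) / (N + alpha * n_vals)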
logging.info('Converting train set')
feat_s_t = perf_counter()
repr_train = extract_features_all_marginals_ml(None,
train,
feature_vals,
alpha=alpha,
all_marginals=all_marginals,
dtype=numpy.int32)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
logging.info('Converting valid set')
feat_s_t = perf_counter()
repr_valid = extract_features_all_marginals_ml(None,
valid,
feature_vals,
alpha=alpha,
all_marginals=all_marginals,
dtype=numpy.int32)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
logging.info('Converting test set')
feat_s_t = perf_counter()
repr_test = extract_features_all_marginals_ml(None,
test,
feature_vals,
alpha=alpha,
all_marginals=all_marginals,
dtype=numpy.int32)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
elif args.node_activations:
logging.info('\nLoading spn model from: {}'.format(args.model))
spn = None
with open(args.model, 'rb') as model_file:
load_start_t = perf_counter()
spn = pickle.load(model_file)
load_end_t = perf_counter()
logging.info('done in {}'.format(load_end_t - load_start_t))
logging.info('Extracting node activations features')
node_filter_str = args.node_activations[0]
mean = False
if len(args.node_activations) > 1:
mean = bool(int(args.node_activations[1]))
logging.info('Using mean: {}'.format(mean))
nodes = filter_nodes(spn, node_filter_str)
logging.info('Considering nodes: {} ({})'.format(node_filter_str, len(nodes)))
logging.info('Converting train set')
feat_s_t = perf_counter()
repr_train = extract_features_node_activations(spn,
nodes,
train,
marg_mask=None,
mean=mean,
log=False,
hard=False,
dtype=float)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
logging.info('Converting valid set')
feat_s_t = perf_counter()
repr_valid = extract_features_node_activations(spn,
nodes,
valid,
marg_mask=None,
mean=mean,
log=False,
hard=False,
dtype=float)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
logging.info('Converting test set')
feat_s_t = perf_counter()
repr_test = extract_features_node_activations(spn,
nodes,
test,
marg_mask=None,
mean=mean,
log=False,
hard=False,
dtype=float)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
else:
logging.info('Eval repr')
logging.info('\nLoading spn model from: {}'.format(args.model))
spn = None
with open(args.model, 'rb') as model_file:
load_start_t = perf_counter()
spn = pickle.load(model_file)
load_end_t = perf_counter()
logging.info('done in {}'.format(load_end_t - load_start_t))
ret_func = RETRIEVE_FUNC_DICT[args.ret_func]
filter_func = FILTER_FUNC_DICT[args.filter_func]
extract_repr_func = None
if args.no_mpe:
extract_repr_func = extract_features_nodes
else:
extract_repr_func = extract_features_nodes_mpe
feature_info_path = os.path.join(args.output, '{}.{}.{}'.format(args.suffix,
dataset_name,
INFO_FILE_EXT))
logging.info('Using function {}'.format(extract_repr_func))
logging.info('\nConverting training set')
feat_s_t = perf_counter()
repr_train = extract_repr_func(spn,
train,
filter_node_func=filter_func,
retrieve_func=ret_func,
remove_zero_features=False,
output_feature_info=feature_info_path,
dtype=dtype,
verbose=False)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
logging.info('Converting validation set')
feat_s_t = perf_counter()
repr_valid = extract_repr_func(spn,
valid,
filter_node_func=filter_func,
retrieve_func=ret_func,
remove_zero_features=False,
output_feature_info=None,
dtype=dtype,
verbose=False)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
logging.info('Converting test set')
feat_s_t = perf_counter()
repr_test = extract_repr_func(spn,
test,
filter_node_func=filter_func,
retrieve_func=ret_func,
remove_zero_features=False,
output_feature_info=None,
dtype=dtype,
verbose=False)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
assert train.shape[0] == repr_train.shape[0]
assert valid.shape[0] == repr_valid.shape[0]
assert test.shape[0] == repr_test.shape[0]
logging.info('New shapes {0} {1} {2}'.format(repr_train.shape,
repr_valid.shape,
repr_test.shape))
assert repr_train.shape[1] == repr_valid.shape[1]
assert repr_valid.shape[1] == repr_test.shape[1]
#
# shuffling?
if args.shuffle_ext is not None:
logging.info('\n\nShuffling data features')
#
# shuffling k times
for k in range(args.shuffle_ext):
repr_train = dataset.shuffle_columns(repr_train, rand_gen)
repr_valid = dataset.shuffle_columns(repr_valid, rand_gen)
repr_test = dataset.shuffle_columns(repr_test, rand_gen)
# repr_train = dataset.shuffle_columns(repr_train, numpy_rand_gen)
# repr_valid = dataset.shuffle_columns(repr_valid, numpy_rand_gen)
# repr_test = dataset.shuffle_columns(repr_test, numpy_rand_gen)
assert train.shape[0] == repr_train.shape[0]
assert valid.shape[0] == repr_valid.shape[0]
assert test.shape[0] == repr_test.shape[0]
logging.info('Shape checking {0} {1} {2}\n'.format(repr_train.shape,
repr_valid.shape,
repr_test.shape))
#
# extending the original dataset
ext_train = None
ext_valid = None
ext_test = None
if args.no_ext:
ext_train = repr_train
ext_valid = repr_valid
ext_test = repr_test
else:
logging.info('\nConcatenating datasets')
ext_train = numpy.concatenate((train, repr_train), axis=1)
ext_valid = numpy.concatenate((valid, repr_valid), axis=1)
ext_test = numpy.concatenate((test, repr_test), axis=1)
assert train.shape[0] == ext_train.shape[0]
assert valid.shape[0] == ext_valid.shape[0]
assert test.shape[0] == ext_test.shape[0]
assert ext_train.shape[1] == train.shape[1] + repr_train.shape[1]
assert ext_valid.shape[1] == valid.shape[1] + repr_valid.shape[1]
assert ext_test.shape[1] == test.shape[1] + repr_test.shape[1]
logging.info('New shapes {0} {1} {2}'.format(ext_train.shape,
ext_valid.shape,
ext_test.shape))
#
# storing them
if args.save_text:
train_out_path = os.path.join(args.output, '{}.{}'.format(args.suffix, args.train_ext))
valid_out_path = os.path.join(args.output, '{}.{}'.format(args.suffix, args.valid_ext))
test_out_path = os.path.join(args.output, '{}.{}'.format(args.suffix, args.test_ext))
logging.info('\nSaving training set to: {}'.format(train_out_path))
numpy.savetxt(train_out_path, ext_train, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
logging.info('Saving validation set to: {}'.format(valid_out_path))
numpy.savetxt(valid_out_path, ext_valid, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
logging.info('Saving test set to: {}'.format(test_out_path))
numpy.savetxt(test_out_path, ext_test, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
#
# saving in pickle
split_file_path = os.path.join(args.output, '{}.{}.{}'.format(args.suffix,
dataset_name,
PICKLE_SPLIT_EXT))
logging.info('Saving pickle data splits to: {}'.format(split_file_path))
with open(split_file_path, 'wb') as split_file:
pickle.dump((ext_train, ext_valid, ext_test), split_file, protocol=4)
| 35,122 | 41.990208 | 105 | py | spyn-repr | spyn-repr-master/bin/merge_repr.py |
from spn.linked.representation import load_features_from_file
from spn.linked.representation import save_features_to_file
import numpy
from numpy.testing import assert_array_equal
import os
import logging
import argparse
import pickle
DATA_EXT = 'data'
TRAIN_DATA_EXT = 'ts.{}'.format(DATA_EXT)
VALID_DATA_EXT = 'valid.{}'.format(DATA_EXT)
TEST_DATA_EXT = 'test.{}'.format(DATA_EXT)
SPLITS_EXT = [TRAIN_DATA_EXT,
VALID_DATA_EXT,
TEST_DATA_EXT]
PICKLE_SPLIT_EXT = 'pickle'
FEATURE_FILE_EXT = 'features'
INFO_FILE_EXT = 'features.info'
SCOPE_FILE_EXT = 'scopes'
FMT_DICT = {
'int': '%d',
'float': '%.18e',
'float.8': '%.8e',
}
def merging_features(feature_base_paths,
dtype=float):
train_splits = []
valid_splits = []
test_splits = []
for feature_base_path in feature_base_paths:
logging.info('Considering path {}'.format(feature_base_path))
#
# check for a single pickle file first
pickle_path = '{}.{}'.format(feature_base_path,
PICKLE_SPLIT_EXT)
train = None
valid = None
test = None
print('Looking for {}'.format(pickle_path))
if os.path.exists(pickle_path):
logging.info('Loading from pickle {}'.format(pickle_path))
with open(pickle_path, 'rb') as split_file:
train, valid, test = pickle.load(split_file)
else:
repr_splits = []
for s in SPLITS_EXT:
split_path = '{}.{}'.format(feature_base_path, s)
repr_splits.append(numpy.loadtxt(split_path, dtype=dtype, delimiter=','))
train, valid, test = repr_splits
train_splits.append(train)
valid_splits.append(valid)
test_splits.append(test)
#
# composing them
train_n_features = train_splits[0].shape[1]
for s in train_splits[1:]:
assert train_splits[0].shape[0] == s.shape[0]
train_n_features += s.shape[1]
valid_n_features = valid_splits[0].shape[1]
for s in valid_splits[1:]:
assert valid_splits[0].shape[0] == s.shape[0]
valid_n_features += s.shape[1]
test_n_features = test_splits[0].shape[1]
for s in test_splits[1:]:
assert test_splits[0].shape[0] == s.shape[0]
test_n_features += s.shape[1]
ext_train = numpy.concatenate(train_splits, axis=1)
ext_valid = numpy.concatenate(valid_splits, axis=1)
ext_test = numpy.concatenate(test_splits, axis=1)
assert ext_train.shape[0] == train_splits[0].shape[0]
assert ext_valid.shape[0] == valid_splits[0].shape[0]
assert ext_test.shape[0] == test_splits[0].shape[0]
logging.info('\tAll train shape: {}'.format(ext_train.shape))
logging.info('\tAll valid shape: {}'.format(ext_valid.shape))
logging.info('\tAll test shape: {}'.format(ext_test.shape))
return ext_train, ext_valid, ext_test
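#
# usage sketch with hypothetical base paths (each one resolving either to a
# single <base>.pickle file or to a <base>.{ts,valid,test}.data triplet):
#
# ext_train, ext_valid, ext_test = merging_features(['repr/spn_feats',
#                                                    'repr/marg_feats'])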
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("paths", type=str, nargs='+',
help='Features file base paths')
parser.add_argument('--dtype', type=str,
default=float,
help='dtype used when loading the feature splits from text')
parser.add_argument('-o', '--output', type=str,
default='repr/rect/',
help='Output dir path')
parser.add_argument('--fmt', type=str, nargs='?',
default='float',
help='Dataset output number formatter')
parser.add_argument('--suffix', type=str,
help='Dataset output suffix')
parser.add_argument('--save-text', action='store_true',
help='Saving the repr data to text as well')
#
# parsing the args
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
logging.info("Starting with arguments:\n%s", args)
ext_train, ext_valid, ext_test = merging_features(args.paths, args.dtype)
#
# saving them into a single pickle file
if args.save_text:
train_out_path = os.path.join(args.output, '{}.{}'.format(args.suffix, TRAIN_DATA_EXT))
valid_out_path = os.path.join(args.output, '{}.{}'.format(args.suffix, VALID_DATA_EXT))
test_out_path = os.path.join(args.output, '{}.{}'.format(args.suffix, TEST_DATA_EXT))
logging.info('\nSaving training set to: {}'.format(train_out_path))
numpy.savetxt(train_out_path, ext_train, delimiter=',', fmt=FMT_DICT[args.fmt])
logging.info('Saving validation set to: {}'.format(valid_out_path))
numpy.savetxt(valid_out_path, ext_valid, delimiter=',', fmt=FMT_DICT[args.fmt])
logging.info('Saving test set to: {}'.format(test_out_path))
numpy.savetxt(test_out_path, ext_test, delimiter=',', fmt=FMT_DICT[args.fmt])
split_file_path = os.path.join(args.output, '{}.{}'.format(args.suffix,
PICKLE_SPLIT_EXT))
logging.info('Saving pickle data splits to: {}'.format(split_file_path))
with open(split_file_path, 'wb') as split_file:
pickle.dump((ext_train, ext_valid, ext_test), split_file, protocol=4)
logging.info('All done.')
| 5,310 | 32.19375 | 95 | py | spyn-repr | spyn-repr-master/bin/feature_split.py |
from spn.linked.representation import load_features_from_file
from spn.linked.representation import save_features_to_file
import numpy
from numpy.testing import assert_array_equal
import os
import logging
import argparse
def batch_feature_split(feature_path, batch_size):
#
# load features first
feature_masks = load_features_from_file(feature_path)
n_features = len(feature_masks)
logging.info('Loaded {} feature masks from {}'.format(n_features,
feature_path))
feature_splits = []
for i in range(0, n_features, batch_size):
masks_split = feature_masks[i:i + batch_size]
feature_splits.append(masks_split)
logging.info('Considering range {}:{} (size {})'.format(i, i + batch_size,
len(masks_split)))
tot_n_features = sum([len(s) for s in feature_splits])
logging.info('Prepare serialization for {} splits and {} tot features'.format(len(feature_splits),
tot_n_features))
assert tot_n_features == n_features
feature_split_paths = []
for i, masks in enumerate(feature_splits):
split_path = '{}.{}.{}'.format(feature_path,
i * batch_size,
min(i * batch_size + batch_size - 1,
n_features - 1))
save_features_to_file(masks, split_path)
logging.info('Saved split (size {}) to {}'.format(len(masks), split_path))
feature_split_paths.append(split_path)
return feature_masks, feature_split_paths, feature_splits
def load_batch_feature_splits(feature_paths):
feature_masks = []
for split_path in feature_paths:
split_masks = load_features_from_file(split_path)
print(split_masks.shape)
feature_masks.append(split_masks)
return numpy.concatenate(feature_masks, axis=0)
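#
# round-trip sketch with hypothetical arguments: split a mask file into
# batches of 1000 and load them back unchanged
#
# masks, paths, splits = batch_feature_split('repr/nltcs.features', 1000)
# assert_array_equal(masks, load_batch_feature_splits(paths))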
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("path", type=str,
help='Feature file path')
parser.add_argument('-s', '--split', type=int,
help='Batch split size')
#
# parsing the args
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
logging.info("Starting with arguments:\n%s", args)
#
# save them
feature_masks, split_paths, feature_splits = batch_feature_split(args.path, args.split)
#
# loading them back as a sanity check
rec_feature_masks = load_batch_feature_splits(split_paths)
assert_array_equal(feature_masks, rec_feature_masks)
| 2,748 | 32.938272 | 102 | py | spyn-repr | spyn-repr-master/bin/libra_repr_data.py |
import argparse
try:
from time import perf_counter
except ImportError:
from time import time
perf_counter = time
import dataset
import numpy
import datetime
import os
import logging
from spn.utils import stats_format
from spn import MARG_IND
from spn.linked.representation import extract_features_marginalization_acquery
from spn.linked.representation import extract_features_marginalization_acquery_opt_unique
from spn.linked.representation import load_features_from_file
import pickle
PREDS_EXT = 'lls'
TRAIN_PREDS_EXT = 'train.{}'.format(PREDS_EXT)
VALID_PREDS_EXT = 'valid.{}'.format(PREDS_EXT)
TEST_PREDS_EXT = 'test.{}'.format(PREDS_EXT)
DATA_EXT = 'data'
TRAIN_DATA_EXT = 'train.{}'.format(DATA_EXT)
VALID_DATA_EXT = 'valid.{}'.format(DATA_EXT)
TEST_DATA_EXT = 'test.{}'.format(DATA_EXT)
PICKLE_SPLIT_EXT = 'pickle'
FEATURE_FILE_EXT = 'features'
SCOPE_FILE_EXT = 'scopes'
FMT_DICT = {'int': '%d',
'float': '%.18e',
'float.8': '%.8e',
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("dataset", type=str,
help='Dataset dir')
parser.add_argument('--train-ext', type=str,
help='Training set name regex')
parser.add_argument('--valid-ext', type=str,
help='Validation set name regex')
parser.add_argument('--test-ext', type=str,
help='Test set name regex')
parser.add_argument('--model', type=str,
help='Libra model file path (ac)')
parser.add_argument('--seed', type=int, nargs='?',
default=1337,
help='Seed for the random generator')
parser.add_argument('-o', '--output', type=str, nargs='?',
default='./data/repr/',
help='Output dir path')
parser.add_argument('--suffix', type=str,
help='Dataset output suffix')
parser.add_argument('--sep', type=str, nargs='?',
default=',',
help='Dataset output separator')
parser.add_argument('--acquery-path', type=str, nargs='?',
default='/home/valerio/Petto Redigi/libra-tk-1.0.1/bin/acquery',
help='Path to Libra\'s acquery bin')
parser.add_argument('--fmt', type=str, nargs='?',
default='int',
help='Dataset output number formatter')
parser.add_argument('--features', type=str, nargs='?',
default=None,
help='Loading feature masks from file')
parser.add_argument('--no-ext', action='store_true',
help='Whether to keep only the new representation, without concatenating it to the original dataset')
parser.add_argument('--save-text', action='store_true',
help='Saving the repr data to text as well')
parser.add_argument('--overwrite', type=int, nargs='?',
default=1,
help='Whether to overwrite the generated feature files')
parser.add_argument('--opt-unique', action='store_true',
help='Whether to activate the unique patches opt while computing marg features')
parser.add_argument('-v', '--verbose', type=int, nargs='?',
default=1,
help='Verbosity level')
#
# parsing the args
args = parser.parse_args()
overwrite = True if args.overwrite > 0 else False
#
# fixing a seed
rand_gen = numpy.random.RandomState(args.seed)
os.makedirs(args.output, exist_ok=True)
#
# setting verbosity level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbose == 2:
logging.basicConfig(level=logging.DEBUG)
logging.info("Starting with arguments:\n%s", args)
#
# loading dataset splits
logging.info('Loading datasets: %s', args.dataset)
dataset_path = args.dataset
train, valid, test = dataset.load_dataset_splits(dataset_path,
filter_regex=[args.train_ext,
args.valid_ext,
args.test_ext])
dataset_name = args.train_ext.split('.')[0]
n_instances = train.shape[0]
n_test_instances = test.shape[0]
logging.info('\ttrain: {}\n\tvalid: {}\n\ttest: {}'.format(train.shape,
valid.shape,
test.shape))
freqs, feature_vals = dataset.data_2_freqs(train)
repr_train = None
repr_valid = None
repr_test = None
feature_file_path = args.features
feature_masks = load_features_from_file(feature_file_path)
logging.info('Loaded {} feature masks from {}'.format(len(feature_masks),
feature_file_path))
extract_feature_func = None
if args.opt_unique:
logging.info('Using unique opt')
extract_feature_func = extract_features_marginalization_acquery_opt_unique
else:
extract_feature_func = extract_features_marginalization_acquery
logging.info('\nConverting training set')
feat_s_t = perf_counter()
repr_train = extract_feature_func(train,
args.model,
feature_masks,
args.output,
dtype=float,
prefix=args.suffix,
overwrite_feature_file=overwrite,
exec_path=args.acquery_path)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
logging.info('Converting validation set')
feat_s_t = perf_counter()
repr_valid = extract_feature_func(valid,
args.model,
feature_masks,
args.output,
dtype=float,
prefix=args.suffix,
overwrite_feature_file=overwrite,
exec_path=args.acquery_path)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
logging.info('Converting test set')
feat_s_t = perf_counter()
repr_test = extract_feature_func(test,
args.model,
feature_masks,
args.output,
dtype=float,
prefix=args.suffix,
overwrite_feature_file=overwrite,
exec_path=args.acquery_path)
feat_e_t = perf_counter()
logging.info('\t done in {}'.format(feat_e_t - feat_s_t))
assert train.shape[0] == repr_train.shape[0]
assert valid.shape[0] == repr_valid.shape[0]
assert test.shape[0] == repr_test.shape[0]
logging.info('New shapes {0} {1} {2}'.format(repr_train.shape,
repr_valid.shape,
repr_test.shape))
assert repr_train.shape[1] == repr_valid.shape[1]
assert repr_valid.shape[1] == repr_test.shape[1]
#
# extending the original dataset
ext_train = None
ext_valid = None
ext_test = None
if args.no_ext:
ext_train = repr_train
ext_valid = repr_valid
ext_test = repr_test
else:
logging.info('\nConcatenating datasets')
ext_train = numpy.concatenate((train, repr_train), axis=1)
ext_valid = numpy.concatenate((valid, repr_valid), axis=1)
ext_test = numpy.concatenate((test, repr_test), axis=1)
assert train.shape[0] == ext_train.shape[0]
assert valid.shape[0] == ext_valid.shape[0]
assert test.shape[0] == ext_test.shape[0]
assert ext_train.shape[1] == train.shape[1] + repr_train.shape[1]
assert ext_valid.shape[1] == valid.shape[1] + repr_valid.shape[1]
assert ext_test.shape[1] == test.shape[1] + repr_test.shape[1]
logging.info('New shapes {0} {1} {2}'.format(ext_train.shape,
ext_valid.shape,
ext_test.shape))
#
# storing them
if args.save_text:
train_out_path = os.path.join(args.output, '{}.{}'.format(args.suffix, args.train_ext))
valid_out_path = os.path.join(args.output, '{}.{}'.format(args.suffix, args.valid_ext))
test_out_path = os.path.join(args.output, '{}.{}'.format(args.suffix, args.test_ext))
logging.info('\nSaving training set to: {}'.format(train_out_path))
numpy.savetxt(train_out_path, ext_train, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
logging.info('Saving validation set to: {}'.format(valid_out_path))
numpy.savetxt(valid_out_path, ext_valid, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
logging.info('Saving test set to: {}'.format(test_out_path))
numpy.savetxt(test_out_path, ext_test, delimiter=args.sep, fmt=FMT_DICT[args.fmt])
split_file_path = os.path.join(args.output, '{}.{}.{}'.format(args.suffix,
dataset_name,
PICKLE_SPLIT_EXT))
logging.info('Saving pickle data splits to: {}'.format(split_file_path))
with open(split_file_path, 'wb') as split_file:
pickle.dump((ext_train, ext_valid, ext_test), split_file)
| 9,969 | 36.481203 | 104 | py | spyn-repr | spyn-repr-master/bin/learnspn_exp.py |
import argparse
try:
from time import perf_counter
except ImportError:
from time import time
perf_counter = time
import dataset
import numpy
from numpy.testing import assert_almost_equal
import random
import datetime
import os
import logging
from algo.learnspn import LearnSPN
from spn import NEG_INF
from spn.utils import stats_format
import pickle
MODEL_EXT = 'model'
#########################################
# creating the opt parser
parser = argparse.ArgumentParser()
parser.add_argument("dataset", type=str, nargs=1,
help='Specify a dataset name from data/ (es. nltcs)')
parser.add_argument('-k', '--n-row-clusters', type=int, nargs='?',
default=2,
help='Number of clusters to split rows into' +
' (for DPGMM it is the max num of clusters)')
parser.add_argument('-c', '--cluster-method', type=str, nargs='?',
default='GMM',
help='Cluster method to apply on rows' +
' ["GMM"|"DPGMM"|"HOEM"]')
parser.add_argument('--seed', type=int, nargs='?',
default=1337,
help='Seed for the random generator')
parser.add_argument('-o', '--output', type=str, nargs='?',
default='./exp/learnspn-b/',
help='Output dir path')
parser.add_argument('-g', '--g-factor', type=float, nargs='+',
default=[1.0],
help='The "p-value like" for G-Test on columns')
parser.add_argument('-i', '--n-iters', type=int, nargs='?',
default=100,
help='Number of iterates for the row clustering algo')
parser.add_argument('-r', '--n-restarts', type=int, nargs='?',
default=4,
help='Number of restarts for the row clustering algo' +
' (only for GMM)')
parser.add_argument('-p', '--cluster-penalty', type=float, nargs='+',
default=[1.0],
help='Penalty for the cluster number' +
' (i.e. alpha in DPGMM and rho in HOEM, not used in GMM)')
parser.add_argument('-s', '--sklearn-args', type=str, nargs='?',
help='Additional sklearn parameters in the for of a list' +
' "[name1=val1,..,namek=valk]"')
parser.add_argument('-m', '--min-inst-slice', type=int, nargs='+',
default=[50],
help='Min number of instances in a slice to split by cols')
parser.add_argument('-a', '--alpha', type=float, nargs='+',
default=[0.1],
help='Smoothing factor for leaf probability estimation')
parser.add_argument('--clt-leaves', action='store_true',
help='Whether to use Chow-Liu trees as leaves')
parser.add_argument('--kde-leaves', action='store_true',
help='Whether to use kernel density estimations as leaves')
parser.add_argument('--save-model', action='store_true',
help='Whether to store the model file as a pickle file')
parser.add_argument('-v', '--verbose', type=int, nargs='?',
default=1,
help='Verbosity level')
#
# parsing the args
args = parser.parse_args()
if args.clt_leaves and args.kde_leaves:
raise ValueError('Cannot use both CLT and KDE leaves!')
#
# setting verbosity level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbose == 2:
logging.basicConfig(level=logging.DEBUG)
logging.info("Starting with arguments:\n%s", args)
# I shall print here all the stats
#
# gathering parameters
alphas = args.alpha
min_inst_slices = args.min_inst_slice
g_factors = args.g_factor
cluster_penalties = args.cluster_penalty
cltree_leaves = args.clt_leaves
kde_leaves = args.kde_leaves
sklearn_args = None
if args.sklearn_args is not None:
sklearn_key_value_pairs = args.sklearn_args.translate(
{ord('['): '', ord(']'): ''}).split(',')
sklearn_args = {key.strip(): value.strip() for key, value in
[pair.strip().split('=')
for pair in sklearn_key_value_pairs]}
else:
sklearn_args = {}
logging.info(sklearn_args)
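#
# e.g. -s "[covariance_type=diag,n_init=3]" is parsed above into
# {'covariance_type': 'diag', 'n_init': '3'} (values stay strings here)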
# initing the random generators
seed = args.seed
MAX_RAND_SEED = 99999999 # sys.maxsize
rand_gen = random.Random(seed)
numpy_rand_gen = numpy.random.RandomState(seed)
#
# elaborating the dataset
#
logging.info('Loading datasets: %s', args.dataset)
(dataset_name,) = args.dataset
train, valid, test = dataset.load_train_val_test_csvs(dataset_name)
n_instances = train.shape[0]
n_test_instances = test.shape[0]
#
# estimating the frequencies for the features
logging.info('Estimating features on training set...')
freqs, features = dataset.data_2_freqs(train)
#
# Opening the file for test prediction
#
logging.info('Opening log file...')
date_string = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
out_path = args.output + dataset_name + '_' + date_string
out_log_path = out_path + '/exp.log'
test_lls_path = out_path + '/test.lls'
#
# creating the dir if non-existent
if not os.path.exists(os.path.dirname(out_log_path)):
os.makedirs(os.path.dirname(out_log_path))
best_valid_avg_ll = NEG_INF
best_state = {}
best_test_lls = None
preamble = ("""g-factor:\tclu-pen:\tmin-ins:\talpha:\tn_edges:""" +
            """\tn_levels:\tn_weights:\tn_leaves:""" +
            """\ttrain_ll:\tvalid_ll:\ttest_ll:\n""")
with open(out_log_path, 'w') as out_log:
out_log.write("parameters:\n{0}\n\n".format(args))
out_log.write(preamble)
out_log.flush()
#
# looping over all parameters combinations
for g_factor in g_factors:
for cluster_penalty in cluster_penalties:
for min_inst_slice in min_inst_slices:
#
# Creating the structure learner
learner = LearnSPN(g_factor=g_factor,
min_instances_slice=min_inst_slice,
# alpha=alpha,
row_cluster_method=args.cluster_method,
cluster_penalty=cluster_penalty,
n_cluster_splits=args.n_row_clusters,
n_iters=args.n_iters,
n_restarts=args.n_restarts,
sklearn_args=sklearn_args,
cltree_leaves=cltree_leaves,
kde_leaves=kde_leaves,
rand_gen=numpy_rand_gen)
learn_start_t = perf_counter()
#
# build an spn on the training set
spn = learner.fit_structure(data=train,
feature_sizes=features)
# spn = learner.fit_structure_bagging(data=train,
# feature_sizes=features,
# n_components=10)
learn_end_t = perf_counter()
logging.info('Structure learned in {} secs'.format(learn_end_t -
learn_start_t))
#
# print(spn)
#
# gathering statistics
n_edges = spn.n_edges()
n_levels = spn.n_layers()
n_weights = spn.n_weights()
n_leaves = spn.n_leaves()
#
# saving just in case
if args.save_model:
prefix_str = stats_format([g_factor,
cluster_penalty,
min_inst_slice,
learner._alpha],
'_',
digits=5)
model_path = os.path.join(out_path,
'best.{0}.{1}'.format(dataset_name,
MODEL_EXT))
with open(model_path, 'wb') as model_file:
pickle.dump(spn, model_file)
logging.info('Dumped spn to {}'.format(model_path))
#
# smoothing can be done after the spn has been built
for alpha in alphas:
logging.info('Smoothing leaves with alpha = %f', alpha)
spn.smooth_leaves(alpha)
#
# Compute LL on training set
logging.info('Evaluating on training set')
train_ll = 0.0
for instance in train:
(pred_ll, ) = spn.eval(instance)
train_ll += pred_ll
train_avg_ll = train_ll / train.shape[0]
#
# Compute LL on validation set
logging.info('Evaluating on validation set')
valid_ll = 0.0
for instance in valid:
(pred_ll, ) = spn.eval(instance)
valid_ll += pred_ll
valid_avg_ll = valid_ll / valid.shape[0]
#
# Compute LL on test set
test_lls = numpy.zeros(test.shape[0])
logging.info('Evaluating on test set')
test_ll = 0.0
for i, instance in enumerate(test):
(pred_ll, ) = spn.eval(instance)
test_ll += pred_ll
test_lls[i] = pred_ll
test_avg_ll = test_ll / test.shape[0]
#
# updating best stats according to valid ll
if valid_avg_ll > best_valid_avg_ll:
best_valid_avg_ll = valid_avg_ll
best_state['alpha'] = alpha
best_state['min-inst-slice'] = min_inst_slice
best_state['g-factor'] = g_factor
best_state['cluster-penalty'] = cluster_penalty
best_state['train_ll'] = train_avg_ll
best_state['valid_ll'] = valid_avg_ll
best_state['test_ll'] = test_avg_ll
best_test_lls = test_lls
#
# This now overwrites the old best model
if args.save_model:
prefix_str = stats_format([g_factor,
cluster_penalty,
min_inst_slice,
alpha],
'_',
digits=5)
model_path = os.path.join(out_path,
'best.{0}.{1}'.format(dataset_name,
MODEL_EXT))
with open(model_path, 'wb') as model_file:
pickle.dump(spn, model_file)
logging.info('Dumped spn to {}'.format(model_path))
#
# writing to file a line for the grid
stats = stats_format([g_factor,
cluster_penalty,
min_inst_slice,
alpha,
n_edges, n_levels,
n_weights, n_leaves,
train_avg_ll,
valid_avg_ll,
test_avg_ll],
'\t',
digits=5)
out_log.write(stats + '\n')
out_log.flush()
#
# writing as last line the best params
out_log.write("{0}".format(best_state))
out_log.flush()
#
# saving the best test_lls
assert_almost_equal(best_state['test_ll'], best_test_lls.mean())
numpy.savetxt(test_lls_path, best_test_lls, delimiter='\n')
logging.info('Grid search ended.')
logging.info('Best params:\n\t%s', best_state)
| 12,700
| 36.688427
| 89
|
py
|
spyn-repr
|
spyn-repr-master/tests/test_dataset.py
|
import dataset
import numpy
def test_sampling():
# loading nltcs
print('Loading datasets')
train, valid, test = dataset.load_train_val_test_csvs('nltcs')
# checking for their shape
n_instances = train.shape[0]
n_test_instances = test.shape[0]
n_valid_instances = valid.shape[0]
nltcs_train = 16181
nltcs_valid = 2157
nltcs_test = 3236
print('Training set with {0} instances\n'.format(n_instances) +
'Validation set with {0} instances\n'.format(n_valid_instances) +
'Test set with {0} instances'.format(n_test_instances))
assert n_instances == nltcs_train
assert n_valid_instances == nltcs_valid
assert n_test_instances == nltcs_test
# random sampling
perc = 0.1
sample_train, sample_valid, sample_test = \
dataset.sample_sets((train, valid, test), perc)
n_s_instances = sample_train.shape[0]
n_s_valid_instances = sample_valid.shape[0]
n_s_test_instances = sample_test.shape[0]
print('Sampled training set with {0} instances\n'
.format(n_s_instances) +
'Sampled validation set with {0} instances\n'
.format(n_s_valid_instances) +
'Sampled test set with {0} instances'
.format(n_s_test_instances))
assert n_s_instances == int(nltcs_train * perc)
assert n_s_valid_instances == int(nltcs_valid * perc)
assert n_s_test_instances == int(nltcs_test * perc)
def test_bootstrap_sampling():
# loading nltcs
synth_data = numpy.array([[1, 1, 1],
[1, 1, 0],
[1, 0, 0],
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 1]])
perc = 1.0
replace = True
sampled_data_1 = dataset.sample_instances(synth_data,
perc=perc,
replace=replace)
sampled_data_2 = dataset.sample_instances(synth_data,
perc=perc,
replace=replace)
sampled_data_3 = dataset.sample_instances(synth_data,
perc=perc,
replace=replace)
print('first sample:\n', sampled_data_1)
print('second sample:\n', sampled_data_2)
print('third sample:\n', sampled_data_3)
def test_cluster_freqs():
data = numpy.array([[0, 1, 1, 0],
[1, 2, 0, 0],
[1, 0, 1, 0],
[0, 0, 2, 0],
[0, 1, 3, 0],
[1, 1, 1, 0]])
n_clusters = 3
freqs, features = dataset.data_clust_freqs(data,
n_clusters)
print('frequencies, features',
freqs, features)
def test_merge_datasets():
#
# a loop to convert them all
for dataset_name in dataset.DATASET_NAMES:
dataset.merge_datasets(dataset_name)
| 3,136
| 31.010204
| 75
|
py
|
spyn-repr
|
spyn-repr-master/cltree/probs.py
|
import numpy
import numba
@numba.jit
def scope_union(factor_scope_1, factor_scope_2):
"""
A factor scope is a numpy boolean array
"""
return factor_scope_1 + factor_scope_2
#@numba.njit
def factor_length(factor_scope, feature_vals):
"""
WRITEME
"""
f_scope = feature_vals[factor_scope]
f_length = 0
if f_scope.shape[0] > 0:
f_length = f_scope.prod()
return f_length
# @numba.jit
def n_factor_features(factor_scope):
"""
WRITEME
"""
return factor_scope.sum()
@numba.njit
def numba_cumsum(array):
"""
Up to version 0.19 numba misses numpy's cumsum op.
Thus we implement it by hand
"""
for i in range(1, array.shape[0]):
array[i] += array[i - 1]
#@numba.njit
def compute_factor_stride(feature_vals,
                          factor_scope,
                          prod_scope,
                          factor_stride):
    """
    Compute, for each variable of the product scope, its stride w.r.t.
    a factor: the product of the cardinalities of the in-scope
    variables preceding it, or 0 for variables outside the factor scope
    """
    n_features = feature_vals.shape[0]
    #
    # the stride of an in-scope variable is the cumulative product of
    # the cardinalities of the in-scope variables preceding it
    stride = 1
    for i in range(n_features):
        if factor_scope[i]:
            factor_stride[i] = stride
            stride *= feature_vals[i]
    #
    # masking with the prod scope
    factor_stride[~factor_scope] = 0
    return factor_stride[prod_scope]
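#
# Worked example (illustrative, assuming the cumulative-product stride
# convention used above): for feature_vals = [2, 2, 3] and a factor over
# all three variables, the strides are [1, 2, 4], so the table entry for
# the assignment (x0, x1, x2) sits at index x0 + 2 * x1 + 4 * x2.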
@numba.njit
def compute_factor_product(factor_1,
factor_2,
prod_factor,
assignment,
feature_vals,
factor_stride_1,
factor_stride_2):
"""
WRITEME
"""
j = 0
k = 0
factor_length = prod_factor.shape[0]
n_features = assignment.shape[0]
for i in range(factor_length):
#
# operating in the log domain
prod_factor[i] = factor_1[j] + factor_2[k]
for l in range(n_features):
assignment[l] += 1
if assignment[l] == feature_vals[l]:
assignment[l] = 0
l_feature_val = feature_vals[l] - 1
j -= (l_feature_val * factor_stride_1[l])
k -= (l_feature_val * factor_stride_2[l])
else:
j += factor_stride_1[l]
k += factor_stride_2[l]
break
return prod_factor
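#
# Worked example (mirroring cltree/tests/test_probs.py): multiplying
# factor_1 = log([0.5, 0.5, 0.2, 0.8]) over (X0, X1) with
# factor_2 = log([0.7, 0.3]) over X0 alone (strides [1, 2] and [1, 0])
# yields log([0.35, 0.15, 0.14, 0.24]): entrywise sums in the log domain,
# visited in the stride-driven enumeration order above.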
@numba.jit
def factor_product(factor_1,
factor_2,
factor_scope_1,
factor_scope_2,
feature_vals):
"""
WRITEME
"""
n_features = feature_vals.shape[0]
#
# getting the scope
prod_factor_scope = scope_union(factor_scope_1, factor_scope_2)
#
# preallocating
prod_factor_length = factor_length(prod_factor_scope, feature_vals)
prod_factor = numpy.zeros(prod_factor_length, dtype=factor_1.dtype)
n_prod_features = n_factor_features(prod_factor_scope)
assignment = numpy.zeros(n_prod_features, dtype=factor_1.dtype)
#
# computing strides
factor_stride_1 = numpy.zeros(n_features, dtype=numpy.uint16)
factor_stride_2 = numpy.zeros(n_features, dtype=numpy.uint16)
factor_stride_1 = compute_factor_stride(feature_vals,
factor_scope_1,
prod_factor_scope,
factor_stride_1)
factor_stride_2 = compute_factor_stride(feature_vals,
factor_scope_2,
prod_factor_scope,
factor_stride_2)
#
# computing the actual product
prod_feature_vals = feature_vals[prod_factor_scope]
prod_factor = compute_factor_product(factor_1,
factor_2,
prod_factor,
assignment,
prod_feature_vals,
factor_stride_1,
factor_stride_2)
return prod_factor, prod_factor_scope
| 4,185
| 26.539474
| 71
|
py
|
spyn-repr
|
spyn-repr-master/cltree/utils.py
|
import graphviz
from cltree.cltree import CLTree
def add_nodes(graph, nodes):
"""
"""
for n in nodes:
if isinstance(n, tuple):
graph.node(n[0], **n[1])
else:
graph.node(n)
return graph
def add_edges(graph, edges):
for e in edges:
if isinstance(e[0], tuple):
graph.edge(*e[0], **e[1])
else:
graph.edge(*e)
return graph
"""
styles = {
'graph': {
'label': 'A Fancy Graph',
'fontsize': '16',
'fontcolor': 'white',
'bgcolor': '#333333',
'rankdir': 'BT',
},
'nodes': {
'fontname': 'Helvetica',
'shape': 'hexagon',
'fontcolor': 'white',
'color': 'white',
'style': 'filled',
'fillcolor': '#006699',
},
'edges': {
'style': 'dashed',
'color': 'white',
'arrowhead': 'open',
'fontname': 'Courier',
'fontsize': '12',
'fontcolor': 'white',
}
}
"""
def apply_styles(graph, styles):
graph.graph_attr.update(
('graph' in styles and styles['graph']) or {}
)
graph.node_attr.update(
('nodes' in styles and styles['nodes']) or {}
)
graph.edge_attr.update(
('edges' in styles and styles['edges']) or {}
)
return graph
def cltree_2_dot(cltree,
format='svg',
styles=None,
output='cltree',
prefix='clt_',
cluster=None):
"""
WRITEME
"""
#
# create the graph
if cluster is not None:
graph = graphviz.Digraph(format=format,
name=cluster)
else:
graph = graphviz.Digraph(format=format)
#
# getting a prefix for the nodes
node_prefix = prefix
for node in map(str, cltree.features):
graph.node(node_prefix + node, label=node)
n_features = len(cltree.features)
# print('CLTree', cltree._tree)
#
# adding parent->children edges
for child_id, parent_id in zip(range(1, n_features),
cltree._tree[1:]):
graph.edge(node_prefix + str(cltree.features[parent_id]),
node_prefix + str(cltree.features[child_id]))
#
# applying styles
if styles is not None:
apply_styles(graph, styles)
#
# optionally saving in the specified format
if output is not None:
graph.render(output)
return graph
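#
# Illustrative usage (assuming `clt` is a learned CLTree instance):
#     cltree_2_dot(clt, format='pdf', output='my-cltree')
# renders the tree with graphviz and writes 'my-cltree.pdf' to disk.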
| 2,592
| 21.745614
| 65
|
py
|
spyn-repr
|
spyn-repr-master/cltree/cltree.py
|
import numpy
import numba
import scipy.sparse
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.sparse.csgraph import depth_first_order
from spn import LOG_ZERO
@numba.njit
def safe_log(x):
"""
Assuming x to be a scalar
"""
if x > 0.0:
return numpy.log(x)
else:
return LOG_ZERO
##
# TODO: generalize to cat vars
@numba.njit
def compute_mutual_information(feature_vals,
log_probs,
log_joint_probs,
m_i_table):
"""
WRITEME
"""
n_features = feature_vals.shape[0]
for i in range(n_features):
for j in range(i + 1, n_features):
# if i != j:
for val_i in range(feature_vals[i]):
for val_j in range(feature_vals[j]):
log_joint_i_j = log_joint_probs[i, j, val_i, val_j]
m_i_table[i, j] = (m_i_table[i, j] +
numpy.exp(log_joint_i_j) *
(log_joint_i_j -
log_probs[i, val_i] -
log_probs[j, val_j]))
m_i_table[j, i] = m_i_table[i, j]
return m_i_table
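#
# The quantity accumulated above is the usual mutual information
#     I(X_i; X_j) = sum_{x_i, x_j} p(x_i, x_j) *
#                   (log p(x_i, x_j) - log p(x_i) - log p(x_j))
# computed from the (smoothed) log marginals and log joints.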
#
# TODO: generalize from binary to categorical vars
@numba.njit
def compute_log_probs(freqs,
probs,
log_probs,
n_instances,
alpha=0.0):
"""
WRITEME
"""
n_features = freqs.shape[0]
#
# smoothing, if needed
# probs = (freqs + 2 * alpha) / (n_instances + 4 * alpha)
for i in range(n_features):
#
# smoothing if needed
probs[i] = (freqs[i] + 2 * alpha) / (n_instances + 4 * alpha)
#
# going to logs
log_probs[i, 0] = safe_log(1 - probs[i])
log_probs[i, 1] = safe_log(probs[i])
return log_probs
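#
# Illustrative arithmetic (not in the original code): with
# n_instances = 10, freqs[i] = 3 and alpha = 1 the smoothed estimate is
# probs[i] = (3 + 2) / (10 + 4) = 5 / 14, i.e. a Laplace-style correction
# adding 2 * alpha pseudo-counts per binary state.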
@numba.njit
def compute_joint_bin_freqs(joint_freqs,
co_occs,
n_instances):
"""
Assuming features to be binary
TODO: generalize this to categorical vars
"""
n_features = co_occs.shape[0]
for i in range(n_features):
for j in range(i + 1, n_features):
joint_freqs[i, j, 1, 1] = co_occs[i, j]
joint_freqs[i, j, 0, 1] = co_occs[j, j] - co_occs[i, j]
joint_freqs[i, j, 1, 0] = co_occs[i, i] - co_occs[i, j]
joint_freqs[i, j, 0, 0] = (n_instances -
joint_freqs[i, j, 1, 1] -
joint_freqs[i, j, 0, 1] -
joint_freqs[i, j, 1, 0])
# saving for symmetry
joint_freqs[j, i, 1, 1] = joint_freqs[i, j, 1, 1]
joint_freqs[j, i, 0, 1] = joint_freqs[i, j, 1, 0]
joint_freqs[j, i, 1, 0] = joint_freqs[i, j, 0, 1]
joint_freqs[j, i, 0, 0] = joint_freqs[i, j, 0, 0]
return joint_freqs
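#
# The joint counts are recovered from the co-occurrence matrix C = X^T X:
# n_11 = C[i, j], n_01 = C[j, j] - C[i, j], n_10 = C[i, i] - C[i, j],
# and n_00 is whatever remains of the n_instances total.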
@numba.njit
def compute_log_joint_bin_probs(joint_freqs,
log_joint_probs,
n_instances,
alpha=0.0):
"""
Assuming features to be binary
TODO: generalize this to categorical vars
"""
n_features = joint_freqs.shape[0]
for i in range(n_features):
for j in range(i + 1, n_features):
log_joint_probs[i, j, 1, 1] = \
safe_log((joint_freqs[i, j, 1, 1] + alpha) /
(n_instances + 4.0 * alpha))
log_joint_probs[i, j, 0, 1] = \
safe_log((joint_freqs[i, j, 0, 1] + alpha) /
(n_instances + 4.0 * alpha))
log_joint_probs[i, j, 1, 0] = \
safe_log((joint_freqs[i, j, 1, 0] + alpha) /
(n_instances + 4.0 * alpha))
log_joint_probs[i, j, 0, 0] = \
safe_log((joint_freqs[i, j, 0, 0] + alpha) /
(n_instances + 4.0 * alpha))
# saving for symmetry
log_joint_probs[j, i, 1, 1] = log_joint_probs[i, j, 1, 1]
log_joint_probs[j, i, 0, 1] = log_joint_probs[i, j, 1, 0]
log_joint_probs[j, i, 1, 0] = log_joint_probs[i, j, 0, 1]
log_joint_probs[j, i, 0, 0] = log_joint_probs[i, j, 0, 0]
return log_joint_probs
@numba.njit
def compute_log_cond_probs(feature_vals,
log_probs,
log_joint_probs,
log_cond_probs):
"""
FIXME:
when both log_probs are LOG_ZERO then result is 0->prob of 1
plus, the safe_log function shall be used once only, at the end
of all computations
"""
n_features = feature_vals.shape[0]
for i in range(n_features):
for j in range(n_features):
if i != j:
for val_i in range(feature_vals[i]):
for val_j in range(feature_vals[j]):
log_cond_probs[i, j, val_i, val_j] = \
(log_joint_probs[i, j, val_i, val_j] -
log_probs[j, val_j])
return log_cond_probs
@numba.njit
def compute_log_factors(tree,
feature_vals,
log_probs,
log_joint_probs,
log_factors):
"""
FIXME:
when both log_probs are LOG_ZERO then result is 0->prob of 1
plus, the safe_log function shall be used once only, at the end
of all computations
This shall compute only the conditioned factors
"""
n_features = feature_vals.shape[0]
#
# for the root we have a redundant representation
log_factors[0, 0, 0] = log_probs[0, 0]
log_factors[0, 0, 1] = log_probs[0, 0]
log_factors[0, 1, 0] = log_probs[0, 1]
log_factors[0, 1, 1] = log_probs[0, 1]
#
# for the rest
for feature_id, parent_id in zip(range(1, n_features), tree[1:]):
for feature_val in range(feature_vals[feature_id]):
for parent_val in range(feature_vals[parent_id]):
log_factors[feature_id, feature_val, parent_val] = \
(log_joint_probs[feature_id, parent_id, feature_val, parent_val] -
log_probs[parent_id, parent_val])
return log_factors
from spn import MARG_IND
def tree_2_factor_matrix(tree,
factors):
"""
factors = n_features x n_factors
(n_features x n_features for trees)
"""
n_features = factors.shape[0]
    # print(factors.shape)
#
# setting the root node
factors[0, 0] = True
for i, feature in zip(range(1, n_features), tree[1:]):
factors[i, i] = True
factors[feature, i] = True
return factors
def instantiate_factors(tree,
feature_vals,
evidence,
factors,
ev_factors):
"""
ev_factors = factors
"""
n_features = tree.shape[0]
#
# evidence for root
if evidence[0] != MARG_IND:
ev_factors[0] = factors[0][evidence[0]]
for i, parent in zip(range(1, n_features), tree[1:]):
if evidence[i] != MARG_IND:
child_evidence = [evidence[i]]
else:
child_evidence = list(range(feature_vals[i]))
if evidence[parent] != MARG_IND:
parent_evidence = [evidence[parent]]
else:
parent_evidence = list(range(feature_vals[parent]))
ev_factors[i] = factors[i][child_evidence, parent_evidence]
return ev_factors
def marginalize(features,
feature_vals,
factors,
factor_matrix,
tree,
evidence):
"""
ex: evidence = [0, 1, MARG_IND, 0, MARG_IND]
ordering = [False, True, False] the second elem is a leaf
"""
n_features = len(tree)
#
# getting vars to marginalize over
# marg_vars = ()
print(evidence == MARG_IND)
print(features[evidence == MARG_IND])
sum_out_features = features[evidence == MARG_IND]
print(sum_out_features)
log_prob = 0.0
remaining_factors = numpy.ones(n_features, dtype=bool)
#
# for each feature to marginalize
for m_feature in sum_out_features:
print('Consider feature', m_feature)
#
#
sum_prob = 0.0
for val in range(feature_vals[m_feature]):
print('Sum over', m_feature, val)
#
# getting the factors it appears into
log_prod_prob = log_prob
factor_list = factor_matrix[m_feature]
for f in features[factor_list]:
factor = factors[f]
print('Considering Factor', f)
if factor.shape[0] > 1:
log_prod_prob += factor[val]
else:
log_prod_prob += factor[0]
print('log_prod_prob', log_prod_prob)
#
# erasing the factor from consideration
remaining_factors[f] = False
sum_prob += numpy.exp(log_prod_prob)
print('sum prob', sum_prob)
factor_matrix[:, ~remaining_factors] = False
#
#
log_prob = numpy.log(sum_prob)
print('log prob', log_prob)
#
# for the remaining factors (they shall be instantiated)
print('Remaining Factors', remaining_factors)
for f in features[remaining_factors]:
factor = factors[f]
log_prob += factor[0]
return log_prob
# def minimum_spanning_tree(X, copy=True):
# """
# X are edge weights of fully connected graph
# """
# if copy:
# X = X.copy()
# if X.shape[0] != X.shape[1]:
# raise ValueError("X needs to be square matrix of edge weights")
# n_vertices = X.shape[0]
# spanning_edges = []
# # initialize with node 0:
# visited_vertices = [0]
# num_visited = 1
# # exclude self connections:
# diag_indices = numpy.arange(n_vertices)
# X[diag_indices, diag_indices] = numpy.inf
# while num_visited != n_vertices:
# new_edge = numpy.argmin(X[visited_vertices], axis=None)
# # 2d encoding of new_edge from flat, get correct indices
# new_edge = divmod(new_edge, n_vertices)
# # print('new_edge', new_edge)
# # print(visited_vertices[new_edge[0]])
# new_edge = [visited_vertices[new_edge[0]], new_edge[1]]
# # add edge to tree
# spanning_edges.append(new_edge)
# visited_vertices.append(new_edge[1])
# # remove all edges inside current tree
# X[visited_vertices, new_edge[1]] = numpy.inf
# X[new_edge[1], visited_vertices] = numpy.inf
# num_visited += 1
# return numpy.vstack(spanning_edges)
# @numba.jit
# def minimum_spanning_tree_numba(X,
# visited_vertices,
# spanning_edges,
# diag_indices):
# """
# X are edge weights of fully connected graph
# """
# # if X.shape[0] != X.shape[1]:
# # raise ValueError("X needs to be square matrix of edge weights")
# n_vertices = X.shape[0]
# # spanning_edges = []
# # initialize with node 0:
# visited_vertices[0] = True
# num_visited = 1
# # exclude self connections:
# # diag_indices = numpy.arange(n_vertices)
# # X[diag_indices, diag_indices] = numpy.inf
# for i in range(n_vertices):
# X[i, i] = numpy.inf
# while num_visited != n_vertices:
# new_edge = numpy.argmin(X[visited_vertices])
# # 2d encoding of new_edge from flat, get correct indices
# # new_edge = divmod(new_edge, n_vertices)
# new_edge_0 = new_edge // n_vertices
# new_edge_1 = new_edge % n_vertices
# # print('new_edge', (new_edge_0, new_edge_1))
# # print(visited_vertices.nonzero()[0][new_edge_0])
# new_edge_0 = visited_vertices.nonzero()[0][new_edge_0]
# # add edge to tree
# # spanning_edges.append(new_edge)
# spanning_edges[num_visited - 1, 0] = new_edge_0
# spanning_edges[num_visited - 1, 1] = new_edge_1
# # visited_vertices.append(new_edge[1])
# visited_vertices[new_edge_1] = True
# # remove all edges inside current tree
# X[visited_vertices, new_edge_1] = numpy.inf
# X[new_edge_1, visited_vertices] = numpy.inf
# num_visited += 1
# return numpy.vstack(spanning_edges)
@numba.njit
def eval_instance(instance,
features,
tree,
log_marg_probs,
log_cond_probs):
"""
WRITEME
"""
ll = log_marg_probs[0, instance[features[0]]]
for feature_id in range(1, tree.shape[0]):
feature = features[feature_id]
parent_id = tree[feature_id]
ll += log_cond_probs[feature_id, parent_id,
instance[feature],
instance[features[parent_id]]]
return ll
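#
# This implements the directed tree factorization in the log domain:
#     log p(x) = log p(x_root) + sum_i log p(x_i | x_pa(i))
# where pa(i) is the parent of feature i in the Chow-Liu tree.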
@numba.njit
def eval_instance_fact(instance,
features,
tree,
factors):
"""
WRITEME
"""
ll = 0.0
for feature_id in range(tree.shape[0]):
feature = features[feature_id]
parent_id = tree[feature_id]
ll += factors[feature_id,
instance[feature],
instance[features[parent_id]]]
return ll
class CLTree:
"""
A class for modeling a Chow-Liu tree
"""
def __init__(self,
data,
features=None,
factors=None,
tree=None,
n_feature_vals=2,
feature_vals=None,
alpha=0.1,
sparse=True,
mem_free=True):
"""
WRITEME
"""
#
# learning it from data
if data is not None:
self._learn_from_data(data,
features,
n_feature_vals,
feature_vals,
alpha,
sparse,
mem_free)
elif (features is not None and
feature_vals is not None and
tree is not None and
factors is not None):
self._build_from_factors(features,
feature_vals,
tree,
factors)
else:
raise ValueError('Invalid CL Tree initialization')
def _learn_from_data(self,
data,
features=None,
n_feature_vals=2,
feature_vals=None,
alpha=0.1,
sparse=True,
mem_free=True):
"""
Chow and Liu learning algorithm
"""
#
# this trick helps for sparse matrices
# TODO: check if this cond is needed or the sparse dot is equal to
# the dense one performance-wise
if sparse:
self._data = scipy.sparse.csr_matrix(data)
else:
self._data = data
self._alpha = alpha
self._n_features = data.shape[1]
self._n_instances = data.shape[0]
self.features = features
#
# assuming homogeneous features this could be restrictive
# TODO: extend the whole code to categorical non homogeneous features
self._feature_vals = feature_vals
if self._feature_vals is None:
self._feature_vals = numpy.array([n_feature_vals
for i in range(self._n_features)])
#
# getting the max to pre-allocate the memory
# TODO: generalize
self._n_feature_vals = n_feature_vals
if self._n_feature_vals is None:
self._n_feature_vals = max(self._feature_vals)
if self.features is None:
self.features = numpy.array([i for i in range(self._n_features)])
#
# pre-allocating arrays for freqs and probs
# self._marg_freqs = numpy.zeros(self._n_features)
self._joint_freqs = numpy.zeros((self._n_features,
self._n_features,
self._n_feature_vals,
self._n_feature_vals))
self._log_marg_probs = numpy.zeros((self._n_features,
self._n_feature_vals))
self._log_joint_probs = numpy.zeros((self._n_features,
self._n_features,
self._n_feature_vals,
self._n_feature_vals))
self._log_cond_probs = numpy.zeros((self._n_features,
self._n_features,
self._n_feature_vals,
self._n_feature_vals))
self._mutual_info = numpy.zeros((self._n_features,
self._n_features))
#
# computing freqs and probs (and smoothing)
co_occ_matrix = self._data.T.dot(self._data)
#
# marginal frequencies
if sparse:
co_occ_matrix = numpy.array(co_occ_matrix.todense())
self._marg_freqs = co_occ_matrix.diagonal()
else:
self._marg_freqs = co_occ_matrix.diagonal()
self._log_marg_probs = self.log_marg_probs(self._marg_freqs,
self._log_marg_probs)
#
# joint estimation
self._joint_freqs = self.joint_freqs(self._joint_freqs,
co_occ_matrix)
self._log_joint_probs = self.log_joint_probs(self._joint_freqs,
self._log_joint_probs)
#
# conditional estimation
self._log_cond_probs = self.log_cond_probs(self._log_marg_probs,
self._log_joint_probs,
self._log_cond_probs)
self._mutual_info = self.mutual_information(self._log_marg_probs,
self._log_joint_probs,
self._mutual_info)
#
# computing the MST (this way we are not overwriting mutual_info)
# this can be useful for testing but not for efficiency
# mst = minimum_spanning_tree(-self._mutual_info, copy=copy_mi)
mst = minimum_spanning_tree(-(self._mutual_info + 1))
dfs_tree = depth_first_order(mst, directed=False, i_start=0)
#
# representing the CLTree as a sequence of parents ids
self._tree = numpy.zeros(self._n_features, dtype=int)
# self._tree[0] = -1
# the root is its parent
self._tree[0] = 0
for feature in range(1, self._n_features):
self._tree[feature] = dfs_tree[1][feature]
#
        # computing the factored representation
self._factors = numpy.zeros((self._n_features,
self._n_feature_vals,
self._n_feature_vals))
self._factors = self.log_factors(self._log_marg_probs,
self._log_joint_probs,
self._factors)
#
        # removing references; keeping them (mem_free=False) is useful for tests
if mem_free:
self._mutual_info = None
self._joint_freqs = None
self._log_marg_probs = None
self._log_joint_probs = None
self._log_cond_probs = None
self._marg_freqs = None
self._data = None
def _build_from_factors(self,
features,
feature_vals,
tree,
factors):
"""
Building a node from (already learned) info
about the features, the tree representation and
the factors values
"""
#
# just storing information
        self.features = features
self._n_features = len(features)
self._feature_vals = feature_vals
self._tree = tree
self._factors = factors
#
#
# numba ops wrappers
def joint_freqs(self, joint_freqs, co_occs):
"""
WRITEME
"""
return compute_joint_bin_freqs(joint_freqs,
co_occs,
self._n_instances)
def log_marg_probs(self, freqs, log_probs):
"""
WRITEME
"""
probs = numpy.zeros(freqs.shape[0])
return compute_log_probs(freqs,
probs,
log_probs,
self._n_instances,
self._alpha)
def log_joint_probs(self, joint_freqs, log_joint_probs):
"""
WRITEME
"""
return compute_log_joint_bin_probs(joint_freqs,
log_joint_probs,
self._n_instances,
self._alpha)
def log_cond_probs(self,
log_marg_probs,
log_joint_probs,
log_cond_probs):
"""
WRITEME
"""
return compute_log_cond_probs(self._feature_vals,
log_marg_probs,
log_joint_probs,
log_cond_probs)
def log_factors(self,
log_probs,
log_joint_probs,
log_factors):
"""
WRITEME
"""
return compute_log_factors(self._tree,
self._feature_vals,
log_probs,
log_joint_probs,
log_factors)
def mutual_information(self,
log_marg_probs,
log_joint_probs,
mutual_info):
"""
WRITEME
"""
return compute_mutual_information(self._feature_vals,
log_marg_probs,
log_joint_probs,
mutual_info)
# def smooth(self, alpha):
# """
# Recomputing logs with different smooths
# This may be totally useless
# """
# #
# # saving alpha since wrapper are using the local value
# self._alpha = alpha
# #
# # recompute and smooth (is this too side-effect prone?)
# self._log_marg_probs = self.log_marg_probs(self._marg_freqs,
# self._log_marg_probs)
# self._log_joint_probs = self.log_joint_probs(self._joint_freqs,
# self._log_joint_probs)
# self._log_cond_probs = self.log_cond_probs(self._log_marg_probs,
# self._log_joint_probs,
# self._log_cond_probs)
def eval(self, instance):
"""
WRITEME
"""
return eval_instance(instance,
self.features,
self._tree,
self._log_marg_probs,
self._log_cond_probs)
def eval_fact(self, instance):
"""
WRITEME
"""
return eval_instance_fact(instance,
self.features,
self._tree,
self._factors)
def __repr__(self):
"""
WRITEME
"""
return ('CLTree on features {0}:\n\t{1}'.format(self.features,
self._tree))
def tree_repr(self):
"""
Internal representation of the tree
"""
return ("{0}".format(self._tree))
def factors_repr(self):
"""
String representation for internal factors
TODO: this assumes vars are binary
"""
return ("{0}".format(self._factors[0, :]))
| 25,021
| 31.923684
| 86
|
py
|
spyn-repr
|
spyn-repr-master/cltree/__init__.py
| 0
| 0
| 0
|
py
|
|
spyn-repr
|
spyn-repr-master/cltree/tests/test_probs.py
|
import numpy
from cltree.probs import numba_cumsum
from cltree.probs import scope_union
from cltree.probs import compute_factor_stride
from cltree.probs import n_factor_features
from cltree.probs import compute_factor_product
from cltree.probs import factor_product
from cltree.probs import factor_length
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
def test_numba_cumsum():
array = numpy.array([1, 1, 2, 6, 0, 1])
array_cum_sum = array.cumsum()
numba_cumsum(array)
assert_array_equal(array_cum_sum, array)
def test_scope_union():
n = 10
scope_1 = numpy.zeros(n, dtype=bool)
scope_1[:n // 2] = True
scope_2 = numpy.zeros(n, dtype=bool)
scope_2[n // 2:] = True
full_scope = numpy.ones(n, dtype=bool)
assert_array_equal(full_scope, scope_union(scope_1, scope_2))
print(scope_1, scope_2)
scope_1[- 1] = True
scope_2[0] = True
assert_array_equal(full_scope, scope_union(scope_1, scope_2))
def test_compute_factor_length():
n_features = 10
feature_vals = numpy.random.choice(numpy.arange(n_features), n_features)
#
# a factor of scope 0 shall have length zero
factor_scope_zero = numpy.zeros(n_features, dtype=bool)
factor_size = factor_length(factor_scope_zero, feature_vals)
# print(factor_size, feature_vals, factor_scope_zero)
assert factor_size == 0
#
# if all are assigned
factor_scope_all = numpy.ones(n_features, dtype=bool)
factor_size = factor_length(factor_scope_all, feature_vals)
# print(factor_size)
assert factor_size == feature_vals.prod()
#
# random allocation
def test_compute_factor_stride():
n_features = 6
feature_vals = numpy.array([2, 2, 2, 3, 2, 3])
factor_scope = numpy.array([1, 0, 1, 1, 0, 0], dtype=bool)
prod_scope = numpy.array([1, 1, 1, 1, 0, 1], dtype=bool)
print(prod_scope)
n_prod_features = n_factor_features(prod_scope)
factor_stride = numpy.zeros(n_features, dtype=int)
factor_stride = compute_factor_stride(feature_vals,
factor_scope,
prod_scope,
factor_stride)
print(factor_stride)
assert factor_stride.shape[0] == n_prod_features
prod_stride = numpy.array([1, 0, 2, 4, 0])
assert_array_equal(prod_stride, factor_stride)
def test_compute_factor_product():
factor_1 = numpy.log(numpy.array([0.5, 0.5, 0.2, 0.8]))
factor_2 = numpy.log(numpy.array([0.7, 0.3]))
prod_factor = numpy.zeros(4)
assignment = numpy.zeros(2)
feature_vals = numpy.array([2, 2])
factor_stride_1 = numpy.array([1, 2])
factor_stride_2 = numpy.array([1, 0])
prod_factor = compute_factor_product(factor_1,
factor_2,
prod_factor,
assignment,
feature_vals,
factor_stride_1,
factor_stride_2)
print(prod_factor)
prob_prod_factor = numpy.log(numpy.array([0.35, 0.15, 0.14, 0.24]))
assert_array_almost_equal(prob_prod_factor, prod_factor)
#
# now on different features
factor_1 = numpy.log(numpy.array([0.5, 0.5, 0.2, 0.8]))
factor_2 = numpy.log(numpy.array([0.3, 0.7, 0.2, 0.8]))
prod_factor = numpy.zeros(8)
assignment = numpy.zeros(3)
feature_vals = numpy.array([2, 2, 2])
factor_stride_1 = numpy.array([1, 2, 0])
factor_stride_2 = numpy.array([0, 1, 2])
prod_factor = compute_factor_product(factor_1,
factor_2,
prod_factor,
assignment,
feature_vals,
factor_stride_1,
factor_stride_2)
print(prod_factor)
prob_prod_factor = numpy.log(numpy.array([0.5 * 0.3, 0.5 * 0.3,
0.2 * 0.7, 0.8 * 0.7,
0.5 * 0.2, 0.5 * 0.2,
0.2 * 0.8, 0.8 * 0.8]))
print(prob_prod_factor)
assert_array_almost_equal(prob_prod_factor, prod_factor)
def test_factor_product():
factor_1 = numpy.log(numpy.array([0.5, 0.5, 0.2, 0.8], dtype='float32'))
factor_2 = numpy.log(numpy.array([0.3, 0.7, 0.2, 0.8], dtype='float32'))
factor_scope_1 = numpy.array([1, 1, 0, 0], dtype=bool)
factor_scope_2 = numpy.array([0, 1, 0, 1], dtype=bool)
feature_vals = numpy.array([2, 2, 2, 2])
prod_factor, prod_factor_scope = factor_product(factor_1,
factor_2,
factor_scope_1,
factor_scope_2,
feature_vals)
print(prod_factor)
print(prod_factor_scope)
prob_prod_factor = numpy.log(numpy.array([0.5 * 0.3, 0.5 * 0.3,
0.2 * 0.7, 0.8 * 0.7,
0.5 * 0.2, 0.5 * 0.2,
0.2 * 0.8, 0.8 * 0.8]))
assert_array_almost_equal(prob_prod_factor, prod_factor)
| 5,481
| 35.791946
| 76
|
py
|
spyn-repr
|
spyn-repr-master/cltree/tests/test_cltree.py
|
from spn import LOG_ZERO
import numpy
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
# from cltree.cltree import minimum_spanning_tree
# from cltree.cltree import minimum_spanning_tree_numba
from cltree.cltree import CLTree
from cltree.cltree import instantiate_factors
from cltree.cltree import tree_2_factor_matrix
from cltree.cltree import marginalize
try:
from time import perf_counter
except:
from time import time as perf_counter
data = numpy.array([[1, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 0],
[1, 0, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0]])
n_features = 4
n_instances = 6
copy_mi = True
def test_compute_probs_sparse():
#
# creating the tree upon synthetic data
clt = CLTree(data=data,
alpha=0.0,
sparse=True,
mem_free=False)
counts_1 = data.sum(axis=0)
counts_0 = n_instances - counts_1
counts = numpy.column_stack([counts_0, counts_1])
probs = counts / n_instances
print('Computed marg freqs and probs', counts, probs)
log_probs = numpy.log(probs)
log_probs[numpy.isinf(log_probs)] = LOG_ZERO
    print('Computed marg logs for alpha=0', log_probs)
    print('CLT marg logs for alpha=0', clt._log_marg_probs)
assert_almost_equal(clt._log_marg_probs, log_probs)
assert_almost_equal(clt._marg_freqs, counts_1)
#
# now with another value for alpha
alpha = 1.0
clt = CLTree(data=data,
alpha=alpha,
sparse=True,
mem_free=False)
probs = (counts + 2 * alpha) / (n_instances + 4 * alpha)
print('Computed probs with alpha=', alpha, probs)
log_probs = numpy.log(probs)
log_probs[numpy.isinf(log_probs)] = LOG_ZERO
    print('Computed marg logs for alpha=', alpha, log_probs)
    print('CLT marg logs for alpha=', alpha, clt._log_marg_probs)
assert_almost_equal(clt._log_marg_probs, log_probs)
assert_almost_equal(clt._marg_freqs, counts_1)
def test_compute_probs_dense():
#
# creating the tree upon synthetic data
clt = CLTree(data=data,
alpha=0.0,
sparse=False,
mem_free=False)
counts_1 = data.sum(axis=0)
counts_0 = n_instances - counts_1
counts = numpy.column_stack([counts_0, counts_1])
probs = counts / n_instances
print('Computed marg freqs and probs', counts, probs)
log_probs = numpy.log(probs)
log_probs[numpy.isinf(log_probs)] = LOG_ZERO
    print('Computed marg logs for alpha=0', log_probs)
    print('CLT marg logs for alpha=0', clt._log_marg_probs)
assert_almost_equal(clt._log_marg_probs, log_probs)
assert_almost_equal(clt._marg_freqs, counts_1)
#
# now with another value for alpha
alpha = 1.0
clt = CLTree(data=data,
alpha=alpha,
sparse=False,
mem_free=False)
probs = (counts + 2 * alpha) / (n_instances + 4 * alpha)
print('Computed probs with alpha=', alpha, probs)
log_probs = numpy.log(probs)
log_probs[numpy.isinf(log_probs)] = LOG_ZERO
    print('Computed marg logs for alpha=', alpha, log_probs)
    print('CLT marg logs for alpha=', alpha, clt._log_marg_probs)
assert_almost_equal(clt._log_marg_probs, log_probs)
assert_almost_equal(clt._marg_freqs, counts_1)
def test_compute_joint_probs():
#
# creating the tree upon synthetic data
clt = CLTree(data=data,
alpha=0.0,
sparse=True,
mem_free=False)
joint_counts = numpy.zeros((n_features,
n_features,
2,
2))
for i in range(n_features):
for j in range(n_features):
if i != j:
for instance in data:
joint_counts[i, j, instance[i], instance[j]] += 1
print('Computed co freqs')
print(joint_counts)
print(clt._joint_freqs)
assert_almost_equal(clt._joint_freqs, joint_counts)
#
# going to logs
joint_probs = joint_counts / n_instances
log_joint_probs = numpy.log(joint_probs)
log_joint_probs[numpy.isinf(log_joint_probs)] = LOG_ZERO
#
# to have a complete match the diagonal entries are left to zero
for i in range(n_features):
log_joint_probs[i, i] = 0
assert_almost_equal(log_joint_probs, clt._log_joint_probs)
#
# dense case
clt = CLTree(data=data,
alpha=0.0,
sparse=False,
mem_free=False)
assert_almost_equal(log_joint_probs, clt._log_joint_probs)
#
# changing alpha
alpha = 1.0
joint_probs = (joint_counts + alpha) / (n_instances + 4.0 * alpha)
log_joint_probs = numpy.log(joint_probs)
log_joint_probs[numpy.isinf(log_joint_probs)] = LOG_ZERO
#
# to have a complete match the diagonal entries are left to zero
for i in range(n_features):
log_joint_probs[i, i] = 0
clt = CLTree(data=data,
alpha=alpha,
sparse=True,
mem_free=False)
assert_almost_equal(log_joint_probs, clt._log_joint_probs)
#
# doing a dense version
clt = CLTree(data=data,
alpha=alpha,
sparse=False,
mem_free=False)
assert_almost_equal(log_joint_probs, clt._log_joint_probs)
#
# TODO: this test is deprecated since there is no point in computing cond
# probs anymore
def test_compute_cond_probs():
#
# creating the tree upon synthetic data
clt = CLTree(data=data,
alpha=0.0,
sparse=False,
mem_free=False)
# cond_probs = numpy.zeros((n_features,
# n_features,
# 2,
# 2))
# for i in range(n_features):
# for j in range(n_features):
# if i != j:
# for instance in data:
# cond_probs[i, j, instance[i], instance[j]] += 1
# #
# # now normalizing
# sums = cond_probs[i, j].sum(axis=0)
# print('sums', sums)
# cond_probs[i, j] /= sums
# cond_probs[numpy.isnan(cond_probs)] = 0
# print ('Computed cond probs', cond_probs)
# log_cond_probs = numpy.log(cond_probs)
# log_cond_probs[numpy.isinf(log_cond_probs)] = LOG_ZERO
# for i in range(n_features):
# log_cond_probs[i, i] = 0
# print(log_cond_probs)
# print('logs\n', clt._log_cond_probs)
# assert_almost_equal(log_cond_probs, clt._log_cond_probs)
joint_counts = numpy.zeros((n_features,
n_features,
2,
2))
for i in range(n_features):
for j in range(n_features):
if i != j:
for instance in data:
joint_counts[i, j, instance[i], instance[j]] += 1
print('Computed co freqs')
print(joint_counts)
print(clt._joint_freqs)
assert_almost_equal(clt._joint_freqs, joint_counts)
#
# checking sparseness
clt = CLTree(data=data,
alpha=0.0,
sparse=True,
mem_free=False)
assert_almost_equal(clt._joint_freqs, joint_counts)
#
# going to logs
joint_probs = joint_counts / n_instances
log_joint_probs = numpy.log(joint_probs)
log_joint_probs[numpy.isinf(log_joint_probs)] = LOG_ZERO
#
# to have a complete match the diagonal entries are left to zero
for i in range(n_features):
log_joint_probs[i, i] = 0
counts_1 = data.sum(axis=0)
counts_0 = n_instances - counts_1
counts = numpy.column_stack([counts_0, counts_1])
probs = counts / n_instances
print('Computed marg freqs and probs', counts, probs)
log_probs = numpy.log(probs)
log_probs[numpy.isinf(log_probs)] = LOG_ZERO
log_cond_probs = numpy.zeros((n_features,
n_features,
2,
2))
for i in range(n_features):
for j in range(n_features):
if i != j:
for k in range(2):
for h in range(2):
log_cond_probs[i, j, k, h] = \
log_joint_probs[i, j, k, h] - log_probs[j, h]
print('Computed log cond probs with alpha=0', log_cond_probs)
assert_array_almost_equal(log_cond_probs, clt._log_cond_probs)
#
# testing factors
print('\nTesting factors')
for i in range(n_features):
parent_id = clt._tree[i]
for k in range(2):
for h in range(2):
if i != parent_id:
print(clt._log_cond_probs[i, parent_id, k, h],
clt._factors[i, k, h])
assert_array_almost_equal(clt._log_cond_probs[i, parent_id, k, h],
clt._factors[i, k, h])
alpha = 2.0
clt = CLTree(data, alpha=alpha,
sparse=False,
mem_free=False)
#
# going to logs
joint_probs = (joint_counts + alpha) / (n_instances + 4 * alpha)
log_joint_probs = numpy.log(joint_probs)
log_joint_probs[numpy.isinf(log_joint_probs)] = LOG_ZERO
#
# to have a complete match the diagonal entries are left to zero
for i in range(n_features):
log_joint_probs[i, i] = 0
counts_1 = data.sum(axis=0)
counts_0 = n_instances - counts_1
counts = numpy.column_stack([counts_0, counts_1])
probs = (counts + 2 * alpha) / (n_instances + 4 * alpha)
print('Computed marg freqs and probs', counts, probs)
log_probs = numpy.log(probs)
log_probs[numpy.isinf(log_probs)] = LOG_ZERO
log_cond_probs = numpy.zeros((n_features,
n_features,
2,
2))
for i in range(n_features):
for j in range(n_features):
if i != j:
for k in range(2):
for h in range(2):
log_cond_probs[i, j, k, h] = \
log_joint_probs[i, j, k, h] - log_probs[j, h]
    print('Computed log cond probs with alpha=', alpha, log_cond_probs)
assert_array_almost_equal(log_cond_probs, clt._log_cond_probs)
#
# sparse version
clt = CLTree(data, alpha=alpha,
sparse=True,
mem_free=False)
assert_array_almost_equal(log_cond_probs, clt._log_cond_probs)
def test_compute_mi():
counts_1 = data.sum(axis=0)
counts_0 = n_instances - counts_1
counts = numpy.column_stack([counts_0, counts_1])
probs = counts / n_instances
print('Computed marg freqs and probs', counts, probs)
log_probs = numpy.log(probs)
log_probs[numpy.isinf(log_probs)] = LOG_ZERO
log_prods = numpy.zeros((n_features,
n_features,
2,
2))
for i in range(n_features):
for j in range(n_features):
if i != j:
for k in range(2):
for h in range(2):
log_prods[i, j, k, h] = \
log_probs[i, k] + log_probs[j, h]
joint_counts = numpy.zeros((n_features,
n_features,
2,
2))
for i in range(n_features):
for j in range(n_features):
if i != j:
for instance in data:
joint_counts[i, j, instance[i], instance[j]] += 1
print('Computed co freqs')
print(joint_counts)
#
# going to logs
joint_probs = joint_counts / n_instances
log_joint_probs = numpy.log(joint_probs)
log_joint_probs[numpy.isinf(log_joint_probs)] = LOG_ZERO
for i in range(n_features):
log_joint_probs[i, i] = 0
mutual_info = numpy.exp(log_joint_probs) * (log_joint_probs - log_prods)
mutual_info = mutual_info.sum(axis=2).sum(axis=2)
print('Computed MI:', mutual_info, type(mutual_info))
clt = CLTree(data, alpha=0.0, sparse=False, mem_free=False)
print('CLTree', clt._mutual_info, type(clt._mutual_info))
assert_almost_equal(mutual_info, clt._mutual_info)
#
# adding sparsity
clt = CLTree(data, alpha=0.0, sparse=True, mem_free=False)
assert_almost_equal(mutual_info, clt._mutual_info)
#
# now with alpha
alpha = 0.5
probs = (counts + 2 * alpha) / (n_instances + 4 * alpha)
log_probs = numpy.log(probs)
log_probs[numpy.isinf(log_probs)] = LOG_ZERO
log_prods = numpy.zeros((n_features,
n_features,
2,
2))
for i in range(n_features):
for j in range(n_features):
if i != j:
for k in range(2):
for h in range(2):
log_prods[i, j, k, h] = \
log_probs[i, k] + log_probs[j, h]
joint_probs = (joint_counts + alpha) / (n_instances + 4 * alpha)
log_joint_probs = numpy.log(joint_probs)
log_joint_probs[numpy.isinf(log_joint_probs)] = LOG_ZERO
for i in range(n_features):
log_joint_probs[i, i] = 0
mutual_info = numpy.exp(log_joint_probs) * (log_joint_probs - log_prods)
mutual_info = mutual_info.sum(axis=2).sum(axis=2)
clt = CLTree(data, alpha=alpha, sparse=False, mem_free=False)
assert_almost_equal(mutual_info, clt._mutual_info)
clt = CLTree(data, alpha=alpha, sparse=True, mem_free=False)
assert_almost_equal(mutual_info, clt._mutual_info)
#
# this test is completely useless now, commenting it
# def test_mst():
# #
# # creating as matrix the mi matrix from the cltree
# clt = CLTree(data, alpha=0.0, copy_mi=copy_mi)
# mst = minimum_spanning_tree(clt._mutual_info.copy())
# print(mst)
# visited_vertices = numpy.zeros(n_features, dtype=bool)
# cltmi = clt._mutual_info.copy()
# spanning_edges = numpy.zeros((n_features - 1, 2), dtype=int)
# diag_indices = numpy.arange(n_features)
# mst_1 = minimum_spanning_tree_numba(cltmi,
# visited_vertices,
# spanning_edges,
# diag_indices)
# print(mst_1)
# #
# # random big value
# n_m_features = 1000
# random_W = numpy.random.random((n_m_features, n_m_features))
# visited_vertices = numpy.zeros(n_m_features, dtype=bool)
# spanning_edges = numpy.zeros((n_m_features - 1, 2), dtype=int)
# diag_indices = numpy.arange(n_m_features)
# print(random_W)
# mst_start_t = perf_counter()
# mst_1 = minimum_spanning_tree(random_W.copy())
# mst_end_t = perf_counter()
# print('Classical done in', mst_end_t - mst_start_t, 'secs')
# mst_start_t = perf_counter()
# mst_2 = minimum_spanning_tree_numba(random_W.copy(),
# visited_vertices,
# spanning_edges,
# diag_indices)
# mst_end_t = perf_counter()
# spanning_edges = numpy.zeros((n_m_features - 1, 2), dtype=int)
# visited_vertices = numpy.zeros(n_m_features, dtype=bool)
# mst_start_t = perf_counter()
# mst_2 = minimum_spanning_tree_numba(random_W.copy(),
# visited_vertices,
# spanning_edges,
# diag_indices)
# mst_end_t = perf_counter()
# print('Numba done in', mst_end_t - mst_start_t, 'secs')
# assert_array_equal(mst_1, mst_2)
def test_eval_instance():
#
# comparing against values taken from Nico's code
nico_cltree_tree = numpy.array([-1, 2, 0, 2])
nico_cltree_tree[0] = 0
nico_cltree_lls = numpy.array([-2.01490302054,
-1.20397280433,
-1.20397280433,
-1.79175946923,
-1.60943791243,
-1.60943791243])
nico_cltree_subtree = numpy.array([-1, 0, 1])
nico_cltree_subtree[0] = 0
nico_cltree_sublls = numpy.array([-1.09861228867,
-0.69314718056,
-0.69314718056,
-1.79175946923,
-1.09861228867,
-0.69314718056])
#
# growing the tree on data
clt = CLTree(data, alpha=0.0, sparse=True, mem_free=False)
print(clt)
# assert_array_equal(nico_cltree_tree,
# clt._tree)
for i, instance in enumerate(data):
ll = clt.eval(instance)
ll_f = clt.eval_fact(instance)
# assert_almost_equal(nico_cltree_lls[i], ll)
assert_almost_equal(ll, ll_f)
print(ll, nico_cltree_lls[i])
#
# now by obscuring one column
subdata = data[:, [0, 2, 3]]
subclt = CLTree(subdata,
features=numpy.array([0, 2, 3]),
alpha=0.0, sparse=True, mem_free=False)
print(subclt)
# assert_array_equal(nico_cltree_subtree,
# subclt._tree)
for i, instance in enumerate(data):
ll = subclt.eval(instance)
ll_f = subclt.eval_fact(instance)
assert_almost_equal(ll, ll_f)
print(ll, nico_cltree_sublls[i])
from spn import MARG_IND
def test_instantiate_factors():
# 0 -> 1
# 1 -> 2
# 1 -> 3
# 3 -> 4
tree = numpy.array([-1, 0, 1, 1, 3])
#
# all binary features
feature_vals = [2, 2, 2, 2, 2]
#
# B = 0, D = 1 : 1 = 0, 3 = 1
evidence = [MARG_IND, 0, MARG_IND, 1, MARG_IND]
#
# an array of arrays
factors = [numpy.array([0.1, 0.9]),
numpy.array([[0.2, 0.45], [0.8, 0.55]]),
numpy.array([[0.5, 0.9], [0.5, 0.1]]),
numpy.array([[0.2, 0.3], [0.8, 0.7]]),
numpy.array([[0.7, 0.6], [0.3, 0.4]])]
ev_factors = [factor for factor in factors]
ev_factors = instantiate_factors(tree,
feature_vals,
evidence,
factors,
ev_factors)
print(ev_factors)
def test_marginalize():
n_features = 5
    # 0 -> 1
    # 1 -> 2
    # 1 -> 3
    # 3 -> 4
tree = numpy.array([-1, 0, 1, 1, 3])
#
# all binary features
feature_vals = [2, 2, 2, 2, 2]
#
# B = 0, D = 1 : 1 = 0, 3 = 1
evidence = numpy.array([MARG_IND, 0, MARG_IND, 1, MARG_IND])
factors = [numpy.array([0.1, 0.9]),
numpy.array([[0.2, 0.45], [0.8, 0.55]]),
numpy.array([[0.5, 0.9], [0.5, 0.1]]),
numpy.array([[0.2, 0.3], [0.8, 0.7]]),
numpy.array([[0.7, 0.6], [0.3, 0.4]])]
print(factors)
#
# an array of arrays
factors = [numpy.log(numpy.array([0.1, 0.9])),
numpy.log(numpy.array([[0.2, 0.45], [0.8, 0.55]])),
numpy.log(numpy.array([[0.5, 0.9], [0.5, 0.1]])),
numpy.log(numpy.array([[0.2, 0.3], [0.8, 0.7]])),
numpy.log(numpy.array([[0.7, 0.6], [0.3, 0.4]]))]
factor_matrix = numpy.zeros((n_features, n_features), dtype=bool)
ev_factors = [factor for factor in factors]
ev_factors = instantiate_factors(tree,
feature_vals,
evidence,
factors,
ev_factors)
print(ev_factors)
features = numpy.arange(n_features)
factor_matrix = tree_2_factor_matrix(tree,
factor_matrix)
print(factor_matrix)
print(evidence)
prob = marginalize(features,
feature_vals,
ev_factors,
factor_matrix,
tree,
evidence)
print(prob)
| 20,553
| 31.470774
| 86
|
py
|
spyn-repr
|
spyn-repr-master/algo/learnspn.py
|
import numpy
import numba
from scipy.special import logsumexp
import sys
import itertools
try:
from time import perf_counter
except:
from time import time
perf_counter = time
from spn import MARG_IND
from spn import LOG_ZERO
from spn import RND_SEED
from spn.linked.nodes import CategoricalSmoothedNode
from spn.linked.nodes import SumNode
from spn.linked.nodes import ProductNode
from spn.linked.nodes import CLTreeNode
from spn.factory import SpnFactory
from collections import deque
import math
import logging
import sklearn.mixture
from algo.dataslice import DataSlice
import dataset
# import tests
NEG_INF = -sys.float_info.max
@numba.jit
def g_test(feature_id_1,
feature_id_2,
instance_ids,
data,
feature_vals,
g_factor):
"""
Applying a G-test on the two features (represented by ids) on the data
"""
# print(feature_id_1, feature_id_2, instance_ids)
#
    # swap to preserve the feature order
    if feature_id_1 > feature_id_2:
        #
        # numba cannot handle the tuple swap below, doing it manually
        # feature_id_1, feature_id_2 = feature_id_2, feature_id_1
tmp = feature_id_1
feature_id_1 = feature_id_2
feature_id_2 = tmp
# print(feature_id_1, feature_id_2, instance_ids)
n_instances = len(instance_ids)
feature_size_1 = feature_vals[feature_id_1]
feature_size_2 = feature_vals[feature_id_2]
#
# support vectors for counting the occurrences
feature_tot_1 = numpy.zeros(feature_size_1, dtype=numpy.uint32)
feature_tot_2 = numpy.zeros(feature_size_2, dtype=numpy.uint32)
co_occ_matrix = numpy.zeros((feature_size_1, feature_size_2),
dtype=numpy.uint32)
#
# counting for the current instances
for i in instance_ids:
co_occ_matrix[data[i, feature_id_1], data[i, feature_id_2]] += 1
# print('Co occurrences', co_occ_matrix)
#
# getting the sum for each feature
for i in range(feature_size_1):
for j in range(feature_size_2):
count = co_occ_matrix[i, j]
feature_tot_1[i] += count
feature_tot_2[j] += count
# print('Feature tots', feature_tot_1, feature_tot_2)
#
    # counting the nonzero feature totals to compute the degrees of
    # freedom
feature_nonzero_1 = numpy.count_nonzero(feature_tot_1)
feature_nonzero_2 = numpy.count_nonzero(feature_tot_2)
dof = (feature_nonzero_1 - 1) * (feature_nonzero_2 - 1)
g_val = 0.0
for i, tot_1 in enumerate(feature_tot_1):
for j, tot_2 in enumerate(feature_tot_2):
count = co_occ_matrix[i, j]
if count != 0:
exp_count = tot_1 * tot_2 / n_instances
g_val += count * math.log(count / exp_count)
g_val *= 2
# testing against p value
dep_val = 2 * dof * g_factor + 0.001
# logging.info('\t[G: %f dep-val: %f]', g_val, dep_val)
# print("(", feature_id_1, feature_id_2, ") G:", g_val, "dep_val:", dep_val)
return g_val < dep_val
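#
# The statistic computed above is the classical G-test of independence,
#     G = 2 * sum_{i, j} O_ij * ln(O_ij / E_ij),  E_ij = tot_i * tot_j / N,
# tested against the threshold 2 * dof * g_factor (plus a small constant):
# larger g-factors make it easier to declare two features independent,
# and hence to split a slice by columns.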
@numba.jit
def greedy_feature_split(data,
data_slice,
feature_vals,
g_factor,
rand_gen):
"""
WRITEME
"""
n_features = data_slice.n_features()
feature_ids_mask = numpy.ones(n_features, dtype=bool)
#
# extracting one feature at random
rand_feature_id = rand_gen.randint(0, n_features)
feature_ids_mask[rand_feature_id] = False
dependent_features = numpy.zeros(n_features, dtype=bool)
dependent_features[rand_feature_id] = True
# greedy bfs searching
features_to_process = deque()
features_to_process.append(rand_feature_id)
while features_to_process:
# get one
current_feature_id = features_to_process.popleft()
feature_id_1 = data_slice.feature_ids[current_feature_id]
# print('curr FT', current_feature_id)
# features to remove later
features_to_remove = numpy.zeros(n_features, dtype=bool)
for other_feature_id in feature_ids_mask.nonzero()[0]:
#
# print('considering other features', other_feature_id)
feature_id_2 = data_slice.feature_ids[other_feature_id]
#
# apply a G-test
if not g_test(feature_id_1,
feature_id_2,
data_slice.instance_ids,
data,
feature_vals,
g_factor):
#
# print('found dependency!', (feature_id_1, feature_id_2))
#
# updating 'sets'
features_to_remove[other_feature_id] = True
dependent_features[other_feature_id] = True
features_to_process.append(other_feature_id)
# now removing from future considerations
feature_ids_mask[features_to_remove] = False
# translating remaining features
first_component = data_slice.feature_ids[dependent_features]
second_component = data_slice.feature_ids[~ dependent_features]
return first_component, second_component
def retrieve_clustering(assignment, indexes=None):
"""
from [2, 3, 8, 3, 1]
to [{0}, {1, 3}, {2}, {3}]
or
from [2, 3, 8, 3, 1] and [21, 1, 4, 18, 11]
to [{21}, {1, 18}, {4}, {11}]
"""
clustering = []
seen_clusters = dict()
if indexes is None:
indexes = [i for i in range(len(assignment))]
for index, label in zip(indexes, assignment):
if label not in seen_clusters:
seen_clusters[label] = len(clustering)
clustering.append([])
clustering[seen_clusters[label]].append(index)
if len(clustering) < 2:
print('\n\n\n\n\n\nLess than 2 clusters\n\n\n\n\n\n\n')
assert len(clustering) > 1
return clustering
def cluster_rows(data,
data_slice,
n_clusters=2,
cluster_method='GMM',
n_iters=100,
n_restarts=3,
cluster_penalty=1.0,
rand_gen=None,
sklearn_args=None):
"""
A wrapper to abstract from the implemented clustering method
cluster_method = GMM | DPGMM | HOEM
"""
clustering = None
#
# slicing the data
sliced_data = data[data_slice.instance_ids, :][:, data_slice.feature_ids]
if cluster_method == 'GMM':
#
# retrieving other properties
cov_type = sklearn_args['covariance_type'] \
if 'covariance_type' in sklearn_args else 'diag'
#
# creating the cluster from sklearn
gmm_c = sklearn.mixture.GMM(n_components=n_clusters,
covariance_type=cov_type,
random_state=rand_gen,
n_iter=n_iters,
n_init=n_restarts)
#
# fitting to training set
fit_start_t = perf_counter()
gmm_c.fit(sliced_data)
fit_end_t = perf_counter()
#
# getting the cluster assignment
pred_start_t = perf_counter()
clustering = gmm_c.predict(sliced_data)
pred_end_t = perf_counter()
elif cluster_method == 'DPGMM':
#
# retrieving other properties
cov_type = sklearn_args['covariance_type'] \
if 'covariance_type' in sklearn_args else 'diag'
verbose = sklearn_args['verbose']\
if 'verbose' in sklearn_args else False
dpgmm_c = sklearn.mixture.DPGMM(n_components=n_clusters,
covariance_type=cov_type,
random_state=rand_gen,
n_iter=n_iters,
alpha=cluster_penalty,
verbose=verbose)
#
# fitting to training set
fit_start_t = perf_counter()
dpgmm_c.fit(sliced_data)
fit_end_t = perf_counter()
#
# getting the cluster assignment
pred_start_t = perf_counter()
clustering = dpgmm_c.predict(sliced_data)
pred_end_t = perf_counter()
elif cluster_method == 'HOEM':
raise NotImplementedError('Hard Online EM is not implemented yet')
else:
raise Exception('Clustering method not valid')
logging.info('Clustering done in %f secs', (fit_end_t - fit_start_t))
#
# translating the cluster assignment to
# a list of clusters (set of instances)
return retrieve_clustering(clustering, data_slice.instance_ids)
def cache_data_slice(data_slice, cache):
"""
WRITEME
"""
#
# getting ids
instance_ids = data_slice.instance_ids
feature_ids = data_slice.feature_ids
#
# ordering
instance_ids.sort()
feature_ids.sort()
#
    # making immutable
instances_tuple = tuple(instance_ids)
features_tuple = tuple(feature_ids)
hashed_slice = (instances_tuple, features_tuple)
#
#
cached_slice = None
try:
cached_slice = cache[hashed_slice]
    except KeyError:
cache[hashed_slice] = data_slice
return cached_slice
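#
# Illustrative usage (not in the original code): the first call for a
# given (instance_ids, feature_ids) pair stores the slice and returns
# None; later calls with an equivalent slice return the cached object:
#     if cache_data_slice(data_slice, cache) is None:
#         ...  # first time this slice is seen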
def estimate_kernel_density_spn(data_slice,
feature_sizes,
data,
alpha,
node_id_assoc,
building_stack,
slices_to_process):
"""
A mixture with one component for each instance
"""
instance_ids = data_slice.instance_ids
feature_ids = data_slice.feature_ids
current_id = data_slice.id
n_instances = len(instance_ids)
n_features = len(feature_ids)
logging.info('Adding a kernel density estimation ' +
'over a slice {0} X {1}'.format(n_instances,
n_features))
#
# create sum node
root_sum_node = SumNode(var_scope=frozenset(feature_ids))
data_slice.type = SumNode
building_stack.append(data_slice)
root_sum_node.id = current_id
node_id_assoc[current_id] = root_sum_node
#
# for each instance
for i in instance_ids:
#
# create a slice
instance_slice = DataSlice(numpy.array([i]), feature_ids)
slices_to_process.append(instance_slice)
#
# linking with appropriate weight
data_slice.add_child(instance_slice, 1.0 / n_instances)
return root_sum_node, node_id_assoc, building_stack, slices_to_process
from collections import Counter
SCOPES_DICT = Counter()
class LearnSPN(object):
"""
Implementing Gens' and Domingos' LearnSPN
Plus variants on SPN-B/T/B
"""
def __init__(self,
g_factor=1.0,
min_instances_slice=100,
min_features_slice=0,
alpha=0.1,
row_cluster_method='GMM',
cluster_penalty=2.0,
n_cluster_splits=2,
n_iters=100,
n_restarts=3,
sklearn_args={},
cltree_leaves=False,
kde_leaves=False,
rand_gen=None):
"""
WRITEME
"""
self._g_factor = g_factor
self._min_instances_slice = min_instances_slice
self._min_features_slice = min_features_slice
self._alpha = alpha
self._row_cluster_method = row_cluster_method
self._cluster_penalty = cluster_penalty
self._n_cluster_splits = n_cluster_splits
self._n_iters = n_iters
self._n_restarts = n_restarts
self._sklearn_args = sklearn_args
self._cltree_leaves = cltree_leaves
self._kde = kde_leaves
self._rand_gen = rand_gen if rand_gen is not None \
else numpy.random.RandomState(RND_SEED)
logging.info('LearnSPN:\n\tg factor:%f\n\tmin inst:%d\n' +
'\tmin feat:%d\n' +
'\talpha:%f\n\tcluster pen:%f\n\tn clusters:%d\n' +
'\tcluster method=%s\n\tn iters: %d\n' +
'\tn restarts: %d\n\tcltree leaves:%s\n' +
'\tsklearn args: %s\n',
self._g_factor,
self._min_instances_slice,
self._min_features_slice,
self._alpha,
self._cluster_penalty,
self._n_cluster_splits,
self._row_cluster_method,
self._n_iters,
self._n_restarts,
self._cltree_leaves,
self._sklearn_args)
def fit_mixture_bootstrap(self,
train,
n_mix_components,
bootstrap_samples_ids=None,
valid=None,
test=None,
feature_sizes=None,
perc=1.0,
replace=True,
evaluate=True):
"""
WRITEME
"""
n_train_instances = train.shape[0]
n_features = train.shape[1]
#
# if not present, assuming all binary features
if feature_sizes is None:
feature_sizes = [2 for i in range(n_features)]
train_mixture_lls = None
valid_mixture_lls = None
test_mixture_lls = None
if evaluate:
train_mixture_lls = numpy.zeros((n_train_instances,
n_mix_components))
if valid is not None:
n_valid_instances = valid.shape[0]
valid_mixture_lls = numpy.zeros((n_valid_instances,
n_mix_components))
if test is not None:
n_test_instances = test.shape[0]
test_mixture_lls = numpy.zeros((n_test_instances,
n_mix_components))
mixture = []
mix_start_t = perf_counter()
#
# generating the mixtures
for m in range(n_mix_components):
#
# slicing the training set via bootstrap samples ids
# (if present, otherwise sampling)
train_mix = None
if bootstrap_samples_ids is not None:
train_mix = train[bootstrap_samples_ids[m, :], :]
logging.debug('Bootstrap sample ids: %s',
bootstrap_samples_ids[m, :])
#
# TODO: this branch shall be deprecated and pruned
# the bootstrap sample ids shall be generated always from the
# calling function and not here
else:
raise RuntimeError('This is deprecated')
# train_mix = dataset.sample_instances(train,
# perc = perc,
# replace = replace,
# rndState = self._rand_gen)
logging.info('Sampled dataset (%d X %d) for mixture: %d',
train_mix.shape[0], train_mix.shape[1],
m)
#
# learning an spn for it
learn_start_t = perf_counter()
spn_mix = self.fit_structure(train_mix, feature_sizes)
learn_end_t = perf_counter()
logging.info('> SPN learned in %f secs',
learn_end_t - learn_start_t)
if evaluate:
#
# now doing inference
train_ll = 0.0
for i, train_instance in enumerate(train):
(pred_ll, ) = spn_mix.single_eval(train_instance)
train_mixture_lls[i, m] = pred_ll
train_ll += pred_ll
logging.info('\ttrain avg ll: %f',
train_ll / train.shape[0])
if valid is not None:
valid_ll = 0.0
for i, valid_instance in enumerate(valid):
(pred_ll, ) = spn_mix.single_eval(valid_instance)
valid_mixture_lls[i, m] = pred_ll
valid_ll += pred_ll
logging.info('\tvalid avg ll: %f',
valid_ll / valid.shape[0])
if test is not None:
test_ll = 0.0
for i, test_instance in enumerate(test):
(pred_ll, ) = spn_mix.single_eval(test_instance)
test_mixture_lls[i, m] = pred_ll
test_ll += pred_ll
logging.info('\ttest avg ll: %f',
test_ll / test.shape[0])
else:
#
# adding to the mixture
mixture.append(spn_mix)
mix_end_t = perf_counter()
logging.info('-- mixtures computed in %f', mix_end_t - mix_start_t)
#
# with evaluate we return just the computed values
if evaluate:
return (train_mixture_lls,
valid_mixture_lls,
test_mixture_lls)
else:
return mixture
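    #
    # NOTE (illustrative, not used by this class): the
    # (n_instances X n_components) ll matrices returned above combine
    # into per-instance mixture log-likelihoods, under uniform weights,
    # via a stable log-sum-exp:
    #
    #   max_lls = lls.max(axis=1, keepdims=True)
    #   mix_lls = (max_lls[:, 0] +
    #              numpy.log(numpy.exp(lls - max_lls).sum(axis=1)) -
    #              numpy.log(lls.shape[1]))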
def fit_structure(self,
data,
feature_sizes):
"""
data is a numpy array of size {n_instances X n_features}
feature_sizes is an array of integers representing feature ranges
"""
#
# resetting the data slice ids (just in case)
DataSlice.reset_id_counter()
tot_n_instances = data.shape[0]
tot_n_features = data.shape[1]
logging.info('Learning SPN structure on a (%d X %d) dataset',
tot_n_instances, tot_n_features)
learn_start_t = perf_counter()
#
# a queue containing the data slices to process
slices_to_process = deque()
# a stack for building nodes
building_stack = deque()
# a dict to keep track of id->nodes
node_id_assoc = {}
# creating the first slice
whole_slice = DataSlice.whole_slice(tot_n_instances,
tot_n_features)
slices_to_process.append(whole_slice)
first_run = True
#
# iteratively process & split slices
#
while slices_to_process:
# process a slice
current_slice = slices_to_process.popleft()
# pointers to the current data slice
current_instances = current_slice.instance_ids
current_features = current_slice.feature_ids
current_id = current_slice.id
n_instances = len(current_instances)
n_features = len(current_features)
logging.info('\n*** Processing slice %d (%d X %d)',
current_id,
n_instances, n_features)
logging.debug('\tinstances:%s\n\tfeatures:%s',
current_instances,
current_features)
#
# is this a leaf node or we can split?
if n_features == 1:
logging.info('---> Adding a leaf (just one feature)')
(feature_id, ) = current_features
feature_size = feature_sizes[feature_id]
# slicing from the original dataset
slice_data_rows = data[current_instances, :]
current_slice_data = slice_data_rows[:, current_features]
# create the node
leaf_node = CategoricalSmoothedNode(var=feature_id,
var_values=feature_size,
data=current_slice_data,
instances=current_instances,
alpha=self._alpha)
# storing links
# input_nodes.append(leaf_node)
leaf_node.id = current_id
node_id_assoc[current_id] = leaf_node
logging.debug('\tCreated Smooth Node %s', leaf_node)
elif (n_instances <= self._min_instances_slice and n_features > 1):
#
# splitting the slice on each feature
logging.info('---> Few instances (%d), decompose all features',
n_instances)
#
# shall put a cltree or
if self._cltree_leaves:
logging.info('into a Chow-Liu tree')
#
# slicing data
slice_data_rows = data[current_instances, :]
current_slice_data = slice_data_rows[:, current_features]
current_feature_sizes = [feature_sizes[i]
for i in current_features]
#
# creating a Chow-Liu tree as leaf
leaf_node = CLTreeNode(vars=current_features,
var_values=current_feature_sizes,
data=current_slice_data,
alpha=self._alpha)
#
# storing links
leaf_node.id = current_id
node_id_assoc[current_id] = leaf_node
logging.debug('\tCreated Chow-Liu Tree Node %s', leaf_node)
elif self._kde and n_instances > 1:
estimate_kernel_density_spn(current_slice,
feature_sizes,
data,
self._alpha,
node_id_assoc,
building_stack,
slices_to_process)
# elif n_instances == 1: # FIXME: there is a bug here
else:
logging.info('into a naive factorization')
#
# putting them in queue
child_slices = [DataSlice(current_instances, [feature_id])
for feature_id in current_features]
slices_to_process.extend(child_slices)
children_ids = [child.id for child in child_slices]
#
# storing the children links
for child_slice in child_slices:
current_slice.add_child(child_slice)
current_slice.type = ProductNode
building_stack.append(current_slice)
#
# creating the product node
prod_node = ProductNode(
var_scope=frozenset(current_features))
prod_node.id = current_id
node_id_assoc[current_id] = prod_node
logging.debug('\tCreated Prod Node %s (with children %s)',
prod_node,
children_ids)
else:
#
# slicing from the original dataset
slice_data_rows = data[current_instances, :]
current_slice_data = slice_data_rows[:, current_features]
split_on_features = False
#
# first run is a split on rows
if first_run:
logging.info('-- FIRST RUN --')
first_run = False
else:
#
# try clustering on cols
# logging.debug('...trying to split on columns')
split_start_t = perf_counter()
dependent_features, other_features = greedy_feature_split(data,
current_slice,
feature_sizes,
self._g_factor,
self._rand_gen)
split_end_t = perf_counter()
logging.info('...tried to split on columns in {}'.format(split_end_t -
split_start_t))
if len(other_features) > 0:
split_on_features = True
#
# have dependent components been found?
if split_on_features:
#
# splitting on columns
logging.info('---> Splitting on features')
#
# creating two new data slices and putting them on queue
first_slice = DataSlice(current_instances,
dependent_features)
second_slice = DataSlice(current_instances,
other_features)
slices_to_process.append(first_slice)
slices_to_process.append(second_slice)
children_ids = [first_slice.id, second_slice.id]
#
# storing link parent children
current_slice.type = ProductNode
building_stack.append(current_slice)
current_slice.add_child(first_slice)
current_slice.add_child(second_slice)
#
# creating product node
prod_node = ProductNode(var_scope=frozenset(current_features))
prod_node.id = current_id
node_id_assoc[current_id] = prod_node
logging.debug('\tCreated Prod Node %s (with children %s)',
prod_node,
children_ids)
else:
#
# clustering on rows
logging.info('---> Splitting on rows')
#
# at most n_rows clusters, for sklearn
k_row_clusters = min(self._n_cluster_splits,
n_instances - 1)
clustering = cluster_rows(data,
current_slice,
n_clusters=k_row_clusters,
cluster_method=self._row_cluster_method,
n_iters=self._n_iters,
n_restarts=self._n_restarts,
cluster_penalty=self._cluster_penalty,
rand_gen=self._rand_gen,
sklearn_args=self._sklearn_args)
# logging.debug('obtained clustering %s', clustering)
logging.info('clustered into %d parts (min %d)',
len(clustering), k_row_clusters)
# splitting
cluster_slices = [DataSlice(cluster, current_features)
for cluster in clustering]
cluster_slices_ids = [slice.id
for slice in cluster_slices]
cluster_weights = [slice.n_instances() / n_instances
for slice in cluster_slices]
#
# appending for processing
slices_to_process.extend(cluster_slices)
#
# storing links
# current_slice.children = cluster_slices_ids
# current_slice.weights = cluster_weights
current_slice.type = SumNode
building_stack.append(current_slice)
for child_slice, child_weight in zip(cluster_slices,
cluster_weights):
current_slice.add_child(child_slice, child_weight)
#
# building a sum node
SCOPES_DICT[frozenset(current_features)] += 1
sum_node = SumNode(var_scope=frozenset(current_features))
sum_node.id = current_id
node_id_assoc[current_id] = sum_node
logging.debug('\tCreated Sum Node %s (with children %s)',
sum_node,
cluster_slices_ids)
learn_end_t = perf_counter()
logging.info('Structure learned in %f secs',
(learn_end_t - learn_start_t))
#
# linking the spn graph (parent -> children)
#
logging.info('===> Building tree')
link_start_t = perf_counter()
root_build_node = building_stack[0]
root_node = node_id_assoc[root_build_node.id]
logging.debug('root node: %s', root_node)
root_node = SpnFactory.pruned_spn_from_slices(node_id_assoc,
building_stack)
link_end_t = perf_counter()
logging.info('\tLinked the spn in %f secs (root_node %s)',
(link_end_t - link_start_t),
root_node)
#
# building layers
#
logging.info('===> Layering spn')
layer_start_t = perf_counter()
spn = SpnFactory.layered_linked_spn(root_node)
layer_end_t = perf_counter()
logging.info('\tLayered the spn in %f secs',
(layer_end_t - layer_start_t))
logging.info('\nLearned SPN\n\n%s', spn.stats())
logging.info('%s', SCOPES_DICT.most_common(30))
return spn
def fit_structure_bagging(self,
data,
feature_sizes,
n_components,
initial_bagging_only=True,
perc=1.0,
replace=True):
"""
data is a numpy array
"""
#
# resetting the data slice ids (just in case)
DataSlice.reset_id_counter()
        bagging = n_components > 1
if not bagging:
initial_bagging_only = False
tot_n_instances = data.shape[0]
tot_n_features = data.shape[1]
inst_compo_ratio = tot_n_instances / n_components
logging.info('Learning SPN structure on a (%d X %d) dataset',
tot_n_instances, tot_n_features)
learn_start_t = perf_counter()
#
# a queue containing the data slices to process
slices_to_process = deque()
# a stack for building nodes
building_stack = deque()
# a dict to keep track of id->nodes
node_id_assoc = {}
# creating the first slice
whole_slice = DataSlice.whole_slice(tot_n_instances,
tot_n_features)
slices_to_process.append(whole_slice)
whole_slice.bagging = False
first_run = True
#
# caching
slice_cache = {}
n_cached_objects = 0
#
# iteratively process & split slices
#
while slices_to_process:
# process a slice
current_slice = slices_to_process.popleft()
cached = cache_data_slice(current_slice, slice_cache)
if cached is not None:
n_cached_objects += 1
# pointers to the current data slice
current_instances = current_slice.instance_ids
current_features = current_slice.feature_ids
current_id = current_slice.id
n_instances = len(current_instances)
n_features = len(current_features)
logging.info('*** Processing slice %d (%d X %d)\n\t',
current_id,
n_instances, n_features)
logging.debug('instances:%s\n\tfeatures:%s',
current_instances,
current_features)
#
# is this a leaf node or we can split?
if n_features == 1:
logging.info('---> Adding a leaf (just one feature)')
(feature_id, ) = current_features
feature_size = feature_sizes[feature_id]
# slicing from the original dataset
slice_data_rows = data[current_instances, :]
current_slice_data = slice_data_rows[:, current_features]
# create the node
leaf_node = CategoricalSmoothedNode(var=feature_id,
var_values=feature_size,
data=current_slice_data,
instances=current_instances,
alpha=self._alpha)
# storing links
# input_nodes.append(leaf_node)
leaf_node.id = current_id
node_id_assoc[current_id] = leaf_node
logging.debug('\tCreated Smooth Node %s', leaf_node)
elif (n_instances <= self._min_instances_slice and n_features > 1):
#
# splitting the slice on each feature
logging.info('---> Few instances (%d), decompose all features',
n_instances)
#
# shall put a cltree or
if self._cltree_leaves:
logging.info('into a Chow-Liu tree')
#
# slicing data
slice_data_rows = data[current_instances, :]
current_slice_data = slice_data_rows[:, current_features]
current_feature_sizes = [feature_sizes[i]
for i in current_features]
#
# creating a Chow-Liu tree as leaf
leaf_node = CLTreeNode(vars=current_features,
var_values=current_feature_sizes,
data=current_slice_data,
alpha=self._alpha)
#
# storing links
leaf_node.id = current_id
node_id_assoc[current_id] = leaf_node
logging.debug('\tCreated Chow-Liu Tree Node %s', leaf_node)
else:
logging.info('into a naive factorization')
#
# putting them in queue
child_slices = [DataSlice(current_instances, [feature_id])
for feature_id in current_features]
slices_to_process.extend(child_slices)
children_ids = [child.id for child in child_slices]
#
# storing the children links
for child_slice in child_slices:
current_slice.add_child(child_slice)
current_slice.type = ProductNode
building_stack.append(current_slice)
#
# creating the product node
prod_node = ProductNode(
var_scope=frozenset(current_features))
prod_node.id = current_id
node_id_assoc[current_id] = prod_node
logging.debug('\tCreated Prod Node %s (with children %s)',
prod_node,
children_ids)
else:
#
# slicing from the original dataset
slice_data_rows = data[current_instances, :]
current_slice_data = slice_data_rows[:, current_features]
split_on_features = False
#
# first run is a split on rows
if first_run:
logging.info('-- FIRST RUN --')
# first_run = False
else:
#
# try clustering on cols
logging.debug('...trying to split on columns')
dependent_features, other_features = greedy_feature_split(data,
current_slice,
feature_sizes,
self._g_factor,
self._rand_gen)
if len(other_features) > 0:
split_on_features = True
#
# have dependent components been found?
if split_on_features:
#
# splitting on columns
logging.info('---> Splitting on features')
#
# creating two new data slices and putting them on queue
first_slice = DataSlice(current_instances,
dependent_features)
second_slice = DataSlice(current_instances,
other_features)
slices_to_process.append(first_slice)
slices_to_process.append(second_slice)
children_ids = [first_slice.id, second_slice.id]
# first_slice.bagging = current_slice.bagging
# second_slice.bagging = current_slice.bagging
#
# storing link parent children
current_slice.type = ProductNode
building_stack.append(current_slice)
current_slice.add_child(first_slice)
current_slice.add_child(second_slice)
#
# creating product node
prod_node = ProductNode(var_scope=frozenset(current_features))
prod_node.id = current_id
node_id_assoc[current_id] = prod_node
logging.debug('\tCreated Prod Node %s (with children %s)',
prod_node,
children_ids)
else:
#
# clustering on rows
logging.info('---> Splitting on rows')
#
# at most n_rows clusters, for sklearn
k_row_clusters = min(self._n_cluster_splits,
n_instances - 1)
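                    #
                    # NOTE: this rebinds the method parameter
                    # n_components; only inst_compo_ratio, computed
                    # upfront, keeps the original value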
n_components = 1
if first_run:
first_run = False
#
# decreasing the number of components proportionally to
# the number of instances in the slice
n_components = max(
int(n_instances / inst_compo_ratio), 1)
# if n_instances < 50:
# n_components = 10
# if not current_slice.bagging:
# #
# # random prob
# if self._rand_gen.rand() > 0.5:
# current_slice.bagging = True
# n_components = 10
#
# creating many data slices
sampled_slices = []
for m in range(n_components):
logging.info('\t considering component: %d/%d',
m + 1,
n_components)
#
# sampling indices
sampled_instance_ids = None
if n_components > 1:
sampled_instance_ids = dataset.sample_indexes(current_instances,
perc=perc,
replace=replace,
rand_gen=self._rand_gen)
else:
sampled_instance_ids = current_instances
#
# creating new data slices (samp instances x current
# features)
sampled_data_slice = DataSlice(sampled_instance_ids,
current_features)
sampled_slices.append(sampled_data_slice)
#
# apply clustering on them
clustering = cluster_rows(data,
sampled_data_slice,
n_clusters=k_row_clusters,
cluster_method=self._row_cluster_method,
n_iters=self._n_iters,
n_restarts=self._n_restarts,
cluster_penalty=self._cluster_penalty,
rand_gen=self._rand_gen,
sklearn_args=self._sklearn_args)
# logging.debug('obtained clustering %s', clustering)
# splitting
cluster_slices = [DataSlice(cluster, current_features)
for cluster in clustering]
cluster_slices_ids = [slice.id
for slice in cluster_slices]
cluster_weights = [slice.n_instances() / n_instances
for slice in cluster_slices]
#
# appending for processing
slices_to_process.extend(cluster_slices)
#
# storing links
sampled_data_slice.type = SumNode
sampled_id = sampled_data_slice.id
# building_stack.append(sampled_data_slice)
for child_slice, child_weight in zip(cluster_slices,
cluster_weights):
sampled_data_slice.add_child(child_slice,
child_weight)
# child_slice.bagging = current_slice.bagging
#
# building a sum node
sum_node = SumNode(
var_scope=frozenset(current_features))
sum_node.id = sampled_id
node_id_assoc[sampled_id] = sum_node
logging.debug('\tCreated Sum Node %s (with children %s)',
sum_node,
cluster_slices_ids)
#
# linking mixtures to original node
sampled_unif_weights = numpy.ones(n_components)
sampled_weights = (sampled_unif_weights /
sampled_unif_weights.sum())
sampled_ids = [slice.id for slice in sampled_slices]
current_slice.type = SumNode
building_stack.append(current_slice)
building_stack.extend(sampled_slices)
for child_slice, child_weight in zip(sampled_slices,
sampled_weights):
current_slice.add_child(child_slice, child_weight)
#
# building a sum node
sum_node = SumNode(var_scope=frozenset(current_features))
sum_node.id = current_id
node_id_assoc[current_id] = sum_node
logging.debug('\tCreated Sum Node %s (with children %s)',
sum_node,
sampled_ids)
learn_end_t = perf_counter()
logging.info('Structure learned in %f secs',
(learn_end_t - learn_start_t))
#
# linking the spn graph (parent -> children)
#
logging.info('===> Building tree')
link_start_t = perf_counter()
root_build_node = building_stack[0]
root_node = node_id_assoc[root_build_node.id]
logging.debug('root node: %s', root_node)
root_node = SpnFactory.pruned_spn_from_slices(node_id_assoc,
building_stack)
link_end_t = perf_counter()
logging.info('\tLinked the spn in %f secs (root_node %s)',
(link_end_t - link_start_t),
root_node)
#
# building layers
#
logging.info('===> Layering spn')
layer_start_t = perf_counter()
spn = SpnFactory.layered_linked_spn(root_node)
layer_end_t = perf_counter()
logging.info('\tLayered the spn in %f secs',
(layer_end_t - layer_start_t))
logging.info('\nLearned SPN\n\n%s', spn.stats())
logging.info('\ncached slices:%d\n', n_cached_objects)
return spn
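#
# a minimal usage sketch for LearnSPN on made-up binary data (all
# hyperparameters illustrative; relies on the old sklearn GMM API above)
def _demo_learnspn_fit():
    rand_gen = numpy.random.RandomState(1337)
    data = rand_gen.binomial(1, 0.5, size=(200, 10))
    learner = LearnSPN(g_factor=5.0,
                       min_instances_slice=50,
                       rand_gen=rand_gen)
    return learner.fit_structure(data, feature_sizes=[2] * 10)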
class RandomLearnSPN(object):
"""
    A random-splits variant of Gens' and Domingos' LearnSPN
"""
def __init__(self,
min_instances_slice=100,
min_features_slice=0,
n_cluster_splits=2,
alpha=0.1,
cltree_leaves=False,
kde_leaves=False,
rand_gen=None):
"""
WRITEME
"""
self._min_instances_slice = min_instances_slice
self._min_features_slice = min_features_slice
self._alpha = alpha
self._cltree_leaves = cltree_leaves
self._kde = kde_leaves
self._n_cluster_splits = n_cluster_splits
self._rand_gen = rand_gen if rand_gen is not None \
else numpy.random.RandomState(RND_SEED)
        logging.info('RandLearnSPN:\n\tmin inst:%d\n' +
                     '\tmin feat:%d\n' +
                     '\talpha:%f\n\tn clusters:%d\n' +
                     '\tcltree leaves:%s\n',
                     self._min_instances_slice,
                     self._min_features_slice,
                     self._alpha,
                     self._n_cluster_splits,
                     self._cltree_leaves)
#
# resetting the data slice ids (just in case)
DataSlice.reset_id_counter()
def fit_structure(self,
data,
feature_sizes):
"""
data is a numpy array
"""
tot_n_instances = data.shape[0]
tot_n_features = data.shape[1]
logging.info('Learning Random SPN structure on a (%d X %d) dataset',
tot_n_instances, tot_n_features)
learn_start_t = perf_counter()
#
# a queue containing the data slices to process
slices_to_process = deque()
# a stack for building nodes
building_stack = deque()
# a dict to keep track of id->nodes
node_id_assoc = {}
# creating the first slice
whole_slice = DataSlice.whole_slice(tot_n_instances,
tot_n_features)
slices_to_process.append(whole_slice)
first_run = True
#
# iteratively process & split slices
#
while slices_to_process:
# process a slice
current_slice = slices_to_process.popleft()
# pointers to the current data slice
current_instances = current_slice.instance_ids
current_features = current_slice.feature_ids
current_id = current_slice.id
n_instances = len(current_instances)
n_features = len(current_features)
logging.info('\n*** Processing slice %d (%d X %d)',
current_id,
n_instances, n_features)
logging.debug('\tinstances:%s\n\tfeatures:%s',
current_instances,
current_features)
#
# is this a leaf node or we can split?
if n_features == 1:
logging.info('---> Adding a leaf (just one feature)')
(feature_id, ) = current_features
feature_size = feature_sizes[feature_id]
# slicing from the original dataset
slice_data_rows = data[current_instances, :]
current_slice_data = slice_data_rows[:, current_features]
# create the node
leaf_node = CategoricalSmoothedNode(var=feature_id,
var_values=feature_size,
data=current_slice_data,
instances=current_instances,
alpha=self._alpha)
# storing links
# input_nodes.append(leaf_node)
leaf_node.id = current_id
node_id_assoc[current_id] = leaf_node
logging.debug('\tCreated Smooth Node %s', leaf_node)
elif (n_instances <= self._min_instances_slice and n_features > 1):
#
# splitting the slice on each feature
logging.info('---> Few instances (%d), decompose all features',
n_instances)
#
# shall put a cltree or
if self._cltree_leaves:
logging.info('into a Chow-Liu tree')
#
# slicing data
slice_data_rows = data[current_instances, :]
current_slice_data = slice_data_rows[:, current_features]
current_feature_sizes = [feature_sizes[i]
for i in current_features]
#
# creating a Chow-Liu tree as leaf
leaf_node = CLTreeNode(vars=current_features,
var_values=current_feature_sizes,
data=current_slice_data,
alpha=self._alpha)
#
# storing links
leaf_node.id = current_id
node_id_assoc[current_id] = leaf_node
logging.debug('\tCreated Chow-Liu Tree Node %s', leaf_node)
elif self._kde and n_instances > 1:
estimate_kernel_density_spn(current_slice,
feature_sizes,
data,
self._alpha,
node_id_assoc,
building_stack,
slices_to_process)
else:
logging.info('into a naive factorization')
#
# putting them in queue
child_slices = [DataSlice(current_instances, [feature_id])
for feature_id in current_features]
slices_to_process.extend(child_slices)
children_ids = [child.id for child in child_slices]
#
# storing the children links
for child_slice in child_slices:
current_slice.add_child(child_slice)
current_slice.type = ProductNode
building_stack.append(current_slice)
#
# creating the product node
prod_node = ProductNode(
var_scope=frozenset(current_features))
prod_node.id = current_id
node_id_assoc[current_id] = prod_node
logging.debug('\tCreated Prod Node %s (with children %s)',
prod_node,
children_ids)
else:
#
# slicing from the original dataset
slice_data_rows = data[current_instances, :]
current_slice_data = slice_data_rows[:, current_features]
split_on_features = False
#
# first run is a split on rows
if first_run:
logging.info('-- FIRST RUN --')
first_run = False
else:
#
# try clustering on cols
logging.debug('...trying to split on columns')
# dependent_features, other_features = \
# greedy_feature_split(data,
# current_slice,
# feature_sizes,
# self._g_factor,
# self._rand_gen)
# if len(other_features) > 0
                    # randomly choosing whether to split on columns or not
if self._rand_gen.randint(2) > 0:
split_on_features = True
#
# have dependent components been found?
if split_on_features:
#
# splitting on columns
logging.info('---> Splitting on features')
# shuffling features
self._rand_gen.shuffle(current_features)
# randomly selecting a cut point
feature_cut = self._rand_gen.randint(1, n_features)
dependent_features = current_features[:feature_cut]
other_features = current_features[feature_cut:]
#
# creating two new data slices and putting them on queue
first_slice = DataSlice(current_instances,
dependent_features)
second_slice = DataSlice(current_instances,
other_features)
slices_to_process.append(first_slice)
slices_to_process.append(second_slice)
children_ids = [first_slice.id, second_slice.id]
#
# storing link parent children
current_slice.type = ProductNode
building_stack.append(current_slice)
current_slice.add_child(first_slice)
current_slice.add_child(second_slice)
#
# creating product node
prod_node = ProductNode(var_scope=frozenset(current_features))
prod_node.id = current_id
node_id_assoc[current_id] = prod_node
logging.debug('\tCreated Prod Node %s (with children %s)',
prod_node,
children_ids)
else:
#
# clustering on rows
logging.info('---> Splitting on rows')
#
# at most n_rows clusters, for sklearn
k_row_clusters = min(self._n_cluster_splits,
n_instances - 1)
# shuffling instances
self._rand_gen.shuffle(current_instances)
# random clustering
clustering = []
remaining_instances = current_instances[:]
for i in range(k_row_clusters - 1):
n_rem_instances = len(remaining_instances)
instance_cut = self._rand_gen.randint(1,
n_rem_instances)
clustering.append(remaining_instances[:instance_cut])
remaining_instances = remaining_instances[instance_cut:]
clustering.append(remaining_instances)
# logging.debug('obtained clustering %s', clustering)
logging.info('clustered into %d parts (min %d)',
len(clustering), k_row_clusters)
# splitting
cluster_slices = [DataSlice(cluster, current_features)
for cluster in clustering]
cluster_slices_ids = [slice.id
for slice in cluster_slices]
cluster_weights = [slice.n_instances() / n_instances
for slice in cluster_slices]
#
# appending for processing
slices_to_process.extend(cluster_slices)
#
# storing links
# current_slice.children = cluster_slices_ids
# current_slice.weights = cluster_weights
current_slice.type = SumNode
building_stack.append(current_slice)
for child_slice, child_weight in zip(cluster_slices,
cluster_weights):
current_slice.add_child(child_slice, child_weight)
#
# building a sum node
sum_node = SumNode(var_scope=frozenset(current_features))
sum_node.id = current_id
node_id_assoc[current_id] = sum_node
logging.debug('\tCreated Sum Node %s (with children %s)',
sum_node,
cluster_slices_ids)
learn_end_t = perf_counter()
logging.info('Structure learned in %f secs',
(learn_end_t - learn_start_t))
#
# linking the spn graph (parent -> children)
#
logging.info('===> Building tree')
link_start_t = perf_counter()
root_build_node = building_stack[0]
root_node = node_id_assoc[root_build_node.id]
logging.debug('root node: %s', root_node)
root_node = SpnFactory.pruned_spn_from_slices(node_id_assoc,
building_stack)
link_end_t = perf_counter()
logging.info('\tLinked the spn in %f secs (root_node %s)',
(link_end_t - link_start_t),
root_node)
#
# building layers
#
logging.info('===> Layering spn')
layer_start_t = perf_counter()
spn = SpnFactory.layered_linked_spn(root_node)
layer_end_t = perf_counter()
logging.info('\tLayered the spn in %f secs',
(layer_end_t - layer_start_t))
logging.info('\nLearned SPN\n\n%s', spn.stats())
# print(spn)
return spn
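#
# a minimal usage sketch on made-up binary data (values illustrative):
# RandomLearnSPN randomizes the splits but keeps LearnSPN's interface
def _demo_random_learnspn_fit():
    rand_gen = numpy.random.RandomState(1337)
    data = rand_gen.binomial(1, 0.5, size=(100, 8))
    learner = RandomLearnSPN(min_instances_slice=20, rand_gen=rand_gen)
    return learner.fit_structure(data, feature_sizes=[2] * 8)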
| 61,098 | 37.044209 | 98 | py |

spyn-repr | spyn-repr-master/algo/__init__.py |  | 0 | 0 | 0 | py |

spyn-repr | spyn-repr-master/algo/dataslice.py |
import numpy
try:
from time import perf_counter
except ImportError:
from time import time
perf_counter = time
from spn import LOG_ZERO
class DataSlice(object):
"""
A little util class for storing
the sets of indexes for the instances and features
considered
"""
class_counter = 0
@classmethod
def reset_id_counter(cls):
"""
WRITEME
"""
DataSlice.class_counter = 0
@classmethod
def whole_slice(cls,
n_instances,
n_features):
instances = numpy.arange(n_instances, dtype=numpy.uint32)
features = numpy.arange(n_features, dtype=numpy.uint32)
return DataSlice(instances, features)
def __init__(self,
instances=None,
features=None):
#
# ensuring them to be numpy ndarrays
self.instance_ids = numpy.array(instances)
self.feature_ids = numpy.array(features)
self.id = DataSlice.class_counter
self.children = []
self.weights = []
self.ll = LOG_ZERO
#
        # this is ugly: tagging the node type here avoids subclassing
self.type = None
#
# adding some members for the ll correction
self.lls = None # this shall be a numpy array
self.parent = None
self.w = 1.0
DataSlice.class_counter += 1
def __hash__(self):
return hash(self.id)
# def __eq__(self, other):
# return self.id == other.id
# def __ne__(self, other):
# return not self.__eq__(other)
def add_child(self, data_slice_child, data_slice_weight=None):
self.children.append(data_slice_child)
if data_slice_weight is not None:
self.weights.append(data_slice_weight)
# adding a reference to the weight
data_slice_child.w = data_slice_weight
# adding a reference to a child
data_slice_child.parent = self
#
# commenting this out for error prevention
# def add_children(self, data_slice_children, data_slice_weights=None):
# self.children.extend(data_slice_children)
# if data_slice_weights is not None:
# self.weigths.extend(data_slice_weights)
def n_instances(self):
return len(self.instance_ids)
def n_features(self):
return len(self.feature_ids)
def __repr__(self):
return ("[id: {id} ll: {ll} i :{instances} f :{features}\n {lls}]".
format(id=self.id,
ll=self.ll,
instances=self.instance_ids,
features=self.feature_ids,
lls=self.lls))
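#
# a minimal sketch (made-up ids) of the parent/child bookkeeping:
# add_child stores the child and its weight on the parent and back-links
# parent and w on the child
def _demo_data_slice_links():
    parent = DataSlice(numpy.array([0, 1, 2, 3]), numpy.array([0, 1]))
    child = DataSlice(numpy.array([0, 1]), numpy.array([0, 1]))
    parent.add_child(child, 0.5)
    assert child.parent is parent and child.w == 0.5
    assert parent.children == [child] and parent.weights == [0.5]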
class ScopeNode(object):
"""
Representing a generic node in the Scope graph
"""
class_counter = 0
@classmethod
def reset_id_counter(cls):
"""
WRITEME
"""
ScopeNode.class_counter = 0
    def __init__(self, scope, slice, children=None, nodes=None):
        """
        WRITEME
        """
        self.scope = frozenset(scope)
        self.slice = slice
        if children is None:
            self.children = set()
        else:
            self.children = set(children)
        if nodes is None:
            self.nodes = set()
        else:
            self.nodes = set(nodes)
        self.id = ScopeNode.class_counter
        ScopeNode.class_counter += 1
def add_child(self, child):
self.children.add(child)
def add_node(self, node):
self.nodes.add(node)
def is_leaf(self):
if not self.children:
return True
else:
return False
def __hash__(self):
return hash(self.id)
def __eq__(self, other):
return self.id == other.id
    def __repr__(self):
        return "id:{0} slice:{3} -> {{{1}}} [{2}]\n".format(
            self.id,
            " ".join(str(node.id) for node in self.children),
            " ".join(str(node.id) for node in self.nodes),
            self.slice.id)
class RegionNode(ScopeNode):
"""
Representing a region node in a Scope graph
"""
    def __repr__(self):
        return "region node {}".format(super().__repr__())
class PartitionNode(ScopeNode):
"""
Representing a partition node in a Scope graph
"""
    def __repr__(self):
        return "partition node {}".format(super().__repr__())
| 4,572 | 24.547486 | 96 | py |

spyn-repr | spyn-repr-master/spn/utils.py |
try:
from itertools import izip as zip
except ImportError:
pass
import itertools
from itertools import tee
import numpy
import scipy
import scipy.stats
import os
import visualize
import pandas
import glob
def pairwise(iterable):
"""
s = <s0, s1, ...>
s -> (s0,s1), (s1,s2), (s2, s3), ...
"""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
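#
# e.g. list(pairwise([1, 2, 3, 4])) -> [(1, 2), (2, 3), (3, 4)]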
def stats_format(stats_list, separator, digits=5):
formatted = []
float_format = '{0:.' + str(digits) + 'f}'
for stat in stats_list:
if isinstance(stat, int):
formatted.append(str(stat))
elif isinstance(stat, float):
formatted.append(float_format.format(stat))
else:
formatted.append(stat)
# concatenation
return separator.join(formatted)
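#
# e.g. stats_format([3, 0.123456, 'spn'], ':', digits=3) -> '3:0.123:spn'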
def sample_all_worlds(n_features, n_feature_vals=2):
"""
features are assumed to be homogeneous
"""
all_values = range(n_feature_vals)
all_worlds = itertools.product(all_values, repeat=n_features)
sampled_worlds = []
for world in all_worlds:
sampled_worlds.append(world)
return numpy.array(sampled_worlds)
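#
# e.g. sample_all_worlds(2) -> array([[0, 0], [0, 1], [1, 0], [1, 1]])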
def statistically_significant(x, y,
p_threshold=0.05,
test='wilcoxon'):
"""
WRITEME
"""
#
# paired samples shall have the same length
assert x.shape[0] == y.shape[0]
p_value = None
if test == 'wilcoxon':
W, p_value = scipy.stats.wilcoxon(x, y)
elif test == 'ttest':
T, p_value = scipy.stats.ttest_rel(x, y)
return (p_value < p_threshold), p_value
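#
# a minimal usage sketch on made-up paired log-likelihoods: shifting one
# sample by a constant makes the (wilcoxon) difference significant
def _demo_statistically_significant():
    rand_gen = numpy.random.RandomState(1337)
    x = rand_gen.normal(-20.0, 1.0, size=100)
    y = x + 0.5
    return statistically_significant(x, y, test='wilcoxon')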
DATASET_LIST = ['nltcs', 'msnbc', 'kdd',
'plants', 'baudio', 'jester', 'bnetflix',
'accidents', 'tretail', 'pumsb_star',
'dna', 'kosarek', 'msweb',
'book', 'tmovie', 'cwebkb',
'cr52', 'c20ng', 'bbc', 'ad']
TEST_LL_FILE = '/test.lls'
def matching_dirs(name_list, prefix):
matched = []
for name in name_list:
#
# more precise controls can be made (i.e. regex)
if prefix in name:
matched.append(name)
return matched
def directory_stat_test(exp_dir_1,
exp_dir_2,
dataset_names=DATASET_LIST,
test_filename=TEST_LL_FILE,
test='wilcoxon',
p_threshold=0.05):
"""
WRITEME
"""
print('\n\n')
print('**********************************************************')
print('Comparing two experiments by statistical test significance')
print('**********************************************************\n')
#
# getting all the folders for each of the two input paths
folders_1 = [elems[0] for elems in os.walk(exp_dir_1)]
print('There are', len(folders_1), 'possible dirs for exp 1')
folders_2 = [elems[0] for elems in os.walk(exp_dir_2)]
print('There are', len(folders_2), 'possible dirs for exp 2')
#
# initing the results
victories_1 = []
victories_2 = []
draws = []
p_values = {}
#
# cycling through the names of the dataset we need to compare
for dataset_name in dataset_names:
print('Considering dataset', dataset_name)
#
# looking for a folder in both dirs
dataset_exps_1 = matching_dirs(folders_1, dataset_name)
dataset_exps_2 = matching_dirs(folders_2, dataset_name)
#
# checking for correctness
if len(dataset_exps_1) > 1:
print('There is more than one exp for dataset', dataset_name,
'in folder', exp_dir_1, 'exiting!')
return
if len(dataset_exps_2) > 1:
print('There is more than one exp for dataset', dataset_name,
'in folder', exp_dir_2, 'exiting!')
return
# if there are no results in both, skip it
if len(dataset_exps_1) > 0 and len(dataset_exps_2) > 0:
#
# trying to load the files with the exps
test_lls_path_1 = dataset_exps_1[0] + test_filename
test_lls_path_2 = dataset_exps_2[0] + test_filename
if (os.path.exists(test_lls_path_1) and
os.path.exists(test_lls_path_2)):
test_lls_1 = numpy.loadtxt(test_lls_path_1)
test_lls_2 = numpy.loadtxt(test_lls_path_2)
#
# checking for shape
assert test_lls_1.shape == test_lls_2.shape
print('Testing for', test_lls_1.shape[0], 'instances')
print('With p-value significance of', p_threshold)
different, p_value = statistically_significant(test_lls_1,
test_lls_2,
p_threshold=p_threshold,
test=test)
#
# storing them
test_ll_1 = test_lls_1.mean()
test_ll_2 = test_lls_2.mean()
p_values[dataset_name] = {'p-value': p_value,
'avg-lls': (test_ll_1, test_ll_2)}
if not different:
draws.append(dataset_name)
else:
#
# which one is bigger?
if test_ll_1 > test_ll_2:
victories_1.append(dataset_name)
else:
victories_2.append(dataset_name)
#
# printing final stats
print('---------------------------------------------')
print('Final results')
print('ALGO 1:\t #victories', len(victories_1), '{', victories_1, '}')
print('ALGO 2:\t #victories', len(victories_2), '{', victories_2, '}')
print('Draws:\t', len(draws), '{', draws, '}')
#
# printing p-values for latex
for dataset_name in DATASET_LIST:
p_value_str = None
try:
p_value = p_values[dataset_name]['p-value']
p_value_str = '& ' + "{:.2e}".format(p_value)
        except KeyError:
p_value_str = "& "
print(p_value_str)
return p_values
N_COMPONENTS = 50
CURVE_FILE = 'curves.log'
CURVE_INDEX = 2 # test lls
def directory_curves_vis(curve_dirs,
line_dirs,
output='./',
fig_size=(9, 8),
dataset_names=DATASET_LIST,
test_filename=TEST_LL_FILE,
curve_filename=CURVE_FILE):
"""
WRITEME
ex:
directory_curves_vis(['exp/mix-learnspn-f/',
'exp/mix-learnspn-f-clt'],
['../SLSPN_Release/exp/spn/',
'../libra-tk-1.0.1/exp/best-idspn/',
'../libra-tk-1.0.1/exp/mt/'],
output='exp/curves2/')
"""
print('\n\n')
print('**********************************************************')
print('Drawing curves for different experiments')
print('**********************************************************\n')
curve_contents = []
for curve_dir in curve_dirs:
curve_contents.append([elems[0] for elems in os.walk(curve_dir)])
line_contents = []
for line_dir in line_dirs:
line_contents.append([elems[0] for elems in os.walk(line_dir)])
x_axis = numpy.arange(1, N_COMPONENTS + 1)
#
# precomputing folder contents
#
# better to loop through experiments first
for dataset_name in dataset_names:
#
        # checking if it is present for all of them
#
# curves first
curve_exps = []
not_found = False
for i, curve_dir in enumerate(curve_dirs):
#
# composing path
curves_log_path = None
curve_dataset_exps = matching_dirs(curve_contents[i], dataset_name)
if len(curve_dataset_exps) > 1:
raise ValueError('More than one folder for dataset',
dataset_name, curve_dir,
curve_dataset_exps)
if len(curve_dataset_exps) < 1:
not_found = True
print('Skipping dataset', dataset_name,
'Exp not present in', curve_dir)
break
else:
curves_log_path = curve_dataset_exps[0] + '/' + curve_filename
if os.path.exists(curves_log_path):
#
# loading it
curves_array = numpy.loadtxt(
curves_log_path, delimiter=',')
curve_exps.append((x_axis, curves_array[CURVE_INDEX]))
else:
not_found = True
print('Curves file not found for dataset', dataset_name,
'and experiment', curve_dir)
#
        # only proceed if every curves file was found
if not not_found:
line_exps = []
not_found = False
for i, line_dir in enumerate(line_dirs):
test_lls_file_path = None
line_dataset_exps = matching_dirs(
line_contents[i], dataset_name)
if len(line_dataset_exps) > 1:
raise ValueError('More than one folder for dataset',
dataset_name, line_dir,
line_dataset_exps)
if len(line_dataset_exps) < 1:
not_found = True
print('Skipping dataset', dataset_name,
'Exp not present in', line_dir)
break
else:
test_lls_file_path = line_dataset_exps[0] + \
'/' + test_filename
if os.path.exists(test_lls_file_path):
test_lls = numpy.loadtxt(test_lls_file_path)
test_ll = test_lls.mean()
print('Test LL', test_ll,
'for exp', curve_dir,
'for dataset', dataset_name)
line_exps.append(test_ll)
else:
not_found = True
print('Test.lls file not found for dataset',
dataset_name,
'and experiment', line_dir)
if not not_found:
#
# if we have all the values we can plot them
output_path = output + dataset_name + '.png'
                print('Printing to', output_path)
visualize.visualize_curves(curve_exps,
output=output_path,
labels=['SPN-BB',
'SPN-BTB',
'LearnSPN',
'ID-SPN',
'MT'],
lines=line_exps,
loc=7,
fig_size=fig_size)
def load_frames_from_dirs(dirs,
dataset_name_list,
seps=None,
headers=None,
exp_file_name='exp.log',):
if seps is None:
seps = ['\t' for _dir in dirs]
if headers is None:
headers = [0 for _dir in dirs]
frame_lists = [[] for _dir in dirs]
for i, dir in enumerate(dirs):
sep = seps[i]
header = headers[i]
print(sep, header)
for dataset in dataset_name_list:
exp_paths = glob.glob(dir + '/{0}*/{1}'.format(dataset, exp_file_name))
assert len(exp_paths) == 1
exp_path = exp_paths[0]
print('Processing exp', exp_path)
frame = pandas.read_csv(exp_path, sep=sep, header=header, skip_footer=1)
frame_lists[i].append(frame)
return frame_lists
def approx_scope_histo_quartiles(scopes):
"""
scope is a sequence of number of nodes (frequency of scope lengths)
"""
n_scopes = len(scopes)
n_items = sum(scopes)
cumulative_scopes = [0] * n_scopes
cumulative_scopes[0] = scopes[0]
for i in range(1, n_scopes):
cumulative_scopes[i] = cumulative_scopes[i - 1] + scopes[i]
print('Cumulative scopes {}'.format(cumulative_scopes))
first_quartile = int(n_items * 0.25)
median = int(n_items * 0.5)
third_quartile = int(n_items * 0.75)
quartiles = [first_quartile, median, third_quartile]
print('Quartiles pos {}'.format(quartiles))
quartile_scopes = [0] * len(quartiles)
for i in range(len(quartiles)):
for j in range(n_scopes):
if cumulative_scopes[j] > quartiles[i]:
break
quartile_scopes[i] = j
return quartile_scopes
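#
# e.g. approx_scope_histo_quartiles([4, 2, 2]):
#   cumulative counts [4, 6, 8], quartile positions [2, 4, 6] -> [0, 1, 2]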
| 13,105 | 31.201474 | 87 | py |

spyn-repr | spyn-repr-master/spn/factory.py |
from spn.linked.spn import Spn as SpnLinked
from spn.linked.layers import Layer as LayerLinked
from spn.linked.layers import SumLayer as SumLayerLinked
from spn.linked.layers import ProductLayer as ProductLayerLinked
from spn.linked.layers import CategoricalInputLayer
from spn.linked.layers import CategoricalSmoothedLayer \
as CategoricalSmoothedLayerLinked
from spn.linked.layers import CategoricalIndicatorLayer \
as CategoricalIndicatorLayerLinked
from spn.linked.layers import CategoricalCLInputLayer \
as CategoricalCLInputLayerLinked
from spn.linked.nodes import Node
from spn.linked.nodes import SumNode
from spn.linked.nodes import ProductNode
from spn.linked.nodes import CategoricalSmoothedNode
from spn.linked.nodes import CategoricalIndicatorNode
from spn.linked.nodes import CLTreeNode
from spn.linked.nodes import ConstantNode
from spn.theanok.layers import SumLayer_logspace as SumLayerTheanok
from spn.theanok.layers import ProductLayer_logspace as ProductLayerTheanok
from spn.theanok.layers import TheanokLayer as LayerTheanok
from spn.theanok.layers import InputLayer_logspace as InputLayerTheanok
from spn.theanok.spn import BlockLayeredSpn
from spn.utils import pairwise
from spn import INT_TYPE
import numpy
from math import ceil
from theano import config
import scipy.sparse
import sklearn.preprocessing
import random
import itertools
from collections import deque
from collections import defaultdict
import dataset
import logging
class SpnFactory(object):
"""
WRITEME
"""
#####################################################
#
#####################################################
@classmethod
def linked_kernel_density_estimation(cls,
n_instances,
features,
node_dict=None,
alpha=0.1
# ,batch_size=1,
# sparse=False
):
"""
WRITEME
"""
n_features = len(features)
# the top one is a sum layer with a single node
root_node = SumNode()
root_layer = SumLayerLinked([root_node])
# second one is a product layer with n_instances nodes
product_nodes = [ProductNode() for i in range(n_instances)]
product_layer = ProductLayerLinked(product_nodes)
# linking them to the root node
for prod_node in product_nodes:
root_node.add_child(prod_node, 1. / n_instances)
# last layer can be a categorical smoothed input
# or sum_layer + categorical indicator input
input_layer = None
layers = None
n_leaf_nodes = n_features * n_instances
if node_dict is None:
# creating a sum_layer with n_leaf_nodes
sum_nodes = [SumNode() for i in range(n_leaf_nodes)]
# store them into a layer
sum_layer = SumLayerLinked(sum_nodes)
# linking them to the products above
for i, prod_node in enumerate(product_nodes):
for j in range(n_features):
# getting the next n_features nodes
prod_node.add_child(sum_nodes[i * n_features + j])
# now creating the indicator nodes
input_layer = \
CategoricalIndicatorLayerLinked(vars=features)
# linking the sum nodes to the indicator vars
for i, sum_node in enumerate(sum_nodes):
# getting the feature id
j = i % n_features
# and thus its number of values
n_values = features[j]
# getting the indices of indicators
start_index = sum(features[:j])
end_index = start_index + n_values
indicators = [node for node
in input_layer.nodes()][start_index:end_index]
for ind_node in indicators:
sum_node.add_child(ind_node, 1. / n_values)
# storing levels
layers = [sum_layer, product_layer,
root_layer]
else:
# create a categorical smoothed layer
input_layer = \
CategoricalSmoothedLayerLinked(vars=features,
node_dicts=node_dict,
alpha=alpha)
# it shall contain n_leaf_nodes nodes
smooth_nodes = list(input_layer.nodes())
assert len(smooth_nodes) == n_leaf_nodes
# linking it
for i, prod_node in enumerate(product_nodes):
for j in range(n_features):
# getting the next n_features nodes
prod_node.add_child(smooth_nodes[i * n_features + j])
# setting the used levels
layers = [product_layer, root_layer]
# create the spn from levels
kern_spn = SpnLinked(input_layer, layers)
return kern_spn
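    #
    # a minimal usage sketch (values illustrative):
    #
    #   spn = SpnFactory.linked_kernel_density_estimation(3, [2, 2])
    #
    # one product node per instance over indicator inputs for two
    # binary features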
@classmethod
def linked_naive_factorization(cls,
features,
node_dict=None,
alpha=0.1):
"""
WRITEME
"""
n_features = len(features)
# create an input layer
input_layer = None
layers = None
# first layer is a product layer with n_feature children
root_node = ProductNode()
root_layer = ProductLayerLinked([root_node])
# second is a sum node on an indicator layer
if node_dict is None:
# creating sum nodes
sum_nodes = [SumNode() for i in range(n_features)]
# linking to the root
for node in sum_nodes:
root_node.add_child(node)
# store into a level
sum_layer = SumLayerLinked(sum_nodes)
# now create an indicator layer
input_layer = CategoricalIndicatorLayerLinked(vars=features)
# and linking it
# TODO make this a function
for i, sum_node in enumerate(sum_nodes):
# getting the feature id
j = i % n_features
# and thus its number of values
n_values = features[j]
# getting the indices of indicators
start_index = sum(features[:j])
end_index = start_index + n_values
indicators = [node for node
in input_layer.nodes()][start_index:end_index]
for ind_node in indicators:
sum_node.add_child(ind_node, 1. / n_values)
# collecting layers
layers = [sum_layer, root_layer]
# or a categorical smoothed layer
else:
input_layer = CategoricalSmoothedLayerLinked(vars=features,
node_dicts=node_dict,
alpha=alpha)
# it shall contain n_features nodes
smooth_nodes = list(input_layer.nodes())
assert len(smooth_nodes) == n_features
for node in smooth_nodes:
root_node.add_child(node)
# set layers accordingly
layers = [root_layer]
# build the spn
naive_fact_spn = SpnLinked(input_layer, layers)
return naive_fact_spn
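    #
    # a minimal usage sketch (values illustrative):
    #
    #   spn = SpnFactory.linked_naive_factorization([2, 2])
    #
    # a single product node over one sum node per binary feature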
@classmethod
def linked_random_spn_top_down(cls,
vars,
n_layers,
n_max_children,
n_scope_children,
max_scope_split,
merge_prob=0.5,
rand_gen=None):
"""
WRITEME
"""
        def cluster_scopes(scope_list):
            cluster_dict = {}
            for i, var in enumerate(scope_list):
                cluster_dict.setdefault(var, set()).add(i)
            return cluster_dict
def cluster_set_scope(scope_list):
return {scope for scope in scope_list}
def link_leaf_to_input_layer(sum_leaf,
scope_var,
input_layer,
rand_gen):
for indicator_node in input_layer.nodes():
if indicator_node.var == scope_var:
rand_weight = rand_gen.random()
sum_leaf.add_child(indicator_node, rand_weight)
# print(sum_leaf, indicator_node, rand_weight)
# normalizing
sum_leaf.normalize()
#
# creating a product layer
#
def build_product_layer(parent_layer,
parent_scope_list,
n_max_children,
n_scope_children,
input_layer,
rand_gen):
# grouping the scopes of the parents
scope_clusters = cluster_set_scope(parent_scope_list)
# for each scope add a fixed number of children
children_lists = {scope: [ProductNode(var_scope=scope)
for i in range(n_scope_children)]
for scope in scope_clusters}
# counting which node is used
children_counts = {scope: [0 for i in range(n_scope_children)]
for scope in scope_clusters}
# now link those randomly to their parent
for parent, scope in zip(parent_layer.nodes(), parent_scope_list):
# only for nodes not becoming leaves
if len(scope) > 1:
# sampling at most n_max_children from those in the same
# scope
children_scope_list = children_lists[scope]
sample_length = min(
len(children_scope_list), n_max_children)
sampled_ids = rand_gen.sample(range(n_scope_children),
sample_length)
sampled_children = [None for i in range(sample_length)]
for i, id in enumerate(sampled_ids):
# getting the sampled child
sampled_children[i] = children_scope_list[id]
# updating its counter
children_counts[scope][id] += 1
for child in sampled_children:
# parent is a sum layer, we must set a random weight
rand_weight = rand_gen.random()
parent.add_child(child, rand_weight)
# we can now normalize it
parent.normalize()
else:
# binding the node to the input layer
(scope_var,) = scope
link_leaf_to_input_layer(parent,
scope_var,
input_layer,
rand_gen)
# pruning those children never used
for scope in children_lists.keys():
children_scope_list = children_lists[scope]
scope_counts = children_counts[scope]
used_children = [child
for count, child in zip(scope_counts,
children_scope_list)
if count > 0]
children_lists[scope] = used_children
# creating the layer and new scopelist
# print('children list val', children_lists.values())
children_list = [child
for child in
itertools.chain.from_iterable(
children_lists.values())]
scope_list = [key
for key, child_list in children_lists.items()
for elem in child_list]
# print('children list', children_list)
# print('scope list', scope_list)
prod_layer = ProductLayerLinked(children_list)
return prod_layer, scope_list
def build_sum_layer(parent_layer,
parent_scope_list,
rand_gen,
max_scope_split=-1,
merge_prob=0.5):
# keeping track of leaves
# leaf_props = []
scope_clusters = cluster_set_scope(parent_scope_list)
# looping through all the parent nodes and their scopes
# in order to decompose their scope
dec_scope_list = []
for scope in parent_scope_list:
# decomposing their scope into k random pieces
k = len(scope)
if 1 < max_scope_split <= len(scope):
k = rand_gen.randint(2, max_scope_split)
shuffled_scope = list(scope)
rand_gen.shuffle(shuffled_scope)
dec_scopes = [frozenset(shuffled_scope[i::k])
for i in range(k)]
dec_scope_list.append(dec_scopes)
# if a decomposed scope consists of only one var, generate a
# leaf
# leaves = [(parent, (dec_scope,))
# for dec_scope in dec_scopes if len(dec_scope) == 1]
# leaf_props.extend(leaves)
# generating a unique decomposition
used_decs = {}
children_list = []
scope_list = []
for parent, decs in zip(parent_layer.nodes(),
dec_scope_list):
merge_count = 0
for scope in decs:
                    rand_perc = rand_gen.random()
                    if (merge_count < len(decs) - 1 and
                            rand_perc > merge_prob and
                            scope in used_decs):
                        #
                        # reuse an already created node for this scope
                        sum_node = used_decs[scope]
                        merge_count += 1
                    else:
                        # create a node for it
                        sum_node = SumNode(var_scope=scope)
                        children_list.append(sum_node)
                        scope_list.append(scope)
                        used_decs[scope] = sum_node
parent.add_child(sum_node)
# unique_dec = {frozenset(dec) for dec in
# itertools.chain.from_iterable(dec_scope_list)}
# print('unique dec', unique_dec)
# building a dict scope->child
# children_dict = {scope: SumNode() for scope in unique_dec}
# now linking parents to their children
# for parent, scope in zip(parent_layer.nodes(),
# parent_scope_list):
# dec_scopes = dec_scope_list[scope]
# for dec in dec_scopes:
# retrieving children
# adding it
# parent.add_child(children_dict[dec])
# we already have the nodes and their scopes
# children_list = [child for child in children_dict.values()]
# scope_list = [scope for scope in children_dict.keys()]
sum_layer = SumLayerLinked(nodes=children_list)
return sum_layer, scope_list
# if no generator is provided, create a new one
if rand_gen is None:
rand_gen = random.Random()
# create input layer
# _vars = [2, 3, 2, 2, 4]
input_layer = CategoricalIndicatorLayerLinked(vars=vars)
# create root layer
full_scope = frozenset({i for i in range(len(vars))})
root = SumNode(var_scope=full_scope)
root_layer = SumLayerLinked(nodes=[root])
last_layer = root_layer
# create top scope list
last_scope_list = [full_scope]
layers = [root_layer]
layer_count = 0
stop_building = False
while not stop_building:
# checking for early termination
# this one leads to split product nodes into leaves
if layer_count >= n_layers:
print('Max level reached, trying to stop')
max_scope_split = -1
# build a new layer alternating types
if isinstance(last_layer, SumLayerLinked):
print('Building product layer')
last_layer, last_scope_list = \
build_product_layer(last_layer,
last_scope_list,
n_max_children,
n_scope_children,
input_layer,
rand_gen)
elif isinstance(last_layer, ProductLayerLinked):
print('Building sum layer')
last_layer, last_scope_list = \
build_sum_layer(last_layer,
last_scope_list,
rand_gen,
max_scope_split,
merge_prob)
# testing for more nodes to expand
if last_layer.n_nodes() == 0:
print('Stop building')
stop_building = True
else:
layers.append(last_layer)
layer_count += 1
# checking for early termination
# if not stop_building:
# if isinstance(last_layer, ProductLayerLinked):
# building a sum layer splitting everything into one
# length scopes
# last_sum_layer, last_scope_list = \
# build_sum_layer(last_layer,
# last_scope_list,
# rand_gen,
# max_scope_split=-1)
# then linking each node to the input layer
# for sum_leaf, scope in zip(last_sum_layer.nodes(),
# last_scope_list):
# (scope_var,) = scope
# link_leaf_to_input_layer(sum_leaf,
# scope_var,
# input_layer,
# rand_gen)
# elif isinstance(last_layer, SumLayerLinked):
# pass
# print('LAYERS ', len(layers), '\n')
# for i, layer in enumerate(layers):
# print('LAYER ', i)
# print(layer)
# print('\n')
spn = SpnLinked(input_layer=input_layer,
layers=layers[::-1])
# testing
# scope_list = [
# frozenset({1, 3, 4}), frozenset({2, 0}), frozenset({1, 3, 4})]
# sum_layer = SumLayerLinked(nodes=[SumNode(), SumNode(), SumNode()])
# prod_layer, scope_list = build_product_layer(
# sum_layer, scope_list, 2, 3, input_layer, rand_gen)
# sum_layer1, scope_list_2 = build_sum_layer(prod_layer,
# scope_list,
# rand_gen,
# max_scope_split=2
# )
# prod_layer_2, scope_list_3 = build_product_layer(sum_layer1,
# scope_list_2,
# 2,
# 3,
# input_layer,
# rand_gen)
# create spn from layers
# spn = SpnLinked(input_layer=input_layer,
# layers=[prod_layer_2, sum_layer1,
# prod_layer, sum_layer, root_layer])
return spn
@classmethod
def layered_linked_spn(cls, root_node):
"""
Given a simple linked version (parent->children),
returns a layered one (linked + layers)
"""
layers = []
root_layer = None
input_nodes = []
layer_nodes = []
input_layer = None
# layers.append(root_layer)
previous_level = None
# collecting nodes to visit
open = deque()
next_open = deque()
closed = set()
open.append(root_node)
while open:
# getting a node
current_node = open.popleft()
current_id = current_node.id
# has this already been seen?
if current_id not in closed:
closed.add(current_id)
layer_nodes.append(current_node)
# print('CURRENT NODE')
# print(current_node)
# expand it
for child in current_node.children:
# only for non leaf nodes
if (isinstance(child, SumNode) or
isinstance(child, ProductNode)):
next_open.append(child)
else:
# it must be an input node
if child.id not in closed:
input_nodes.append(child)
closed.add(child.id)
# open is now empty, but new open not
if (not open):
# swap them
open = next_open
next_open = deque()
# and create a new level alternating type
if previous_level is None:
# it is the first level
if isinstance(root_node, SumNode):
previous_level = SumLayerLinked([root_node])
elif isinstance(root_node, ProductNode):
previous_level = ProductLayerLinked([root_node])
elif isinstance(previous_level, SumLayerLinked):
previous_level = ProductLayerLinked(layer_nodes)
elif isinstance(previous_level, ProductLayerLinked):
previous_level = SumLayerLinked(layer_nodes)
layer_nodes = []
layers.append(previous_level)
#
# finishing layers
#
#
# checking for CLTreeNodes
cltree_leaves = False
for node in input_nodes:
if isinstance(node, CLTreeNode):
cltree_leaves = True
break
if cltree_leaves:
input_layer = CategoricalCLInputLayerLinked(input_nodes)
else:
            # otherwise assuming all input nodes are homogeneous
if isinstance(input_nodes[0], CategoricalSmoothedNode):
# print('SMOOTH LAYER')
input_layer = CategoricalSmoothedLayerLinked(input_nodes)
elif isinstance(input_nodes[0], CategoricalIndicatorNode):
input_layer = CategoricalIndicatorLayerLinked(input_nodes)
spn = SpnLinked(input_layer=input_layer,
layers=layers[::-1])
return spn
@classmethod
def pruned_spn_from_slices(cls, node_assoc, building_stack, logger=None):
"""
WRITEME
"""
if logger is None:
logger = logging.getLogger(__name__)
# traversing the building stack
# to link and prune nodes
for build_node in reversed(building_stack):
# current node
current_id = build_node.id
# print('+ Current node: %d', current_id)
current_children_slices = build_node.children
# print('\tchildren: %r', current_children_slices)
current_children_weights = build_node.weights
# print('\tweights: %r', current_children_weights)
# retrieving corresponding node
node = node_assoc[current_id]
# print('retrieved node', node)
# discriminate by type
if isinstance(node, SumNode):
logging.debug('it is a sum node %d', current_id)
# getting children
for child_slice, child_weight in zip(current_children_slices,
current_children_weights):
# print(child_slice)
# print(child_slice.id)
# print(node_assoc)
child_id = child_slice.id
child_node = node_assoc[child_id]
# print(child_node)
# checking children types as well
if isinstance(child_node, SumNode):
logging.debug('++ pruning node: %d', child_node.id)
# this shall be pruned
for grand_child, grand_child_weight \
in zip(child_node.children,
child_node.weights):
node.add_child(grand_child,
grand_child_weight *
child_weight)
else:
logging.debug('+++ Adding it as child: %d',
child_node.id)
node.add_child(child_node, child_weight)
# print('children added')
elif isinstance(node, ProductNode):
logging.debug('it is a product node %d', current_id)
# linking children
for child_slice in current_children_slices:
child_id = child_slice.id
child_node = node_assoc[child_id]
# checking for alternating type
if isinstance(child_node, ProductNode):
logging.debug('++ pruning node: %d', child_node.id)
# this shall be pruned
for grand_child in child_node.children:
node.add_child(grand_child)
else:
node.add_child(child_node)
# print('+++ Linking child %d', child_node.id)
# this is superfluous, returning a pointer to the root
root_build_node = building_stack[0]
return node_assoc[root_build_node.id]
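    # A tiny worked sketch of the weight product above (hypothetical values):
    # a sum node with a sum child of weight 0.4, whose own grand child has
    # weight 0.5, ends up linking the grand child directly with weight
    # 0.4 * 0.5 = 0.2, preserving the encoded distribution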
@classmethod
def pruned_spn_from_scopes(cls, scope_assoc, building_stack, logger=None):
"""
WRITEME
"""
        #
        # FIXME: this is just a stub, it does not even compile:
        # build_node, node and node_assoc below are never bound to real
        # objects, so we fail fast instead of raising an obscure error later
        raise NotImplementedError('pruned_spn_from_scopes is a stub')
        build_node = None
        node = None
        node_assoc = None
        if logger is None:
            logger = logging.getLogger(__name__)
# traversing the building stack
# to link and prune nodes
for build_scope in reversed(building_stack):
# current node
current_id = build_scope.id
# print('+ Current node: %d', current_id)
current_children_slices = build_node.children
# print('\tchildren: %r', current_children_slices)
current_children_weights = build_node.weights
# print('\tweights: %r', current_children_weights)
# retrieving corresponding node
scope_node = scope_assoc[current_id]
# print('retrieved node', node)
# discriminate by type
if isinstance(node, SumNode):
logging.debug('it is a sum node %d', current_id)
# getting children
for child_slice, child_weight in zip(current_children_slices,
current_children_weights):
# print(child_slice)
# print(child_slice.id)
# print(node_assoc)
child_id = child_slice.id
child_node = node_assoc[child_id]
# print(child_node)
# checking children types as well
if isinstance(child_node, SumNode):
logging.debug('++ pruning node: %d', child_node.id)
# this shall be pruned
for grand_child, grand_child_weight \
in zip(child_node.children,
child_node.weights):
node.add_child(grand_child,
grand_child_weight *
child_weight)
else:
logging.debug('+++ Adding it as child: %d',
child_node.id)
node.add_child(child_node, child_weight)
# print('children added')
elif isinstance(node, ProductNode):
logging.debug('it is a product node %d', current_id)
# linking children
for child_slice in current_children_slices:
child_id = child_slice.id
child_node = node_assoc[child_id]
# checking for alternating type
if isinstance(child_node, ProductNode):
logging.debug('++ pruning node: %d', child_node.id)
# this shall be pruned
for grand_child in child_node.children:
node.add_child(grand_child)
else:
node.add_child(child_node)
# print('+++ Linking child %d', child_node.id)
# this is superfluous, returning a pointer to the root
root_build_node = building_stack[0]
return node_assoc[root_build_node.id]
@classmethod
def layered_pruned_linked_spn(cls, root_node):
"""
WRITEME
"""
#
# first traverse the spn top down to collect a bottom up traversal order
# it could be done in a single pass I suppose, btw...
building_queue = deque()
traversal_stack = deque()
building_queue.append(root_node)
while building_queue:
#
# getting current node
curr_node = building_queue.popleft()
#
# appending it to the stack
traversal_stack.append(curr_node)
#
# considering children
            try:
                for child in curr_node.children:
                    building_queue.append(child)
            except AttributeError:
                # leaf nodes have no children attribute
                pass
#
# now using the inverse traversal order
for node in reversed(traversal_stack):
# print('retrieved node', node)
# discriminate by type
if isinstance(node, SumNode):
logging.debug('it is a sum node %d', node.id)
current_children = node.children[:]
current_weights = node.weights[:]
# getting children
children_to_add = deque()
children_weights_to_add = deque()
for child_node, child_weight in zip(current_children,
current_weights):
# print(child_slice)
# print(child_slice.id)
# print(node_assoc)
                    # print(child_node)
                    # checking children types as well
                    if isinstance(child_node, SumNode):
                        # this one shall be pruned
                        logging.debug('++ pruning node: %d', child_node.id)
# del node.children[i]
# del node.weights[i]
# adding subchildren
for grand_child, grand_child_weight \
in zip(child_node.children,
child_node.weights):
children_to_add.append(grand_child)
children_weights_to_add.append(grand_child_weight *
child_weight)
# node.add_child(grand_child,
# grand_child_weight *
# child_weight)
# print(
# 'remaining children', [c.id for c in node.children])
else:
children_to_add.append(child_node)
children_weights_to_add.append(child_weight)
#
# adding all the children (ex grand children)
node.children.clear()
node.weights.clear()
for child_to_add, weight_to_add in zip(children_to_add, children_weights_to_add):
node.add_child(child_to_add, weight_to_add)
# else:
# print('+++ Adding it as child: %d', child_node.id)
# node.add_child(child_node, child_weight)
# print('children added')
elif isinstance(node, ProductNode):
logging.debug('it is a product node %d', node.id)
current_children = node.children[:]
children_to_add = deque()
# linking children
for i, child_node in enumerate(current_children):
# checking for alternating type
if isinstance(child_node, ProductNode):
# this shall be pruned
logging.debug('++ pruning node: %d', child_node.id)
# this must now be useless
# del node.children[i]
# adding children
for grand_child in child_node.children:
children_to_add.append(grand_child)
# node.add_child(grand_child)
else:
children_to_add.append(child_node)
# node.add_child(child_node)
# print('+++ Linking child %d', child_node.id)
#
# adding grand children
node.children.clear()
for child_to_add in children_to_add:
node.add_child(child_to_add)
"""
#
# printing
print(\"TRAVERSAL\")
building_queue = deque()
building_queue.append(root_node)
while building_queue:
#
# getting current node
curr_node = building_queue.popleft()
#
# appending it to the stack
print(curr_node)
#
# considering children
try:
for child in curr_node.children:
building_queue.append(child)
except:
pass
"""
#
# now transforming it layer wise
# spn = SpnFactory.layered_linked_spn(root_node)
return root_node
def merge_block_layers(layer_1, layer_2):
    """
    Merging two block layers of the same type into a single one,
    taking the union of their nodes and relinking their inputs and outputs
    """
    #
    # check for type
    assert type(layer_1) is type(layer_2)
#
# merging nodes
merged_nodes = [node for node in layer_1.nodes()]
merged_nodes += [node for node in layer_2.nodes()]
#
# merging
merged_layer = type(layer_1)(merged_nodes)
#
# merging i/o
merged_inputs = layer_1.input_layers | layer_2.input_layers
merged_outputs = layer_1.output_layers | layer_2.output_layers
#
# relinking
merged_layer.input_layers = merged_inputs
merged_layer.output_layers = merged_outputs
for i in merged_inputs:
i.add_output_layer(merged_layer)
for o in merged_outputs:
o.add_input_layer(merged_layer)
return merged_layer
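# A minimal usage sketch (layer_a and layer_b are placeholders for two block
# layers of the same type, e.g. two SumLayerLinked instances):
#
#   merged = merge_block_layers(layer_a, layer_b)
#
# merged now holds all their nodes and is doubly linked to the union of their
# former input and output layers; the original pair still has to be
# disconnected by the caller (see merge_block_layers_spn below)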
from scopes import topological_layer_sort
def compute_block_layer_depths(spn):
    """
    Associating each block layer with its depth, the input layer being at 0
    """
#
# sort layers topologically
topo_sorted_layers = topological_layer_sort(list(spn.top_down_layers()))
#
# traverse them in this order and associate them by depth
depth_dict = {}
depth_dict[spn.input_layer()] = 0
for layer in topo_sorted_layers:
child_layer_depths = [depth_dict[p] for p in layer.input_layers]
depth_dict[layer] = max(child_layer_depths) + 1
return depth_dict
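# A sketch of the resulting map on a hypothetical 3-layer chain: with an
# input layer I feeding a product layer P feeding a sum layer S, the dict is
# {I: 0, P: 1, S: 2}; a layer fed by layers at different depths gets
# max(input depths) + 1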
def edge_density_after_merge(layer_1, layer_2):
    """
    Estimating the edge density the merged layer would have:
    actual edges over the maximum possible input x output edges
    """
n_input_nodes_1 = sum([l.n_nodes() for l in layer_1.input_layers])
n_input_nodes_2 = sum([l.n_nodes() for l in layer_2.input_layers])
n_input_nodes = n_input_nodes_1 + n_input_nodes_2
n_output_nodes = layer_1.n_nodes() + layer_2.n_nodes()
n_max_edges = n_input_nodes * n_output_nodes
n_edges = layer_1.n_edges() + layer_2.n_edges()
return n_edges / n_max_edges
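# A worked numeric example (hypothetical figures): if layer_1 has 3 nodes,
# 4 edges and inputs totalling 2 nodes, and layer_2 has 2 nodes, 3 edges and
# inputs totalling 3 nodes, the merged block would span (2 + 3) * (3 + 2) = 25
# possible edges, of which 4 + 3 = 7 exist, giving a density of 0.28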
def merge_block_layers_spn(spn, threshold, compute_heuristics=edge_density_after_merge):
"""
    Given an alternated layer linked SPN made of many block layers, tries to
    aggregate them into macro blocks
"""
#
    # labeling each block with its depth level
layer_depth_dict = compute_block_layer_depths(spn)
#
# create an inverse dict with depth level -> blocks
depth_layer_dict = defaultdict(set)
for layer, depth in layer_depth_dict.items():
depth_layer_dict[depth].add(layer)
#
    # here we store the new levels; the input layer is assumed to always stay alone
mod_layers = []
#
# from each level, starting from the bottom, excluding the input layer
for k in sorted(depth_layer_dict.keys())[1:]:
print('Considering depth {}'.format(k))
mergeable = True
k_depth_layers = depth_layer_dict[k]
while mergeable:
#
# retrieve layers at that depth
#
        # for each possible pair compute a heuristic score
best_score = -numpy.inf
best_pair = None
layer_pairs = itertools.combinations(k_depth_layers, 2)
can_merge = False
for layer_1, layer_2 in layer_pairs:
print('\tConsidering layers: {0} {1}'.format(layer_1.id,
layer_2.id))
score = compute_heuristics(layer_1, layer_2)
if score > best_score and score > threshold:
can_merge = True
best_score = score
best_pair = (layer_1, layer_2)
if can_merge:
print('merging', best_pair[0].id, best_pair[1].id)
#
# merging the best pair
merged_layer = merge_block_layers(*best_pair)
#
# disconnecting the previous ones
best_pair[0].disconnect_layer()
best_pair[1].disconnect_layer()
#
# storing them back
k_depth_layers = [l for l in k_depth_layers
if l != best_pair[0] and l != best_pair[1]]
k_depth_layers.append(merged_layer)
else:
mergeable = False
#
# finally storing them
mod_layers.extend(k_depth_layers)
#
# creating an SPN out of it:
mod_spn = SpnLinked(input_layer=spn.input_layer(),
layers=mod_layers)
return mod_spn
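# A minimal usage sketch (assuming `spn` is an alternated block-linked SPN,
# e.g. the output of SpnFactory.layered_linked_spn):
#
#   merged_spn = merge_block_layers_spn(spn, threshold=0.1)
#
# same-depth block layers keep being merged greedily as long as the best
# pair's post-merge edge density stays above the threshold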
def retrieve_children_parent_assoc(spn, root=None):
"""
    Builds a map child node -> set of parent nodes from a linked spn
"""
if root is None:
root = spn.root()
parent_assoc = defaultdict(set)
#
# traversing it
for node in spn.top_down_nodes():
if hasattr(node, 'children') and node.children:
for child in node.children:
parent_assoc[child].add(node)
return parent_assoc
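# A minimal usage sketch (`spn` and `node` are placeholders):
#
#   parent_map = retrieve_children_parent_assoc(spn)
#   parents = parent_map[node]  # a (possibly empty) set of parent nodes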
def linked_categorical_input_to_indicators(spn, input_layer=None):
"""
    Converts the categorical input layer of a linked spn into an indicator one
"""
#
# get child, parent relations for node relinking
child_assoc = retrieve_children_parent_assoc(spn)
#
# get input layer
cat_input_layer = spn.input_layer()
assert isinstance(cat_input_layer, CategoricalSmoothedLayerLinked)
#
# one indicator node for each var value
vars = cat_input_layer.vars()
if not vars:
vars = list(sorted({node.var for node in cat_input_layer.nodes()}))
feature_values = cat_input_layer.feature_vals()
# print('vars', vars)
# print('feature values', feature_values)
indicator_nodes = [CategoricalIndicatorNode(var, val)
for i, var in enumerate(vars) for val in range(feature_values[i])]
# for node in indicator_nodes:
# print(node)
indicator_map = defaultdict(set)
for ind_node in indicator_nodes:
indicator_map[ind_node.var].add(ind_node)
sum_nodes = []
#
# as many sum nodes as cat nodes
for node in cat_input_layer.nodes():
sum_node = SumNode(var_scope=frozenset([node.var]))
sum_nodes.append(sum_node)
for ind_node in sorted(indicator_map[node.var], key=lambda x: x.var_val):
sum_node.add_child(ind_node, numpy.exp(node._var_probs[ind_node.var_val]))
#
# removing links to parents
parents = child_assoc[node]
for p_node in parents:
#
# assume it to be a product node
# TODO: generalize
assert isinstance(p_node, ProductNode)
p_node.children.remove(node)
p_node.add_child(sum_node)
#
# creating layer
sum_layer = SumLayerLinked(sum_nodes)
indicator_layer = CategoricalIndicatorLayerLinked(indicator_nodes)
cat_input_layer.disconnect_layer()
spn.set_input_layer(indicator_layer)
spn.insert_layer(sum_layer, 0)
return spn
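# A tiny worked sketch of the conversion (hypothetical numbers): a categorical
# leaf over a binary var X with P(X=0)=0.3 and P(X=1)=0.7 is replaced by a
# sum node 0.3 * [X=0] + 0.7 * [X=1] over two fresh indicator children, so
# the distribution encoded by the whole SPN is unchanged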
def make_marginalized_network_constant(spn, vars_to_marginalize):
"""
Replacing sub networks whose scope has to be marginalized over
with constant nodes
"""
#
# get child, parent relations for node relinking
child_assoc = retrieve_children_parent_assoc(spn)
const_nodes_to_add = []
scope_to_marginalize = frozenset(vars_to_marginalize)
#
# bottom up traversal
for layer in spn.bottom_up_layers():
layer_nodes_to_remove = []
for node in layer.nodes():
#
# is this a non-leaf node? or a leaf whose scope is to marginalize?
to_remove = False
if hasattr(node, 'children'):
#
# if all his children are constants, we can remove it
if all([isinstance(child, ConstantNode) for child in node.children]):
to_remove = True
else:
if node.var in scope_to_marginalize:
to_remove = True
if to_remove:
const_node = ConstantNode(node.var)
const_nodes_to_add.append(const_node)
parents = child_assoc[node]
#
# unlink it from parents and relink constant node
for p_node in parents:
if isinstance(p_node, SumNode):
w = p_node.remove_child(node)
p_node.add_child(const_node, w)
else:
p_node.remove_child(node)
p_node.add_child(const_node)
#
# remove it from the layer as well
layer_nodes_to_remove.append(node)
for node in layer_nodes_to_remove:
layer.remove_node(node)
#
# is the layer now empty?
if not layer._nodes:
raise ValueError('Layer is empty, unhandled case')
#
# adding all constant nodes to the previous nodes in a new input layer
input_nodes = [node for node in spn.input_layer().nodes()] + const_nodes_to_add
new_input_layer = CategoricalInputLayer(nodes=input_nodes)
spn.set_input_layer(new_input_layer)
def split_layer_by_node_scopes(layer, node_layer_assoc, group_by=10):
"""
    Splits a layer according to its nodes' scopes. It may be useful
for indicator layers with many nodes
"""
scopes_to_nodes = defaultdict(set)
n_nodes = len(list(layer.nodes()))
for node in layer.nodes():
        if hasattr(node, 'var_scope') and node.var_scope:
            scopes_to_nodes[frozenset(node.var_scope)].add(node)
        # NOTE: checking var against None explicitly, since var 0 is falsy
        elif hasattr(node, 'var') and node.var is not None:
            scopes_to_nodes[frozenset([node.var])].add(node)
        else:
            raise ValueError('Node without scope {}'.format(node))
#
    # aggregating scopes together into groups?
sub_layers = None
n_scopes = len(scopes_to_nodes)
if group_by: # and group_by < n_scopes:
n_groups = n_scopes // group_by if n_scopes % group_by == 0 else n_scopes // group_by + 1
        # print(n_groups)
node_groups = [[] for j in range(n_groups)]
for i, (_scope, nodes) in enumerate(scopes_to_nodes.items()):
node_groups[i % n_groups].extend(nodes)
sub_layers = [layer.__class__(nodes=nodes)
for nodes in node_groups if nodes]
else:
sub_layers = [layer.__class__(nodes=nodes)
for _output, nodes in scopes_to_nodes.items()]
#
# we have to update the node layer assoc map
for s in sub_layers:
for node in s.nodes():
node_layer_assoc[node] = s
print('[S] Layer: {} ({}) into {} layers {} ({})'.format(layer.id,
layer.__class__.__name__,
len(sub_layers),
[l.id for l in sub_layers],
[len(list(l.nodes()))
for l in sub_layers]))
return sub_layers
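# A small illustrative sketch (hypothetical counts): a layer holding nodes
# over 25 distinct single-var scopes with group_by=10 is split into
# 25 // 10 + 1 = 3 sublayers, with the scopes dealt round-robin so each
# sublayer collects nodes from roughly 8-9 scopes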
def split_layer_by_outputs(layer,
child_parent_assoc,
node_layer_assoc,
max_n_nodes=None):
"""
Splits a layer into different sublayers whose nodes have outputs in the same
layers, if possible.
    Note: the sublayers cannot be directly reused in a linked spn,
    otherwise node/edge statistics get messed up
TODO: fix this
"""
if max_n_nodes is None:
max_n_nodes = numpy.inf
output_to_nodes = defaultdict(set)
for node in layer.nodes():
output_layers = set()
for parent in child_parent_assoc[node]:
output_layers.add(node_layer_assoc[parent])
output_to_nodes[frozenset(output_layers)].add(node)
# sub_layers = [layer.__class__(nodes=nodes)
# for _output, nodes in output_to_nodes.items()]
sub_layers = []
for _output, nodes in output_to_nodes.items():
#
# do we need to break the layer even more?
if len(nodes) < max_n_nodes:
sub_layers.append(layer.__class__(nodes=nodes))
        else:
            # fixing one ordering of the node set before chunking it
            node_list = list(nodes)
            for i in range(0, len(nodes), max_n_nodes):
                sub_layers.append(layer.__class__(nodes=node_list[i:i + max_n_nodes]))
#
# we have to update the node layer assoc map
for s in sub_layers:
for node in s.nodes():
node_layer_assoc[node] = s
print('[O] Layer: {} ({}) into {} layers {} ({})'.format(layer.id,
layer.__class__.__name__,
len(sub_layers),
[l.id for l in sub_layers],
[len(list(l.nodes()))
for l in sub_layers]))
# if mixed_output_nodes:
# sub_layers.append(layer.__class__(nodes=list(mixed_output_nodes)))
return sub_layers
def build_theanok_input_layer(input_layer, n_features, feature_vals):
input_dim = n_features
output_dim = len(list(input_layer.nodes()))
mask = []
for node in input_layer.nodes():
mask.append(sum(feature_vals[:node.var]) + node.var_val)
mask = numpy.array(mask)
# print('mask', mask)
return InputLayerTheanok(input_dim, output_dim, mask, layer_id=input_layer.id)
def build_theanok_layer(output_layer, input_layers, theano_inputs, dtype=float):
"""
Creating a theanok layer representing the linked output_layer
and considering its (linked) input layers already built.
"""
output_nodes = list(output_layer.nodes())
input_nodes = []
for l in sorted(input_layers):
input_nodes.extend(list(l.nodes()))
# print('input nodes {}'.format([n.id for n in input_nodes]))
output_dim = len(output_nodes)
input_dim = len(input_nodes)
output_nodes_assoc = {node: i for i, node in enumerate(output_nodes)}
input_nodes_assoc = {node: i for i, node in enumerate(input_nodes)}
#
# creating the weight matrix
W = numpy.zeros((input_dim, output_dim), dtype=dtype)
if isinstance(output_layer, SumLayerLinked):
for node in output_nodes:
for j, child in enumerate(node.children):
# print('{}->{} ({}, {})'.format(node.id,
# child.id,
# input_nodes_assoc[child],
# output_nodes_assoc[node]))
W[input_nodes_assoc[child], output_nodes_assoc[node]] = node.weights[j]
elif isinstance(output_layer, ProductLayerLinked):
for node in output_nodes:
for child in node.children:
# print('{}->{} ({}, {})'.format(node.id,
# child.id,
# input_nodes_assoc[child],
# output_nodes_assoc[node]))
W[input_nodes_assoc[child], output_nodes_assoc[node]] = 1
else:
raise ValueError('Unrecognized layer type: {}'.format(output_layer.__class__.__name__))
#
# creating scope matrix
# TODO: creating the scope matrix
scope = None
#
# creating layer
layer = None
if isinstance(output_layer, SumLayerLinked):
layer = SumLayerTheanok(input_dim=input_dim,
output_dim=output_dim,
layer_id=output_layer.id,
weights=W)
elif isinstance(output_layer, ProductLayerLinked):
layer = ProductLayerTheanok(input_dim=input_dim,
output_dim=output_dim,
layer_id=output_layer.id,
weights=W)
else:
raise ValueError('Unrecognized layer type: {}'.format(output_layer.__class__.__name__))
#
# double linking it
for input_layer in theano_inputs:
layer.add_input_layer(input_layer)
input_layer.add_output_layer(layer)
return layer
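# A tiny worked sketch of the W construction above (hypothetical ids): for a
# sum node s with children c0, c1 and weights [0.3, 0.7], the entries are
# W[row(c0), col(s)] = 0.3 and W[row(c1), col(s)] = 0.7, where rows index the
# concatenated input layer nodes and columns the output nodes; for product
# layers the entries are plain 0/1 connectivity indicators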
def build_theanok_spn_from_block_linked(spn,
n_features,
feature_vals,
group_by=0,
max_n_nodes_layer=None):
"""
    Translating a block linked spn into a block theano-keras-like one
    (a BlockLayeredSpn)
"""
#
# setting counter to the current max
max_node_count = max([node.id for node in spn.top_down_nodes()]) + 1
max_layer_count = max([layer.id for layer in spn.top_down_layers()]) + 1
Node.set_id_counter(max_node_count)
LayerLinked.set_id_counter(max_layer_count)
#
# transforming the categorical input layer into a layer of indicator nodes
if isinstance(spn.input_layer(), CategoricalSmoothedLayerLinked):
logging.info('Transforming input layer from categorical to indicators...')
spn = linked_categorical_input_to_indicators(spn)
node_layer_map = {node: layer for layer in spn.bottom_up_layers() for node in layer.nodes()}
child_parent_map = retrieve_children_parent_assoc(spn)
#
# top down layers traversal, discarding input layer and splitting
top_down_layers = []
for l in list(spn.top_down_layers()):
split_layers = split_layer_by_outputs(l, child_parent_map, node_layer_map,
max_n_nodes=max_n_nodes_layer)
# #
# # we can split input layers even further
# if isinstance(l, CategoricalIndicatorLayerLinked):
# for s in split_layers:
# top_down_layers.extend(split_layer_by_node_scopes(s, group_by))
# else:
# if l.id == max_layer_count + 1:
# pass
# for s in split_layers:
# top_down_layers.extend(split_layer_by_node_scopes(s,
# node_layer_map,
# group_by))
# else:
# top_down_layers.extend(split_layers)
top_down_layers.extend(split_layers)
#
# recomputing the node layer map
node_layer_map = {node: layer for layer in top_down_layers for node in layer.nodes()}
# for node in spn.input_layer().nodes():
# node_layer_map[node] = spn.input_layer()
#
# linked layer -> theano layer
layer_to_layer_map = {}
    #
    # proceeding bottom up
for layer in reversed(top_down_layers):
logging.debug('{}'.format(layer))
#
# retrieve the input layers
input_layers = set()
#
# indicator node
if isinstance(layer, CategoricalIndicatorLayerLinked):
theano_layer = build_theanok_input_layer(layer, n_features, feature_vals)
else:
for node in layer.nodes():
if hasattr(node, 'children'):
for child in node.children:
# if child in node_layer_map:
input_layers.add(node_layer_map[child])
theano_layers = [layer_to_layer_map[l]
for l in sorted(input_layers) if l in layer_to_layer_map]
#
# building a theano layer
dtype = None
if isinstance(layer, SumLayerLinked):
dtype = float
elif isinstance(layer, ProductLayerLinked):
dtype = int
theano_layer = build_theanok_layer(layer, input_layers, theano_layers, dtype=dtype)
#
# adding it into the mapping
layer_to_layer_map[layer] = theano_layer
    #
    # topologically ordering the theano layers
    theano_layers = list(layer_to_layer_map.values())
ordered_theano_layers = topological_layer_sort(theano_layers)
theano_layers_seq = [(layer, layer.input_layers) for layer in ordered_theano_layers]
#
# build and compile
theano_spn = BlockLayeredSpn(layers=theano_layers_seq)
#
# printing layer stats
logging.info(theano_spn.layer_stats())
#
# compiling theano functions
theano_spn.compile()
return theano_spn
NODE_LAYER_TYPE_ASSOC = {
SumNode: SumLayerLinked,
ProductNode: ProductLayerLinked,
CategoricalIndicatorNode: CategoricalIndicatorLayerLinked}
def build_linked_layer_from_nodes(nodes):
return NODE_LAYER_TYPE_ASSOC[nodes[0].__class__](nodes)
| 56,522
| 36.358229
| 97
|
py
|
spyn-repr
|
spyn-repr-master/spn/__init__.py
|
import sys
# marginalize indicator
MARG_IND = -1
# log of zero const, to avoid -inf
# numpy.exp(LOG_ZERO) = 0
LOG_ZERO = -1e3
def IS_LOG_ZERO(log_val):
"""
    Checks whether a value represents the logarithm of 0.
    The identity to be verified is:
    IS_LOG_ZERO(x) implies exp(x) == 0 (by floating point underflow),
    according to the constant LOG_ZERO
"""
return (log_val <= LOG_ZERO)
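# A tiny usage sketch: IS_LOG_ZERO(LOG_ZERO) and IS_LOG_ZERO(2 * LOG_ZERO)
# are both True, while IS_LOG_ZERO(-999.9) is False, so only values clipped
# at LOG_ZERO (or below) are treated as log(0)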
# defining a numerical correction for 0
EPSILON = sys.float_info.min
# size for integers
INT_TYPE = 'int8'
# seed for random generators
RND_SEED = 31
# negative infinity for worst log-likelihood
NEG_INF = -sys.float_info.max
# abstract class definition
from abc import ABCMeta
from abc import abstractmethod
class AbstractSpn(metaclass=ABCMeta):
"""
WRITEME
"""
# __metaclass__ = ABCMeta
@abstractmethod
def __init__(self, input_layer=None):
"""
WRITEME
"""
@abstractmethod
def eval(self, input):
"""
WRITEME
"""
#
# we just have one input layer
#
@abstractmethod
def set_input_layer(self, layer):
"""
WRITEME
"""
@abstractmethod
def top_down_nodes(self):
"""
Top down node traversal
"""
@abstractmethod
def fit(self, train, valid, test, algo, options):
"""
WRITEME
"""
def __repr__(self):
"""
Printing an SPN summary
"""
layer_strings = [msg for msg in map(str, self._layers)]
layer_strings.reverse()
layer_strings.append(str(self._input_layer))
stats = '\n'.join(layer_strings)
return stats
def input_layer(self):
"""
WRITEME
"""
return self._input_layer
def smooth_leaves(self, alpha):
"""
Laplacian smoothing of the probability values
of the leaf nodes (if the leaf represents a univariate distribution)
"""
self._input_layer.smooth_probs(alpha)
def n_nodes(self):
"""
WRITEME
"""
# nodes = self._input_layer.n_nodes()
nodes = len(list(self.top_down_nodes()))
return nodes
def n_edges(self):
"""
WRITEME
"""
#
# adding input layer too, it may contain cltrees
# edges = self._input_layer.n_edges()
edges = [len(node.children) for node in self.top_down_nodes()
if hasattr(node, 'children')]
return sum(edges)
def n_leaves(self):
"""
WRITEME
"""
return self._input_layer.n_nodes()
def n_weights(self):
"""
WRITEME
"""
weights = [len(node.weights) for node in self.top_down_nodes()
if hasattr(node, 'weights')]
return weights
def stats(self):
"""
WRITEME
"""
# total stats
stats = '*************************\n'\
'* nodes:\t{0}\t*\n'\
'* edges:\t{1}\t*\n'\
'* weights:\t{2}\t*\n'\
'*************************'.format(self.n_nodes(),
self.n_edges(),
self.n_weights())
return stats
class AbstractLayeredSpn(AbstractSpn, metaclass=ABCMeta):
"""
WRITEME
"""
# __metaclass__ = ABCMeta
@abstractmethod
def __init__(self, input_layer=None, layers=[]):
"""
WRITEME
"""
@abstractmethod
def eval(self, input):
"""
WRITEME
"""
#
# layer setting routines
#
@abstractmethod
def set_input_layer(self, layer):
"""
WRITEME
"""
@abstractmethod
def set_layers(self, layers):
"""
WRITEME
"""
@abstractmethod
def add_layer(self, layer, pos=None):
"""
WRITEME
"""
@abstractmethod
def fit(self, train, valid, test, algo, options):
"""
WRITEME
"""
def __repr__(self):
"""
Printing an SPN summary
WRITEME
"""
layer_strings = [msg for msg in map(str, self._layers)]
layer_strings.reverse()
layer_strings.append(str(self._input_layer))
stats = '\n'.join(layer_strings)
return stats
def top_down_layers(self):
"""
Traversing layers top down
"""
for layer in reversed(self._layers):
yield layer
yield self._input_layer
def bottom_up_layers(self):
"""
        Traversing layers bottom up
"""
yield self._input_layer
for layer in self._layers:
yield layer
def input_layer(self):
"""
WRITEME
"""
return self._input_layer
def smooth_leaves(self, alpha):
"""
Laplacian smoothing of the probability values
of the leaf nodes (if the leaf represents a univariate distribution)
"""
self._input_layer.smooth_probs(alpha)
def n_layers(self):
"""
WRITEME
"""
return len(self._layers) + 1
def n_nodes(self):
"""
WRITEME
"""
nodes = self._input_layer.n_nodes()
for layer in self._layers:
nodes += layer.n_nodes()
return nodes
def n_edges(self):
"""
WRITEME
"""
#
# adding input layer too, it may contain cltrees
edges = self._input_layer.n_edges()
for layer in self._layers:
edges += layer.n_edges()
return edges
def n_leaves(self):
"""
WRITEME
"""
return self._input_layer.n_nodes()
def n_weights(self):
"""
WRITEME
"""
weights = 0
for layer in self._layers:
weights += layer.n_weights()
return weights
def stats(self):
"""
WRITEME
"""
# total stats
stats = '*************************\n'\
'* levels:\t{0}\t*\n'\
'* nodes:\t{1}\t*\n'\
'* edges:\t{2}\t*\n'\
'* weights:\t{3}\t*\n'\
'*************************'.format(self.n_layers(),
self.n_nodes(),
self.n_edges(),
self.n_weights())
return stats
| 6,449
| 21.089041
| 76
|
py
|
spyn-repr
|
spyn-repr-master/spn/theanok/initializations.py
|
import numpy as np
import theano
import theano.tensor as T
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def sharedX(X, dtype=theano.config.floatX, name=None):
return theano.shared(np.asarray(X, dtype=dtype), name=name)
def shared_zeros(shape, dtype=theano.config.floatX, name=None):
return sharedX(np.zeros(shape), dtype=dtype, name=name)
def shared_scalar(val=0., dtype=theano.config.floatX, name=None):
    return theano.shared(np.cast[dtype](val), name=name)
def shared_ones(shape, dtype=theano.config.floatX, name=None):
return sharedX(np.ones(shape), dtype=dtype, name=name)
def uniform(shape, scale=0.05):
return sharedX(np.random.uniform(low=-scale, high=scale, size=shape))
def ndim_tensor(ndim):
if ndim == 1:
return T.vector()
elif ndim == 2:
return T.matrix()
elif ndim == 3:
return T.tensor3()
elif ndim == 4:
return T.tensor4()
return T.matrix()
class Initialization(object):
_init_dict = {'uniform': uniform,
}
@classmethod
def get(cls, init):
if init in Initialization._init_dict:
return Initialization._init_dict[init]
else:
raise ValueError('Init method not present', init)
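# A minimal usage sketch: retrieving the only registered initializer by name
#
#   init_fn = Initialization.get('uniform')
#   W = init_fn((6, 4))  # a theano shared (6, 4) matrix, uniform in [-0.05, 0.05]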
| 1,260
| 22.351852
| 73
|
py
|
spyn-repr
|
spyn-repr-master/spn/theanok/layers.py
|
import numpy
import theano
import theano.tensor as T
from spn import LOG_ZERO
from .initializations import Initialization, sharedX, ndim_tensor
import os
#
# inspired by Keras
#
def exp_activation(x):
return T.exp(x)
def log_activation(x):
return T.log(x).clip(LOG_ZERO, 0.)
def log_sum_exp_activation(x, axis=1):
x_max = T.max(x, axis=axis, keepdims=True)
return T.log(T.sum(T.exp(x - x_max), axis=axis, keepdims=True)) + x_max
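# Why the max shift above matters, on a tiny numeric example: for
# x = [-1000., -1001.] a naive log(sum(exp(x))) underflows to log(0) = -inf,
# while max(x) + log(sum(exp(x - max(x)))) = -1000 + log(1 + exp(-1)),
# roughly -999.687, which is the correct value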
class TheanokLayer(object):
"""
WRITEME
"""
__module__ = os.path.splitext(os.path.basename(__file__))[0]
_counter_id = 0
def __init__(self,
output_dim,
weights,
input_dim=None,
input_layers=None,
layer_id=None,
init='uniform',
activation=None,
constraints=None,
scope=None,
batch_size=None):
"""
        Initializing the layer
"""
#
        # set id (checking against None explicitly, since 0 is a valid id)
        if layer_id is not None:
            self.id = layer_id
        else:
            self.id = TheanokLayer._counter_id
            TheanokLayer._counter_id += 1
#
# storing input/output layers refs
self.input_layers = set()
if input_layers:
input_dim = sum([l.output_dim for l in input_layers])
for i in input_layers:
self.input_layers.add(i)
elif not input_dim:
raise ValueError('Input dim not specified')
self.output_layers = set()
if weights is not None:
assert weights.shape[0] == input_dim
assert weights.shape[1] == output_dim
self.initial_weights = weights
else:
self.initial_weights = None
self.batch_size = batch_size
#
# setting scope matrix
self.scope = scope
#
# setting dimensions
self.input_dim = input_dim
self.output_dim = output_dim
self.set_input_shape((self.input_dim,))
#
# setting activation
self.activation = activation
#
# setting optimization constraints
        self.constraints = constraints
#
# parameters initialization
self.init = Initialization.get(init)
def build(self):
"""
WRITEME
"""
#
# nb_samples X n_input_units
self.input = T.matrix()
#
# n_input_units X n_output_units
if self.initial_weights is not None:
self.W = sharedX(self.initial_weights, name='W_{}'.format(self.id))
self.weights = [self.W]
#
# n_output_units X n_vars
        # explicit None check: truth testing a numpy array would raise
        if self.scope is not None:
            self.C = sharedX(self.scope, name='C_{}'.format(self.id))
#
# TODO: clean the superfluous parts here from keras
def set_input_shape(self, input_shape):
        if not isinstance(input_shape, (tuple, list)):
raise Exception('Invalid input shape - input_shape should be a tuple of int.')
input_shape = (None,) + tuple(input_shape)
if hasattr(self, 'input_ndim') and self.input_ndim:
if self.input_ndim != len(input_shape):
raise Exception('Invalid input shape - Layer expects input ndim=' +
str(self.input_ndim) + ', was provided with input shape '
+ str(input_shape))
self._input_shape = input_shape
self.input = ndim_tensor(len(self._input_shape))
self.build()
def add_input_layer(self, layer):
self.input_layers.add(layer)
def add_output_layer(self, layer):
self.output_layers.add(layer)
def set_previous(self, previous_layers):
"""
WRITEME
"""
previous_output_dim = sum([l.output_dim for l in previous_layers])
assert self.input_dim == previous_output_dim
# self.previous = previous_layer
self.input_layers = previous_layers
self.build()
def get_output(self, train=False):
X = self.get_input(train)
output = self.activation(T.dot(X, self.W))
return output
def get_input(self, train=False):
# if hasattr(self, 'previous'):
# return self.previous.get_output(train=train)
if hasattr(self, 'input_layers') and self.input_layers:
previous_outputs = [l.get_output(train=train) for l in sorted(self.input_layers)]
return theano.tensor.concatenate(previous_outputs, axis=1)
elif hasattr(self, 'input'):
return self.input
else:
            raise Exception('Layer is not connected '
                            'and is not an input layer.')
def get_weights(self):
weights = []
for p in self.params:
weights.append(p.get_value())
return weights
def n_nodes(self):
return self.output_dim
def n_edges(self):
if hasattr(self, 'W'):
return numpy.sum(self.W.get_value() > 0.0)
else:
return 0
def __eq__(self, layer):
return self.id == layer.id
def __lt__(self, layer):
return self.id < layer.id
def __hash__(self):
# print('has')
# from pprint import pprint
# pprint(vars(self))
return hash(self.id)
def compile(self,):
"""
Creating a theano function to retrieve the layer output
"""
self.evaluate_layer_func = theano.function([self.input], self.get_output())
# output = self.get_output()
# self.evaluate_layer_func = theano.function([self.get_input()], output)
def evaluate(self, input_signal, flatten=False):
res = self.evaluate_layer_func(input_signal)
if flatten:
res = res.flatten()
return res
def stats(self):
n_edges = self.n_edges()
stats_str = '{1}\tx\t{0},\t{2}\t({3})'.format(self.n_nodes(),
self.input_dim,
n_edges,
n_edges / (self.n_nodes() * self.input_dim))
return stats_str
def __repr__(self):
layer_str = 'id:{0} [{1}]->[{2}]\n'.format(self.id,
','.join([str(l.id)
for l in sorted(self.input_layers)]),
','.join([str(l.id)
for l in sorted(self.output_layers)]))
weights_str = ""
if hasattr(self, 'W'):
weights_str = '\n{}\n'.format(self.W.get_value())
div = '\n**********************************************************\n'
stats_str = self.stats()
return layer_str + weights_str + stats_str + div
class SumLayer(TheanokLayer):
def __init__(self,
input_dim,
output_dim,
weights,
init='uniform'):
"""
Properly calling basic layer
"""
super().__init__(input_dim=input_dim,
output_dim=output_dim,
weights=weights,
init=init,
activation=log_activation,
constraints=None)
def build(self):
#
# building the base layer
super().build()
        #
        # then storing the weights as parameters
self.params = [self.W]
def __repr__(self):
return '[sum layer:]\n' + TheanokLayer.__repr__(self)
class InputLayer_logspace(TheanokLayer):
def __init__(self,
input_dim,
output_dim,
mask,
layer_id=None):
"""
Just doing the logarithm of the input
"""
assert len(mask) == output_dim
self.mask = mask
super().__init__(input_dim=input_dim,
output_dim=output_dim,
weights=None,
layer_id=layer_id,
# activation=log_sum_exp_activation,
constraints=None)
def build(self):
#
# building the base layer
super().build()
self.M = sharedX(self.mask, name='mask_{}'.format(self.id), dtype=int)
self.params = []
def get_output(self, train=False):
X = self.get_input(train)
return T.clip(T.log(X[:, self.M]), LOG_ZERO, 0)
def __repr__(self):
return '[input layer log:]\n' + TheanokLayer.__repr__(self)
class SumLayer_logspace(TheanokLayer):
# __module__ = os.path.splitext(os.path.basename(__file__))[0]
def __init__(self,
input_dim,
output_dim,
weights,
layer_id=None,
init='uniform'):
"""
        The activation function is the logsumexp;
        for numerical stability here we assume the product layer to be a linear layer
"""
super().__init__(input_dim=input_dim,
output_dim=output_dim,
weights=weights,
init=init,
layer_id=layer_id,
# activation=log_sum_exp_activation,
constraints=None)
def build(self):
#
# building the base layer
super().build()
#
# then storing the weights as parameters
self.params = [self.W]
    def get_output(self, train=False):
        X = self.get_input(train)
        # broadcasting to (batch, input_dim, output_dim): each entry holds
        # log(w_ij) + log(x_i); zero weights give -inf and vanish below
        X = T.log(self.W) + X.dimshuffle(0, 1, 'x')
        # log-sum-exp over the input axis, reshaped back to (batch, output_dim)
        x_max = T.max(X, axis=1, keepdims=True)
        return (T.log(T.sum(T.exp(X - x_max), axis=1, keepdims=True)) + x_max).reshape((X.shape[0],
                                                                                        self.W.shape[1]))
def __repr__(self):
return '[sum layer log:]\n' + TheanokLayer.__repr__(self)
class MaxLayer_logspace(TheanokLayer):
def __init__(self,
input_dim,
output_dim,
weights,
init='uniform',
layer_id=None,
batch_size=None):
"""
The activation function is a max (still in the log space)
"""
super().__init__(input_dim=input_dim,
output_dim=output_dim,
weights=weights,
init=init,
layer_id=layer_id,
# activation=log_sum_exp_activation,
constraints=None,
batch_size=batch_size)
def build(self):
#
# building the base layer
super().build()
# # FIXME: this shall cope with a variable batch size
# # storing a tensor for the max position values
# weight_shape = self.W.shape.eval()
# m_values = numpy.zeros((self.batch_size, weight_shape[0], weight_shape[1]))
# self.M = sharedX(m_values, name='M_{}'.format(self.id))
#
# then storing the weights as parameters
self.params = [self.W]
def get_output(self, train=False):
X = self.get_input(train)
X = T.log(self.W) + X.dimshuffle(0, 1, 'x')
X_max = T.max(X, axis=1)
return X_max
def compile(self,):
#
# and adding a function to retrieve the max map
# a binary mask that has 1 when there was the max connection
X = self.input
X = T.log(self.W) + X.dimshuffle(0, 1, 'x')
#
# TODO: mask only one value (argmax)
M = T.switch(T.eq(T.max(X, axis=1, keepdims=True), X), 1, 0)
self.evaluate_layer_func = theano.function([self.input], [self.get_output(), M])
def __repr__(self):
return '[max layer log:]\n' + TheanokLayer.__repr__(self)
class ProductLayer(TheanokLayer):
def __init__(self,
input_dim,
output_dim,
weights,
layer_id=None,
batch_size=None):
"""
Properly calling basic layer
"""
super().__init__(input_dim=input_dim,
output_dim=output_dim,
weights=weights,
activation=exp_activation,
layer_id=layer_id,
batch_size=batch_size)
def build(self):
#
# building the base layer
super().build()
#
        # Do we need to store the parameters for product layers?
self.params = [self.W]
def __repr__(self):
return '[prod layer:]\n' + TheanokLayer.__repr__(self)
class ProductLayer_logspace(TheanokLayer):
# __module__ = os.path.splitext(os.path.basename(__file__))[0]
def __init__(self,
input_dim,
output_dim,
weights,
layer_id=None,
batch_size=None):
"""
No activation function, the output is in the log domain
"""
super().__init__(input_dim=input_dim,
output_dim=output_dim,
weights=weights,
layer_id=layer_id,
batch_size=batch_size)
def get_output(self, train=False):
X = self.get_input(train)
output = T.dot(X, self.W)
return output
def build(self):
#
# building the base layer
super().build()
#
        # Do we need to store the parameters for product layers?
self.params = [self.W]
def __repr__(self):
return '[prod layer log:]\n' + TheanokLayer.__repr__(self)
| 13,956
| 28.259958
| 105
|
py
|
spyn-repr
|
spyn-repr-master/spn/theanok/__init__.py
| 0
| 0
| 0
|
py
|
|
spyn-repr
|
spyn-repr-master/spn/theanok/spn.py
|
import numpy
import theano
import theano.tensor as T
from .initializations import ndim_tensor
import sys
# from .layers import TheanokLayer
# from .layers import SumLayer_logspace
# from .layers import ProductLayer_logspace
# from .layers import InputLayer_logspace
import theano.misc.pkl_utils
import pickle
from collections import defaultdict
#
# TODO: inherit from AbstractSpn
class SequentialSpn(object):
    def __init__(self,
                 layers=None):
        """
        WRITEME
        """
        self.layers = []
        for layer in (layers or []):
            self.add(layer)
def add(self, layer):
"""
WRITEME
"""
self.layers.append(layer)
#
# for each layer after the first one
if len(self.layers) > 1:
            # set_previous expects a collection of layers
            self.layers[-1].set_previous([self.layers[-2]])
# if not hasattr(self.layers[0], 'input'):
# self.set_input()
def get_output(self, train=False):
"""
WRITEME
"""
return self.layers[-1].get_output(train)
# def set_input(self):
# for l in self.layers:
# if hasattr(l, 'input'):
# ndim = l.input.ndim
# self.layers[0].input = ndim_tensor(ndim)
# break
def get_input(self, train=False):
# if not hasattr(self.layers[0], 'input'):
# self.set_input()
return self.layers[0].get_input(train)
def compile(self, ):
"""
WRITEME
"""
def fit(self, ):
"""
WRITEME
"""
def predict(self, ):
"""
WRITEME
"""
def evaluate(self, ):
"""
WRITEME
"""
class BlockLayeredSpn(object):
    def __init__(self,
                 layers=None,
                 output_layers=None):
        """
        layers is a sequence of pairs (layer, input_layers_seq),
        where input_layers_seq is the sequence of layers feeding into layer
        """
        #
        # adding and BUILDING layers
        self.layers = []
        # input_layers = []
        self.input_layers = []
        #
        # setting one single input to all
        self.input = theano.tensor.matrix()
        #
        # wiring layers up; those with no direct input become input layers
        for layer, prevs in (layers or []):
            # if not prevs:
            #     input_layers.append(layer)
            self.add(layer, prevs)
        # for layer in input_layers:
        #     layer.input = self.input
        #
        # copying, to avoid mutating a shared default argument
        self.output_layers = list(output_layers) if output_layers else []
    def add(self, layer, input_layers=None):
"""
WRITEME
"""
self.layers.append(layer)
#
# for each layer after the first one
# if len(self.layers) > 1:
if not input_layers:
self.add_input_layer(layer)
else:
for input_layer in input_layers:
layer.add_input_layer(input_layer)
input_layer.add_output_layer(layer)
# self.layers[-1].set_previous(self.layers[-2])
# if not hasattr(self.layers[0], 'input'):
# self.set_input()
def add_input_layer(self, layer):
layer.input = self.input
self.input_layers.append(layer)
def add_output_layer(self, layer):
self.output_layers.append(layer)
def get_output(self, train=False):
"""
WRITEME
"""
#
# TODO: generalize
# assuming just one last level as output
return self.layers[-1].get_output(train)
# def set_input(self):
# for l in self.layers:
# if hasattr(l, 'input'):
# ndim = l.input.ndim
# self.layers[0].input = ndim_tensor(ndim)
# break
def get_input(self, train=False):
# if not hasattr(self.layers[0], 'input'):
# self.set_input()
# return self.layers[0].get_input(train)
# return self.input_layer.get_input(train)
return self.input
def compile(self, ):
"""
Building functions:
- to evaluate the network (pointwise and marginal evidence)
- MPE evidence
- to predict
"""
network_input = self.get_input()
network_output = self.get_output()
self.evaluate_func = theano.function([network_input], network_output)
def fit(self, ):
"""
WRITEME
"""
def predict(self, ):
"""
WRITEME
"""
def evaluate(self, instances, flatten=False):
"""
        Evaluates the network bottom up given the evidence in instances
"""
res = self.evaluate_func(instances)
if flatten:
res = res.flatten()
return res
def evaluate_mpe(self, instances):
"""
Evaluates the network bottom up for MPE
after seeing the evidences in instances
"""
#
# bottom-up step
#
signal_map = {}
for layer in self.layers:
if not hasattr(layer, 'evaluate_layer_func'):
layer.compile()
#
# retrieve input signal
inputs = [signal_map[in_layer] for in_layer in sorted(layer.input_layers)]
if not inputs:
inputs = [instances]
#
# concatenating
input_signals = numpy.concatenate(inputs, axis=1)
output_signal = layer.evaluate(input_signals)
signal_map[layer] = output_signal
        #
        # top-down step
        # TODO: the top-down MPE traceback is still missing;
        # returning the bottom-up signals in the meantime
        return signal_map
def __repr__(self):
layer_strings = [msg for msg in map(str, self.layers)]
layer_strings.reverse()
stats = '\n'.join(layer_strings)
return stats
def layer_stats(self):
layer_strings = ['[{}] {}'.format(l.id, l.stats()) for l in self.layers]
layer_strings.reverse()
stats = '\n'.join(layer_strings)
return stats
def remove_double_links_layers(self):
"""
Removing the output_layers pointers
"""
for layer in self.layers:
layer.output_layers = set()
def double_linking_layers(self):
"""
Setting references to one layer
"""
inv_layer_assoc = defaultdict(set)
for layer in self.layers:
for input_layer in layer.input_layers:
inv_layer_assoc[input_layer].add(layer)
for input_layer, output_layers in inv_layer_assoc.items():
for o in output_layers:
input_layer.add_output_layer(o)
    def dump(self, file):
        #
        # removing circular links before serializing
        self.remove_double_links_layers()
        # pickling a deeply nested layer graph can exceed the default recursion limit
        sys.setrecursionlimit(1000000000)
        # theano.misc.pkl_utils.dump(self, file)
        pickle.dump(self, file)
@classmethod
def load(cls, file):
# spn = theano.misc.pkl_utils.load(file)
spn = pickle.load(file)
#
# putting back circular links
spn.double_linking_layers()
return spn
def evaluate_on_dataset_batch(spn, data, batch_size=None):
n_instances = data.shape[0]
pred_lls = numpy.zeros(n_instances)
if batch_size is None:
batch_size = n_instances
n_batches = max(n_instances // batch_size, 1)
for i in range(n_batches):
preds = spn.evaluate(data[i * batch_size: (i + 1) * batch_size])
pred_lls[i * batch_size: (i + 1) * batch_size] = preds.flatten()
#
# some instances remaining?
    rem_instances = n_instances - n_batches * batch_size
    if rem_instances > 0:
        # the leftover chunk starts right after the last full batch
        preds = spn.evaluate(data[n_batches * batch_size:])
        pred_lls[n_batches * batch_size:] = preds.flatten()
return pred_lls
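# A minimal usage sketch (assuming `spn` is a compiled BlockLayeredSpn and
# `data` a float32 design matrix already mapped into the log domain):
#
#   pred_lls = evaluate_on_dataset_batch(spn, data, batch_size=1000)
#   avg_ll = pred_lls.mean()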
| 7,735
| 24.363934
| 86
|
py
|
spyn-repr
|
spyn-repr-master/spn/theanok/tests/test_layers.py
|
import numpy
from numpy.testing import assert_array_almost_equal
import theano
from spn.theanok.layers import SumLayer, ProductLayer
from ..layers import SumLayer_logspace
from ..layers import ProductLayer_logspace
from ..layers import MaxLayer_logspace
from spn import LOG_ZERO
def test_theano_sum_layer():
input_vec = numpy.array([[1., 0., 1., 0., 1., 0.],
[0., 0., 0., 0., 0., 0.],
[1., 0., 0., 1., 0., 1.],
[1., 1., 1., 1., 1., 1.]],
dtype=theano.config.floatX)
W = numpy.array([[0.6, 0.4, 0., 0., 0., 0.],
[0.3, 0.7, 0., 0., 0., 0.],
[0., 0., 0.1, 0.9, 0., 0.],
[0., 0., 0.7, 0.3, 0., 0.],
[0., 0., 0., 0., 0.5, 0.5],
[0., 0., 0., 0., 0.2, 0.8]],
dtype=theano.config.floatX).T
layer = SumLayer(input_dim=6,
output_dim=6,
weights=W)
layer.build()
input = layer.get_input()
output = layer.get_output()
eval_func = theano.function([input], output)
res = eval_func(input_vec)
# print(output.shape.eval())
print(res)
# [[ -5.10825574e-01 -1.20397282e+00 -2.30258512e+00 -3.56674969e-01
# -6.93147182e-01 -1.60943794e+00]
# [ -1.00000000e+03 -1.00000000e+03 -1.00000000e+03 -1.00000000e+03
# -1.00000000e+03 -1.00000000e+03]
# [ -5.10825574e-01 -1.20397282e+00 -1.05360545e-01 -1.20397282e+00
# -6.93147182e-01 -2.23143533e-01]
# [ 0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00
# 0.00000000e+00 0.00000000e+00]]
def test_theano_prod_layer():
input_vec = numpy.array([[1., 0., 1., 0., 1., 0.],
[0., 0., 0., 0., 0., 0.],
[1., 0., 0., 1., 0., 1.],
[1., 1., 1., 1., 1., 1.]],
dtype=theano.config.floatX)
W = numpy.array([[1., 0., 1., 0., 1., 0.],
[0., 1., 0., 1., 0., 1.],
[1., 0., 1., 0., 0., 1.],
[0., 1., 0., 1., 1., 0.]]).T
layer = ProductLayer(input_dim=6,
output_dim=4,
weights=W)
layer.build()
input = layer.get_input()
output = layer.get_output()
eval_func = theano.function([input], output)
res = eval_func(input_vec)
# print(output.shape.eval())
print(res)
# [[ 20.08553696 1. 7.38905621 2.71828175]
# [ 1. 1. 1. 1. ]
# [ 2.71828175 7.38905621 7.38905621 2.71828175]
# [ 20.08553696 20.08553696 20.08553696 20.08553696]]
def test_theano_sum_layer_log():
import pickle
data = numpy.array([[0, 1, 0, 1, 1, 0],
[1, 0, 0, 1, 0, 1],
[0, 1, 1, 0, 1, 0],
[0, 1, 1, 0, 0, 1],
[1, 0, 1, 0, 1, 0]]).astype(numpy.float32)
log_data = numpy.clip(numpy.log(data), LOG_ZERO, 0)
W_1 = numpy.array([[.1, .4, .0, .0, .0, .0],
[.9, .6, .0, .0, .0, .0],
[.0, .0, .3, .6, .0, .0],
[.0, .0, .7, .4, .0, .0],
[.0, .0, .0, .0, .5, .2],
[.0, .0, .0, .0, .5, .8]])
layer = SumLayer_logspace(input_dim=6,
output_dim=6,
weights=W_1)
layer.build()
input = layer.get_input()
output = layer.get_output()
eval_func = theano.function([input], output)
log_res = eval_func(log_data)
res = numpy.exp(log_res)
# print(output.shape.eval())
expected_res = numpy.dot(data, W_1)
expected_log_res = numpy.log(expected_res)
print(res)
print(log_res)
assert_array_almost_equal(expected_res, res)
assert_array_almost_equal(expected_log_res, log_res)
#
# now trying to compile it
layer.compile()
log_res = layer.evaluate(log_data)
res = numpy.exp(log_res)
assert_array_almost_equal(expected_res, res)
assert_array_almost_equal(expected_log_res, log_res)
model_path = 'test.theanok.sum'
with open(model_path, 'wb') as f:
pickle.dump(layer, f)
#
# deserialization
with open(model_path, 'rb') as f:
layer = pickle.load(f)
log_res = layer.evaluate(log_data)
res = numpy.exp(log_res)
assert_array_almost_equal(expected_res, res)
assert_array_almost_equal(expected_log_res, log_res)
def test_theano_prod_layer_log():
import pickle
data = numpy.array([[0.9, 0.6, 0.7, 0.4, 0.5, 0.2],
[0.1, 0.4, 0.7, 0.4, 0.5, 0.8],
[0.9, 0.6, 0.3, 0.6, 0.5, 0.2],
[0.9, 0.6, 0.3, 0.6, 0.5, 0.8],
[0.1, 0.4, 0.3, 0.6, 0.5, 0.2]]).astype(numpy.float32)
log_data = numpy.clip(numpy.log(data), LOG_ZERO, 0)
W_2 = numpy.array([[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 0, 1],
[0, 1, 1, 0],
[1, 0, 0, 1],
[0, 1, 1, 0]]).astype(numpy.float32)
layer = ProductLayer_logspace(input_dim=6,
output_dim=4,
weights=W_2)
layer.build()
input = layer.get_input()
output = layer.get_output()
expected_res = W_2 * data[:, :, numpy.newaxis]
expected_res[expected_res == 0] = 1
expected_res = numpy.prod(expected_res, axis=1)
expected_log_res = numpy.log(expected_res)
print(expected_res)
eval_func = theano.function([input], output)
log_res = eval_func(log_data)
res = numpy.exp(log_res)
# print(output.shape.eval())
print(res)
print(log_res)
assert_array_almost_equal(expected_res, res)
assert_array_almost_equal(expected_log_res, log_res)
#
# now trying to compile it
layer.compile()
log_res = layer.evaluate(log_data)
res = numpy.exp(log_res)
assert_array_almost_equal(expected_res, res)
assert_array_almost_equal(expected_log_res, log_res)
model_path = 'test.theanok.prodlayer'
with open(model_path, 'wb') as f:
pickle.dump(layer, f)
#
# deserialization
with open(model_path, 'rb') as f:
layer = pickle.load(f)
log_res = layer.evaluate(log_data)
res = numpy.exp(log_res)
assert_array_almost_equal(expected_res, res)
assert_array_almost_equal(expected_log_res, log_res)
def test_theano_max_layer_log_I():
data = numpy.array([[0, 1, 0, 1, 1, 0],
[1, 0, 0, 1, 0, 1],
[0, 1, 1, 0, 1, 0],
[0, 1, 1, 0, 0, 1],
[1, 0, 1, 0, 1, 0]]).astype(numpy.float32)
log_data = numpy.clip(numpy.log(data), LOG_ZERO, 0)
W_1 = numpy.array([[.1, .4, .0, .0, .0, .0],
[.9, .6, .0, .0, .0, .0],
[.0, .0, .3, .6, .0, .0],
[.0, .0, .7, .4, .0, .0],
[.0, .0, .0, .0, .5, .2],
[.0, .0, .0, .0, .5, .8]])
layer = MaxLayer_logspace(input_dim=6,
output_dim=6,
weights=W_1,
batch_size=data.shape[0])
layer.build()
input = layer.get_input()
output = layer.get_output()
eval_func = theano.function([input], output)
log_res = eval_func(log_data)
res = numpy.exp(log_res)
# print(output.shape.eval())
expected_res = W_1 * data[:, :, numpy.newaxis]
print('Expected res', expected_res)
expected_res = numpy.max(expected_res, axis=1)
expected_log_res = numpy.log(expected_res)
print(res)
print(log_res)
assert_array_almost_equal(expected_res, res)
assert_array_almost_equal(expected_log_res, log_res)
#
# now trying to compile it
layer.compile()
log_res, M = layer.evaluate(log_data)
res = numpy.exp(log_res)
print(M)
assert_array_almost_equal(expected_res, res)
assert_array_almost_equal(expected_log_res, log_res)
| 8,281
| 29.116364
| 83
|
py
|
spyn-repr
|
spyn-repr-master/spn/theanok/tests/test_spn.py
|
import numpy
from numpy.testing import assert_array_almost_equal
import theano
from ..spn import SequentialSpn
from ..spn import BlockLayeredSpn
from ..layers import SumLayer, ProductLayer
from ..layers import SumLayer_logspace
from ..layers import ProductLayer_logspace
from ..layers import MaxLayer_logspace
from spn import LOG_ZERO
def test_theano_spn_build():
#
# initial weights
input_vec = numpy.array([[1., 0., 1., 0., 1., 0.],
[0., 0., 0., 0., 0., 0.],
[1., 0., 0., 1., 0., 1.],
[1., 1., 1., 1., 1., 1.]],
dtype=theano.config.floatX)
W = numpy.array([[0.6, 0.4, 0., 0., 0., 0.],
[0.3, 0.7, 0., 0., 0., 0.],
[0., 0., 0.1, 0.9, 0., 0.],
[0., 0., 0.7, 0.3, 0., 0.],
[0., 0., 0., 0., 0.5, 0.5],
[0., 0., 0., 0., 0.2, 0.8]]).T
W_1 = numpy.array([[1., 0., 1., 0., 1., 0.],
[0., 1., 0., 1., 0., 1.],
[1., 0., 1., 0., 0., 1.],
[0., 1., 0., 1., 1., 0.]]).T
#
# creating an architecture
model = BlockLayeredSpn()
sum_layer = SumLayer(output_dim=6,
input_dim=6,
weights=W)
model.add_input_layer(sum_layer)
model.add(sum_layer)
model.add(ProductLayer(output_dim=4,
input_dim=6,
weights=W_1),
[sum_layer])
input = model.get_input()
output = model.get_output()
f = theano.function([input], output)
res = f(input_vec)
print(res)
# First (sum) layer
# [[ -5.10825624e-01 -1.00000000e+03 -5.10825624e-01 0.00000000e+00]
# [ -1.20397280e+00 -1.00000000e+03 -1.20397280e+00 0.00000000e+00]
# [ -2.30258509e+00 -1.00000000e+03 -1.05360516e-01 0.00000000e+00]
# [ -3.56674944e-01 -1.00000000e+03 -1.20397280e+00 0.00000000e+00]
# [ -6.93147181e-01 -1.00000000e+03 -6.93147181e-01 0.00000000e+00]
# [ -1.60943791e+00 -1.00000000e+03 -2.23143551e-01 0.00000000e+00]]
# Second (prod) layer
# [[ 0.03 0. 0.27 1. ]
# [ 0.042 0. 0.072 1. ]
# [ 0.012 0. 0.432 1. ]
# [ 0.105 0. 0.045 1. ]]
# Third (sum) layer
# [ -2.97887516 -1000. -1.83979492 0. ]
# [ 0.05085 0. 0.15885 1. ]
def test_spn_compile():
data = numpy.array([[0, 1, 0, 1, 1, 0],
[1, 0, 0, 1, 0, 1],
[0, 1, 1, 0, 1, 0],
[0, 1, 1, 0, 0, 1],
[1, 0, 1, 0, 1, 0]]).astype(numpy.float32)
W_1 = numpy.array([[.1, .4, .0, .0, .0, .0],
[.9, .6, .0, .0, .0, .0],
[.0, .0, .3, .6, .0, .0],
[.0, .0, .7, .4, .0, .0],
[.0, .0, .0, .0, .5, .2],
[.0, .0, .0, .0, .5, .8]]).astype(numpy.float32)
W_2 = numpy.array([[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 0, 1],
[0, 1, 1, 0],
[1, 0, 0, 1],
[0, 1, 1, 0]]).astype(numpy.float32)
W_3 = numpy.array([[0.1],
[0.2],
[0.25],
[0.45]]).astype(numpy.float32)
model = BlockLayeredSpn()
sum_layer = SumLayer(output_dim=6,
input_dim=6,
weights=W_1)
# model.add_input_layer(sum_layer)
model.add(sum_layer)
prod_layer = ProductLayer(output_dim=4,
input_dim=6,
weights=W_2)
model.add(prod_layer,
[sum_layer])
root_layer = SumLayer(output_dim=1,
input_dim=4,
weights=W_3)
model.add(root_layer,
[prod_layer])
model.compile()
log_res = model.evaluate(data)
print(log_res)
res = numpy.exp(log_res)
print(res)
#
# expected res (sum layer 1)
expected_res = numpy.dot(data, W_1)
expected_log_res = numpy.log(expected_res)
#
# (prod layer)
expected_res = W_2 * expected_res[:, :, numpy.newaxis]
expected_res[expected_res == 0] = 1
expected_res = numpy.prod(expected_res, axis=1)
expected_log_res = numpy.log(expected_res)
#
# (root layer)
expected_res = numpy.dot(expected_res, W_3)
expected_log_res = numpy.log(expected_res)
print('Expected res', expected_res)
print('Expected log res', expected_log_res)
assert_array_almost_equal(res, expected_res)
assert_array_almost_equal(log_res, expected_log_res)
def test_spn_compile_logspace():
import pickle
data = numpy.array([[0, 1, 0, 1, 1, 0],
[1, 0, 0, 1, 0, 1],
[0, 1, 1, 0, 1, 0],
[0, 1, 1, 0, 0, 1],
[1, 0, 1, 0, 1, 0]]).astype(numpy.float32)
log_data = numpy.clip(numpy.log(data), LOG_ZERO, 0)
W_1 = numpy.array([[.1, .4, .0, .0, .0, .0],
[.9, .6, .0, .0, .0, .0],
[.0, .0, .3, .6, .0, .0],
[.0, .0, .7, .4, .0, .0],
[.0, .0, .0, .0, .5, .2],
[.0, .0, .0, .0, .5, .8]]).astype(numpy.float32)
W_2 = numpy.array([[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 0, 1],
[0, 1, 1, 0],
[1, 0, 0, 1],
[0, 1, 1, 0]]).astype(numpy.float32)
W_3 = numpy.array([[0.1],
[0.2],
[0.25],
[0.45]]).astype(numpy.float32)
model = BlockLayeredSpn()
sum_layer = SumLayer_logspace(output_dim=6,
input_dim=6,
weights=W_1)
# model_path = 'test.theanok'
# with open(model_path, 'wb') as f:
# print('dumping 1')
# pickle.dump(sum_layer, f)
# #
# # deserialization
# with open(model_path, 'rb') as f:
# print('loading 1')
# a_model = pickle.load(f)
# model.add_input_layer(sum_layer)
model.add(sum_layer)
# model_path = 'test.theanok'
# with open(model_path, 'wb') as f:
# print('dumping 2')
# pickle.dump(sum_layer, f)
# #
# # deserialization
# with open(model_path, 'rb') as f:
# print('loading 2')
# a_model = pickle.load(f)
prod_layer = ProductLayer_logspace(output_dim=4,
input_dim=6,
weights=W_2)
# model_path = 'test.theanok'
# with open(model_path, 'wb') as f:
# print('dumping s1')
# pickle.dump(prod_layer, f)
# #
# # deserialization
# with open(model_path, 'rb') as f:
# print('loading s1')
# a_model = pickle.load(f)
model.add(prod_layer,
[sum_layer])
# model_path = 'test.theanok'
# with open(model_path, 'wb') as f:
# print('dumping 3')
# pickle.dump(sum_layer, f)
# #
# # deserialization
# with open(model_path, 'rb') as f:
# print('loading 3')
# a_model = pickle.load(f)
root_layer = SumLayer_logspace(output_dim=1,
input_dim=4,
weights=W_3)
model.add(root_layer,
[prod_layer])
# for layer in model.layers:
# layer.output_layers = set()
#
# serialization
# model_path = 'test.theanok'
# with open(model_path, 'wb') as f:
# print('dumping f')
# pickle.dump(model, f)
# #
# # deserialization
# with open(model_path, 'rb') as f:
# print('loading f')
# model = pickle.load(f)
model.compile()
log_res = model.evaluate(log_data)
print(log_res)
res = numpy.exp(log_res)
print(res)
#
# expected res (sum layer 1)
expected_res = numpy.dot(data, W_1)
expected_log_res = numpy.log(expected_res)
#
# (prod layer)
expected_res = W_2 * expected_res[:, :, numpy.newaxis]
expected_res[expected_res == 0] = 1
expected_res = numpy.prod(expected_res, axis=1)
expected_log_res = numpy.log(expected_res)
#
# (root layer)
expected_res = numpy.dot(expected_res, W_3)
expected_log_res = numpy.log(expected_res)
print('Expected res', expected_res)
print('Expected log res', expected_log_res)
assert_array_almost_equal(res, expected_res)
assert_array_almost_equal(log_res, expected_log_res)
#
# serialization
model_path = 'test.theanok'
with open(model_path, 'wb') as f:
model.dump(f)
#
# deserialization
with open(model_path, 'rb') as f:
model = BlockLayeredSpn.load(f)
log_res = model.evaluate(log_data)
print(log_res)
res = numpy.exp(log_res)
print(res)
def test_mpn_compile_logspace():
data = numpy.array([[0, 1, 0, 1, 1, 0],
[1, 0, 0, 1, 0, 1],
[0, 1, 1, 0, 1, 0],
[0, 1, 1, 0, 0, 1],
[1, 0, 1, 0, 1, 0]]).astype(numpy.float32)
log_data = numpy.clip(numpy.log(data), LOG_ZERO, 0)
W_1 = numpy.array([[.1, .4, .0, .0, .0, .0],
[.9, .6, .0, .0, .0, .0],
[.0, .0, .3, .6, .0, .0],
[.0, .0, .7, .4, .0, .0],
[.0, .0, .0, .0, .5, .2],
[.0, .0, .0, .0, .5, .8]]).astype(numpy.float32)
W_2 = numpy.array([[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 0, 1],
[0, 1, 1, 0],
[1, 0, 0, 1],
[0, 1, 1, 0]]).astype(numpy.float32)
W_3 = numpy.array([[0.1],
[0.2],
[0.25],
[0.45]]).astype(numpy.float32)
model = BlockLayeredSpn()
max_layer = MaxLayer_logspace(output_dim=6,
input_dim=6,
weights=W_1,
batch_size=data.shape[0])
# model.add_input_layer(max_layer)
model.add(max_layer)
prod_layer = ProductLayer_logspace(output_dim=4,
input_dim=6,
weights=W_2)
model.add(prod_layer,
[max_layer])
root_layer = MaxLayer_logspace(output_dim=1,
input_dim=4,
weights=W_3,
batch_size=data.shape[0])
model.add(root_layer,
[prod_layer])
model.compile()
log_res = model.evaluate(log_data)
print(log_res)
res = numpy.exp(log_res)
print(res)
#
# expected res (max layer 1): with one-hot indicator inputs at most one
# weighted child per output is non-zero, so the weighted max coincides
# with the weighted sum (see the numeric sketch after this test)
expected_res = numpy.dot(data, W_1)
expected_log_res = numpy.log(expected_res)
#
# (prod layer) unchanged
expected_res = W_2 * expected_res[:, :, numpy.newaxis]
expected_res[expected_res == 0] = 1
expected_res = numpy.prod(expected_res, axis=1)
expected_log_res = numpy.log(expected_res)
#
# (root layer)
expected_res = expected_res * W_3.T
expected_res = numpy.max(expected_res, axis=1, keepdims=True)
expected_log_res = numpy.log(expected_res)
print('Expected res', expected_res)
print('Expected log res', expected_log_res)
assert_array_almost_equal(res, expected_res)
assert_array_almost_equal(log_res, expected_log_res)
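#
# a minimal numeric sketch of the equivalence noted in the test above:
# on one-hot indicator inputs at most one weighted child is non-zero,
# hence the weighted max equals the weighted sum (this extra check is
# illustrative only)
def test_max_equals_sum_on_one_hot_inputs():
    one_hot = numpy.array([0., 1.])
    weights = numpy.array([.1, .9])
    weighted = one_hot * weights
    # only the active indicator contributes to both aggregations
    assert_array_almost_equal(numpy.max(weighted), numpy.sum(weighted))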
| 11,884
| 29.24173
| 76
|
py
|
spyn-repr
|
spyn-repr-master/spn/theanok/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
spyn-repr
|
spyn-repr-master/spn/linked/nodes.py
|
from spn import utils
from spn import LOG_ZERO
from spn import MARG_IND
from spn import IS_LOG_ZERO
from spn import RND_SEED
import numpy
from math import log
from math import exp
from cltree.cltree import CLTree
import dataset
import numba
from collections import defaultdict
NODE_SYM = 'u' # unknown type
SUM_NODE_SYM = '+'
PROD_NODE_SYM = '*'
INDICATOR_NODE_SYM = 'i'
DISCRETE_VAR_NODE_SYM = 'd'
CHOW_LIU_TREE_NODE_SYM = 'c'
CONSTANT_NODE_SYM = 'k'
class Node(object):
"""
WRITEME
"""
# class id counter
id_counter = 0
def __init__(self, var_scope=None):
"""
WRITEME
"""
# default val is 0.
self.log_val = LOG_ZERO
# setting id and incrementing
self.id = Node.id_counter
Node.id_counter += 1
# derivative computation
self.log_der = LOG_ZERO
self.var_scope = var_scope
def __repr__(self):
return 'id: {id} scope: {scope}'.format(id=self.id,
scope=self.var_scope)
# this is probably useless, using it for test purposes
def set_val(self, val):
"""
WRITEME
"""
if numpy.allclose(val, 0., atol=1e-10):
self.log_val = LOG_ZERO
else:
self.log_val = log(val)
def __hash__(self):
"""
A node has a unique id
"""
return hash(self.id)
def __eq__(self, other):
"""
WRITEME
"""
return self.id == other.id
def node_type_str(self):
return NODE_SYM
def node_short_str(self):
return "{0} {1}\n".format(self.node_type_str(),
self.id)
@classmethod
def reset_id_counter(cls):
"""
WRITEME
"""
Node.id_counter = 0
@classmethod
def set_id_counter(cls, val):
"""
WRITEME
"""
Node.id_counter = val
@numba.njit
def eval_sum_node(children_log_vals, log_weights):
"""
numba version
"""
max_log = LOG_ZERO
n_children = children_log_vals.shape[0]
# getting the max
for i in range(n_children):
ch_log_val = children_log_vals[i]
log_weight = log_weights[i]
w_sum = ch_log_val + log_weight
if w_sum > max_log:
max_log = w_sum
# log_unnorm = LOG_ZERO
# max_child_log = LOG_ZERO
sum_val = 0.
for i in range(n_children):
ch_log_val = children_log_vals[i]
log_weight = log_weights[i]
# for node, log_weight in zip(children, log_weights):
# if node.log_val is False:
ww_sum = ch_log_val + log_weight
sum_val += exp(ww_sum - max_log)
# is this bad code?
log_val = LOG_ZERO
if sum_val > 0.:
log_val = log(sum_val) + max_log
return log_val
# log_unnorm = log(sum_val) + max_log
# self.log_val = log_unnorm - numpy.log(self.weights_sum)
# return self.log_val
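#
# an illustrative sketch (not used by the classes below) of the max-shift
# trick eval_sum_node relies on: log sum_i w_i exp(x_i) is computed as
# m + log sum_i exp(x_i + log w_i - m) with m the max term, which stays
# finite where the naive version underflows; assumes LOG_ZERO is a large
# negative constant below the values used here
def _example_log_sum_exp_shift():
    children_log_vals = numpy.array([-800.0, -801.0])
    log_weights = numpy.log(numpy.array([0.5, 0.5]))
    with numpy.errstate(divide='ignore'):
        # exp(-800) underflows to 0. in float64, so the naive value is -inf
        naive = numpy.log(numpy.sum(numpy.exp(children_log_vals +
                                              log_weights)))
    shifted = eval_sum_node(children_log_vals, log_weights)
    # the shifted evaluation is finite (~ -800.38)
    return naive, shifted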
class SumNode(Node):
"""
WRITEME
"""
def __init__(self, var_scope=None):
"""
WRITEME
"""
Node.__init__(self, var_scope)
self.children = []
self.weights = []
self.log_weights = []
self.weights_sum = 0
def add_child(self, child, weight):
"""
WRITEME
"""
self.children.append(child)
self.weights.append(weight)
weight_log = log(weight) if weight > 0.0 else LOG_ZERO
self.log_weights.append(weight_log)
self.weights_sum += weight
def remove_child(self, child):
child_pos = self.children.index(child)
child_weight = self.weights[child_pos]
self.weights_sum -= child_weight
self.children.pop(child_pos)
self.weights.pop(child_pos)
self.log_weights.pop(child_pos)
return child_weight
def set_weights(self, weights):
"""
Stores normalized weights, updating the log weights
and their sum accordingly
"""
# normalizing the given weights
w_sum = sum(weights)
self.weights = [weight / w_sum for weight in weights]
# updating log weights from the normalized values
self.log_weights = [log(weight) if weight > 0.0 else LOG_ZERO
for weight in self.weights]
# and also the sum (1.0 after normalization)
self.weights_sum = sum(self.weights)
# @numba.jit
def eval(self):
"""
WRITEME
"""
# resetting the log derivative
self.log_der = LOG_ZERO
max_log = LOG_ZERO
# getting the max
for node, log_weight in zip(self.children, self.log_weights):
w_sum = node.log_val + log_weight
if w_sum > max_log:
max_log = w_sum
# log_unnorm = LOG_ZERO
# max_child_log = LOG_ZERO
sum_val = 0.
for node, log_weight in zip(self.children, self.log_weights):
# if node.log_val is False:
ww_sum = node.log_val + log_weight
sum_val += exp(ww_sum - max_log)
# is this bad code?
if sum_val > 0.:
self.log_val = log(sum_val) + max_log
else:
self.log_val = LOG_ZERO
# # up to now numba
# log_unnorm = log(sum_val) + max_log
# self.log_val = log_unnorm - numpy.log(self.weights_sum)
# return self.log_val
# self.log_val = eval_sum_node(numpy.array([child.log_val
# for child in self.children]),
# numpy.array(self.log_weights))
def mpe_eval(self):
"""
WRITEME
"""
# resetting the log derivative
self.log_der = LOG_ZERO
# log_val is used as an accumulator, one less var
self.log_val = LOG_ZERO
# getting the max
for node, log_weight in zip(self.children, self.log_weights):
w_sum = node.log_val + log_weight
if w_sum > self.log_val:
self.log_val = w_sum
def backprop(self):
"""
WRITEME
"""
# if it is not zero we can pass
if self.log_der > LOG_ZERO:
# dS/dS_n = sum_{p}: dS/dS_p * dS_p/dS_n
# for each sum node parent p
#
for child, log_weight in zip(self.children, self.log_weights):
# print('child before', child.log_der)
# if child.log_der == LOG_ZERO:
# if IS_LOG_ZERO(child.log_der):
if child.log_der <= LOG_ZERO:
child.log_der = self.log_der + log_weight
else:
child.log_der = numpy.logaddexp(child.log_der,
self.log_der + log_weight)
# print('child after', child.log_der)
# update weight log der too ?
def mpe_backprop(self):
"""
WRITEME
"""
if self.log_der > LOG_ZERO:
# the child der is the max der among parents
for child in self.children:
child.log_der = max(child.log_der, self.log_der)
def normalize(self):
"""
WRITEME
"""
# normalizing self.weights
w_sum = sum(self.weights)
for i, weight in enumerate(self.weights):
self.weights[i] = weight / w_sum
# computing log(self.weights)
for i, weight in enumerate(self.weights):
self.log_weights[i] = log(weight) if weight > 0.0 else LOG_ZERO
def is_complete(self):
_complete = True
# all children scopes shall be equal
children_scopes = [child.var_scope
for child in self.children]
# adding this node scope
children_scopes.append(self.var_scope)
for scope1, scope2 in utils.pairwise(children_scopes):
if scope1 != scope2:
_complete = False
break
return _complete
def n_children(self):
"""
WRITEME
"""
return len(self.children)
def node_type_str(self):
return SUM_NODE_SYM
def node_short_str(self):
children_str = " ".join(["{id}:{weight}".format(id=node.id,
weight=weight)
for node, weight in zip(self.children,
self.weights)])
return "{type} {id} [{children}]".format(type=self.node_type_str(),
id=self.id,
children=children_str)
def __repr__(self):
base = Node.__repr__(self)
children_info = [(node.id, weight)
for node, weight in zip(self.children,
self.weights)]
msg = ''
for id, weight in children_info:
msg += ' ({id} {weight})'.format(id=id,
weight=weight)
return 'Sum Node {line1}\n{line2}'.format(line1=base,
line2=msg)
@numba.njit
def eval_prod_node(children_log_vals):
"""
WRITEME
"""
n_children = children_log_vals.shape[0]
# and the zero children counter
# zero_children = 0
# computing the log value
log_val = 0.0
for i in range(n_children):
ch_log_val = children_log_vals[i]
# if ch_log_val <= LOG_ZERO:
# zero_children += 1
log_val += ch_log_val
return log_val # , zero_children
class ProductNode(Node):
"""
WRITEME
"""
def __init__(self, var_scope=None):
"""
WRITEME
"""
Node.__init__(self, var_scope)
self.children = []
# bit for zero children, see Darwiche
self.zero_children = 0
def add_child(self, child):
"""
WRITEME
"""
self.children.append(child)
def remove_child(self, child):
child_pos = self.children.index(child)
self.children.pop(child_pos)
def eval(self):
"""
WRITEME
"""
# resetting the log derivative
self.log_der = LOG_ZERO
# and the zero children counter
self.zero_children = 0
# computing the log value
self.log_val = 0.0
for node in self.children:
if node.log_val <= LOG_ZERO:
self.zero_children += 1
self.log_val += node.log_val
#
# numba
# self.log_val = \
# eval_prod_node(numpy.array([child.log_val
# for child in self.children]))
# return self.log_val
def mpe_eval(self):
"""
Just redirecting normal evaluation
"""
self.eval()
def backprop(self):
"""
WRITEME
"""
if self.log_der > LOG_ZERO:
for child in self.children:
log_der = LOG_ZERO
# checking the bit
if self.zero_children == 0:
log_der = self.log_val - child.log_val
elif self.zero_children == 1 and child.log_val <= LOG_ZERO:
log_der = sum([node.log_val for node in self.children
if node != child])
# log_der = 0.0
# for node in self.children:
# if node != child:
# log_der += node.log_val
# adding this parent value
log_der += self.log_der
# if child.log_der <= LOG_ZERO:
# if IS_LOG_ZERO(child.log_der):
if child.log_der <= LOG_ZERO:
# first assignment
child.log_der = log_der
else:
child.log_der = numpy.logaddexp(child.log_der,
log_der)
def mpe_backprop(self):
"""
WRITEME
"""
if self.log_der > LOG_ZERO:
for child in self.children:
log_der = LOG_ZERO
# checking the bit
if self.zero_children == 0:
log_der = self.log_val - child.log_val
elif self.zero_children == 1 and child.log_val <= LOG_ZERO:
log_der = sum([node.log_val for node in self.children
if node != child])
# adding this parent value
log_der += self.log_der
# updating child log der with the max instead of sum
child.log_der = max(child.log_der, log_der)
def backprop2(self):
"""
WRITEME
"""
# if more than one child has a zero value, cannot propagate
if self.log_val <= LOG_ZERO:
count = 0
for child in self.children:
if child.log_val <= LOG_ZERO:
count += 1
if count > 1:
return
# only when needed
if self.log_der > LOG_ZERO:
for child in self.children:
# print('b child val', child.log_val, child.log_der)
if child.log_val <= LOG_ZERO:
# print('child log zero')
# shall loop on other children
# maybe this is memory consuming, but shall be faster
# going to numpy array shall be faster
log_der = sum([node.log_val for node in self.children
if node.log_val > LOG_ZERO]) + \
self.log_der
if child.log_der <= LOG_ZERO:
# print('first log, add', log_der)
child.log_der = log_der
else:
child.log_der = numpy.logaddexp(child.log_der,
log_der)
# print('not first log, added', child.log_der)
# if it is 0 there is no point updating children
elif self.log_val > LOG_ZERO:
# print('par val not zero')
if child.log_der <= LOG_ZERO:
child.log_der = self.log_der + \
self.log_val - \
child.log_val
# print('child val not zero', child.log_der)
else:
child.log_der = numpy.logaddexp(child.log_der,
self.log_der +
self.log_val -
child.log_val)
# print('child log der not first', child.log_der)
def is_decomposable(self):
decomposable = True
whole = set()
for child in self.children:
child_scope = child.var_scope
for scope_var in child_scope:
if scope_var in whole:
decomposable = False
break
else:
whole.add(scope_var)
else:
continue
break
if whole != self.var_scope:
decomposable = False
return decomposable
def n_children(self):
"""
WRITEME
"""
return len(self.children)
def node_type_str(self):
return PROD_NODE_SYM
def node_short_str(self):
children_str = " ".join(["{id}".format(id=node.id)
for node in self.children])
return "{type} {id} [{children}]".format(type=self.node_type_str(),
id=self.id,
children=children_str)
def __repr__(self):
base = Node.__repr__(self)
children_info = [node.id
for node in self.children]
msg = ''
for id in children_info:
msg += ' ({id})'.format(id=id)
return 'Prod Node {line1}\n{line2}'.format(line1=base,
line2=msg)
class CategoricalIndicatorNode(Node):
"""
WRITEME
"""
def __init__(self, var, var_val):
"""
WRITEME
"""
Node.__init__(self, frozenset({var}))
self.var = var
self.var_val = var_val
def eval(self, input):
"""
WRITEME
"""
obs = input[self.var]
self.log_der = LOG_ZERO
if obs == MARG_IND:
self.log_val = 0.
elif obs == self.var_val:
self.log_val = 0.
else:
self.log_val = LOG_ZERO
def mpe_eval(self, obs):
"""
Just redirecting normal evaluation
"""
self.eval(obs)
def n_children(self):
return 0
def node_type_str(self):
return INDICATOR_NODE_SYM
def node_short_str(self):
return "{type} {id} <{var}> {val}".format(type=self.node_type_str(),
id=self.id,
var=self.var,
val=self.var_val)
def __repr__(self):
base = Node.__repr__(self)
return """Indicator Node {line1} var: {var} val: {val}""".format(line1=base,
var=self.var,
val=self.var_val)
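#
# an illustrative sketch wiring a tiny SPN by hand with the classes above:
# two indicator leaves over one binary var mixed by a sum node; eval()
# propagates log values bottom-up (names here are for exposition only)
def _example_tiny_sum_of_indicators():
    leaf_0 = CategoricalIndicatorNode(var=0, var_val=0)
    leaf_1 = CategoricalIndicatorNode(var=0, var_val=1)
    mix = SumNode(var_scope=frozenset({0}))
    mix.add_child(leaf_0, 0.3)
    mix.add_child(leaf_1, 0.7)
    obs = [1]  # observing X_0 = 1
    leaf_0.eval(obs)
    leaf_1.eval(obs)
    mix.eval()
    # exp(log_val) is P(X_0 = 1) = 0.7
    return exp(mix.log_val)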
class CLTreeNode(Node):
"""
An input node representing a Chow-Liu Tree over a set of r.v.
"""
def __init__(self,
vars,
var_values,
data,
factors=None,
alpha=0.1):
"""
vars = the sequence of feature ids
var_values = the sequence of feature values
alpha = smoothing parameter
data = the data slice (2d ndarray) upon which to grow a cltree
factors = the already computed factors (this is when the model has already been computed)
"""
Node.__init__(self, frozenset(vars))
self.vars = numpy.array(vars)
self._alpha = alpha
#
# assuming all variables to be homogeneous
# TODO: generalize this
self._n_var_vals = var_values[0]
self.var_values = numpy.array(var_values)
#
# assuming data is never None
self._data = data
self._cltree = CLTree(data,
features=self.vars,
n_feature_vals=self._n_var_vals,
feature_vals=self.var_values,
alpha=alpha,
sparse=True,
mem_free=True)
def smooth_probs(self, alpha, data=None):
"""
The only way to smooth here is to rebuild the whole tree
"""
self._alpha = alpha
if data is not None:
self._data = data
# else:
# raise ValueError('Cannot smooth without data')
self._cltree = CLTree(data=self._data,
features=self.vars,
n_feature_vals=self._n_var_vals,
feature_vals=self.var_values,
alpha=alpha,
# copy_mi=False,
sparse=True,
mem_free=True)
def eval(self, obs):
"""
Dispatching inference to the cltree
"""
#
# TODO: do something for the derivatives
self.log_der = LOG_ZERO
# self.log_val = self._cltree.eval(obs)
self.log_val = self._cltree.eval_fact(obs)
def mpe_eval(self, obs):
"""
WRITEME
"""
raise NotImplementedError('MPE inference not yet implemented')
def n_children(self):
return len(self.vars)
def node_type_str(self):
return CHOW_LIU_TREE_NODE_SYM
def node_short_str(self):
vars_str = ','.join([str(var) for var in self.vars])
return "{type} {id} <{vars}> {tree} {factors}".format(type=self.node_type_str(),
id=self.id,
vars=vars_str,
tree=self._cltree.tree_repr(),
factors=self._cltree.factors_repr())
def __repr__(self):
"""
WRITEME
"""
base = Node.__repr__(self)
return ("""CLTree Smoothed Node {line1}
vars: {vars} vals: {vals} tree:{tree}""".
format(line1=base,
vars=self.vars,
vals=self._n_var_vals,
tree=self._cltree.tree_repr()))
@numba.njit
def eval_numba(obs, vars):
if obs == MARG_IND:
return 0.
else:
return vars[obs]
class CategoricalSmoothedNode(Node):
"""
WRITEME
"""
def __init__(self, var, var_values, alpha=0.1,
freqs=None, data=None, instances=None):
"""
WRITEME
"""
Node.__init__(self, frozenset({var}))
self.var = var
self.var_val = var_values
# building storing freqs
if data is None:
if freqs is None:
self._var_freqs = [1 for i in range(var_values)]
else:
self._var_freqs = freqs[:]
else:
# better checking for numpy arrays shape
assert data.shape[1] == 1
(freqs_dict,), _features = dataset.data_2_freqs(data)
self._var_freqs = freqs_dict['freqs']
# computing the smoothed ll
self._var_probs = CategoricalSmoothedNode.smooth_ll(self._var_freqs[:],
alpha)
# storing instance ids (it is a list)
self._instances = instances
def smooth_ll(freqs, alpha):
"""
WRITEME
"""
vals = len(freqs)
freqs_sum = sum(freqs)
for i, freq in enumerate(freqs):
log_freq = LOG_ZERO
if (freq + alpha) > 0.:
log_freq = log(freq + alpha)
freqs[i] = (log_freq -
log(freqs_sum + vals * alpha))
# return freqs
return numpy.array(freqs)
def smooth_freq_from_data(data, alpha):
"""
WRITEME
"""
# data here shall have only one feature
assert data.shape[1] == 1
(freqs_dict,), _features = dataset.data_2_freqs(data)
return CategoricalSmoothedNode.smooth_ll(freqs_dict['freqs'], alpha)
def smooth_probs(self, alpha, data=None):
"""
WRITEME
"""
if data is None:
# var_values = len(self._var_freqs)
smooth_probs = \
CategoricalSmoothedNode.smooth_ll(self._var_freqs[:],
alpha)
else:
# slicing in two steps to preserve the 2 dims
data_slice_var = data[:, [self.var]]
# checking to be sure: it shall be a list btw
if isinstance(self._instances, list):
data_slice = data_slice_var[self._instances]
else:
data_slice = data_slice_var[list(self._instances)]
# print('SLICE', data_slice_var, data_slice)
smooth_probs = \
CategoricalSmoothedNode.smooth_freq_from_data(data_slice,
alpha)
self._var_probs = smooth_probs
def eval(self, input):
"""
WRITEME
"""
obs = input[self.var]
self.log_der = LOG_ZERO
# if obs == MARG_IND:
# self.log_val = 0.
# else:
# self.log_val = self._var_probs[obs]
self.log_val = eval_numba(obs, self._var_probs)
def mpe_eval(self, obs):
"""
Just redirecting normal evaluation, it surely is the one associated
to the observed value
"""
self.eval(obs)
def n_children(self):
return 0
def node_type_str(self):
return DISCRETE_VAR_NODE_SYM
def node_short_str(self):
freqs_str = " ".join([str(freq) for freq in self._var_freqs])
return "{type} {id} <{var}> {freqs}".format(type=self.node_type_str(),
id=self.id,
var=self.var,
freqs=freqs_str)
def __repr__(self):
base = Node.__repr__(self)
return ("""Categorical Smoothed Node {line1}
var: {var} val: {val} [{ff}] [{ll}]""".
format(line1=base,
var=self.var,
val=len(self._var_freqs),
ff=[freq for freq in self._var_freqs],
ll=[ll for ll in self._var_probs]))
def var_values(self):
"""
WRITEME
"""
return len(self._var_freqs)
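#
# a worked example of the Laplace smoothing in
# CategoricalSmoothedNode.smooth_ll: with counts [3, 1] and alpha = 0.5 the
# smoothed probabilities are (3 + .5) / (4 + 2 * .5) = 0.7 and
# (1 + .5) / (4 + 2 * .5) = 0.3, stored as log values (illustrative only)
def _example_laplace_smoothing():
    log_probs = CategoricalSmoothedNode.smooth_ll([3, 1], 0.5)
    # numpy.exp(log_probs) is approximately [0.7, 0.3]
    return numpy.exp(log_probs)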
class ConstantNode(Node):
"""
A node emitting always a constant signal
"""
def __init__(self, scope, const_value=0.0):
"""
WRITEME
"""
Node.__init__(self, frozenset(scope))
self.log_val = const_value
self.log_der = LOG_ZERO
def n_children(self):
return 0
def node_type_str(self):
return CONSTANT_NODE_SYM
def eval(self, obs):
"""
EMPTY
"""
return
def mpe_eval(self, obs):
"""
EMPTY
"""
return
def node_short_str(self):
return "{type} {id} <{var}> {val}".format(type=self.node_type_str(),
id=self.id,
var=self.var_scope,
val=self.log_val)
def __repr__(self):
base = Node.__repr__(self)
return """Constant Node {line1} var: {var} val: {val}""".format(line1=base,
var=self.var_scope,
val=self.log_val)
def sample_from_leaf(leaf, instances_vec, instance_id, rand_gen=None, feature_values=None):
"""
Samples an instance according to the leaf distribution and store
those values into an array
"""
#
# discriminating on the leaf type:
if isinstance(leaf, CategoricalSmoothedNode):
if rand_gen is None:
rand_gen = numpy.random.RandomState(RND_SEED)
# print(leaf._var_probs)
sampled_value = rand_gen.choice(leaf.var_val, p=numpy.exp(leaf._var_probs))
instances_vec[instance_id, leaf.var] = sampled_value
elif isinstance(leaf, CategoricalIndicatorNode):
assert feature_values is not None
vars_id = numpy.sum(feature_values[:leaf.var])
#
# and setting all others to 0
instances_vec[instance_id, vars_id:vars_id + feature_values[leaf.var]] = 0
#
# and the leaf value to 1
instances_vec[instance_id, vars_id + leaf.var_val] = 1
elif isinstance(leaf, CLTreeNode):
raise ValueError('CLT sampling not implemented yet')
else:
raise ValueError('Unrecognized leaf type')
return instances_vec
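#
# an illustrative usage sketch for sample_from_leaf with a smoothed
# categorical leaf: a value is drawn from the leaf's smoothed distribution
# and written into the given instance row
def _example_sample_from_smoothed_leaf():
    leaf = CategoricalSmoothedNode(var=0, var_values=2, freqs=[3, 1])
    instances = numpy.zeros((1, 1), dtype=numpy.int32)
    rand_gen = numpy.random.RandomState(RND_SEED)
    sample_from_leaf(leaf, instances, 0, rand_gen=rand_gen)
    # instances[0, 0] now holds a sample from probs ~[0.74, 0.26]
    return instances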
def mpe_states_from_leaf(node, only_first_max=False):
"""
Getting the mpe assignment for a leaf node. If it is an indicator node
it returns its value, otherwise the mode of the univariate distribution it represents.
TODO: extending it to multivariate nodes
"""
mpe_state_dict = defaultdict(list)
#
# discriminating on the leaf type:
if isinstance(node, CategoricalSmoothedNode):
max_val = numpy.max(node._var_probs)
for i, val in enumerate(node._var_probs):
if numpy.isclose(max_val, val):
mpe_state_dict[node.var].append(i)
if only_first_max:
break
elif isinstance(node, CategoricalIndicatorNode):
mpe_state_dict[node.var].append(node.var_val)
elif isinstance(node, CLTreeNode):
raise ValueError('CLT MPE states not implemented yet')
else:
raise ValueError('Unrecognized leaf type')
# print(mpe_state_dict)
return mpe_state_dict
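#
# an illustrative usage sketch for mpe_states_from_leaf: for a smoothed
# categorical leaf the MPE state is the mode of the smoothed distribution
def _example_mpe_state_from_smoothed_leaf():
    leaf = CategoricalSmoothedNode(var=0, var_values=2, freqs=[3, 1])
    mpe_states = mpe_states_from_leaf(leaf)
    # {0: [0]}: variable 0 takes its most probable value, 0
    return mpe_states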
| 28,838
| 28.159757
| 97
|
py
|
spyn-repr
|
spyn-repr-master/spn/linked/weight_learning.py
|
import numpy
from scipy.special import logsumexp
import numba
from .nodes import SumNode
from .nodes import ProductNode
from .nodes import CategoricalSmoothedNode
from .nodes import CategoricalIndicatorNode
from .nodes import CLTreeNode
from ..factory import retrieve_children_parent_assoc
from collections import deque
from spn import LOG_ZERO
from spn import MARG_IND
RAND_SEED = 1337
@numba.jit
def evaluate_indicator_node(node, data):
"""
WRITEME
"""
n_instances = data.shape[0]
lls = numpy.zeros(n_instances)
for i in range(n_instances):
if data[i, node.var] != node.var_val and data[i, node.var] != MARG_IND:
lls[i] = LOG_ZERO
return lls
@numba.jit
def evaluate_categorical_node(node, data):
"""
WRITEME
"""
n_instances = data.shape[0]
lls = numpy.zeros(n_instances)
for i in range(n_instances):
obs_val = data[i, node.var]
if obs_val == MARG_IND:
lls[i] = 0.
else:
lls[i] = node._var_probs[obs_val]
return lls
@numba.jit
def evaluate_product_node(node, eval_assoc, n_instances):
"""
WRITEME
"""
lls = numpy.zeros(n_instances)
for child in node.children:
lls += eval_assoc[child]
return lls
@numba.jit
def evaluate_sum_node(node, eval_assoc, n_instances):
"""
WRITEME
"""
n_children = len(node.children)
ll_hats = numpy.zeros((n_instances, n_children))
log_weights = numpy.zeros(n_children)
for i, (child, log_weight) in enumerate(zip(node.children,
node.log_weights)):
log_weights[i] = log_weight
ll_hats[:, i] = eval_assoc[child]
lls = logsumexp(ll_hats + log_weights[numpy.newaxis, :], axis=1)
return lls
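#
# a small equivalence sketch for evaluate_sum_node: per instance, the
# logsumexp over children computes log sum_c w_c * exp(ll_c), i.e. the sum
# node value in log space (numbers here are for exposition only)
def _example_sum_node_logsumexp():
    lls = numpy.log(numpy.array([[.2, .6]]))   # 1 instance, 2 children
    log_ws = numpy.log(numpy.array([.5, .5]))  # mixture log weights
    # logsumexp(lls + log_ws) == log(.5 * .2 + .5 * .6) = log(.4)
    return logsumexp(lls + log_ws[numpy.newaxis, :], axis=1)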
def evaluate_node(node, data, eval_assoc):
"""
Dispatching node evaluation by node type
"""
n_instances = data.shape[0]
if isinstance(node, CategoricalIndicatorNode):
return evaluate_indicator_node(node, data)
elif isinstance(node, CategoricalSmoothedNode):
return evaluate_categorical_node(node, data)
elif isinstance(node, SumNode):
return evaluate_sum_node(node, eval_assoc, n_instances)
elif isinstance(node, ProductNode):
return evaluate_product_node(node, eval_assoc, n_instances)
def ml_weights_estimation_posterior(node,
eval_assoc):
#
# retrieve all children and their past evaluations
children_evals = numpy.array([numpy.sum(numpy.exp(eval_assoc[child]))
for child in node.children])
ml_weights = children_evals / numpy.sum(children_evals)
return ml_weights
def ml_weights_estimation_posterior_I(node,
eval_assoc):
#
# retrieve all children and their past evaluations
children_evals = numpy.concatenate([numpy.exp(eval_assoc[child])[..., None]
for child in node.children], axis=1)
ml_weights = children_evals / numpy.sum(children_evals, axis=1)[..., None]
return ml_weights.mean(axis=0) # / n_instances
def ml_weights_estimation_posterior_II(node,
eval_assoc):
#
# retrieve all children and their past evaluations
children_evals = numpy.concatenate([numpy.exp(eval_assoc[child])[..., None]
for child in node.children], axis=1)
children_evals = children_evals * numpy.array(node.weights)[None, :]
ml_weights = children_evals / numpy.sum(children_evals, axis=1)[..., None]
return ml_weights.mean(axis=0) # / n_instances
def ml_weights_estimation_counts(node,
eval_assoc):
children_evals = [eval_assoc[child] for child in node.children]
children_evals = numpy.concatenate([e.reshape(e.shape[0], 1)
for e in children_evals],
axis=1)
children_attr = numpy.argmax(children_evals, axis=1)
children_counts = numpy.bincount(children_attr,
minlength=children_evals.shape[1])
ml_weights = children_counts / sum(children_counts)
return ml_weights
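#
# an illustrative comparison of the estimators above: the counts variant
# hard-assigns each instance to its best child (argmax), while the
# posterior variants average soft responsibilities, so on the same
# evaluations the former yields crisper weights (toy numbers only)
def _example_soft_vs_hard_weight_estimation():
    node = SumNode()
    child_a, child_b = SumNode(), SumNode()
    node.add_child(child_a, 0.5)
    node.add_child(child_b, 0.5)
    # fake per-instance log-likelihoods for the two children
    eval_assoc = {child_a: numpy.log(numpy.array([.9, .6])),
                  child_b: numpy.log(numpy.array([.1, .4]))}
    soft = ml_weights_estimation_posterior_II(node, eval_assoc)
    hard = ml_weights_estimation_counts(node, eval_assoc)
    # soft ~= [.75, .25] while hard == [1., 0.]
    return soft, hard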
def ml_evaluation(spn,
data,
nodes_to_eval=None,
child_assoc=None,
update_weights=False,
nodes_to_skip_updating=None,
weight_estimator=ml_weights_estimation_posterior_II):
"""
Estimating the weights in a linked spn by traversing it bottom-up.
nodes_to_eval: a set of nodes to evaluate (once all of them have
been evaluated, the traversal stops early)
"""
n_instances = data.shape[0]
if child_assoc is None:
child_assoc = retrieve_children_parent_assoc(spn)
else:
child_assoc = dict(child_assoc)
#
#
if nodes_to_eval is None:
nodes_to_eval = set()
if nodes_to_skip_updating is None:
nodes_to_skip_updating = set()
nodes_evals = {}
layer_nodes_evals = {}
weight_updates = {}
#
# remove one node from memory if it has no more parents to evaluate
def remove_child_parent(parent_node):
if hasattr(parent_node, 'children') and parent_node.children:
for child in parent_node.children:
child_assoc[child].remove(parent_node)
if not child_assoc[child]:
layer_nodes_evals.pop(child)
#
# proceeding one layer at a time
for layer in spn.bottom_up_layers():
stop = False
for node in layer.nodes():
if update_weights:
#
# before evaluating, is this a sum node? can we evaluate it?
if isinstance(node, SumNode) and node in nodes_to_eval:
ml_weights = weight_estimator(node, layer_nodes_evals)
# node.set_weights(ml_weights)
weight_updates[node] = ml_weights
instances_evals = evaluate_node(node, data, layer_nodes_evals)
#
# can we remove its children values from memory?
remove_child_parent(node)
layer_nodes_evals[node] = instances_evals
#
# if it is to be evaluated, store the evaluation to return it later
if node in nodes_to_eval:
nodes_evals[node] = instances_evals
nodes_to_eval.remove(node)
#
# if no more nodes to eval, exit
if not nodes_to_eval:
stop = True
break
if stop:
break
return nodes_evals, weight_updates
def random_weight_estimation(nodes,
data,
rand_gen=None):
"""
Setting random weights to sum nodes
"""
if rand_gen is None:
rand_gen = numpy.random.RandomState(RAND_SEED)
for node in nodes:
#
# checking for correct type
assert isinstance(node, SumNode)
#
# random weights
rand_weights = rand_gen.rand(len(node.children))
rand_weights /= numpy.sum(rand_weights)
node.set_weights(rand_weights)
return nodes
@numba.jit
def estimate_counts_numba(data,
instance_ids,
feature_ids,
feature_vals,
estimated_counts=None):
"""
Assuming that estimated_counts is a numpy 2D array
(features x max(feature_val))
"""
if estimated_counts is None:
n_features = len(feature_ids)
max_feature_val = max(feature_vals)
estimated_counts = numpy.zeros((n_features, max_feature_val))
#
# actual counting
for feature_id in feature_ids:
for instance_id in instance_ids:
estimated_counts[feature_id, data[instance_id, feature_id]] += 1
return estimated_counts
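#
# a tiny usage sketch for estimate_counts_numba: counting feature-value
# occurrences over the selected instances (toy data, illustrative only)
def _example_estimate_counts():
    data = numpy.array([[0, 1],
                        [1, 1],
                        [0, 0]])
    counts = estimate_counts_numba(data,
                                   numpy.array([0, 1, 2]),
                                   numpy.array([0, 1]),
                                   numpy.array([2, 2]))
    # counts[0] == [2., 1.] and counts[1] == [1., 2.]
    return counts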
@numba.jit
def smooth_ll_parameters(estimated_counts,
ll_frequencies,
instance_ids,
feature_ids,
feature_vals,
alpha):
"""
WRITEME
"""
tot_counts = len(instance_ids)
for feature_id in feature_ids:
feature_val = feature_vals[feature_id]
smooth_tot_ll = numpy.log(tot_counts + feature_val * alpha)
for val in range(feature_val):
smooth_n = estimated_counts[feature_id, val] + alpha
smooth_n_ll = numpy.log(smooth_n) if smooth_n > 0.0 else LOG_ZERO
ll_frequencies[feature_id, val] = smooth_n_ll - smooth_tot_ll
return ll_frequencies
def random_weight_ml_estimation(nodes,
data,
alpha=0.0,
rand_gen=None):
"""
WRITEME
"""
if rand_gen is None:
rand_gen = numpy.random.RandomState(RAND_SEED)
for node in nodes:
#
# checking for correct type
assert isinstance(node, SumNode)
#
# random clustering
#
# estimate data on each clustering partition
def bootstrap_weight_ml_estimation(nodes,
data,
alpha=0.0,
bootstrap_rate=0.5):
"""
WRITEME
"""
| 9,524
| 27.951368
| 83
|
py
|
spyn-repr
|
spyn-repr-master/spn/linked/learning.py
|
from spn.linked.spn import Spn
from spn.linked.nodes import SumNode
from spn.linked.nodes import ProductNode
from spn.linked.nodes import CategoricalSmoothedNode
from spn.linked.layers import SumLayer
from spn.linked.layers import ProductLayer
from spn.linked.layers import CategoricalSmoothedLayer
from spn.factory import SpnFactory
from spn import RND_SEED
import itertools
import numpy
import scipy
import scipy.spatial.distance
from collections import deque
from sklearn.cluster import spectral_clustering
from sklearn.manifold import spectral_embedding
try:
from time import perf_counter
except:
from time import time
perf_counter = time
import sys
import gc
#
# Some util classes
#
class DataSlice(object):
"""
A little util class for storing
the sets of indexes for the instances and features
considered
"""
class_counter = 0
@classmethod
def reset_id_counter(cls):
"""
WRITEME
"""
DataSlice.class_counter = 0
@classmethod
def whole_slice(cls,
n_instances,
n_features):
# lists can be as good as sets atm
# instances = {i for i in range(n_instances)}
# features = {i for i in range(n_features)}
instances = [i for i in range(n_instances)]
features = [i for i in range(n_features)]
return DataSlice(instances, features)
def __init__(self,
instances=None,
features=None):
self.instances = instances
self.features = features
self.id = DataSlice.class_counter
DataSlice.class_counter += 1
class NodeBuild(object):
"""
WRITEME
"""
def __init__(self,
id,
children_ids,
children_weights=None):
self.id = id
self.children_ids = children_ids
self.children_weights = children_weights
class SpectralStructureLearner(object):
"""
WRITEME
"""
def __init__(self,
sigma=0.1,
rand_gen=None):
"""
WRITEME
"""
self._sigma = sigma
# initing the random generator
if rand_gen is not None:
self._rand_gen = rand_gen
else:
self._rand_gen = numpy.random.RandomState(RND_SEED)
def gaussian_kernel(self, instance_1, instance_2):
"""
Gaussian (RBF) kernel, as in Ng et al.'s spectral clustering:
exp(-(||s_1 - s_2||^2) / (2 * sigma^2))
"""
return numpy.exp(- (numpy.linalg.norm(instance_1 - instance_2, 2) ** 2)
/ (2 * self._sigma ** 2))
def estimate_counts(feature):
"""
WRITEME
"""
# seems like scipy is converting them to float
# this is potentially highly memory consuming
# if not done inplace
feature = feature.astype('int8', copy=False)
# frequency counting
feature_count = numpy.bincount(feature)
# getting the number of different values seen
feature_vals = feature_count.shape[0]
# this can be tricky: have to cope with the eventuality
# of seeing only zeros
if feature_vals < 2:
feature_vals = 2
feature_count = numpy.append(feature_count, [0], 0)
return feature_count, feature_vals
def g_test_val(self, instance_1, instance_2):
"""
WRITEME
"""
# instance_1 = instance_1.astype('int8', copy=False)
# instance_2 = instance_2.astype('int8', copy=False)
n_samples = instance_1.shape[0]
# print(instance_1, instance_2, n_samples)
# feature_tot_1 = numpy.bincount(instance_1)
# feature_tot_2 = numpy.bincount(instance_2)
# feature_vals_1 = feature_tot_1.shape[0]
# feature_vals_2 = feature_tot_2.shape[0]
feature_tot_1, feature_vals_1 = \
SpectralStructureLearner.estimate_counts(instance_1)
feature_tot_2, feature_vals_2 = \
SpectralStructureLearner.estimate_counts(instance_2)
# print(feature_tot_1,
# feature_vals_1,
# feature_tot_2,
# feature_vals_2)
# computing the contingency table
# shall I use numpy.histogram2d?
# or the pull request numpy.table?
co_occ_matrix = numpy.zeros((feature_vals_1, feature_vals_2),
dtype='int64')
for val_1, val_2 in zip(instance_1, instance_2):
co_occ_matrix[val_1][val_2] += 1
# print(co_occ_matrix)
# expected frequencies
exp_freqs = numpy.outer(feature_tot_1, feature_tot_2) / n_samples
# print(exp_freqs)
g_val_matrix = \
numpy.where(co_occ_matrix > 0,
co_occ_matrix * numpy.log(co_occ_matrix / exp_freqs),
0.0)
# g_val_matrix = co_occ_matrix * numpy.log(co_occ_matrix / exp_freqs)
return g_val_matrix.sum() * 2
def g_test(self, instance_1, instance_2, p_value):
"""
Applying a G-test
"""
# extracting counts
# TODO: re-eng this in a cleaner way: g_test_val does this
# computation as well
feature_tot_1, feature_vals_1 = \
SpectralStructureLearner.estimate_counts(instance_1)
feature_tot_2, feature_vals_2 = \
SpectralStructureLearner.estimate_counts(instance_2)
# computing the degrees of freedom
feature_nonzero_1 = numpy.count_nonzero(feature_tot_1)
feature_nonzero_2 = numpy.count_nonzero(feature_tot_2)
dof = (feature_nonzero_1 - 1) * (feature_nonzero_2 - 1)
# computing the G val
g_val = self.g_test_val(instance_1, instance_2)
# print('GSTATS', g_val, dof, p_value, 2 * dof * p_value + 0.001)
# testing against p value
return g_val < 2 * dof * p_value + 0.001
def compute_similarity_matrix_pair(self,
data_slice,
metric=None):
"""
From a matrix m x n creates a kernel matrix
according to a metric, of size m x m
(it shall be symmetric and positive (semi)definite)
** MANUAL **
"""
# defaulting to the gaussian kernel (the bare class attribute cannot
# serve as a default value, it would not get bound to self)
if metric is None:
metric = self.gaussian_kernel
n_instances = data_slice.shape[0]
print('data slice with {0} instances'.format(n_instances))
# allocating the matrix
similarity_matrix = numpy.zeros((n_instances, n_instances))
# caching the slices
instances = [instance for instance in data_slice]
# computing the metric pairwise, just once
for i, j in itertools.combinations(range(n_instances), 2):
sys.stdout.write('\rsimilarity between entities {0}-{1}'.
format(i, j))
sys.stdout.flush()
# from index tuples to instances
# instance_slice_1 = data_slice[i, :]
# instance_slice_2 = data_slice[j, :]
instance_slice_1 = instances[i]
instance_slice_2 = instances[j]
# computing the metric on them
sim_i_j = metric(instance_slice_1, instance_slice_2)
# filling the matrix
similarity_matrix[i, j] = sim_i_j
similarity_matrix[j, i] = sim_i_j
# then for each value with itself
for i, instance_slice in enumerate(data_slice):
sys.stdout.write('\rsimilarity between entities {0}-{1}'.
format(i, i))
sys.stdout.flush()
sim_i_i = metric(instance_slice,
instance_slice)
similarity_matrix[i, i] = sim_i_i
print('\n')
return similarity_matrix
def compute_similarity_matrix(self,
data_slice,
metric='gaussian'):
"""
From a matrix m x n creates a kernel matrix
according to a metric of size m x m
(it shall be symmetric and positive (semi)definite)
** USES SCIPY **
"""
if metric == 'gaussian':
pairwise_dists = \
scipy.spatial.distance.squareform(
scipy.spatial.distance.pdist(data_slice,
'sqeuclidean'))
similarity_matrix = numpy.exp(-pairwise_dists /
(2 * self._sigma ** 2))
elif metric == 'gtest':
similarity_matrix = \
scipy.spatial.distance.squareform(
scipy.spatial.distance.pdist(data_slice,
self.g_test_val))
return similarity_matrix
#
# Gens' variants for splitting/clustering
#
def greedy_split_features(self,
data_slice,
slice_ids,
g_factor,
rand_gen):
"""
WRITEME
"""
# assuming the features are here the rows of data_slice
# equivalently len(feature_ids)
n_features = data_slice.shape[0]
# copying for manipulating it
feature_ids = [i for i in range(n_features)]
# print('FEATS', slice_ids, feature_ids, n_features)
# the split will be binary
dependent_features = []
# extracting one feature at random
# this can be done more efficiently with a set TODO
rand_feature_id = rand_gen.randint(0, n_features)
feature_ids.remove(rand_feature_id)
dependent_features.append(slice_ids[rand_feature_id])
# print('REM', feature_ids)
# greedy bfs searching
features_to_process = deque()
features_to_process.append(rand_feature_id)
while features_to_process:
# get one
current_feature_id = features_to_process.popleft()
# print('curr FT', current_feature_id)
# features to remove later
features_to_remove = deque()
for other_feature_id in feature_ids:
# extract the feature slices
feature_1 = data_slice[current_feature_id, :]
feature_2 = data_slice[other_feature_id, :]
# print('---->', current_feature_id,
# other_feature_id)
# print(feature_1, feature_2)
# print('G_VAL', self.g_test_val(feature_1,
# feature_2))
# apply a G-test
if not self.g_test(feature_1, feature_2, g_factor):
# print('GTEST')
features_to_remove.append(other_feature_id)
dependent_features.append(slice_ids[other_feature_id])
features_to_process.append(other_feature_id)
# now removing
# even now a set would be much more efficient
for feature_id in features_to_remove:
feature_ids.remove(feature_id)
# translating remaining features
other_features = [slice_ids[feature_id] for feature_id in feature_ids]
clustering = [dependent_features, other_features]
return clustering
#
# Cut related methods
# TODO: make them inner functions
#
def diag_sum(self, W):
"""
WRITEME
"""
# D = numpy.zeros(W.shape)
# for i, row in enumerate(W):
# D[i, i] = row.sum()
# return D
return numpy.diag(numpy.sum(W, axis=1))
def cut_val_f(self, W, f):
"""
W adjacency matrix
f cluster assignment
"""
# compute diagonal matrix D
D = self.diag_sum(W)
print(D)
print(D - W)
print(numpy.dot(D - W, f))
return numpy.dot(f.T, numpy.dot(D - W, f))
def cut_val_w(self, W, f):
"""
W adjacency matrix
f cluster assignment
"""
# compute diagonal matrix D
W_f = W[f == 1, :]
W_sliced = W_f[:, f == -1]
return numpy.sum(W_sliced)
def vol(self, W, f, index):
"""
WRITEME
"""
return numpy.sum(W[f == index, :])
def f_clu(self, W, f):
vol_f = self.vol(W, f, 1)
vol_not_f = self.vol(W, f, -1)
return numpy.where(f == 1, f / vol_f, f / vol_not_f)
def ncut_val(self, W, f):
"""
W adjacency matrix
f cluster assignment
returns the normalized cut
"""
# compute diagonal matrix D
D = self.diag_sum(W)
return (numpy.dot(f.T, numpy.dot(D - W, f)) /
numpy.dot(f.T, numpy.dot(D, f)))
def ncut(self, W, f):
"""
simpler version
"""
cut = self.cut_val_w(W, f)
print('CUT', cut)
vol_f = self.vol(W, f, 1)
vol_not_f = self.vol(W, f, -1)
return (cut * (1. / vol_f + 1. / vol_not_f))
#
# clustering management
#
def f_assignment_from_clusters(self, W, clustering):
"""
from [[ids_clu_1], ..., [ids_clu_2]] to f
"""
f = self.f_from_clusters(clustering)
# computing normalization by the volume vals of W
return self.f_clu(W, f)
def f_from_clusters(self, clustering):
"""
from [[ids_clu_1], ..., [ids_clu_2]] to f
"""
# Assuming the clustering to be binary
# TODO: make it more general
cluster_1 = clustering[0]
cluster_not_1 = clustering[1]
# allocate the f assignment
f = numpy.ones(len(cluster_1) + len(cluster_not_1))
# setting now the -1s
# f[list(cluster_not_1)] = -1
f[cluster_not_1] = -1
return f
def f_from_labels(self, labels):
"""
from [[ids_clu_1], ..., [ids_clu_2]] to f
"""
# Assuming the clustering to be binary
# so labels is a vector of zeros and ones
# TODO: make it more general
# allocate the f assignment
f = numpy.ones(len(labels))
# setting now the -1s
# f[list(cluster_not_1)] = -1
f[labels == 1] = -1
return f
def from_labels_to_clustering(self, labels, ids):
"""
WRITEME
"""
clustering = {}
for label, id in zip(labels, ids):
if label in clustering:
# clustering[label].add(i)
clustering[label].append(id)
else:
# adding a new cluster
# clustering[label] = {i}
clustering[label] = [id]
return list(clustering.values())
#
# verifying clustering quality
#
def is_clustering_valid_ncut(self,
clustering,
W=None,
threshold=0.3):
"""
Computing the NCUT by hand
"""
valid = False
if W is not None:
# computing the ncut
f = self.f_from_labels(clustering)
ncut = self.ncut(W, f)
# test it against the threshold
if ncut < threshold:
valid = True
print('NCUT: {0} RHO: {1} -> valid'.format(ncut, threshold))
else:
print('NCUT: {0} RHO: {1} -> not valid'.
format(ncut, threshold))
else:
# here goes Gens' test TODO
raise NotImplementedError('You have to implement Gens\'s test!')
return valid
def is_clustering_valid_gtest(self,
data_slice,
clustering,
W=None,
threshold=10):
"""
For the partitioning into A and B in W, each w_ij shall pass the gtest
"""
valid = False
if W is not None:
# always assuming just two partitions
# extract weights cut
W_cut = []
# get the dof for the features
A_set = [i for i, label in enumerate(clustering) if label == 0]
B_set = [i for i, label in enumerate(clustering) if label == 1]
DOF_cut = []
instances = [instance for instance in data_slice]
for i, j in zip(A_set, B_set):
feature_tot_1, feature_vals_1 = \
SpectralStructureLearner.estimate_counts(instances[i])
feature_tot_2, feature_vals_2 = \
SpectralStructureLearner.estimate_counts(instances[j])
# computing the degrees of freedom
feature_nonzero_1 = numpy.count_nonzero(feature_tot_1)
feature_nonzero_2 = numpy.count_nonzero(feature_tot_2)
DOF_cut.append(
(feature_nonzero_1 - 1) * (feature_nonzero_2 - 1))
W_cut.append(W[i, j])
W_cut = numpy.array(W_cut)
G_test = 2 * numpy.array(DOF_cut) * threshold + 0.001
if numpy.all(W_cut < G_test):
valid = True
print('W_cut: {0} G_test: {1} -> valid'.format(W_cut, G_test))
else:
print('W_cut: {0} G_test: {1} -> not valid'.
format(W_cut, G_test))
else:
# here goes Gens' test TODO
raise NotImplementedError('You have to implement Gens\'s test!')
return valid
def spectral_clustering(self,
data_slice,
ids,
k_components,
affinity_metric='gaussian',
cluster_method=None,
norm_lap=False,
validity_check=False,
threshold=None,
pair=False,
rand_gen=None):
"""
WRITEME
"""
if rand_gen is None:
rand_gen = self._rand_gen
#
# create the affinity matrix first
#
print('Computing affinity matrix for measure', affinity_metric)
aff_start_t = perf_counter()
if pair:
affinity_metric_func = None
if affinity_metric == 'gaussian':
affinity_metric_func = self.gaussian_kernel
else:
affinity_metric_func = self.g_test_val
affinity_matrix = \
self.compute_similarity_matrix_pair(data_slice,
affinity_metric_func)
else:
affinity_matrix = \
self.compute_similarity_matrix(data_slice,
affinity_metric)
aff_end_t = perf_counter()
print('Affinity metric computed! (in {0} secs)'.
format(aff_end_t - aff_start_t))
print(affinity_matrix)
n_instances = data_slice.shape[0]
labels = None
clustering = None
valid = None
#
# garbage collecting
#
gc.collect()
#
# checking for the rank of the square matrix
# cannot calculate eigenvectors where #instances = #clusters
#
if n_instances == k_components:
# the split can be easily computed
spec_start_t = perf_counter()
labels = [i for i in range(n_instances)]
print('One cluster for each element ({0} == {1})'
.format(k_components, n_instances))
spec_end_t = perf_counter()
else:
#
# if no cluster method is specified, use sklearn default
# sklearn.cluster.SpectralClustering (which uses kmeans)
#
if cluster_method is None:
print('Directly using sklearn.cluster.spectral_clustering')
spec_start_t = perf_counter()
labels = spectral_clustering(affinity=affinity_matrix,
# ?
n_clusters=k_components,
n_components=k_components,
eigen_solver='lobpcg',
# affinity='precomputed',
random_state=rand_gen)
spec_end_t = perf_counter()
else:
#
# projection in the eigen space
#
spec_start_t = perf_counter()
eigen_start_t = perf_counter()
eigen_data_slice = \
spectral_embedding(adjacency=affinity_matrix,
n_components=k_components,
# pyamg?
eigen_solver='lobpcg',
random_state=rand_gen,
norm_laplacian=norm_lap)
eigen_end_t = perf_counter()
print('Embedded in the eigenspace in {0}'.
format(eigen_end_t - eigen_start_t))
#
# apply the chosen clustering method now
#
labels = cluster_method(eigen_data_slice)
spec_end_t = perf_counter()
print('labels', labels)
print('ids', ids)
clustering = self.from_labels_to_clustering(labels, ids)
print('clustering', clustering)
print('Clustered {0} objects into {1} clusters'
' (in {2} secs)'.
format(n_instances,
len(clustering),
(spec_end_t - spec_start_t)))
if validity_check:
print('Checking for validity')
valid = self.is_clustering_valid_ncut(labels, # clustering,
affinity_matrix,
threshold=threshold)
valid = self.is_clustering_valid_gtest(data_slice,
labels,
affinity_matrix,
threshold=9)
return labels, clustering, valid
def fit_structure(self,
data,
feature_sizes,
rho=0.9,
k_col_clusters=2,
k_row_clusters=100,
sigma=3.0,
min_instances_slice=50,
alpha=0.1,
pairwise=False):
"""
Gens + Spectral
"""
#
# resetting global default parameters
#
self._sigma = sigma
print('Starting top down structure learning ...')
print('\trho:{0}'.format(rho))
print('\tsigma:{0}'.format(sigma))
print('\tmin-ins:{0}'.format(min_instances_slice))
# a queue containing the data slices to process
slices_to_process = deque()
# a stack for building nodes
node_build_stack = deque()
# a dict to keep track of id->nodes
node_id_assoc = {}
#
tot_num_instances = data.shape[0]
tot_num_features = data.shape[1]
# creating the first slice
whole_slice = DataSlice.whole_slice(tot_num_instances,
tot_num_features)
slices_to_process.append(whole_slice)
# keeping track of leaves
# input_nodes = []
first_run = True
#
# debug stats
#
n_nodes = 0
n_edges = 0
n_weights = 0
spn_start_t = perf_counter()
#
# iteratively process & split slices
#
while slices_to_process:
# process a slice
current_slice = slices_to_process.popleft()
# pointers to the current data slice
current_instances = current_slice.instances
current_features = current_slice.features
current_id = current_slice.id
n_instances = len(current_instances)
n_features = len(current_features)
print('current_id', current_id)
print('current_instances', current_instances)
print('current_features', current_features)
print('n_instances', n_instances)
print('n_features', n_features)
# shall we add a leaf node, or can we split further?
if n_features == 1:
print('---> Just one feature, adding a leaf')
(feature_id, ) = current_features
feature_size = feature_sizes[feature_id]
# slicing from the original dataset
slice_data_rows = data[current_instances, :]
current_slice_data = slice_data_rows[:, current_features]
# create the node
leaf_node = \
CategoricalSmoothedNode(var=feature_id,
var_values=feature_size,
data=current_slice_data,
instances=current_instances)
# store links
# input_nodes.append(leaf_node)
leaf_node.id = current_id
node_id_assoc[current_id] = leaf_node
print('Created Smooth Node', leaf_node)
n_nodes += 1
elif (n_instances <= min_instances_slice and n_features > 1):
# splitting the slice on each feature
print('---> Few instances ({0}), splitting on all features'.
format(n_instances))
# child_slices = [DataSlice(current_instances, {feature_id})
# for feature_id in current_features]
child_slices = [DataSlice(current_instances, [feature_id])
for feature_id in current_features]
slices_to_process.extend(child_slices)
# for feature_id in current_features:
# create new slice
# child_slice = DataSlice(current_instances, {feature_id})
# adding it to be processed
# slices_to_process.append(child_slice)
children_ids = [child.id for child in child_slices]
# the building node is a product one
build_prod = NodeBuild(current_id,
children_ids)
node_build_stack.append(build_prod)
# creating the product node
prod_node = ProductNode(var_scope=frozenset(current_features))
prod_node.id = current_id
node_id_assoc[current_id] = prod_node
print('Created Prod Node', prod_node)
print('children', children_ids)
n_nodes += 1
else:
valid_col_split = None
# slicing from the original dataset
slice_data_rows = data[current_instances, :]
current_slice_data = slice_data_rows[:, current_features]
# first run is a split on rows
if first_run:
print('FIRST RUN')
first_run = False
valid_col_split = False
else:
#
# try clustering on cols
#
labels, clustering, valid_col_split = \
self.spectral_clustering(current_slice_data.T,
ids=current_features,
k_components=k_col_clusters,
affinity_metric='gtest',
cluster_method=None,
validity_check=True,
threshold=rho,
norm_lap=False,
pair=pairwise)
#
# testing how good the clustering on features is
if valid_col_split:
# clustering on columns
print('---> Splitting on features')
# computing the remaining features
# always assuming a binary split
dependent_features = clustering[0]
other_features = clustering[1]
# creating two new data slices
first_slice = DataSlice(current_instances,
dependent_features)
second_slice = DataSlice(current_instances,
other_features)
slices_to_process.append(first_slice)
slices_to_process.append(second_slice)
children_ids = [first_slice.id, second_slice.id]
# building and storing a product node
build_prod = NodeBuild(current_id,
children_ids)
node_build_stack.append(build_prod)
prod_node = \
ProductNode(var_scope=frozenset(current_features))
prod_node.id = current_id
node_id_assoc[current_id] = prod_node
print('Created Prod Node', prod_node)
print('children', children_ids)
n_nodes += 1
else:
# clustering on rows
print('---> Splitting on rows')
# at most n_rows clusters
k_row_clusters = min(k_row_clusters,
n_instances - 1)
# sklearn's
labels, clustering, _valid = \
self.spectral_clustering(current_slice_data,
ids=current_instances,
k_components=k_row_clusters,
affinity_metric='gaussian',
cluster_method=None,
norm_lap=False,
pair=pairwise)
# splitting
cluster_slices = [DataSlice(cluster, current_features)
for cluster in clustering]
cluster_slices_ids = [slice.id
for slice in cluster_slices]
cluster_weights = [len(slice.instances) / n_instances
for slice in cluster_slices]
# appending for processing
slices_to_process.extend(cluster_slices)
# building a sum node
build_sum = NodeBuild(current_id,
cluster_slices_ids,
cluster_weights)
node_build_stack.append(build_sum)
sum_node = SumNode(var_scope=frozenset(current_features))
sum_node.id = current_id
node_id_assoc[current_id] = sum_node
print('Created Sum Node', sum_node)
print('children', cluster_slices_ids)
n_nodes += 1
#
# linking the spn graph (parent -> children)
#
print('===> Building tree')
# saving a reference now to the root (the first node)
root_build_node = node_build_stack[0]
root_node = node_id_assoc[root_build_node.id]
print('ROOT', root_node, type(root_node))
# traversing the building stack
# to link and prune nodes
for build_node in reversed(node_build_stack):
# for build_node in node_build_stack:
# current node
current_id = build_node.id
print('BID', current_id)
current_children_ids = build_node.children_ids
current_children_weights = build_node.children_weights
# retrieving corresponding node
node = node_id_assoc[current_id]
# print('retrieved node', node)
# discriminate by type
if isinstance(node, SumNode):
# getting children
for child_id, child_weight in zip(current_children_ids,
current_children_weights):
child_node = node_id_assoc[child_id]
# checking children types as well
if isinstance(child_node, SumNode):
# this shall be pruned
for grand_child, grand_child_weight \
in zip(child_node.children,
child_node.weights):
node.add_child(grand_child,
grand_child_weight *
child_weight)
else:
node.add_child(child_node, child_weight)
elif isinstance(node, ProductNode):
# linking children
for child_id in current_children_ids:
child_node = node_id_assoc[child_id]
if isinstance(child_node, CategoricalSmoothedNode):
pass
# print('SMOOTH CHILD')
# checking for alternating type
if isinstance(child_node, ProductNode):
# this shall be pruned
for grand_child in child_node.children:
node.add_child(grand_child)
else:
node.add_child(child_node)
# print('ADDED SMOOTH CHILD')
#
# building layers
#
print('===> Layering spn')
spn = SpnFactory.layered_linked_spn(root_node)
spn_end_t = perf_counter()
print('Spn learnt in {0} secs'.format(spn_end_t - spn_start_t))
print(spn.stats())
return spn
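#
# an illustrative consistency sketch: the vectorized scipy path in
# compute_similarity_matrix agrees with the explicit pairwise loop in
# compute_similarity_matrix_pair when both use the gaussian kernel
def _example_similarity_matrix_equivalence():
    rand_gen = numpy.random.RandomState(RND_SEED)
    data = rand_gen.rand(5, 3)
    learner = SpectralStructureLearner(sigma=0.1, rand_gen=rand_gen)
    vec = learner.compute_similarity_matrix(data, metric='gaussian')
    pair = learner.compute_similarity_matrix_pair(data,
                                                  learner.gaussian_kernel)
    # the two matrices coincide up to floating point noise
    return numpy.allclose(vec, pair)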
#
#
#
#
#
#
class CoClusterSlice(object):
"""
WRITEME
"""
def __init__(self, row_id, col_id, data_slice):
self.cc_row_id = row_id
self.cc_col_id = col_id
self.data_slice = data_slice
class ClusterH(object):
"""
WRITEME
"""
def __init__(self, id, elements=None):
self.id = id
self.elements = elements
self.children = []
def __repr__(self):
return "ClusterH: [id: {0} elements: {1} children: {2}]\n".\
format(self.id,
self.elements,
[child.id for child in self.children])
class CoClusteringStructureLearner(object):
"""
WRITEME
"""
def __init__(self):
"""
WRITEME
"""
def read_hierarchy_from_file(self, filename, sep=' '):
"""
Reads in a file in the format of hicc
"""
with open(filename, 'r') as cluster_file:
cluster_assignments = [[int(clust_id)
for clust_id in line.strip().split(sep)]
for line in cluster_file]
return cluster_assignments
def build_linked_hierarchy(self, clusters):
"""
From the list of list of integer representation
to a linked one
"""
# cluster_h_level = {id: ClusterLevel}
# preallocing a vector of cluster levels
n_levels = len(clusters) + 1
levels = [None for i in range(n_levels)]
# assuming the assignment to be non empty
n_elements = len(clusters[0])
cluster_id = 0
# creating the first level of the hierarchy
root_cluster = ClusterH(id=cluster_id,
elements=[i for i in range(n_elements)])
cluster_id += 1
# building a map to store id-> cluster levels
first_level_map = {root_cluster.id: root_cluster}
i = 0
levels[i] = first_level_map
# for each level
for i in range(n_levels - 1):
# get previous level
previous_level = levels[i]
# get current assignment
level_assign = clusters[i]
# storing this level assoc
current_level = {}
# create the clusters for this level
ord_clusters = set()
cls_id_buffer = [None for i in range(n_elements)]
print('i:', i)
for j in range(n_elements):
cls_id = level_assign[j]
# has this cluster been already seen?
if cls_id not in ord_clusters:
print('j:', j)
ord_clusters.add(cls_id)
new_cls = ClusterH(id=cluster_id,
elements=[k
for k in range(j, n_elements)
if level_assign[k] == cls_id])
cluster_id += 1
print('adding', new_cls.elements, 'to', j)
# linking to parent
parent_cls = None
prev_i = i - 1
# not on the first run
if prev_i >= 0:
previous_assign = clusters[prev_i]
parent_cls = previous_level[previous_assign[j]]
print('not first run', previous_assign[j])
else:
parent_cls = root_cluster
# checking if parent is the same cluster as child
if parent_cls.elements != new_cls.elements:
parent_cls.children.append(new_cls)
cls_id_buffer[cls_id] = new_cls.id
current_level[new_cls.id] = new_cls
print('adding child', new_cls.id, 'to', parent_cls.id)
else:
print('same cluster')
cls_id_buffer[cls_id] = parent_cls.id
current_level[parent_cls.id] = parent_cls
# update this level ids
for k in range(n_elements):
level_assign[k] = cls_id_buffer[level_assign[k]]
print('lvl assign', level_assign)
levels[i + 1] = current_level
return levels
def split_into_univariate_dist(self,
data_slice,
node,
node_id_assoc,
feature_sizes,
data):
"""
WRITEME
"""
feature_ids = data_slice.features
instance_ids = data_slice.instances
# for each feature
for feature_id in feature_ids:
# create a new slice
new_slice = DataSlice(instance_ids, [feature_id])
feature_size = feature_sizes[feature_id]
# create a single node after slicing
data_instance_slice = data[instance_ids, :]
feature_slice = data_instance_slice[:, [feature_id]]
leaf_node = CategoricalSmoothedNode(var=feature_id,
var_values=feature_size,
data=feature_slice)
# storing for later
leaf_node.id = new_slice.id
node_id_assoc[leaf_node.id] = leaf_node
# linking to parent
if isinstance(node, SumNode):
# linking with a fake weight, later this shall be rebuilt
node.add_child(leaf_node, 1.0)
elif isinstance(node, ProductNode):
node.add_child(leaf_node)
print('adding a smooth node {0} to node {1} w/f {2}'.
format(leaf_node.id, node.id, feature_id))
def split_by_row(self,
co_clusters_to_slices,
curr_clusters,
node_id_assoc,
min_instances_slice,
feature_sizes,
data):
"""
WRITEME
"""
n_slices = len(co_clusters_to_slices)
# traversing the queue for a fixed length, appending new slices as we go
for i in range(n_slices):
# getting the first slice
curr_cc_to_slice = co_clusters_to_slices.popleft()
curr_slice = curr_cc_to_slice.data_slice
instance_ids = curr_slice.instances
feature_ids = curr_slice.features
col_id = curr_cc_to_slice.cc_col_id
print('processing slice', curr_slice.id)
# retrieving corresponding node, it is a sum node
sum_node = node_id_assoc[curr_slice.id]
n_instances_slice = len(instance_ids)
# check if few instances are left in the slice
if n_instances_slice <= min_instances_slice:
print('split into univariate distribution')
self.split_into_univariate_dist(curr_slice,
sum_node,
node_id_assoc,
feature_sizes,
data)
else:
# get the corresponding row co_cluster
row_cc = curr_clusters[curr_cc_to_slice.cc_row_id]
print('row children n', len(row_cc.children))
# for each split
for cc_child in row_cc.children:
# copy the instances
new_instance_ids = cc_child.elements[:]
# splitting the data slice
instance_slice = DataSlice(new_instance_ids, feature_ids)
# updating the cc
new_cc_to_slice = CoClusterSlice(row_id=cc_child.id,
col_id=col_id,
data_slice=instance_slice)
# adding a product node as child
prod_node = ProductNode(var_scope=frozenset(feature_ids))
# storing it
prod_node.id = instance_slice.id
node_id_assoc[prod_node.id] = prod_node
# linking to parent
node_weight = (1.0 * len(new_instance_ids) /
n_instances_slice)
sum_node.add_child(prod_node,
node_weight)
print('adding prod node {0} to sum {1} w/w {2}'.
format(prod_node.id, sum_node.id, node_weight))
# enqueue the slice
co_clusters_to_slices.append(new_cc_to_slice)
def split_by_column(self,
co_clusters_to_slices,
curr_clusters,
node_id_assoc,
feature_sizes,
data):
"""
WRITEME
"""
n_slices = len(co_clusters_to_slices)
print('assoc in', len(node_id_assoc))
# traversing the queue for a fixed length
for i in range(n_slices):
# get the first slice
curr_cc_to_slice = co_clusters_to_slices.popleft()
# getting the data slice
curr_slice = curr_cc_to_slice.data_slice
instance_ids = curr_slice.instances
feature_ids = curr_slice.features
print('processing slice', curr_slice.id)
            # retrieving the corresponding stored node, a product one
prod_node = node_id_assoc[curr_slice.id]
# more than one feature, checking for the cocluster splits
row_id = curr_cc_to_slice.cc_row_id
# getting the associated column co-cluster
col_cc = curr_clusters[curr_cc_to_slice.cc_col_id]
# for each split
for cc_child in col_cc.children:
# copying children ids
new_feature_ids = cc_child.elements[:]
# splitting the data slice
feature_slice = DataSlice(instance_ids, new_feature_ids)
# check for a univariate split
if len(new_feature_ids) == 1:
                    # adding a smoothed node as leaf
self.split_into_univariate_dist(feature_slice,
prod_node,
node_id_assoc,
feature_sizes,
data)
else:
# updating the co cluster
new_cc_to_slice = CoClusterSlice(row_id=row_id,
col_id=cc_child.id,
data_slice=feature_slice)
# creating corresponding sum node
sum_node = SumNode(var_scope=frozenset(new_feature_ids))
# properly setting it and storing
sum_node.id = feature_slice.id
node_id_assoc[sum_node.id] = sum_node
                    # linking to the parent node
prod_node.add_child(sum_node)
print('adding sum node {0} to prod node {1}'.
format(sum_node.id, prod_node.id))
                    # enqueuing the new co-cluster slice
co_clusters_to_slices.append(new_cc_to_slice)
def build_spn_from_co_clusters(self,
row_hierarchy,
col_hierarchy,
data,
feature_sizes,
min_instances_slice=50,
max_depth=10):
"""
WRITEME
"""
n_instances = data.shape[0]
n_features = data.shape[1]
# get the number of common levels
# hierarchies can have different depths
n_common_levels = min(len(row_hierarchy), len(col_hierarchy))
# limiting the max depth in the construction
# shall this prevent overfitting?
depth = min(max_depth, n_common_levels)
# creating the first slice
whole_slice = DataSlice.whole_slice(n_instances, n_features)
# setting its associated co-clusters
cc_to_first_slice = CoClusterSlice(row_id=0,
col_id=0,
data_slice=whole_slice)
# creating the queue to process the cc_slices
curr_cc_to_slice = deque()
        # and initializing it with the first one
curr_cc_to_slice.append(cc_to_first_slice)
# build the map to store the id->node info
node_id_assoc = {}
# the first node is a sum node by convention
whole_scope = [i for i in range(n_features)]
root_node = SumNode(var_scope=frozenset(whole_scope))
root_node.id = whole_slice.id
node_id_assoc[root_node.id] = root_node
#
        # Building the spn from hierarchies up to a certain depth
#
for i in range(depth):
# get the right levels in the hierarchies
row_clusters = row_hierarchy[i]
col_clusters = col_hierarchy[i]
# split by row first
print('---> Building rows')
self.split_by_row(curr_cc_to_slice,
row_clusters,
node_id_assoc,
min_instances_slice,
feature_sizes,
data)
# then by columns
print('---> Building cols')
self.split_by_column(curr_cc_to_slice,
col_clusters,
node_id_assoc,
feature_sizes,
data)
        # the remaining slices are split
        # into univariate leaves (there is no point in considering
        # the remaining levels of one of the hierarchies, since we
        # are assuming perfectly alternating sum/prod levels)
print('Splitting remaining slices')
while curr_cc_to_slice:
# get the cc_slice from the front of the queue
rem_cc_slice = curr_cc_to_slice.popleft()
# then the data slice from it
rem_slice = rem_cc_slice.data_slice
rem_node = node_id_assoc[rem_slice.id]
self.split_into_univariate_dist(rem_slice,
rem_node,
node_id_assoc,
feature_sizes,
data)
# now traversing the linked tree top down
# with the aim to prune correctly the sum nodes parents of the leaves
print('Relinking leaves')
nodes_to_process = deque()
nodes_to_process.append(root_node)
while nodes_to_process:
#
curr_node = nodes_to_process.popleft()
#
# here I am assuming that if a sum node is to be processed
            # then it is valid (pruning comes into action when considering
# product nodes' sum node children)
if isinstance(curr_node, SumNode):
for child in curr_node.children:
nodes_to_process.append(child)
elif isinstance(curr_node, ProductNode):
# storing references to children to move
children_to_remove = []
children_to_add = []
for child in curr_node.children:
if isinstance(child, SumNode):
insert = True
for grand_child in child.children:
# this is not flexible about types
if isinstance(grand_child,
CategoricalSmoothedNode):
insert = False
children_to_add.append(grand_child)
if not insert:
# marking the child to be removed
print('removing sum node')
children_to_remove.append(child)
# removing the children
curr_node.children = [child for child in curr_node.children
if child not in children_to_remove]
# adding the new ones
curr_node.children.extend(children_to_add)
                # enqueuing the children
nodes_to_process.extend(curr_node.children)
#
# from the linked representation to a linked and layered one
#
spn = SpnFactory.layered_linked_spn(root_node)
return spn
| 51,985
| 34.316576
| 79
|
py
|
spyn-repr
|
spyn-repr-master/spn/linked/layers.py
|
from spn.linked.nodes import SumNode
from spn.linked.nodes import ProductNode
from spn.linked.nodes import CategoricalIndicatorNode
from spn.linked.nodes import CategoricalSmoothedNode
from spn.linked.nodes import CLTreeNode
from math import exp
import numba
@numba.jit
def eval_numba(nodes):
for node in nodes:
node.eval()
class Layer(object):
"""
WRITEME
"""
id_counter = 0
def __init__(self, nodes=None):
"""
WRITEME
"""
self._nodes = None
self._n_nodes = None
if nodes is None:
self._nodes = []
self._n_nodes = 0
else:
self._nodes = nodes
self._n_nodes = len(nodes)
#
# getting an id
self.id = Layer.id_counter
Layer.id_counter += 1
#
#
self.input_layers = set()
self.output_layers = set()
def add_node(self, node):
"""
WRITEME
"""
self._nodes.append(node)
self._n_nodes += 1
def remove_node(self, node):
"""
WRITEME
"""
self._nodes.remove(node)
def nodes(self):
"""
WRITEME
"""
for node in self._nodes:
yield node
def add_input_layer(self, layer):
self.input_layers.add(layer)
def remove_input_layer(self, layer):
self.input_layers.remove(layer)
def add_output_layer(self, layer):
self.output_layers.add(layer)
def remove_output_layer(self, layer):
self.output_layers.remove(layer)
def disconnect_layer(self):
print('Disconnecting', self.id)
print([l.id for l in self.output_layers])
print([l.id for l in self.input_layers])
for p in self.output_layers:
p.input_layers.remove(self)
for c in self.input_layers:
c.output_layers.remove(self)
def eval(self):
"""
layer bottom-up evaluation
"""
for node in self._nodes:
node.eval()
# eval_numba(self._nodes)
def mpe_eval(self):
"""
        layer MPE bottom-up evaluation
"""
for node in self._nodes:
node.mpe_eval()
def backprop(self):
"""
WRITEME
"""
for node in self._nodes:
node.backprop()
def mpe_backprop(self):
"""
WRITEME
"""
for node in self._nodes:
node.mpe_backprop()
def set_log_derivative(self, log_der):
"""
WRITEME
"""
for node in self._nodes:
node.log_der = log_der
def node_values(self):
"""
WRITEME
"""
# depending on the freq of the op I could allocate
# just once the list
return [node.log_val for node in self._nodes]
def get_nodes_by_id(self, node_pos):
"""
this may be inefficient, atm used only in factory
"""
node_list = [None for i in range(self._n_nodes)]
for node in self._nodes:
pos = node_pos[node.id]
node_list[pos] = node
return node_list
def get_node(self, node_id):
"""
WRITEME
"""
return self._nodes[node_id]
def n_nodes(self):
"""
WRITEME
"""
return self._n_nodes
def n_edges(self):
"""
WRITEME
"""
edges = 0
for node in self._nodes:
# input layers have nodes with no children attr
# try:
# for child in node.children:
# edges += 1
edges += node.n_children()
# except:
# pass
return edges
def n_weights(self):
"""
Only a sum layer has params
"""
return 0
#
# TODO: this may be too shallow for comparison
def __eq__(self, layer):
return self.id == layer.id
def __lt__(self, layer):
return self.id < layer.id
def __hash__(self):
return hash(self.id)
def __repr__(self):
"""
WRITEME
"""
layer_str = 'id:{0} [{1}]->[{2}]\n'.format(self.id,
','.join(sorted([str(l.id)
for l in self.input_layers])),
','.join(sorted([str(l.id)
for l in self.output_layers])))
div = '\n**********************************************************\n'
return layer_str + '\n'.join([str(node) for node in self._nodes]) + div
@classmethod
def reset_id_counter(cls):
"""
WRITEME
"""
Layer.id_counter = 0
@classmethod
def set_id_counter(cls, val):
"""
WRITEME
"""
Layer.id_counter = val
class SumLayer(Layer):
"""
WRITEME
"""
def __init__(self, nodes=None):
"""
WRITEME
"""
Layer.__init__(self, nodes)
def normalize(self):
"""
WRITEME
"""
for node in self._nodes:
node.normalize()
def add_edge(self, parent, child, weight):
"""
WRITEME
"""
parent.add_child(child, weight)
# def update_weights(self, update_rule):
# """
# WRITEME
# """
# for node in self._nodes:
# weight_updates = [update_rule(weight,
# exp(child.log_val + node.log_der))
# for child, weight
# in zip(node.children, node.weights)]
# node.set_weights(weight_updates)
def update_weights(self, update_rule, layer_id):
"""
WRITEME
"""
for node_id, node in enumerate(self._nodes):
weight_updates = [update_rule(layer_id,
node_id,
weight_id,
weight,
exp(child.log_val + node.log_der))
for weight_id, (child, weight)
in enumerate(zip(node.children, node.weights))]
node.set_weights(weight_updates)
def is_complete(self):
"""
WRITEME
"""
return all([node.is_complete() for node in self.nodes()])
def n_weights(self):
"""
For a sum layer, its number of edges
"""
return self.n_edges()
def __repr__(self):
return '[sum layer:]\n' + Layer.__repr__(self)
class ProductLayer(Layer):
"""
WRITEME
"""
def __init__(self, nodes=None):
"""
WRITEME
"""
Layer.__init__(self, nodes)
def add_edge(self, parent, child):
"""
WRITEME
"""
parent.add_child(child)
def is_decomposable(self):
"""
WRITEME
"""
return all([node.is_decomposable() for node in self.nodes()])
def __repr__(self):
return '[prod layer:]\n' + Layer.__repr__(self)
class CategoricalInputLayer(Layer):
"""
WRITEME
"""
def __init__(self, nodes=None, vars=None):
"""
WRITEME
"""
Layer.__init__(self, nodes)
self._vars = vars
self._feature_vals = None
def eval(self, input):
"""
WRITEME
"""
# for node in self._nodes:
# # get the observed value
# obs = input[node.var]
# # and eval the node
# node.eval(obs)
for node in self._nodes:
node.eval(input)
def vars(self):
"""
WRITEME
"""
return self._vars
def feature_vals(self):
"""
WRITEME
"""
return self._feature_vals
def smooth_probs(self, alpha):
"""
This shall be implemented in the class specializing
"""
raise NotImplementedError('Smoothing not implemented for input layer')
def __repr__(self):
return '[input layer:]\n' + Layer.__repr__(self)
def compute_feature_vals(nodes):
"""
From a set of input nodes, determine the feature ranges
"""
feature_vals_dict = {}
for node in nodes:
if isinstance(node, CLTreeNode):
#
# updating nodes vars ranges (assuming no inconsistencies)
for n_var, n_var_val in zip(node.vars, node.var_values):
if n_var not in feature_vals_dict:
feature_vals_dict[n_var] = n_var_val
elif isinstance(node, CategoricalIndicatorNode):
if node.var not in feature_vals_dict:
feature_vals_dict[node.var] = node.var_val + 1
else:
feature_vals_dict[node.var] = max(node.var_val + 1,
feature_vals_dict[node.var])
elif isinstance(node, CategoricalSmoothedNode):
if node.var not in feature_vals_dict:
feature_vals_dict[node.var] = node.var_val
feature_vals = [feature_vals_dict[var]
for var in sorted(feature_vals_dict.keys())]
return feature_vals
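#
# Minimal sketch (added for illustration): assuming, as compute_feature_vals
# does, that CategoricalIndicatorNode(var, val) stores .var and .var_val,
# indicators over var 0 with values {0, 1} and var 1 with value {0}
# yield the per-variable ranges [2, 1].
def _example_compute_feature_vals():
    nodes = [CategoricalIndicatorNode(0, 0),
             CategoricalIndicatorNode(0, 1),
             CategoricalIndicatorNode(1, 0)]
    assert compute_feature_vals(nodes) == [2, 1]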
class CategoricalCLInputLayer(CategoricalInputLayer):
"""
This layer contains
TODO: rewrite this hierarchy, it is a mess
"""
def __init__(self, nodes=None):
"""
WRITEME
"""
if nodes is not None:
Layer.__init__(self, nodes)
#
# updating node counts
n_nodes = 0
for node in nodes:
n_nodes += len(node.vars) + 1 if isinstance(node,
CLTreeNode) else 1
self._n_nodes = n_nodes
self._feature_vals = compute_feature_vals(nodes)
else:
raise NotImplementedError('No nodes provided')
# def eval(self, input):
# """
# WRITEME
# """
# for node in self._nodes:
# #
# # I am not using polymorphism at all...
# if isinstance(node, CLTreeNode):
# node.eval(input)
# else:
# # the other node type is assumed to be CategoricalS
# # extract the observed var value
# obs = input[node.var]
# # and eval the node
# node.eval(obs)
def smooth_probs(self, alpha):
"""
WRITEME
"""
for node in self._nodes:
node.smooth_probs(alpha)
class CategoricalIndicatorLayer(CategoricalInputLayer):
"""
WRITEME
"""
def __init__(self, nodes=None, vars=None):
"""
WRITEME
"""
if nodes is None:
# self._vars = vars
nodes = [CategoricalIndicatorNode(var, i)
for var in range(len(vars))
for i in range(vars[var])]
# self._feature_vals = [2 for i in range(len(nodes))]
else:
# assuming the nodes are complete and coherent
vars_dict = {}
for node in nodes:
try:
vars_dict[node.var] += 1
                except KeyError:
vars_dict[node.var] = 1
sorted_keys = sorted(vars_dict.items(), key=lambda t: t[0])
vars = [vals for id, vals in sorted_keys]
# self.feature_vals = compute_feature_vals(nodes)
CategoricalInputLayer.__init__(self, nodes, vars)
self._feature_vals = compute_feature_vals(nodes)
class CategoricalSmoothedLayer(CategoricalInputLayer):
"""
WRITEME
"""
def __init__(self, nodes=None, vars=None, node_dicts=None, alpha=0.1):
"""
WRITEME
"""
self.alpha = alpha
if nodes is None:
nodes = []
# self._vars = vars
for node_dict in node_dicts:
var_id = node_dict['var']
var_values = vars[var_id]
var_freqs = node_dict['freqs'] \
if 'freqs' in node_dict else None
nodes.append(CategoricalSmoothedNode(var_id,
var_values,
alpha,
var_freqs))
else:
# vars shall be computed by hand
# TODO test it
vars_dict = {}
for node in nodes:
vars_dict[node.var] = node.var_values()
# getting sorted keys
sorted_keys = sorted(vars_dict.items(), key=lambda t: t[0])
# vars = [vals for id, vals in sorted_keys]
vars = [id for id, vals in sorted_keys]
CategoricalInputLayer.__init__(self, nodes, vars)
self._feature_vals = compute_feature_vals(nodes)
def smooth_probs(self, alpha):
"""
WRITEME
"""
for node in self._nodes:
node.smooth_probs(alpha)
| 13,261
| 24.357553
| 99
|
py
|
spyn-repr
|
spyn-repr-master/spn/linked/__init__.py
| 0
| 0
| 0
|
py
|
|
spyn-repr
|
spyn-repr-master/spn/linked/representation.py
|
from .nodes import SumNode
from .nodes import ProductNode
from .nodes import mpe_states_from_leaf
from spn import RND_SEED
from spn import MARG_IND
from spn.linked.spn import evaluate_on_dataset
from spn.theanok.spn import evaluate_on_dataset_batch
from dataset import dataset_to_instances_set
from collections import deque
from collections import defaultdict
from collections import Counter
from collections import namedtuple
import numpy
# import numba
import logging
import itertools
import os
import subprocess
import operator
from time import perf_counter
def node_in_path_feature(node, data_repr, node_feature_assoc, instance_id, in_path=True):
"""
Feature is 1 if the node is in the path
"""
feature_id = node_feature_assoc[node]
if in_path:
data_repr[instance_id, feature_id] = 1
else:
data_repr[instance_id, feature_id] = 0
return data_repr
def acc_node_in_path_feature(node, data_repr, node_feature_assoc, instance_id, in_path=True):
"""
Sets feature val to 1 plus previous val if it was on path
"""
feature_id = node_feature_assoc[node]
if in_path:
data_repr[instance_id, feature_id] += 1
return data_repr
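#
# Illustrative usage sketch (added, not part of the original module): the
# retrieve functions above only need hashable node keys and a node -> feature
# id map, so plain strings can stand in for SPN nodes here.
def _example_in_path_features():
    node_feature_assoc = {'n0': 0, 'n1': 1}
    data_repr = numpy.zeros((1, 2), dtype=numpy.int32)
    node_in_path_feature('n0', data_repr, node_feature_assoc, 0)
    acc_node_in_path_feature('n1', data_repr, node_feature_assoc, 0)
    acc_node_in_path_feature('n1', data_repr, node_feature_assoc, 0)
    assert data_repr.tolist() == [[1, 2]]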
def max_weight_feature(node, data_repr, node_feature_assoc, instance_id):
feature_id = node_feature_assoc[node]
max_weight = 0.0
for i, child in enumerate(node.children):
if numpy.isclose(child.log_val + node.log_weights[i],
node.log_val):
max_weight = node.weights[i]
break
data_repr[instance_id, feature_id] = max_weight
return data_repr
def max_child_id_feature(node, data_repr, node_feature_assoc, instance_id):
max_child = None
for i, child in enumerate(node.children):
if numpy.isclose(child.log_val + node.log_weights[i],
node.log_val):
max_child = child
break
try:
feature_id = node_feature_assoc[max_child]
data_repr[instance_id, feature_id] += 1
except KeyError:
pass
return data_repr
def max_hidden_var_feature(node, data_repr, node_feature_assoc, instance_id):
if isinstance(node, SumNode):
max_child = None
for i, child in enumerate(node.children):
if numpy.isclose(child.log_val + node.log_weights[i],
node.log_val):
max_child = child
break
try:
feature_id = node_feature_assoc[(node, max_child)]
data_repr[instance_id, feature_id] += 1
except KeyError:
pass
return data_repr
def max_hidden_var_log_val(node, data_repr, node_feature_assoc, instance_id):
if isinstance(node, SumNode):
max_child = None
max_val = None
for i, child in enumerate(node.children):
if numpy.isclose(child.log_val + node.log_weights[i],
node.log_val):
max_child = child
max_val = node.log_val
break
try:
feature_id = node_feature_assoc[(node, max_child)]
data_repr[instance_id, feature_id] = max_val
except KeyError:
pass
return data_repr
def max_hidden_var_val(node, data_repr, node_feature_assoc, instance_id):
if isinstance(node, SumNode):
max_child = None
max_val = None
for i, child in enumerate(node.children):
if numpy.isclose(child.log_val + node.log_weights[i],
node.log_val):
max_child = child
max_val = numpy.exp(node.log_val)
break
try:
feature_id = node_feature_assoc[(node, max_child)]
data_repr[instance_id, feature_id] = max_val
except KeyError:
pass
return data_repr
def hidden_var_val(node, data_repr, node_feature_assoc, instance_id):
if isinstance(node, SumNode):
for i, child in enumerate(node.children):
val = numpy.exp(child.log_val + node.log_weights[i])
try:
feature_id = node_feature_assoc[(node, child)]
data_repr[instance_id, feature_id] = val
except KeyError:
pass
return data_repr
def hidden_var_log_val(node, data_repr, node_feature_assoc, instance_id):
if isinstance(node, SumNode):
for i, child in enumerate(node.children):
val = child.log_val + node.log_weights[i]
try:
feature_id = node_feature_assoc[(node, child)]
data_repr[instance_id, feature_id] = val
except KeyError:
pass
return data_repr
def child_var_val(node, data_repr, node_feature_assoc, instance_id):
if isinstance(node, SumNode):
for i, child in enumerate(node.children):
val = numpy.exp(child.log_val)
try:
feature_id = node_feature_assoc[(node, child)]
data_repr[instance_id, feature_id] = val
except KeyError:
pass
return data_repr
def child_var_log_val(node, data_repr, node_feature_assoc, instance_id):
if isinstance(node, SumNode):
for i, child in enumerate(node.children):
val = child.log_val
try:
feature_id = node_feature_assoc[(node, child)]
data_repr[instance_id, feature_id] = val
except KeyError:
pass
return data_repr
def var_val(node, data_repr, node_feature_assoc, instance_id):
val = numpy.exp(node.log_val)
try:
feature_id = node_feature_assoc[node]
data_repr[instance_id, feature_id] = val
except KeyError:
pass
return data_repr
def var_log_val(node, data_repr, node_feature_assoc, instance_id):
val = node.log_val
try:
feature_id = node_feature_assoc[node]
data_repr[instance_id, feature_id] = val
except KeyError:
pass
return data_repr
def log_output_feature(node, data_repr, node_feature_assoc, instance_id):
try:
feature_id = node_feature_assoc[node]
data_repr[instance_id, feature_id] = node.log_val
except KeyError:
pass
return data_repr
def filter_sum_nodes(spn):
feature_nodes = [node for node in spn.top_down_nodes() if isinstance(node, SumNode)]
return {node: i for i, node in enumerate(feature_nodes)}
def filter_product_nodes(spn):
feature_nodes = [node for node in spn.top_down_nodes() if isinstance(node, ProductNode)]
return {node: i for i, node in enumerate(feature_nodes)}
def filter_non_sum_nodes(spn):
feature_nodes = [node for node in spn.top_down_nodes() if not isinstance(node, SumNode)]
return {node: i for i, node in enumerate(feature_nodes)}
def filter_non_leaf_nodes(spn):
feature_nodes = [node for node in spn.top_down_nodes() if isinstance(node, SumNode) or
isinstance(node, ProductNode)]
return {node: i for i, node in enumerate(feature_nodes)}
def filter_all_nodes(spn):
feature_nodes = [node for node in spn.top_down_nodes()]
return {node: i for i, node in enumerate(feature_nodes)}
def filter_hidden_var_nodes(spn):
feature_nodes = [(node, child) for node in spn.top_down_nodes()
if isinstance(node, SumNode)
for child in node.children]
return {(node, child): i for i, (node, child) in enumerate(feature_nodes)}
def save_feature_info(spn, node_feature_assoc, output_file):
"""
Storing to file info about the extracted features for later reuse
id, node id, layer id, node type, scope
"""
header = 'id,node,layer,type,scope\n'
with open(output_file, 'w') as info_file:
info_file.write(header)
node_layer_map = {node: layer for layer in spn.bottom_up_layers()
for node in layer.nodes()}
sorted_features = sorted(node_feature_assoc.items(), key=operator.itemgetter(1))
for node, feature_id in sorted_features:
layer_id = node_layer_map[node].id
node_type = node.__class__.__name__
node_scope = ''
if hasattr(node, 'var_scope'):
node_scope = ' '.join(str(s) for s in sorted(node.var_scope))
elif hasattr(node, 'var'):
node_scope = str(node.var)
info_str = '{},{},{},{},{}\n'.format(feature_id,
node.id,
layer_id,
node_type,
node_scope)
info_file.write(info_str)
FeatureInfo = namedtuple('FeatureInfo', ['feature_id',
'node_id',
'layer_id',
'node_type',
'node_scope'])
def store_feature_info(feature_info, info_file_path):
"""
Saving features info to file
"""
header = 'id,node,layer,type,scope\n'
with open(info_file_path, 'w') as info_file:
info_file.write(header)
#
# ordering by feature id
for info in sorted(feature_info, key=lambda x: x.feature_id):
node_scope_str = ' '.join(str(s) for s in sorted(info.node_scope))
info_str = '{},{},{},{},{}\n'.format(info.feature_id,
info.node_id,
info.layer_id,
info.node_type,
node_scope_str)
info_file.write(info_str)
def load_feature_info(info_file_path):
"""
Retrieving the feature info back from a text file
"""
with open(info_file_path, 'r') as info_file:
feature_info = []
lines = info_file.readlines()
#
# discarding the header
lines = lines[1:]
for l in lines:
feature_id, node_id, layer_id, node_type, node_scope = l.split(',')
feature_id = int(feature_id)
node_id = int(node_id)
layer_id = int(layer_id)
node_scope = set([int(s) for s in node_scope.rstrip().split(' ')])
feature_info.append(FeatureInfo(feature_id, node_id, layer_id, node_type, node_scope))
return feature_info
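#
# Round-trip sketch (illustrative only; the temporary path is an assumption
# of the example): storing FeatureInfo records and reading them back.
def _example_feature_info_roundtrip():
    import tempfile
    fd, info_path = tempfile.mkstemp(suffix='.info')
    os.close(fd)
    info = [FeatureInfo(0, 10, 2, 'SumNode', {0, 3})]
    store_feature_info(info, info_path)
    loaded = load_feature_info(info_path)
    assert loaded[0].node_type == 'SumNode'
    assert loaded[0].node_scope == {0, 3}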
def filter_features_by_layer(feature_info, layer_id):
"""
From a list of FeatureInfo filter belonging to a certain layer
"""
filtered_info = [info for info in feature_info if info.layer_id == layer_id]
return filtered_info
def filter_features_by_scope_length(feature_info, scope_length):
"""
    From a list of FeatureInfo, filter those whose scope has a given length
"""
filtered_info = [info for info in feature_info if len(info.node_scope) == scope_length]
return filtered_info
def filter_features_by_node_type(feature_info, node_type_str):
"""
    From a list of FeatureInfo, filter those having a certain node type
"""
filtered_info = [info for info in feature_info if info.node_type == node_type_str]
return filtered_info
def feature_mask_from_info(feature_info, n_features):
"""
From a list of FeatureInfo extract a boolean mask
for the features in them
"""
feature_mask = numpy.zeros(n_features, dtype=bool)
for info in feature_info:
feature_mask[info.feature_id] = True
return feature_mask
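#
# Sketch: turning (possibly filtered) FeatureInfo records into a boolean
# mask over the extracted feature columns.
def _example_feature_mask_from_info():
    info = [FeatureInfo(1, 10, 2, 'SumNode', {0}),
            FeatureInfo(3, 11, 2, 'SumNode', {1})]
    mask = feature_mask_from_info(info, n_features=5)
    assert mask.tolist() == [False, True, False, True, False]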
def extract_features_nodes_mpe(spn,
data,
filter_node_func=filter_sum_nodes,
retrieve_func=node_in_path_feature,
remove_zero_features=True,
output_feature_info=None,
dtype=None,
verbose=False):
"""
Representing a dataset (n_instances x n_features)
in a new space (n_instances x n_spn_sum_nodes) where
the new features are built according to a retrieve function
(e.g. sum node id, its output signal, etc)
    and an SPN, evaluated on the sum nodes along the MPE path of each instance
"""
n_instances = data.shape[0]
n_features = data.shape[1]
#
# storing assoc: sum node -> new feature id
nodes_id_assoc = filter_node_func(spn)
n_spn_features = len(nodes_id_assoc)
#
# save scopes and other info?
if output_feature_info is not None:
node_id_assoc_n = None
if filter_node_func == filter_hidden_var_nodes:
node_id_assoc_n = {child: i for (node, child), i in nodes_id_assoc.items()}
else:
node_id_assoc_n = nodes_id_assoc
save_feature_info(spn, node_id_assoc_n, output_feature_info)
if dtype is None:
dtype = data.dtype
repr_data = numpy.zeros((n_instances, n_spn_features), dtype=dtype)
logging.info('Old data ({0} x {1}) -> ({0} x {2})'.format(n_instances,
n_features,
n_spn_features))
if verbose:
id_nodes_assoc = {v: k for k, v in nodes_id_assoc.items()}
feature_nodes = []
for v in sorted(id_nodes_assoc.keys()):
try:
node_id = id_nodes_assoc[v].id
            except AttributeError:
node_id = (id_nodes_assoc[v][0].id,
id_nodes_assoc[v][1].id)
feature_nodes.append((v, node_id))
print(feature_nodes)
#
# evaluate MPE circuit for each instance
for i in range(n_instances):
#
# bottom up evaluation
spn.single_mpe_eval(data[i])
#
# "top down"" retrieval
nodes_to_process = deque()
for node in spn.root_layer().nodes():
nodes_to_process.append(node)
while nodes_to_process:
curr_node = nodes_to_process.popleft()
children_to_process = None
if isinstance(curr_node, SumNode):
#
# retrieve the represented value for the feature
repr_data = retrieve_func(curr_node, repr_data, nodes_id_assoc, i)
#
# following the max children
children_to_process = [child for j, child in enumerate(curr_node.children)
if numpy.isclose(child.log_val + curr_node.log_weights[j],
curr_node.log_val)]
elif isinstance(curr_node, ProductNode):
#
# following all children
children_to_process = [child for child in curr_node.children]
if children_to_process:
nodes_to_process.extend(children_to_process)
if remove_zero_features:
old_n_features = repr_data.shape[1]
zero_feature = numpy.zeros(n_instances, dtype=data.dtype)
features_to_keep = [i for i in range(n_spn_features)
if not numpy.allclose(zero_feature, repr_data[:, i])]
repr_data = repr_data[:, numpy.array(features_to_keep)]
logging.info('Removed features ({0} x {1}) -> ({0} x {2})'.format(n_instances,
old_n_features,
repr_data.shape[1]))
return repr_data
def extract_features_nodes(spn,
data,
filter_node_func=filter_sum_nodes,
retrieve_func=node_in_path_feature,
remove_zero_features=True,
output_feature_info=None,
dtype=None,
verbose=False):
"""
Representing a dataset (n_instances x n_features)
in a new space (n_instances x n_spn_sum_nodes) where
the new features are built according to a retrieve function
(e.g. sum node id, its output signal, etc)
    given an SPN and its bottom-up evaluation
"""
n_instances = data.shape[0]
n_features = data.shape[1]
#
# storing assoc: sum node -> new feature id
nodes_id_assoc = filter_node_func(spn)
n_spn_features = len(nodes_id_assoc)
#
# save scopes and other info?
if output_feature_info is not None:
save_feature_info(spn, nodes_id_assoc, output_feature_info)
if dtype is None:
dtype = data.dtype
repr_data = numpy.zeros((n_instances, n_spn_features), dtype=dtype)
logging.info('Old data ({0} x {1}) -> ({0} x {2})'.format(n_instances,
n_features,
n_spn_features))
if verbose:
id_nodes_assoc = {v: k for k, v in nodes_id_assoc.items()}
feature_nodes = []
for v in sorted(id_nodes_assoc.keys()):
try:
node_id = id_nodes_assoc[v].id
            except AttributeError:
node_id = (id_nodes_assoc[v][0].id,
id_nodes_assoc[v][1].id)
feature_nodes.append((v, node_id))
print(feature_nodes)
#
    # evaluate the SPN bottom-up for each instance
for i in range(n_instances):
#
# bottom up evaluation
spn.single_eval(data[i])
#
# visiting all nodes
for node in spn.top_down_nodes():
#
# retrieve the represented value for the feature
repr_data = retrieve_func(node, repr_data, nodes_id_assoc, i)
if remove_zero_features:
old_n_features = repr_data.shape[1]
zero_feature = numpy.zeros(n_instances, dtype=data.dtype)
features_to_keep = [i for i in range(n_spn_features)
if not numpy.allclose(zero_feature, repr_data[:, i])]
repr_data = repr_data[:, numpy.array(features_to_keep)]
logging.info('Removed features ({0} x {1}) -> ({0} x {2})'.format(n_instances,
old_n_features,
repr_data.shape[1]))
return repr_data
def scope_stats(spn, filter_node_func=filter_non_leaf_nodes, top_n_scopes=20):
scope_counter = Counter()
var_counter = Counter()
nodes_id_assoc = filter_node_func(spn)
for node in nodes_id_assoc:
if hasattr(node, 'var_scope'):
# check_node = True
# if no_leaf:
# check_node = hasattr(node, 'children')
# if check_node:
scope_counter[node.var_scope] += 1
for var in node.var_scope:
var_counter[var] += 1
if top_n_scopes > 1:
print('Most common scopes\n{}'.format(scope_counter.most_common(top_n_scopes)))
print('Most common vars\n{}'.format(var_counter.most_common(top_n_scopes)))
return scope_counter, var_counter
def scope_stats_marg(spn, marg_vars):
vars_to_marginalize = set(marg_vars)
overlap_perc_dict = {}
n_nodes = 0
for node in spn.top_down_nodes():
if hasattr(node, 'var_scope'):
perc = len(node.var_scope & vars_to_marginalize) / len(node.var_scope)
overlap_perc_dict[node] = perc
n_nodes += 1
product_children_percs_dict = {}
for node in overlap_perc_dict:
if isinstance(node, ProductNode):
child_percs = [overlap_perc_dict[child] for child in node.children]
# child_n_marg_vars = [p * len(child.var_scope)
# for p, child in zip(child_percs, node.children)]
product_children_percs_dict[node] = child_percs
#
# printing node stats
all_marg_scope_nodes = [(p, n) for n, p in overlap_perc_dict.items() if p > 0.999]
all_orig_scope_nodes = [(p, n) for n, p in overlap_perc_dict.items() if p < 0.001]
sorted_perc = sorted([(p, n.id) for n, p in overlap_perc_dict.items()], key=lambda x: x[0])
print('Nodes with all H: {}/{}'.format(len(all_marg_scope_nodes), n_nodes))
print('Nodes with all X: {}/{}'.format(len(all_orig_scope_nodes), n_nodes))
    print('Nodes with mixed scope: {}/{}'.format(n_nodes -
len(all_orig_scope_nodes) -
len(all_marg_scope_nodes),
n_nodes))
print('Sorted perc {}'.format(
sorted_perc[len(all_orig_scope_nodes):len(all_orig_scope_nodes) + 50]))
fully_sep_prod_nodes = []
sep_prod_nodes = []
not_sep_prod_nodes = []
for node, perc_list in product_children_percs_dict.items():
if all([p > 0.999 or p < 0.001 for p in perc_list]):
fully_sep_prod_nodes.append((node.id, perc_list))
elif any([p > 0.999 for p in perc_list]) and any([p < 0.001 for p in perc_list]):
sep_prod_nodes.append((node.id, perc_list))
else:
not_sep_prod_nodes.append((node.id, perc_list))
print('Fully separating product nodes: {}/{}'.format(len(fully_sep_prod_nodes),
len(product_children_percs_dict)))
print('Separating product nodes: {}/{}'.format(len(sep_prod_nodes),
len(product_children_percs_dict)))
print('Not separating nodes: {}'.format(not_sep_prod_nodes))
def node_mpe_instantiation(node,
n_features,
dtype=numpy.int32,
verbose=False,
dont_care_val=MARG_IND,
only_first_max=False):
"""
Getting the mpe instantiation for a node (probability distribution over a scope)
Assuming an mpe bottom-up step has already been done
"""
instance_vals = defaultdict(set)
#
# traversing the spn top down
nodes_to_process = deque()
nodes_to_process.append(node)
while nodes_to_process:
curr_node = nodes_to_process.popleft()
#
# if it is a sum node, follow the max child
if isinstance(curr_node, SumNode):
max_children = []
for i, child in enumerate(curr_node.children):
if numpy.isclose(curr_node.log_weights[i] + child.log_val, curr_node.log_val):
max_children.append(child)
#
                    # following all nodes with equal value?
if only_first_max:
break
assert len(max_children) > 0
nodes_to_process.extend(max_children)
if verbose:
print('sum node, getting children {0} [{1}]'.format(len(max_children),
len(nodes_to_process)))
#
# adding them all if the current node is a product node
elif isinstance(curr_node, ProductNode):
nodes_to_process.extend(curr_node.children)
if verbose:
print('prod node, add {0} children [{1}]'.format(len(curr_node.children),
len(nodes_to_process)))
else:
#
# it is assumed to be a leaf
mpe_states = mpe_states_from_leaf(curr_node, only_first_max)
if verbose:
print('Reached a leaf: {0}'.format(mpe_states))
#
# updating assignment
for var, values in mpe_states.items():
for v in values:
instance_vals[var].add(v)
#
# now we need to create the combinations
instance_states = []
feature_ids = [k for k in instance_vals]
feature_vals = [instance_vals[k] for k in feature_ids]
for comb_vals in itertools.product(*feature_vals):
#
# create an instance, don't care values are set to MARG_IND by default
instance = numpy.zeros(n_features, dtype=dtype)
instance.fill(dont_care_val)
for i, val in enumerate(comb_vals):
instance[feature_ids[i]] = val
instance_states.append(instance)
instances_vec = numpy.array(instance_states)
if instances_vec.shape[0] == 1:
instances_vec = instances_vec.flatten()
return instances_vec
def retrieve_all_nodes_mpe_instantiations(spn,
n_features,
dtype=numpy.int32,
verbose=False,
dont_care_val=MARG_IND,
only_first_max=False):
#
# doing an mpe bottom up evaluation
marg_all_features_instance = numpy.zeros(n_features, dtype=int)
marg_all_features_instance.fill(MARG_IND)
spn.mpe_eval(marg_all_features_instance)
NodeInfo = namedtuple('NodeInfo', ['layer', 'mpes', 'scope'])
node_stats = {}
for i, layer in enumerate(spn.top_down_layers()):
print(layer.__class__.__name__)
for j, node in enumerate(layer.nodes()):
mpe_instantiations = node_mpe_instantiation(node,
n_features,
dtype=dtype,
verbose=verbose,
dont_care_val=dont_care_val,
only_first_max=only_first_max)
scope = []
if hasattr(node, 'var_scope'):
scope = node.var_scope
elif hasattr(node, 'var'):
                scope = frozenset([node.var])
node_stats[node] = NodeInfo(layer, mpe_instantiations, scope)
return node_stats
def random_feature_mask(feature_mask, n_rand_features, p=None, rand_gen=None):
"""
    Sets n_rand_features entries of a boolean mask vector to True;
    if p is None, indices are sampled uniformly at random
"""
if rand_gen is None:
rand_gen = numpy.random.RandomState(RND_SEED)
n_features = len(feature_mask)
rand_feature_ids = rand_gen.choice(n_features, n_rand_features, replace=False, p=p)
for f_id in rand_feature_ids:
feature_mask[f_id] = True
return feature_mask
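#
# Seeded usage sketch: exactly n_rand_features entries get switched on,
# reproducibly when a seeded generator is passed in.
def _example_random_feature_mask():
    rand_gen = numpy.random.RandomState(RND_SEED)
    mask = random_feature_mask(numpy.zeros(10, dtype=bool), 3,
                               rand_gen=rand_gen)
    assert mask.sum() == 3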
def random_rectangular_feature_mask(feature_mask,
n_rows, n_cols,
n_min_rows=2, n_min_cols=2,
n_max_rows=3, n_max_cols=3,
rand_gen=None):
"""
    Sets a number of entries of a boolean mask vector to True such that,
    when reshaped to n_rows x n_cols, the mask forms a rectangle
"""
if rand_gen is None:
rand_gen = numpy.random.RandomState(RND_SEED)
n_features = len(feature_mask)
#
# extract the origin randomly
possible_features = numpy.arange(n_features).reshape(n_rows, n_cols)
possible_features = possible_features[:n_rows - n_min_rows, :n_cols - n_min_cols].flatten()
origin_id = rand_gen.choice(possible_features, replace=False)
origin_x = origin_id // n_cols
origin_y = origin_id - (origin_x * n_cols)
# print(possible_features, origin_id)
#
# and then length and width
max_length = min(n_max_rows + 1, n_rows - origin_x + 1)
max_width = min(n_max_cols + 1, n_cols - origin_y + 1)
    length = rand_gen.choice(numpy.arange(n_min_rows, max_length), replace=False)
width = rand_gen.choice(numpy.arange(n_min_cols, max_width), replace=False)
# print(origin_id, origin_x, origin_y, length, width)
rand_feature_ids = []
for i in range(origin_x, origin_x + length):
for j in range(origin_y, origin_y + width):
rand_feature_ids.append(i * n_cols + j)
for f_id in rand_feature_ids:
feature_mask[f_id] = True
return feature_mask
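#
# Usage sketch on a 4 x 4 grid: the switched-on entries form a contiguous
# rectangle whose sides respect the default 2..3 bounds, hence between
# 4 and 9 cells in total.
def _example_random_rectangular_feature_mask():
    rand_gen = numpy.random.RandomState(RND_SEED)
    mask = random_rectangular_feature_mask(numpy.zeros(16, dtype=bool),
                                           n_rows=4, n_cols=4,
                                           rand_gen=rand_gen)
    assert 4 <= mask.sum() <= 9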
def mask_dataset_marginalization(data, feature_mask, marg_value=MARG_IND, copy=True):
#
# make a copy?
if copy:
data = numpy.array(data, copy=True)
#
# for each feature not in feature_mask, set a marginalization
    data[:, numpy.logical_not(feature_mask)] = marg_value
return data
def extract_feature_marginalization_from_masks(spn,
data,
feature_masks,
marg_value=MARG_IND,
rand_gen=None,
dtype=float):
if rand_gen is None:
rand_gen = numpy.random.RandomState(RND_SEED)
n_instances = data.shape[0]
n_features = data.shape[1]
n_gen_features = len(feature_masks)
repr_data = numpy.zeros((n_instances, n_gen_features), dtype=dtype)
marg_data = numpy.zeros((n_instances, n_features), dtype=data.dtype)
feat_i_t = perf_counter()
for i, mask in enumerate(feature_masks):
feat_s_t = perf_counter()
marg_data.fill(marg_value)
#
# copy only the right features
marg_data[:, mask] = data[:, mask]
# print('{}\n{}'.format(i, marg_data))
#
# evaluate the spn to get a new feature
preds = evaluate_on_dataset(spn, marg_data)
repr_data[:, i] = preds
feat_e_t = perf_counter()
print('\tProcessed feature {}/{} (done in {})'.format(i + 1,
len(feature_masks),
feat_e_t - feat_s_t),
end=' \r')
if i % 100 == 0:
logging.info('\tProcessed {}/{} features (elapsed {})'.format(i + 1,
len(feature_masks),
feat_e_t - feat_i_t))
print('')
return repr_data
def extract_feature_marginalization_from_masks_opt_unique(spn,
data,
feature_masks,
marg_value=MARG_IND,
dtype=float):
"""
Same as extract_feature_marginalization_from_masks but
    doing fewer queries to an spn, considering only the unique ones
according to the data
"""
n_instances = data.shape[0]
n_features = data.shape[1]
n_gen_features = len(feature_masks)
repr_data = numpy.zeros((n_instances, n_gen_features), dtype=dtype)
# marg_data = numpy.zeros((n_instances, n_features), dtype=data.dtype)
marg_instance = numpy.zeros(n_features, dtype=data.dtype)
feat_i_t = perf_counter()
for i, mask in enumerate(feature_masks):
feat_s_t = perf_counter()
marg_instance.fill(marg_value)
# marg_data.fill(marg_value)
#
# extracting only the right features
masked_data = data[:, mask]
#
# getting the unique instances
feature_patches_dict = defaultdict(lambda: numpy.zeros(n_instances, dtype=bool))
for k, instance in enumerate(masked_data):
#
# enumerating patches
patch = tuple(instance)
feature_patches_dict[patch][k] = True
#
# retrieving feature values for the patches
for patch, instance_mask in feature_patches_dict.items():
marg_instance[mask] = numpy.array(patch, dtype=data.dtype)
repr_val, = spn.single_eval(marg_instance)
repr_data[instance_mask, i] = repr_val
feat_e_t = perf_counter()
print('\tProcessed feature {}/{} (done in {})'.format(i + 1,
len(feature_masks),
feat_e_t - feat_s_t),
end=' \r')
if i % 100 == 0:
logging.info('\tProcessed {}/{} features (elapsed {})'.format(i + 1,
len(feature_masks),
feat_e_t - feat_i_t))
print('')
return repr_data
# @numba.njit
def feature_mask_to_marg(feature_mask, n_ohe_features, feature_vals):
"""
    Converts a feature mask for a categorical dataset into one for a one-hot encoded dataset.
    E.g. (False, True, True, False) with feature values [2, 2, 2, 2] becomes
(False, False, True, True, True, True, False, False)
"""
n_features = len(feature_mask)
ohe_feature_mask = numpy.zeros(n_ohe_features, dtype=bool)
for j in range(n_features):
if feature_mask[j]:
f_id = int(numpy.sum(feature_vals[:j]))
ohe_feature_mask[numpy.arange(f_id, f_id + feature_vals[j])] = True
return ohe_feature_mask
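#
# Sketch reproducing the docstring example: a categorical feature mask
# expanded to its one-hot-encoded counterpart for binary features.
def _example_feature_mask_to_marg():
    mask = numpy.array([False, True, True, False])
    ohe_mask = feature_mask_to_marg(mask, 8, numpy.array([2, 2, 2, 2]))
    assert ohe_mask.tolist() == [False, False, True, True,
                                 True, True, False, False]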
def extract_feature_marginalization_from_masks_theanok(spn,
data,
feature_masks,
feature_vals=None,
marg_value=MARG_IND,
rand_gen=None,
batch_size=None,
dtype=float):
"""
data is one hot encoded for theanok spns, masks are not
"""
if rand_gen is None:
rand_gen = numpy.random.RandomState(RND_SEED)
n_instances = data.shape[0]
n_features = data.shape[1]
n_gen_features = len(feature_masks)
if feature_vals is None:
#
# assuming all binary variables
assert n_features % 2 == 0
feature_vals = numpy.array([2 for i in range(n_features // 2)])
repr_data = numpy.zeros((n_instances, n_gen_features), dtype=dtype)
marg_data = numpy.ones((n_instances, n_features), dtype=data.dtype)
feat_i_t = perf_counter()
for i, mask in enumerate(feature_masks):
feat_s_t = perf_counter()
marg_data.fill(1)
#
# copy only the right features
ohe_mask = feature_mask_to_marg(mask, n_features, feature_vals)
marg_data[:, ohe_mask] = data[:, ohe_mask]
# print('{}\n{}'.format(i, marg_data))
#
# evaluate the spn to get a new feature
preds = evaluate_on_dataset_batch(spn, marg_data, batch_size)
repr_data[:, i] = preds
feat_e_t = perf_counter()
print('\tProcessed feature {}/{} (done in {})'.format(i + 1,
len(feature_masks),
feat_e_t - feat_s_t),
end=' \r')
if i % 100 == 0:
logging.info('\tProcessed {}/{} features (elapsed {})'.format(i + 1,
len(feature_masks),
feat_e_t - feat_i_t))
print('')
return repr_data
def extract_feature_marginalization_from_masks_theanok_opt_unique(spn,
data,
feature_masks,
feature_vals=None,
marg_value=MARG_IND,
batch_size=None,
dtype=float):
"""
Same as extract_feature_marginalization_from_masks_theanok but
    doing fewer queries to an spn, considering only the unique ones
according to the data
"""
n_instances = data.shape[0]
n_features = data.shape[1]
n_gen_features = len(feature_masks)
if feature_vals is None:
#
# assuming all binary variables
assert n_features % 2 == 0
feature_vals = numpy.array([2 for i in range(n_features // 2)])
repr_data = numpy.zeros((n_instances, n_gen_features), dtype=dtype)
feat_i_t = perf_counter()
for i, mask in enumerate(feature_masks):
feat_s_t = perf_counter()
#
# copy only the right features
ohe_mask = feature_mask_to_marg(mask, n_features, feature_vals)
masked_data = data[:, ohe_mask]
#
# getting the unique instances
feature_patches_dict = defaultdict(lambda: numpy.zeros(n_instances, dtype=bool))
for k, instance in enumerate(masked_data):
#
# enumerating patches
patch = tuple(instance)
feature_patches_dict[patch][k] = True
#
# recreating a dataset
n_patches = len(feature_patches_dict)
# print('\t\tthere are {} patches'.format(n_patches))
marg_data = numpy.ones((n_patches, n_features), dtype=data.dtype)
ordered_patches = {}
for k, patch in enumerate(feature_patches_dict):
ordered_patches[patch] = k
marg_data[k, ohe_mask] = numpy.array(patch)
# print(marg_data.shape)
#
# evaluate the spn to get a new feature
preds = evaluate_on_dataset_batch(spn, marg_data, batch_size)
# print(preds.shape, preds[0])
#
# populating the dataset back
for patch, instance_mask in feature_patches_dict.items():
repr_val = preds[ordered_patches[patch]]
repr_data[instance_mask, i] = repr_val
feat_e_t = perf_counter()
print('\tProcessed feature {}/{} (done in {})'.format(i + 1,
len(feature_masks),
feat_e_t - feat_s_t),
end=' \r')
if i % 100 == 0:
logging.info('\tProcessed {}/{} features (elapsed {})'.format(i + 1,
len(feature_masks),
feat_e_t - feat_i_t))
print('')
return repr_data
def save_features_to_file(feature_masks, output_path, delimiter=','):
"""
Saving a seq of boolean feature masks to file as rows of ints
Eg. [[True, False, False], [False, True, True]]
"""
feature_masks_array = numpy.array(feature_masks, dtype=bool)
numpy.savetxt(output_path, feature_masks_array, fmt='%d', delimiter=delimiter)
def load_features_from_file(feature_file_path, delimiter=','):
return numpy.loadtxt(feature_file_path, dtype=bool, delimiter=delimiter)
def feature_mask_scope(feature_mask):
"""
Given a feature mask (a boolean ndarray), getting the scope
of the features set to true (an int ndarray)
"""
n_features = len(feature_mask)
return numpy.arange(n_features, dtype=int)[feature_mask]
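#
# Tiny sketch: recovering the integer scope of a boolean feature mask.
def _example_feature_mask_scope():
    scope = feature_mask_scope(numpy.array([True, False, True, False]))
    assert scope.tolist() == [0, 2]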
def extract_features_marginalization_grid(n_rows, n_cols,
n_cell_rows, n_cell_cols,
feature_file_path=None):
n_features = n_rows * n_cols
feature_masks = []
for i in range(0, n_rows, n_cell_rows):
for j in range(0, n_cols, n_cell_cols):
mask = numpy.zeros((n_rows, n_cols), dtype=bool)
mask[i:i + n_cell_rows, j:j + n_cell_cols] = True
mask = mask.reshape(n_features)
feature_masks.append(mask)
if feature_file_path:
save_features_to_file(feature_masks, feature_file_path)
return feature_masks
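#
# Usage sketch: tiling a 4 x 4 image with 2 x 2 cells yields four disjoint
# masks of four pixels each.
def _example_marginalization_grid():
    masks = extract_features_marginalization_grid(4, 4, 2, 2)
    assert len(masks) == 4
    assert all(mask.sum() == 4 for mask in masks)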
def instance_from_disjoint_feature_masks(instance,
feature_masks,
feature_values,
dtype=float):
if instance is None:
n_features = feature_masks[0].shape[0]
instance = numpy.zeros(n_features, dtype=dtype)
for mask, feature_val in zip(feature_masks, feature_values):
instance[mask] = feature_val
return instance
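#
# Sketch: composing a single instance out of disjoint masks, each mask
# carrying its own feature value.
def _example_instance_from_disjoint_masks():
    masks = [numpy.array([True, False, True]),
             numpy.array([False, True, False])]
    instance = instance_from_disjoint_feature_masks(None, masks, [1.0, 2.0])
    assert instance.tolist() == [1.0, 2.0, 1.0]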
def extract_features_marginalization_rectangles(n_features,
n_rows, n_cols,
feature_batch_sizes,
rect_min_sizes,
rect_max_sizes,
feature_file_path=None,
marg_value=MARG_IND,
rand_gen=None,
dtype=float):
"""
feature_batch_sizes = [10, 20, 30]
rect_min_sizes = [(2, 2), (2, 2), (4, 4)]
rect_max_sizes = [(3, 3), (4, 4), (2, 2)]
"""
assert len(feature_batch_sizes) == len(rect_min_sizes)
assert len(rect_min_sizes) == len(rect_max_sizes)
# n_instances = data.shape[0]
# n_features = data.shape[1]
#
# assuming all images to be squares
# TODO: generalize
# n_rows = int(numpy.sqrt(n_features))
# n_cols = int(numpy.sqrt(n_features))
if rand_gen is None:
rand_gen = numpy.random.RandomState(RND_SEED)
#
# generating the feature masks according to the parameters
feature_masks = []
for n_masks, (n_min_rows, n_min_cols), (n_max_rows, n_max_cols) in zip(feature_batch_sizes,
rect_min_sizes,
rect_max_sizes):
assert n_min_rows <= n_max_rows
assert n_min_cols <= n_max_cols
for i in range(n_masks):
mask = numpy.zeros(n_features, dtype=bool)
mask = random_rectangular_feature_mask(mask,
n_rows,
n_cols,
n_min_rows=n_min_rows,
n_min_cols=n_min_cols,
n_max_rows=n_max_rows,
n_max_cols=n_max_cols,
rand_gen=rand_gen)
feature_masks.append(mask)
# #
# # using the masks to evaluate the marginalizations
# repr_data = extract_feature_marginalization_from_masks(spn,
# data,
# feature_masks,
# marg_value=marg_value,
# rand_gen=rand_gen,
# dtype=dtype)
#
    # saving them to file
if feature_file_path:
save_features_to_file(feature_masks, feature_file_path)
# return repr_data
return feature_masks
def extract_features_marginalization_rand(n_features,
feature_batch_sizes,
n_feature_sizes,
feature_file_path=None,
marg_value=MARG_IND,
rand_gen=None,
dtype=float):
"""
feature_batch_sizes = [10, 20, 30]
    n_feature_sizes = [5, 10, 20]
"""
assert len(feature_batch_sizes) == len(n_feature_sizes)
# n_instances = data.shape[0]
# n_features = data.shape[1]
if rand_gen is None:
rand_gen = numpy.random.RandomState(RND_SEED)
#
# generating the feature masks according to the parameters
feature_masks = []
for n_masks, n_rand_features in zip(feature_batch_sizes,
n_feature_sizes):
assert n_rand_features <= n_features
        print('Processing # masks {} # rand features {}'.format(n_masks,
n_rand_features))
for i in range(n_masks):
mask = numpy.zeros(n_features, dtype=bool)
mask = random_feature_mask(mask,
n_rand_features,
rand_gen=rand_gen)
feature_masks.append(mask)
# #
# # using the masks to evaluate the marginalizations
# repr_data = extract_feature_marginalization_from_masks(spn,
# data,
# feature_masks,
# marg_value=marg_value,
# rand_gen=rand_gen,
# dtype=dtype)
#
    # saving them to file
if feature_file_path:
save_features_to_file(feature_masks, feature_file_path)
# return repr_data
return feature_masks
def extract_instances_groups(data, group_func=dataset_to_instances_set, dtype=numpy.int8):
"""
From a dataset represented by a n_instances x n_features matrix
    it extracts the groups of identical instances and assigns each to a class number
returns the mapping matrix n_instances x n_groups
"""
n_instances = data.shape[0]
n_features = data.shape[1]
#
# grouping by a certain criterion
logging.info('Grouping by function {}'.format(group_func))
groups = group_func(data)
n_groups = len(groups)
logging.info('There are {} groups'.format(n_groups))
repr_data = numpy.zeros((n_instances, n_groups), dtype=dtype)
group_feature_mapping = {centroid: i for centroid, i in zip(groups, range(n_groups))}
#
# assign groups to instances
for i in range(n_instances):
repr_data[i, group_feature_mapping[tuple(data[i])]] = 1
return repr_data
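#
# Sketch: three instances with one duplicate collapse into two groups and
# each row one-hot encodes its group (the column order depends on set
# iteration, so only shape and row sums are asserted).
def _example_instances_groups():
    data = numpy.array([[0, 1], [0, 1], [1, 0]])
    repr_data = extract_instances_groups(data)
    assert repr_data.shape == (3, 2)
    assert (repr_data.sum(axis=1) == 1).all()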
LIBRA_MARG_SYM = '*'
ACQUERY_EXEC = './acquery'
QUERY_EXT = 'q'
FEATURE_PREFIX = 'features'
def format_val(val,
dtype=int,
marg_value=MARG_IND,
marg_sym=LIBRA_MARG_SYM):
if val == marg_value:
return marg_sym
else:
return str(val)
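#
# Sketch: marginalized entries are rendered with Libra's don't-care symbol,
# everything else as its plain string value.
def _example_format_val():
    assert format_val(MARG_IND) == LIBRA_MARG_SYM
    assert format_val(MARG_IND + 1) == str(MARG_IND + 1)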
def data_through_feature_mask(data,
feature_mask,
output_path,
delimiter=',',
marg_value=MARG_IND,
marg_sym=LIBRA_MARG_SYM):
"""
    Masking a dataset according to a feature mask, substituting all marg indices with
    a char (defaulting to Libra's don't-care char), then serializing to file
"""
n_instances = data.shape[0]
n_features = data.shape[1]
marg_data = numpy.zeros((n_instances, n_features), dtype=data.dtype)
marg_data.fill(marg_value)
#
# storing only the request values
marg_data[:, feature_mask] = data[:, feature_mask]
#
# serializing
ser_s_t = perf_counter()
with open(output_path, 'w') as query_file:
for i in range(n_instances):
instance_str = delimiter.join(format_val(f) for f in marg_data[i])
query_file.write('{}\n'.format(instance_str))
ser_e_t = perf_counter()
logging.debug('Serialized feature query data to {} in {}'.format(output_path,
ser_e_t - ser_s_t))
return marg_data
def data_through_feature_mask_opt_unique(data,
feature_mask,
output_path,
delimiter=',',
marg_value=MARG_IND,
marg_sym=LIBRA_MARG_SYM):
"""
Same as data_through_feature_mask, but writing only the unique feature patches
Returning the composed marg data, the feature patch to instances dict and
the feature patch to feature id dict
"""
n_instances = data.shape[0]
n_features = data.shape[1]
# marg_data = numpy.zeros((n_instances, n_features), dtype=data.dtype)
# marg_data.fill(marg_value)
#
# storing only the request values
masked_data = data[:, feature_mask]
#
# getting the unique instances
feature_patches_dict = defaultdict(lambda: numpy.zeros(n_instances, dtype=bool))
for k, instance in enumerate(masked_data):
#
# enumerating patches
patch = tuple(instance)
feature_patches_dict[patch][k] = True
n_patches = len(feature_patches_dict)
marg_data = numpy.zeros((n_patches, n_features), dtype=data.dtype)
marg_data.fill(marg_value)
ordered_patches = {}
for k, patch in enumerate(feature_patches_dict):
ordered_patches[patch] = k
marg_data[k, feature_mask] = numpy.array(patch)
#
# serializing
ser_s_t = perf_counter()
with open(output_path, 'w') as query_file:
for i in range(n_patches):
instance_str = delimiter.join(format_val(f) for f in marg_data[i])
query_file.write('{}\n'.format(instance_str))
ser_e_t = perf_counter()
logging.debug('Serialized feature query data to {} in {}'.format(output_path,
ser_e_t - ser_s_t))
return marg_data, feature_patches_dict, ordered_patches
def ll_array_from_model_score(score_output):
"""
Quick and dirty parsing
"""
#
# split strings by newlines
lines = score_output.split('\n')
#
# remove all the lines that are not numbers
lls = []
for ll in lines:
try:
lls.append(float(ll))
except ValueError:
pass
#
# convert to numpy array
return numpy.array(lls)
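#
# Sketch (the 'avg = ...' line is a made-up stand-in for acquery's
# non-numeric output): non-numeric lines are skipped, numeric ones become
# the per-instance log-likelihood array.
def _example_ll_array_from_model_score():
    scores = ll_array_from_model_score('avg = -1.75\n-1.5\n-2.0\n')
    assert numpy.allclose(scores, [-1.5, -2.0])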
def acquery(model, query_file, exec_path=ACQUERY_EXEC):
"""
Computing the likelihood for some queries given an instance
"""
process = None
process = subprocess.Popen([exec_path,
'-m', model,
'-q', query_file],
stdout=subprocess.PIPE)
proc_out, proc_err = process.communicate()
#
# TODO manage errors
# print(proc_out)
if proc_err is not None:
logging.error('acquery errors: {}'.format(proc_err))
scores = ll_array_from_model_score(proc_out.decode("utf-8"))
return scores
def extract_features_marginalization_acquery(data,
model_path,
feature_masks,
output_path,
dtype=float,
prefix=FEATURE_PREFIX,
overwrite_feature_file=True,
exec_path=ACQUERY_EXEC):
n_gen_features = len(feature_masks)
n_instances = data.shape[0]
repr_data = numpy.zeros((n_instances, n_gen_features), dtype=dtype)
for i, mask in enumerate(feature_masks):
#
# translating the mask into a query set
query_file_name = None
if overwrite_feature_file:
query_file_name = '{}.{}'.format(prefix, QUERY_EXT)
else:
query_file_name = '{}.{}.{}'.format(prefix, i, QUERY_EXT)
query_file_path = os.path.join(output_path, query_file_name)
feat_s_t = perf_counter()
#
# computing the queries (side effect: writing to file query_file_path)
queries = data_through_feature_mask(data, mask, query_file_path)
#
# getting the scores
feature_scores = acquery(model_path, query_file_path, exec_path=exec_path)
assert len(queries) == len(feature_scores)
#
# storing them
repr_data[:, i] = feature_scores
feat_e_t = perf_counter()
print('\tProcessed feature {}/{} (done in {})'.format(i + 1,
len(feature_masks),
feat_e_t - feat_s_t),
end=' \r')
return repr_data
def extract_features_marginalization_acquery_opt_unique(data,
model_path,
feature_masks,
output_path,
dtype=float,
prefix=FEATURE_PREFIX,
overwrite_feature_file=True,
exec_path=ACQUERY_EXEC):
n_gen_features = len(feature_masks)
n_instances = data.shape[0]
repr_data = numpy.zeros((n_instances, n_gen_features), dtype=dtype)
feat_i_t = perf_counter()
for i, mask in enumerate(feature_masks):
#
# translating the mask into a query set
query_file_name = None
if overwrite_feature_file:
query_file_name = '{}.{}'.format(prefix, QUERY_EXT)
else:
query_file_name = '{}.{}.{}'.format(prefix, i, QUERY_EXT)
query_file_path = os.path.join(output_path, query_file_name)
feat_s_t = perf_counter()
#
# computing the queries (side effect: writing to file query_file_path)
queries, feature_dict, feature_ids = data_through_feature_mask_opt_unique(data,
mask,
query_file_path)
#
# getting the scores
feature_scores = acquery(model_path, query_file_path, exec_path=exec_path)
assert len(queries) == len(feature_scores)
#
# storing them
for patch, instance_mask in feature_dict.items():
repr_val = feature_scores[feature_ids[patch]]
repr_data[instance_mask, i] = repr_val
# repr_data[:, i] = feature_scores
feat_e_t = perf_counter()
print('\tProcessed feature {}/{} (done in {})'.format(i + 1,
len(feature_masks),
feat_e_t - feat_s_t),
end=' \r')
if i % 100 == 0:
logging.info('\tProcessed {}/{} features (elapsed {})'.format(i + 1,
len(feature_masks),
feat_e_t - feat_i_t))
return repr_data
def node_activations_for_instance(spn,
nodes,
instance,
marg_mask=None,
mean=False,
log=False,
hard=False,
dtype=float):
"""
Given an SPN and an instance, return a same shape instance
containing the activations of all nodes, summed by scopes
"""
assert instance.ndim == 1
n_features = len(instance)
activations = numpy.zeros(n_features, dtype=dtype)
var_counter = Counter()
#
# marginalizing?
if marg_mask is not None:
instance = numpy.array(instance, copy=True)
instance[numpy.logical_not(marg_mask)] = MARG_IND
#
# evaluate it bottom, up
res, = spn.single_eval(instance)
node_set = set(nodes)
#
# then gather the node activation vals
for node in nodes:
if log:
val = node.log_val
else:
val = numpy.exp(node.log_val)
scope = None
if hasattr(node, 'var_scope'):
scope = node.var_scope
elif hasattr(node, 'var'):
scope = [node.var]
#
# accumulating scope
for var in scope:
var_counter[var] += 1
if hard:
activations[var] += 1
else:
# activations[var] += (val / len(scope))
activations[var] += val
# activations[var] = max(val / len(scope), activations[var])
# if instance[var] == 1:
# activations[var] += val
# else:
# activations[var] += (1 - val)
if mean:
for i in range(n_features):
activations[i] /= var_counter[i]
return activations
def extract_features_node_activations(spn,
nodes,
data,
marg_mask=None,
mean=False,
log=False,
hard=False,
dtype=float):
n_instances = data.shape[0]
n_features = data.shape[1]
repr_data = numpy.zeros((n_instances, n_features), dtype=dtype)
for i in range(n_instances):
ext_s_t = perf_counter()
repr_data[i, :] = node_activations_for_instance(spn,
nodes,
data[i],
marg_mask=marg_mask,
mean=mean,
log=log,
hard=hard,
dtype=dtype)
ext_e_t = perf_counter()
print('\tProcessed instance {}/{} (done in {})'.format(i + 1,
n_instances,
ext_e_t - ext_s_t),
end=' \r')
return repr_data
def all_single_marginals_spn(spn,
feature_vals,
dtype=numpy.int32):
n_features = len(feature_vals)
n_instantiations = numpy.sum(feature_vals)
feat_s_t = perf_counter()
marg_data = numpy.zeros((n_instantiations, n_features), dtype=dtype)
marg_data.fill(MARG_IND)
instance_id = 0
for i in range(n_features):
for j in range(feature_vals[i]):
marg_data[instance_id, i] = j
instance_id += 1
marginals = evaluate_on_dataset(spn, marg_data)
feat_e_t = perf_counter()
logging.info('Marginals extracted in {}'.format(feat_e_t - feat_s_t))
return marginals
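# Layout sketch (hedged, illustration only): the query matrix built by
# all_single_marginals_spn has one row per single-feature instantiation and
# MARG_IND everywhere else, e.g. for feature_vals = (2, 3), with M = MARG_IND:
# [[0, M], [1, M], [M, 0], [M, 1], [M, 2]]
def _demo_marginal_query_matrix(feature_vals=(2, 3)):
    n_features = len(feature_vals)
    marg_data = numpy.full((int(numpy.sum(feature_vals)), n_features),
                           MARG_IND, dtype=numpy.int32)
    row = 0
    for i, n_vals in enumerate(feature_vals):
        for j in range(n_vals):
            marg_data[row, i] = j
            row += 1
    return marg_data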
def all_single_marginals_ml(data,
feature_vals,
alpha=0.0):
n_instances = data.shape[0]
n_features = data.shape[1]
n_instantiations = numpy.sum(feature_vals)
feat_s_t = perf_counter()
marginals = numpy.zeros(n_instantiations)
feature_vals_rep = numpy.zeros(n_instantiations)
feature_cum_sum = numpy.cumsum(feature_vals)
for j in range(n_features):
prev_id = feature_cum_sum[j - 1] if j > 0 else 0
feature_vals_rep[prev_id:prev_id + feature_vals[j]] = feature_vals[j]
for i in range(n_instances):
obs = data[i, j]
feature_val_id = prev_id + obs
marginals[feature_val_id] += 1
    marginals = (marginals + alpha) / (n_instances + alpha * feature_vals_rep)
feat_e_t = perf_counter()
logging.info('Marginals extracted in {}'.format(feat_e_t - feat_s_t))
return marginals
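# Worked toy check (hedged, illustration only): with alpha=0 the smoothed
# marginals computed above reduce to plain relative frequencies.
def _demo_ml_marginals():
    toy = numpy.array([[0, 1],
                       [1, 1],
                       [1, 0],
                       [1, 1]])
    margs = all_single_marginals_ml(toy, feature_vals=[2, 2], alpha=0.0)
    # expected: P(x0=0)=1/4, P(x0=1)=3/4, P(x1=0)=1/4, P(x1=1)=3/4
    assert numpy.allclose(margs, [0.25, 0.75, 0.25, 0.75])
    return margs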
def extract_features_all_marginals_spn(spn,
data,
feature_vals,
all_marginals=None,
dtype=numpy.int32):
n_instances = data.shape[0]
n_features = data.shape[1]
if all_marginals is None:
all_marginals = all_single_marginals_spn(spn, feature_vals, dtype=dtype)
repr_data = numpy.zeros((n_instances, n_features))
feature_cum_sum = numpy.cumsum(feature_vals)
for i in range(n_instances):
for j in range(n_features):
obs = data[i, j]
prev_id = feature_cum_sum[j - 1] if j > 0 else 0
feature_val_id = prev_id + obs
repr_data[i, j] = all_marginals[feature_val_id]
return repr_data
def extract_features_all_marginals_ml(train_data,
test_data,
feature_vals,
alpha=0.0,
all_marginals=None,
dtype=numpy.int32):
n_instances = test_data.shape[0]
n_features = test_data.shape[1]
    if all_marginals is None and train_data is not None:
        all_marginals = all_single_marginals_ml(train_data,
                                                feature_vals,
                                                alpha=alpha)
    assert all_marginals is not None, 'either train_data or all_marginals is needed'
repr_data = numpy.zeros((n_instances, n_features))
feature_cum_sum = numpy.cumsum(feature_vals)
for i in range(n_instances):
for j in range(n_features):
obs = test_data[i, j]
prev_id = feature_cum_sum[j - 1] if j > 0 else 0
feature_val_id = prev_id + obs
repr_data[i, j] = all_marginals[feature_val_id]
return repr_data
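# Index bookkeeping sketch (hedged): both extractors above address the flat
# marginal vector as prev_id + obs, where prev_id is the cumulative number of
# instantiations of the preceding features.
def _demo_flat_marginal_index(feature_vals=(2, 3, 2)):
    feature_cum_sum = numpy.cumsum(feature_vals)
    index_groups = []
    for j, n_vals in enumerate(feature_vals):
        prev_id = feature_cum_sum[j - 1] if j > 0 else 0
        index_groups.append([int(prev_id) + obs for obs in range(n_vals)])
    assert index_groups == [[0, 1], [2, 3, 4], [5, 6]]
    return index_groups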
def marginalizations_for_instance(spn,
                                  instance,
                                  feature_vals,
                                  exp=False,
                                  dtype=float):
    """
    Given an SPN and an instance, return an array with one entry per
    feature: the (log-)likelihood obtained by keeping only that feature
    observed and marginalizing out all the others
    """
assert instance.ndim == 1
n_features = len(instance)
marg_data = numpy.zeros(n_features, dtype=instance.dtype)
marginalizations = numpy.zeros(n_features, dtype=dtype)
for i in range(n_features):
marg_data.fill(MARG_IND)
# if all_ones:
# marg_data[i] = 1
# else:
marg_data[i] = instance[i]
#
        # evaluate it bottom-up
res, = spn.single_eval(marg_data)
if exp:
res = numpy.exp(res)
marginalizations[i] = res
return marginalizations
import theano
def get_nearest_neighbours_theano_func():
"""
Returns the id of the nearest instance to sample and its value,
in the euclidean distance sense
"""
sample = theano.tensor.vector(dtype=theano.config.floatX)
data = theano.tensor.matrix(dtype=theano.config.floatX)
distance_vec = theano.tensor.sum((data - sample) ** 2, axis=1)
nn_id = theano.tensor.argmin(distance_vec)
find_nearest_neighbour = theano.function(inputs=[sample, data],
outputs=[nn_id, data[nn_id]])
return find_nearest_neighbour
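# Plain numpy equivalent (hedged sketch, for readers without theano) of the
# compiled function above: squared euclidean distances plus an argmin.
def _nearest_neighbour_numpy(sample, data):
    distance_vec = numpy.sum((data - sample) ** 2, axis=1)
    nn_id = int(numpy.argmin(distance_vec))
    return nn_id, data[nn_id]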
def get_nearest_neighbour(samples, data, masked=False, nn_func=None):
if nn_func is None:
nn_func = get_nearest_neighbours_theano_func()
data = data.astype(theano.config.floatX)
samples = [s.astype(theano.config.floatX) for s in samples]
neighbours = []
    for instance in samples:
        nn_s_t = perf_counter()
        if masked:
            #
            # setting all don't-care positions to a background value
            # before computing distances in the full feature space
            feature_mask = instance == MARG_IND
            instance[feature_mask] = 1
        nn_id, _ = nn_func(instance, data)
        nn_e_t = perf_counter()
        neighbours.append((nn_id, data[nn_id]))
        logging.info('Got nn {} in {} secs'.format(nn_id, nn_e_t - nn_s_t))
return neighbours
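# Usage sketch (hedged, toy data only): retrieve the nearest dataset row for
# a single query sample.
def _demo_nearest_neighbours():
    toy_data = numpy.arange(12, dtype='float32').reshape(4, 3)
    samples = [numpy.array([0.1, 1.2, 1.9], dtype='float32')]
    return get_nearest_neighbour(samples, toy_data)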
| 65,192
| 34.087729
| 98
|
py
|
spyn-repr
|
spyn-repr-master/spn/linked/spn.py
|
from .layers import Layer
from .layers import SumLayer
from .layers import ProductLayer
from .layers import compute_feature_vals
from spn import AbstractSpn, AbstractLayeredSpn
from spn import LOG_ZERO
from spn import RND_SEED
from .nodes import SumNode
from .nodes import ProductNode
from .nodes import sample_from_leaf
from collections import deque
import math
# from math import exp
try:
from time import perf_counter
except:
from time import time
perf_counter = time
import numpy
import sys
class Spn(AbstractLayeredSpn):
"""
Spn layer-wise linked implementation using pure python
WRITEME
"""
    def __init__(self,
                 input_layer=None,
                 layers=None):
        """
        layers is a collection of layer-blocks ordered bottom up for evaluation
        """
        if layers is None:
            # avoiding a shared mutable default (add_layer would mutate it)
            layers = []
        # meaningful layers
        self._input_layer = input_layer
        self._feature_vals = None
        if input_layer is not None:
            self._feature_vals = self._input_layer.feature_vals()
        self._layers = None
        self._root_layer = None
        self.set_layers(layers)
def set_input_layer(self, layer):
"""
WRITEME
"""
self._input_layer = layer
self._feature_vals = self._input_layer.feature_vals()
def set_layers(self, layers):
"""
WRITEME
"""
self._layers = layers
# the root is the last level, if present
if layers:
self._root_layer = layers[-1]
def add_layer(self, layer, pos=None):
"""
WRITEME
"""
if pos is None:
self._layers.append(layer)
pos = len(self._layers) - 1
# pos = max(0, len(self._layers) - 1)
# self._root_layer = layer
else:
self._layers.insert(pos, layer)
# updating the pointer to the root
self._root_layer = layer
def insert_layer(self, layer, pos=None):
"""
WRITEME
"""
self._layers.insert(pos, layer)
def root_layer(self):
return self._root_layer
def root(self):
root_layer = self.root_layer()
assert len(list(root_layer.nodes())) == 1
return root_layer._nodes[0]
def input_layer(self):
return self._input_layer
def top_down_nodes(self):
for layer in self.top_down_layers():
for node in layer.nodes():
yield node
def bottom_up_nodes(self):
for layer in self.bottom_up_layers():
for node in layer.nodes():
yield node
def is_decomposable(self):
"""
WRITEME
"""
return all([layer.is_decomposable()
for layer in self._layers
if isinstance(layer, ProductLayer)])
def is_complete(self):
"""
WRITEME
"""
return all([layer.is_complete()
for layer in self._layers
if isinstance(layer, SumLayer)])
def is_valid(self):
"""
Here is checked a stricter condition for validity:
completeness AND decomposability => validity
"""
return self.is_complete() and self.is_decomposable()
def eval(self, input):
"""
WRITEME
"""
lls = None
# batch evaluation
if input.ndim > 1:
# returning a matrix of values
# TODO clean this up and make a numpy array
lls = []
for instance in input.T:
ll = self.single_eval(instance)
lls.append(ll)
else:
# returning an array (list)
lls = self.single_eval(input)
return lls
def single_eval(self, input):
"""
WRITEME
"""
# evaluating the input layer first
self._input_layer.eval(input)
# print('input log vals')
# print('{0}'.format([node.id for node in self._input_layer.nodes()]))
# print(self._input_layer.node_values())
# then propagate upwards by evaluating other layers
for layer in self._layers:
layer.eval()
# print('intermediate layer')
# print(layer.node_values())
# result is returned as a list of log-values
return self._root_layer.node_values()
def test_eval(self):
"""
this is done for testing purposes only,
bypasses the input layer (assuming it is already
evaluated)
"""
for layer in self._layers:
layer.eval()
# print('intermediate layer')
# print(layer.node_values())
# result is returned as a list of log-values
return self._root_layer.node_values()
def mpe_eval(self, input):
"""
WRITEME
"""
lls = None
# batch evaluation
if input.ndim > 1:
# returning a matrix of values
# TODO clean this up and make a numpy array
lls = []
for instance in input.T:
ll = self.single_mpe_eval(instance)
lls.append(ll)
else:
# returning an array (list)
lls = self.single_mpe_eval(input)
return lls
def single_mpe_eval(self, input):
"""
WRITEME
"""
# evaluating the input layer first
# smoothed input layers are evaluated soft as well
# (is this correct?)
self._input_layer.eval(input)
# then propagate upwards by evaluating other layers
# with MPE inference
for layer in self._layers:
layer.mpe_eval()
# result is returned as a list of log-values
return self._root_layer.node_values()
def test_mpe_eval(self):
"""
this is done for testing purposes only,
MPE evaluation (see test_eval)
"""
for layer in self._layers:
layer.mpe_eval()
# print('intermediate layer')
# print(layer.node_values())
# result is returned as a list of log-values
return self._root_layer.node_values()
def get_features(self):
"""
"""
return compute_feature_vals([n for n in self._input_layer.nodes()])
def sample(self,
n_instances=1,
feature_values=None,
one_hot_encoding=False,
starting_node=None,
dtype=numpy.int32,
rand_gen=None,
verbose=False):
"""
Sampling an SPN generating n_instances vectors
"""
if rand_gen is None:
rand_gen = numpy.random.RandomState(RND_SEED)
if feature_values is None:
feature_values = self.get_features()
if verbose:
print('Feature values {0}'.format(feature_values))
if one_hot_encoding:
n_features = numpy.sum(feature_values)
else:
n_features = len(feature_values)
if verbose:
print('Sampling {0} instances over {1} features'.format(n_instances,
n_features))
instances_vec = numpy.zeros((n_instances, n_features), dtype=dtype)
        #
        # filling with a sentinel value (-1) to check full coverage later
        instances_vec.fill(-1)
if starting_node is None:
starting_node = self.root()
for i in range(n_instances):
#
# traversing the spn top down
nodes_to_process = deque()
nodes_to_process.append(starting_node)
while nodes_to_process:
curr_node = nodes_to_process.popleft()
#
# if it is a sum node, sample just one child
if isinstance(curr_node, SumNode):
n_children = len(curr_node.children)
sampled_children_id = rand_gen.choice(n_children, p=curr_node.weights)
nodes_to_process.append(curr_node.children[sampled_children_id])
if verbose:
print('sum node, getting child {0} [{1}]'.format(sampled_children_id,
len(nodes_to_process)))
#
# adding them all if the current node is a product node
elif isinstance(curr_node, ProductNode):
nodes_to_process.extend(curr_node.children)
if verbose:
print('prod node, add {0} children [{1}]'.format(len(curr_node.children),
len(nodes_to_process)))
else:
#
# it is assumed to be a leaf
instances_vec = sample_from_leaf(curr_node,
instances_vec,
i,
rand_gen,
feature_values)
if verbose:
print('Reached a leaf: {0}'.format(instances_vec[i]))
#
# are all values filled?
assert numpy.sum(instances_vec == -1) == 0
#
# if just one instance, flattening the ndarray
if n_instances == 1:
instances_vec = instances_vec.flatten()
return instances_vec
def to_text(self, filename):
"""
Serialization routine to text format
"""
LAYER_GLYPH = '-'
n_layers = self.n_layers()
with open(filename, 'w') as out_stream:
#
# writing the first line
out_stream.write("spn\n\n")
#
# write the features
features_str = " ".join(list(map(str, self._feature_vals)))
out_stream.write(features_str + '\n\n')
#
# exploring other layers
for i, layer in enumerate(self.top_down_layers()):
out_stream.write(LAYER_GLYPH + ' ' +
str(n_layers - i) + '\n')
#
# printing nodes
for node in layer.nodes():
node_str = node.node_short_str()
out_stream.write(node_str + '\n')
out_stream.write('\n')
#
# dumping input layer
out_stream.write(LAYER_GLYPH + str(1) + '\n')
for node in self._input_layer.nodes():
node_str = node.node_short_str()
out_stream.write(node_str + '\n')
out_stream.write('\n')
def backprop(self):
"""
WRITEME
"""
# set top layer derivative to one
self._layers[-1].set_log_derivative(0.0)
# backpropagate to the leaves
for layer in self.top_down_layers():
layer.backprop()
    # simple additive update rule used in tests; note that it takes no
    # `self` and is referenced as a plain function (Spn.test_weight_update)
    def test_weight_update(_l_id,
                           _n_id,
                           _w_id,
                           old_weight,
                           grad):
        eta = 0.1
        return old_weight + eta * grad
def backprop_and_update(self, weight_update_rule):
"""
WRITEME
"""
# set top layer derivative to one
self._layers[-1].set_log_derivative(0.0)
# backpropagate to the leaves
layer_id = 0
for layer in self.top_down_layers():
layer.backprop()
# for sum layers
if isinstance(layer, SumLayer):
# updating weights according to a simple rule
layer.update_weights(weight_update_rule, layer_id)
layer_id += 1
def mpe_backprop(self):
"""
WRITEME
"""
# set top layer derivative to one
self._layers[-1].set_log_derivative(0.0)
# backpropagate to the leaves
for layer in self.top_down_layers():
layer.mpe_backprop()
def get_weights(self, empty=False):
"""
Returning the weights of the network in a multi dimensional
array (lists of lists of lists, (sum)layers x nodes x weights)
or an empty structure
"""
# creates a multi dim array for storing weights
# [layer_id][node_id][weight_id] all positional integers
weights_ds = None
# filling it with nodes
if not empty:
weights_ds = [[[weight for weight in node.weights]
for node in layer.nodes()]
for layer in self.top_down_layers()
if isinstance(layer, SumLayer)]
# filling it with zeros
else:
weights_ds = [[[0.0 for child in node.children]
for node in layer.nodes()]
for layer in self.top_down_layers()
if isinstance(layer, SumLayer)]
return weights_ds
def set_weights(self, weights_ds):
"""
Setting the network weights from a data structure
"""
layer_id = 0
for layer in self.top_down_layers():
if isinstance(layer, SumLayer):
for node_id, node in enumerate(layer.nodes()):
node.set_weights(weights_ds[layer_id][node_id])
layer_id += 1
def mpe_traversal(self):
"""
WRITEME
this shall be a generator for traversing the spn top down,
halting only in proximity of weights to be updated according
to MPE inference
- assuming a mpe_eval() bottom-up pass has been done (?)
- according to Poon, one can do a sum eval step and then a
max backprop step...
"""
# creating a queue
nodes_to_process = deque()
# adding the root nodes
for i, node in enumerate(self._layers[-1].nodes()):
nodes_to_process.append((0, node.id, node))
# print('roots', len(nodes_to_process))
# bfs search
child_nodes = deque()
while nodes_to_process:
# pop the first one
level, par_id, curr_node = nodes_to_process.popleft()
# print('now examining', level, id, curr_node)
# searching for the max valued child
# max_val = LOG_ZERO
# clearing the deque
child_nodes.clear()
for i, child in enumerate(curr_node.children):
# this is done by peharz
# posterior = child.log_val + child.log_der
# this, instead shall be the 'classic one'
# posterior = child.log_val + log_weight
# if posterior > max_val:
# max_val = posterior
# child_nodes.clear()
# if posterior == max_val:
# child_nodes.append(level, id, i, child)
# print('children', child.log_val +
# curr_node.log_weights[i], curr_node.log_val)
# compute the value, in theory the max_child has the
# same values as the parent
if numpy.isclose(child.log_val + curr_node.log_weights[i],
curr_node.log_val):
child_nodes.append((i, child))
# now for each prod child
for child_pos, child_node in child_nodes:
# print(node)
# yielding the node
yield (level, par_id, child_pos)
# for each child they have, add it to be processed
# checking for non leaf nodes
                try:
                    for j, sum_node in enumerate(child_node.children):
                        nodes_to_process.append(
                            (level + 1, sum_node.id, sum_node))
                except AttributeError:
                    # leaf nodes have no children attribute
                    pass
def fit(self, train, valid, test, algo='sgd', options=None):
"""
WRITEME
"""
def fit_sgd(self,
train, valid, test,
n_epochs=50,
batch_size=1,
hard=False,
learning_rate=0.1,
grad_method=0, # 0=SGD, 1=ADAGRAD, 2=ADADELTA
validation_frequency=None,
early_stopping=30,
rand_gen=None,
epsilon=1e-7):
"""
Basic SGD
"""
# def simple_grad(weight, grad):
# return weight + learning_rate * grad
#
# ADAGRAD & ADADELTA
#
ada_grad_history = None
ada_grad_updates = None
if grad_method == 1 or grad_method == 2:
# initialize the previous gradients history
# used both for ADAGRAD and ADADELTA update rules
ada_grad_history = self.get_weights(empty=True)
if grad_method == 2:
# for ADADELTA, storing the previous updates as well
ada_grad_updates = self.get_weights(empty=True)
def compute_grad(layer_id,
node_id,
weight_id,
weight,
grad):
weight_update = weight
if grad_method == 0: # SGD NAIVE
weight_update = weight + learning_rate * grad
elif grad_method == 1: # ADAGRAD
# getting the previous gradient history
h_grad = ada_grad_history[layer_id][node_id][weight_id]
# update it
h_grad += grad * grad
# save it back
ada_grad_history[layer_id][node_id][weight_id] = h_grad
grad = grad / (epsilon + math.sqrt(h_grad))
weight_update = weight + learning_rate * grad
elif grad_method == 2: # ADADELTA
# getting the previous gradient history
h_grad = ada_grad_history[layer_id][node_id][weight_id]
# not a simple squared grad
h_grad = (learning_rate * h_grad +
(1.0 - learning_rate) * grad * grad)
ada_grad_history[layer_id][node_id][weight_id] = h_grad
h_update = ada_grad_updates[layer_id][node_id][weight_id]
update_t = ((math.sqrt(epsilon + h_update)) /
(math.sqrt(epsilon + h_grad))) * grad
h_update = (learning_rate * h_update +
(1.0 - learning_rate) * (update_t * update_t))
ada_grad_updates[layer_id][node_id][weight_id] = h_update
weight_update = weight + update_t
return weight_update
# keep track of ll
epoch_cost = 0.0
old_ll = 0.0
epoch = 0
done_looping = False
best_iter = 0
best_valid_avg_ll = -numpy.inf
best_params = self.get_weights()
best_train_avg_ll = -numpy.inf
local_valid_avg_ll = -numpy.inf
n_train_instances = train.shape[0]
n_train_batches = (n_train_instances
// batch_size)
if validation_frequency is None:
validation_frequency = n_train_batches
no_improvement = 0
#
# epochs loop
#
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
print('>>>>> epoch {0}/{1}'.format(epoch, n_epochs))
epoch_start_t = perf_counter()
# save and reset ll, I could save them all to plot them maybe
old_ll = epoch_cost
avg_time = 0.0
epoch_cost = 0.0
# shuffling the dataset
rand_gen.shuffle(train)
#
# for each training example
#
for m, instance in enumerate(train):
inst_start_t = perf_counter()
# evaluate it
sample_lls = None
if hard:
sample_lls = self.mpe_eval(instance)
else:
sample_lls = self.eval(instance)
# cumulate it (assuming one radix only)
sample_ll = sample_lls[0]
epoch_cost += sample_ll
eval_end_t = perf_counter()
# print('eval time', eval_end_t - inst_start_t)
back_start_t = perf_counter()
# backprop
self.backprop_and_update(compute_grad)
back_end_t = perf_counter()
# print('backpr time', back_end_t - back_start_t)
avg_time += (back_end_t - inst_start_t)
sys.stdout.write(
'\r-- mini batch {:d}/{:d} ({:.4f} secs avg)'
' [{:.4f} ll avg]'
.format(m + 1, n_train_batches,
avg_time / (m + 1),
epoch_cost / (m + 1)))
sys.stdout.flush()
#
# checking for validation set performance
#
if ((m + 1) % validation_frequency == 0 and
valid is not None):
valid_start_t = perf_counter()
valid_lls = self.eval(valid)
valid_avg_ll = numpy.mean(valid_lls)
valid_end_t = perf_counter()
print('\n\tLL on val:{ll} in {ss} secs'.
format(ll=valid_avg_ll,
ss=(valid_end_t - valid_start_t)))
#
# now comparing with best score
#
if valid_avg_ll > best_valid_avg_ll:
print('\tNEW BEST VALID LL: {0}'.
format(valid_avg_ll))
best_iter = epoch * (m + 1)
best_valid_avg_ll = valid_avg_ll
best_train_avg_ll = epoch_cost / (m + 1)
# saving the model
best_params = self.get_weights()
#
# Evaluating on the test set with best params
#
if test is not None:
test_start_t = perf_counter()
test_lls = self.eval(test)
test_avg_ll = numpy.mean(test_lls)
test_end_t = perf_counter()
print('\tLL on TEST:{ll} in {ss} secs'.
format(ll=test_avg_ll,
ss=(test_end_t - test_start_t)))
#
# early stopping
#
if valid_avg_ll > local_valid_avg_ll:
no_improvement = 0
local_valid_avg_ll = valid_avg_ll
else:
no_improvement += 1
if no_improvement >= early_stopping:
print('No improvement on valid set after {0} checks'.
format(no_improvement))
done_looping = True
break
epoch_end_t = perf_counter()
print('\n elapsed {0} secs'.format(epoch_end_t -
epoch_start_t))
rel_imp = abs((epoch_cost - old_ll) / epoch_cost)
print('relative improvement -> {0}'.format(rel_imp))
if rel_imp < epsilon:
done_looping = True
        #
        # Evaluating on the test set with best params
        #
        test_avg_ll = None
        if test is not None:
            self.set_weights(best_params)
            test_start_t = perf_counter()
            test_lls = self.eval(test)
            test_avg_ll = numpy.mean(test_lls)
            test_end_t = perf_counter()
            print('\nLL on TEST:{ll} in {ss} secs'.
                  format(ll=test_avg_ll,
                         ss=(test_end_t - test_start_t)))
        return best_train_avg_ll, best_valid_avg_ll, test_avg_ll
def fit_em(self,
train, valid, test,
n_epochs=50,
batch_size=1,
hard=True,
epsilon=1e-7):
"""
EM learning (see peharz)
"""
# keeping track of sum layers only
sum_layers_only = [i for i, layer
in enumerate(self._layers)
if isinstance(layer, SumLayer)]
# reversing the list
sum_layers_only.reverse()
print('sum layers', sum_layers_only)
# allocating a temp struct for weight updating
# it is a dynamic 3d-tensor
# w_updates[l][n][c] contains the new weight for the
# node c with parent n in the layer l (all integers)
# note that l counts layers ids top down
# TODO pass to a numpy tensor, even if sparse
w_updates = [i for i in range(len(sum_layers_only))]
# create a dict for each layer: node_id -> layer pos
w_layer_pos = []
for i, layer_id in enumerate(sum_layers_only):
layer = self._layers[layer_id]
layer_updates = [[0.0 for child in node.children]
for node in layer.nodes()]
w_updates[i] = layer_updates
w_layer_pos.append({})
# w_layer_pos[i] = {}
for j, node in enumerate(layer.nodes()):
w_layer_pos[i][node.id] = j
# print('w updates', w_updates)
# print('w layer pos', w_layer_pos)
# keep track of ll
epoch_cost = 0.0
old_ll = 0.0
epoch = 0
done_looping = False
n_train_instances = train.shape[0]
n_train_batches = (n_train_instances
// batch_size)
# epochs loop
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
print('>>>>> epoch {0}/{1}'.format(epoch, n_epochs))
epoch_start_t = perf_counter()
# reset updates
for l, layer in enumerate(w_updates):
for n, node in enumerate(layer):
for c, child in enumerate(node):
w_updates[l][n][c] = 0.0
# save and reset ll
old_ll = epoch_cost
epoch_cost = 0.0
avg_time = 0.0
# for each training example
# TODO we could shuffle them...
for m, instance in enumerate(train):
inst_start_t = perf_counter()
# evaluate it
sample_lls = None
if hard:
sample_lls = self.mpe_eval(instance)
else:
sample_lls = self.eval(instance)
# cumulate it (assuming one radix only)
sample_ll = sample_lls[0]
epoch_cost += sample_ll
eval_end_t = perf_counter()
# print('evalua time', eval_end_t - inst_start_t)
# weight update (hard?)
if hard:
hard_start_t = perf_counter()
# descending with MPE inference
# just adding a + 1 counter
                    for l_id, par_id, child_pos in self.mpe_traversal():
                        try:
                            par_pos = w_layer_pos[l_id][par_id]
                            w_updates[l_id][par_pos][child_pos] += 1.0
                        except (KeyError, IndexError):
                            # par_pos may be unbound here, so it is not printed
                            print('error', l_id, par_id, child_pos)
hard_end_t = perf_counter()
# print('hard time', hard_end_t -
# hard_start_t)
else:
back_start_t = perf_counter()
# backprop
self.backprop()
back_end_t = perf_counter()
# print('backpr time', back_end_t -
# back_start_t)
update_start_t = perf_counter()
# update weights
for l, layer_upd in enumerate(w_updates):
layer = self._layers[sum_layers_only[l]]
for p, parent in enumerate(layer_upd):
sum_node = layer._nodes[p]
for c in range(len(parent)):
child = sum_node.children[c]
child_log_w = sum_node.log_weights[c]
w_updates[l][p][c] += \
math.exp(child.log_val +
sum_node.log_der +
child_log_w -
sample_ll)
update_end_t = perf_counter()
# print('update time', update_end_t -
# update_start_t)
inst_end_t = perf_counter()
# print('instan time', inst_end_t -
# inst_start_t)
avg_time += (inst_end_t - inst_start_t)
sys.stdout.write(
'\r-- mini batch {:d}/{:d} ({:.4f} secs avg)'
' [{:.4f} ll avg]'
.format(m + 1, n_train_batches,
avg_time / (m + 1),
epoch_cost / (m + 1)))
sys.stdout.flush()
# normalizing weight updates
for l, layer_upd in enumerate(w_updates):
layer = self._layers[sum_layers_only[l]]
for p, parent in enumerate(layer_upd):
sum_node = layer._nodes[p]
sum_node_tot = 0.0
num_children = len(parent)
for c in range(num_children):
sum_node_tot += w_updates[l][p][c]
                    # if no update occurred, all weights are set to
                    # 1/num_children
if sum_node_tot > 0.0:
for c in range(num_children):
w_updates[l][p][c] /= sum_node_tot
else:
for c in range(num_children):
w_updates[l][p][c] = 1.0 / float(num_children)
# setting the weights
sum_node.set_weights(w_updates[l][p])
# checking for convergence
print('\ttrain ll', epoch_cost / train.shape[0])
# computing the log-likelihood on the validation set, if any
# rel_imp = abs((current_ll - old_ll) / current_ll)
# print('relative improvement -> {0}'.format(rel_imp))
# if rel_imp < epsilon:
# break
epoch_end_t = perf_counter()
print('elapsed {0} secs'.format(epoch_end_t -
epoch_start_t))
if valid is not None:
valid_start_t = perf_counter()
valid_lls = self.eval(valid)
valid_avg_ll = numpy.mean(valid_lls)
valid_end_t = perf_counter()
print('\tLL on val:{ll} in {ss} secs'.
format(ll=valid_avg_ll,
ss=(valid_end_t - valid_start_t)))
# rel_imp = abs((current_ll - old_ll) / current_ll)
rel_imp = abs((epoch_cost - old_ll) / epoch_cost)
print('relative improvement -> {0}'.format(rel_imp))
if rel_imp < epsilon:
break
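# Sampling sketch (hedged, illustration only): draw n instances from an
# already built, valid spn with a reproducible random generator.
def _demo_sample_spn(spn, n=5, seed=RND_SEED):
    rand_gen = numpy.random.RandomState(seed)
    return spn.sample(n_instances=n, rand_gen=rand_gen)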
def evaluate_on_dataset(spn, data):
n_instances = data.shape[0]
pred_lls = numpy.zeros(n_instances)
for i, instance in enumerate(data):
(pred_ll, ) = spn.single_eval(instance)
pred_lls[i] = pred_ll
return pred_lls
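# Usage sketch (hedged): collect per-instance log-likelihoods with
# evaluate_on_dataset and report their mean.
def _demo_avg_log_likelihood(spn, data):
    pred_lls = evaluate_on_dataset(spn, data)
    return numpy.mean(pred_lls)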
| 32,300
| 33.546524
| 97
|
py
|
spyn-repr
|
spyn-repr-master/spn/linked/tests/test_layers.py
|
from spn.linked.layers import Layer
from spn.linked.layers import ProductLayer
from spn.linked.layers import SumLayer
from spn.linked.layers import CategoricalInputLayer
from spn.linked.layers import CategoricalIndicatorLayer
from spn.linked.layers import CategoricalSmoothedLayer
from spn.linked.layers import CategoricalCLInputLayer
from spn.linked.nodes import Node
from spn.linked.nodes import SumNode
from spn.linked.nodes import ProductNode
from spn.linked.nodes import CategoricalSmoothedNode
from spn.linked.nodes import CategoricalIndicatorNode
from spn.linked.nodes import CLTreeNode
from spn.linked.spn import Spn
from spn.tests import compute_smoothed_ll
from spn.tests import PRECISION
from spn import LOG_ZERO
from spn import MARG_IND
from spn import IS_LOG_ZERO
from math import log
import numpy
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
def test_layer_create():
# creating generic nodes
node1 = Node()
node2 = Node()
node3 = Node()
# and a generic Layer
layer = Layer()
layer.add_node(node1)
layer.add_node(node2)
layer.add_node(node3)
layer2 = Layer([node1, node2, node3])
assert layer._nodes == layer2._nodes
def test_sum_layer_create_and_eval():
# creating generic nodes
node1 = Node()
node2 = Node()
node3 = Node()
# whose values are
val1 = 1.
val2 = 1.
val3 = 0.
node1.set_val(val1)
node2.set_val(val2)
node3.set_val(val3)
# setting weights
weight11 = 0.2
weight12 = 0.3
weight13 = 0.5
weight21 = 0.3
weight22 = 0.7
weight32 = 0.4
weight33 = 0.6
# creating sum nodes
sum1 = SumNode()
sum2 = SumNode()
sum3 = SumNode()
# adding children
sum1.add_child(node1, weight11)
sum1.add_child(node2, weight12)
sum1.add_child(node3, weight13)
sum2.add_child(node1, weight21)
sum2.add_child(node2, weight22)
sum3.add_child(node2, weight32)
sum3.add_child(node3, weight33)
# adding to layer
sum_layer = SumLayer([sum1, sum2, sum3])
# evaluation
sum_layer.eval()
# computing 'log values by hand'
layer_evals = sum_layer.node_values()
print('Layer eval nodes')
print(layer_evals)
logval1 = log(weight11 * val1 +
weight12 * val2 +
weight13 * val3)
logval2 = log(weight21 * val1 +
weight22 * val2)
logval3 = log(weight32 * val2 +
weight33 * val3)
logvals = [logval1, logval2, logval3]
print('log vals')
print(logvals)
# checking for correctness
for logval, eval in zip(logvals, layer_evals):
assert_almost_equal(logval, eval, PRECISION)
def test_product_layer_create_and_eval():
# creating generic nodes
node1 = Node()
node2 = Node()
node3 = Node()
# whose values are
val1 = 0.8
val2 = 1.
val3 = 0.
node1.set_val(val1)
node2.set_val(val2)
node3.set_val(val3)
# creating product nodes
prod1 = ProductNode()
prod2 = ProductNode()
prod3 = ProductNode()
# adding children
prod1.add_child(node1)
prod1.add_child(node2)
prod2.add_child(node1)
prod2.add_child(node3)
prod3.add_child(node2)
prod3.add_child(node3)
# adding product nodes to layer
product_layer = ProductLayer([prod1, prod2, prod3])
# evaluating
product_layer.eval()
# getting log vals
layer_evals = product_layer.node_values()
print('layer eval nodes')
print(layer_evals)
# computing our values
prodval1 = val1 * val2
logval1 = log(prodval1) if prodval1 > 0. else LOG_ZERO
prodval2 = val1 * val3
logval2 = log(prodval2) if prodval2 > 0. else LOG_ZERO
prodval3 = val2 * val3
logval3 = log(prodval3) if prodval3 > 0. else LOG_ZERO
logvals = [logval1, logval2, logval3]
print('log vals')
print(logvals)
for logval, eval in zip(logvals, layer_evals):
if logval == LOG_ZERO:
# for zero log check this way for correctness
assert IS_LOG_ZERO(eval) is True
else:
assert_almost_equal(logval, eval, PRECISION)
vars = [2, 2, 3, 4]
freqs = [[1, 2],
[5, 5],
[1, 0, 2],
None]
obs = [0, MARG_IND, 1, 2]
def test_categorical_input_layer():
print('categorical input layer')
# I could loop through alpha as well
alpha = 0.1
for var_id1 in range(len(vars)):
for var_id2 in range(len(vars)):
for var_val1 in range(vars[var_id1]):
print('varid1, varid2, varval1',
var_id1, var_id2, var_val1)
# var_id1 = 0
# var_val1 = 0
node1 = CategoricalIndicatorNode(var_id1,
var_val1)
# var_id2 = 0
var_vals2 = vars[var_id2]
node2 = CategoricalSmoothedNode(
var_id2, var_vals2, alpha, freqs[var_id2])
# creating the generic input layer
input_layer = CategoricalInputLayer([node1,
node2])
# evaluating according to an observation
input_layer.eval(obs)
layer_evals = input_layer.node_values()
print('layer eval nodes')
print(layer_evals)
# computing evaluation by hand
                val1 = 1 if (var_val1 == obs[var_id1] or
                             obs[var_id1] == MARG_IND) else 0
logval1 = log(val1) if val1 == 1 else LOG_ZERO
logval2 = compute_smoothed_ll(
obs[var_id2], freqs[var_id2], vars[var_id2], alpha)
logvals = [logval1, logval2]
print('log vals')
print(logvals)
for logval, eval in zip(logvals, layer_evals):
if logval == LOG_ZERO:
# for zero log check this way for correctness
assert IS_LOG_ZERO(eval) is True
else:
assert_almost_equal(logval, eval, PRECISION)
def test_categorical_indicator_layer_eval():
# create a layer from vars
input_layer = CategoricalIndicatorLayer(vars=vars)
# evaluating for obs
input_layer.eval(obs)
# getting values
layer_evals = input_layer.node_values()
print('layer eval nodes')
print(layer_evals)
    # building the log vals by hand
log_vals = []
for var, obs_val in zip(vars, obs):
var_log_vals = None
if obs_val == MARG_IND:
# all 1s
var_log_vals = [0. for i in range(var)]
else:
# just one is 1, the rest are 0
var_log_vals = [LOG_ZERO for i in range(var)]
var_log_vals[obs_val] = 0.
# concatenate vals
log_vals.extend(var_log_vals)
print('log vals')
print(log_vals)
assert log_vals == layer_evals
# a list of node dictionaries over vars
dicts = [{'var': 0, 'freqs': [6, 5]},
{'var': 0},
{'var': 1, 'freqs': [1, 1]},
{'var': 2, 'freqs': [0, 1, 1]},
{'var': 2, 'freqs': [10, 10, 1]},
{'var': 3},
{'var': 3, 'freqs': [6, 5, 1, 1]}]
def test_categorical_smoothed_layer_eval():
alpha = 0.1
# creating input layer
input_layer = CategoricalSmoothedLayer(vars=vars,
node_dicts=dicts,
alpha=alpha)
# evaluate it
input_layer.eval(obs)
# getting values
layer_evals = input_layer.node_values()
print('layer eval nodes')
print(layer_evals)
# crafting by hand
logvals = []
for node_dict in dicts:
var_id = node_dict['var']
freqs = node_dict['freqs'] if 'freqs' in node_dict else None
logvals.append(compute_smoothed_ll(obs[var_id],
freqs,
vars[var_id],
alpha))
print('log vals')
print(logvals)
assert logvals == layer_evals
# now changing alphas
print('\nCHANGING ALPHAS\n')
alphas = [0., 0.1, 1., 10.]
for alpha_new in alphas:
print('alpha', alpha_new)
input_layer.smooth_probs(alpha_new)
# evaluating again
input_layer.eval(obs)
# getting values
layer_evals = input_layer.node_values()
print('layer evals')
print(layer_evals)
logvals = []
for node_dict in dicts:
var_id = node_dict['var']
freqs = node_dict['freqs'] if 'freqs' in node_dict else None
logvals.append(compute_smoothed_ll(obs[var_id],
freqs,
vars[var_id],
alpha_new))
print('logvals')
print(logvals)
assert_array_almost_equal(logvals, layer_evals)
def test_categorical_indicator_layer_vars():
# create indicator nodes first
ind1 = CategoricalIndicatorNode(var=0, var_val=0)
ind2 = CategoricalIndicatorNode(var=3, var_val=0)
ind3 = CategoricalIndicatorNode(var=3, var_val=1)
ind4 = CategoricalIndicatorNode(var=2, var_val=0)
ind5 = CategoricalIndicatorNode(var=1, var_val=1)
ind6 = CategoricalIndicatorNode(var=2, var_val=1)
ind7 = CategoricalIndicatorNode(var=1, var_val=0)
ind8 = CategoricalIndicatorNode(var=0, var_val=1)
ind9 = CategoricalIndicatorNode(var=2, var_val=2)
ind10 = CategoricalIndicatorNode(var=3, var_val=2)
ind11 = CategoricalIndicatorNode(var=3, var_val=3)
# building the layer from nodes
layer = CategoricalIndicatorLayer(nodes=[ind1, ind2,
ind3, ind4,
ind5, ind6,
ind7, ind8,
ind9, ind10, ind11])
# checking for the construction of the vars property
layer_vars = layer.vars()
assert vars == layer_vars
def test_categorical_smoothed_layer_vars():
# creating single nodes in a list from dicts
nodes = [CategoricalSmoothedNode(dict_i['var'], vars[dict_i['var']])
for dict_i in dicts]
# creating the layer
layer = CategoricalSmoothedLayer(nodes)
# evaluating for the construction of vars
layer_vars = layer.vars()
assert vars == layer_vars
def test_prod_layer_backprop():
# input layer made of 5 generic nodes
node1 = Node()
node2 = Node()
node3 = Node()
node4 = Node()
node5 = Node()
input_layer = CategoricalInputLayer([node1, node2,
node3, node4,
node5])
# top layer made by 3 prod nodes
prod1 = ProductNode()
prod2 = ProductNode()
prod3 = ProductNode()
# linking to input nodes
prod1.add_child(node1)
prod1.add_child(node2)
prod1.add_child(node3)
prod2.add_child(node2)
prod2.add_child(node3)
prod2.add_child(node4)
prod3.add_child(node3)
prod3.add_child(node4)
prod3.add_child(node5)
prod_layer = ProductLayer([prod1, prod2, prod3])
# setting input values
val1 = 0.0
node1.set_val(val1)
val2 = 0.5
node2.set_val(val2)
val3 = 0.3
node3.set_val(val3)
val4 = 1.0
node4.set_val(val4)
val5 = 0.0
node5.set_val(val5)
print('input', [node.log_val for node in input_layer.nodes()])
# evaluating
prod_layer.eval()
print('eval\'d layer:', prod_layer.node_values())
# set the parent derivatives
prod_der1 = 1.0
prod1.log_der = log(prod_der1)
prod_der2 = 1.0
prod2.log_der = log(prod_der2)
prod_der3 = 0.0
prod3.log_der = LOG_ZERO
# back prop layer wise
prod_layer.backprop()
# check for correctness
try:
log_der1 = log(prod_der1 * val2 * val3)
except:
log_der1 = LOG_ZERO
try:
log_der2 = log(prod_der1 * val1 * val3 +
prod_der2 * val3 * val4)
except:
log_der2 = LOG_ZERO
try:
log_der3 = log(prod_der2 * val2 * val4 +
prod_der3 * val4 * val5 +
prod_der1 * val1 * val2)
except:
log_der3 = LOG_ZERO
try:
log_der4 = log(prod_der2 * val2 * val3 +
prod_der3 * val3 * val5)
except:
log_der4 = LOG_ZERO
try:
log_der5 = log(prod_der3 * val3 * val4)
except:
log_der5 = LOG_ZERO
# printing, just in case
print('child log der', node1.log_der, node2.log_der,
node3.log_der, node4.log_der, node5.log_der)
print('exact log der', log_der1, log_der2, log_der3,
log_der4, log_der5)
if IS_LOG_ZERO(log_der1):
assert IS_LOG_ZERO(node1.log_der)
else:
assert_almost_equal(log_der1, node1.log_der, 15)
if IS_LOG_ZERO(log_der2):
assert IS_LOG_ZERO(node2.log_der)
else:
assert_almost_equal(log_der2, node2.log_der, 15)
if IS_LOG_ZERO(log_der3):
assert IS_LOG_ZERO(node3.log_der)
else:
assert_almost_equal(log_der3, node3.log_der, 15)
if IS_LOG_ZERO(log_der4):
assert IS_LOG_ZERO(node4.log_der)
else:
assert_almost_equal(log_der4, node4.log_der, 15)
if IS_LOG_ZERO(log_der5):
assert IS_LOG_ZERO(node5.log_der)
else:
assert_almost_equal(log_der5, node5.log_der, 15)
# resetting derivatives
node1.log_der = LOG_ZERO
node2.log_der = LOG_ZERO
node3.log_der = LOG_ZERO
node4.log_der = LOG_ZERO
node5.log_der = LOG_ZERO
# setting new values as inputs
val1 = 0.0
node1.set_val(val1)
val2 = 0.0
node2.set_val(val2)
val3 = 0.3
node3.set_val(val3)
val4 = 1.0
node4.set_val(val4)
val5 = 1.0
node5.set_val(val5)
# evaluating again
prod_layer.eval()
print('eval\'d layer:', prod_layer.node_values())
# set the parent derivatives
prod_der1 = 1.0
prod1.log_der = log(prod_der1)
prod_der2 = 1.0
prod2.log_der = log(prod_der2)
prod_der3 = 0.0
prod3.log_der = LOG_ZERO
# back prop layer wise
prod_layer.backprop()
# check for correctness
try:
log_der1 = log(prod_der1 * val2 * val3)
except:
log_der1 = LOG_ZERO
try:
log_der2 = log(prod_der1 * val1 * val3 +
prod_der2 * val3 * val4)
except:
log_der2 = LOG_ZERO
try:
log_der3 = log(prod_der2 * val2 * val4 +
prod_der3 * val4 * val5 +
prod_der1 * val1 * val2)
except:
log_der3 = LOG_ZERO
try:
log_der4 = log(prod_der2 * val2 * val3 +
prod_der3 * val3 * val5)
except:
log_der4 = LOG_ZERO
try:
log_der5 = log(prod_der3 * val3 * val4)
except:
log_der5 = LOG_ZERO
# printing, just in case
print('child log der', node1.log_der, node2.log_der,
node3.log_der, node4.log_der, node5.log_der)
print('exact log der', log_der1, log_der2, log_der3,
log_der4, log_der5)
if IS_LOG_ZERO(log_der1):
assert IS_LOG_ZERO(node1.log_der)
else:
assert_almost_equal(log_der1, node1.log_der, 15)
if IS_LOG_ZERO(log_der2):
assert IS_LOG_ZERO(node2.log_der)
else:
assert_almost_equal(log_der2, node2.log_der, 15)
if IS_LOG_ZERO(log_der3):
assert IS_LOG_ZERO(node3.log_der)
else:
assert_almost_equal(log_der3, node3.log_der, 15)
if IS_LOG_ZERO(log_der4):
assert IS_LOG_ZERO(node4.log_der)
else:
assert_almost_equal(log_der4, node4.log_der, 15)
if IS_LOG_ZERO(log_der5):
assert IS_LOG_ZERO(node5.log_der)
else:
assert_almost_equal(log_der5, node5.log_der, 15)
def test_sum_layer_backprop():
# input layer made of 5 generic nodes
node1 = Node()
node2 = Node()
node3 = Node()
node4 = Node()
node5 = Node()
# top layer made by 3 sum nodes
sum1 = SumNode()
sum2 = SumNode()
sum3 = SumNode()
# linking to input nodes
weight11 = 0.3
sum1.add_child(node1, weight11)
weight12 = 0.3
sum1.add_child(node2, weight12)
weight13 = 0.4
sum1.add_child(node3, weight13)
weight22 = 0.15
sum2.add_child(node2, weight22)
weight23 = 0.15
sum2.add_child(node3, weight23)
weight24 = 0.7
sum2.add_child(node4, weight24)
weight33 = 0.4
sum3.add_child(node3, weight33)
weight34 = 0.25
sum3.add_child(node4, weight34)
weight35 = 0.35
sum3.add_child(node5, weight35)
sum_layer = SumLayer([sum1, sum2, sum3])
# setting input values
val1 = 0.0
node1.set_val(val1)
val2 = 0.5
node2.set_val(val2)
val3 = 0.3
node3.set_val(val3)
val4 = 1.0
node4.set_val(val4)
val5 = 0.0
node5.set_val(val5)
# evaluating
sum_layer.eval()
print('eval\'d layer:', sum_layer.node_values())
# set the parent derivatives
sum_der1 = 1.0
sum1.log_der = log(sum_der1)
sum_der2 = 1.0
sum2.log_der = log(sum_der2)
sum_der3 = 0.0
sum3.log_der = LOG_ZERO
# back prop layer wise
sum_layer.backprop()
# check for correctness
try:
log_der1 = log(sum_der1 * weight11)
except:
log_der1 = LOG_ZERO
try:
log_der2 = log(sum_der1 * weight12 +
sum_der2 * weight22)
except:
log_der2 = LOG_ZERO
try:
log_der3 = log(sum_der1 * weight13 +
sum_der2 * weight23 +
sum_der3 * weight33)
except:
log_der3 = LOG_ZERO
try:
log_der4 = log(sum_der2 * weight24 +
sum_der3 * weight34)
except:
log_der4 = LOG_ZERO
try:
log_der5 = log(sum_der3 * weight35)
except:
log_der5 = LOG_ZERO
# printing, just in case
print('child log der', node1.log_der, node2.log_der,
node3.log_der, node4.log_der, node5.log_der)
print('exact log der', log_der1, log_der2, log_der3,
log_der4, log_der5)
if IS_LOG_ZERO(log_der1):
assert IS_LOG_ZERO(node1.log_der)
else:
assert_almost_equal(log_der1, node1.log_der, 15)
if IS_LOG_ZERO(log_der2):
assert IS_LOG_ZERO(node2.log_der)
else:
assert_almost_equal(log_der2, node2.log_der, 15)
if IS_LOG_ZERO(log_der3):
assert IS_LOG_ZERO(node3.log_der)
else:
assert_almost_equal(log_der3, node3.log_der, 15)
if IS_LOG_ZERO(log_der4):
assert IS_LOG_ZERO(node4.log_der)
else:
assert_almost_equal(log_der4, node4.log_der, 15)
if IS_LOG_ZERO(log_der5):
assert IS_LOG_ZERO(node5.log_der)
else:
assert_almost_equal(log_der5, node5.log_der, 15)
# updating weights
eta = 0.1
sum_layer.update_weights(Spn.test_weight_update, 0)
# checking for correctness
weight_u11 = sum_der1 * val1 * eta + weight11
weight_u12 = sum_der1 * val2 * eta + weight12
weight_u13 = sum_der1 * val3 * eta + weight13
weight_u22 = sum_der2 * val2 * eta + weight22
weight_u23 = sum_der2 * val3 * eta + weight23
weight_u24 = sum_der2 * val4 * eta + weight24
weight_u33 = sum_der3 * val3 * eta + weight33
weight_u34 = sum_der3 * val4 * eta + weight34
weight_u35 = sum_der3 * val5 * eta + weight35
# normalizing
weight_sum1 = weight_u11 + weight_u12 + weight_u13
weight_sum2 = weight_u22 + weight_u23 + weight_u24
weight_sum3 = weight_u33 + weight_u34 + weight_u35
weight_u11 = weight_u11 / weight_sum1
weight_u12 = weight_u12 / weight_sum1
weight_u13 = weight_u13 / weight_sum1
weight_u22 = weight_u22 / weight_sum2
weight_u23 = weight_u23 / weight_sum2
weight_u24 = weight_u24 / weight_sum2
weight_u33 = weight_u33 / weight_sum3
weight_u34 = weight_u34 / weight_sum3
weight_u35 = weight_u35 / weight_sum3
print('expected weights', weight_u11, weight_u12, weight_u13,
weight_u22, weight_u23, weight_u24,
weight_u33, weight_u34, weight_u35)
print('found weights', sum1.weights[0], sum1.weights[1], sum1.weights[2],
sum2.weights[0], sum2.weights[1], sum2.weights[2],
sum3.weights[0], sum3.weights[1], sum3.weights[2])
assert_almost_equal(weight_u11, sum1.weights[0], 10)
assert_almost_equal(weight_u12, sum1.weights[1], 10)
assert_almost_equal(weight_u13, sum1.weights[2], 10)
assert_almost_equal(weight_u22, sum2.weights[0], 10)
assert_almost_equal(weight_u23, sum2.weights[1], 10)
assert_almost_equal(weight_u24, sum2.weights[2], 10)
assert_almost_equal(weight_u33, sum3.weights[0], 10)
assert_almost_equal(weight_u34, sum3.weights[1], 10)
assert_almost_equal(weight_u35, sum3.weights[2], 10)
#
# resetting derivatives
#
node1.log_der = LOG_ZERO
node2.log_der = LOG_ZERO
node3.log_der = LOG_ZERO
node4.log_der = LOG_ZERO
node5.log_der = LOG_ZERO
# setting new values as inputs
val1 = 0.0
node1.set_val(val1)
val2 = 0.0
node2.set_val(val2)
val3 = 0.3
node3.set_val(val3)
val4 = 1.0
node4.set_val(val4)
val5 = 1.0
node5.set_val(val5)
# evaluating again
sum_layer.eval()
print('eval\'d layer:', sum_layer.node_values())
# set the parent derivatives
sum_der1 = 1.0
sum1.log_der = log(sum_der1)
sum_der2 = 1.0
sum2.log_der = log(sum_der2)
sum_der3 = 0.0
sum3.log_der = LOG_ZERO
# back prop layer wise
sum_layer.backprop()
# check for correctness
try:
log_der1 = log(sum_der1 * weight_u11)
except:
log_der1 = LOG_ZERO
try:
log_der2 = log(sum_der1 * weight_u12 +
sum_der2 * weight_u22)
except:
log_der2 = LOG_ZERO
try:
log_der3 = log(sum_der1 * weight_u13 +
sum_der2 * weight_u23 +
sum_der3 * weight_u33)
except:
log_der3 = LOG_ZERO
try:
log_der4 = log(sum_der2 * weight_u24 +
sum_der3 * weight_u34)
except:
log_der4 = LOG_ZERO
try:
log_der5 = log(sum_der3 * weight_u35)
except:
log_der5 = LOG_ZERO
# printing, just in case
print('child log der', node1.log_der, node2.log_der,
node3.log_der, node4.log_der, node5.log_der)
print('exact log der', log_der1, log_der2, log_der3,
log_der4, log_der5)
if IS_LOG_ZERO(log_der1):
assert IS_LOG_ZERO(node1.log_der)
else:
assert_almost_equal(log_der1, node1.log_der, 15)
if IS_LOG_ZERO(log_der2):
assert IS_LOG_ZERO(node2.log_der)
else:
assert_almost_equal(log_der2, node2.log_der, 15)
if IS_LOG_ZERO(log_der3):
assert IS_LOG_ZERO(node3.log_der)
else:
assert_almost_equal(log_der3, node3.log_der, 15)
if IS_LOG_ZERO(log_der4):
assert IS_LOG_ZERO(node4.log_der)
else:
assert_almost_equal(log_der4, node4.log_der, 15)
if IS_LOG_ZERO(log_der5):
assert IS_LOG_ZERO(node5.log_der)
else:
assert_almost_equal(log_der5, node5.log_der, 15)
def test_sum_layer_is_complete():
# creating two scopes and two sum nodes
scope1 = frozenset({0, 2, 3})
scope2 = frozenset({10})
sum_node_1 = SumNode(var_scope=scope1)
sum_node_2 = SumNode(var_scope=scope2)
    # adding product nodes as children to the first, indicator nodes to the second
for i in range(4):
sum_node_1.add_child(ProductNode(var_scope=scope1), 1.0)
sum_node_2.add_child(CategoricalIndicatorNode(var=10, var_val=i), 1.0)
# creating sum layer
sum_layer = SumLayer(nodes=[sum_node_1, sum_node_2])
assert sum_layer.is_complete()
# now with errors in scope
scope3 = frozenset({6})
sum_node_1 = SumNode(var_scope=scope1)
sum_node_2 = SumNode(var_scope=scope3)
    # adding product nodes as children to the first, indicator nodes to the second
for i in range(4):
sum_node_1.add_child(ProductNode(var_scope=scope1), 1.0)
sum_node_2.add_child(CategoricalIndicatorNode(var=10, var_val=i), 1.0)
# creating sum layer
sum_layer = SumLayer(nodes=[sum_node_1, sum_node_2])
assert not sum_layer.is_complete()
sum_node_2.var_scope = scope2
assert sum_layer.is_complete()
sum_node_2.children[3].var_scope = scope3
assert not sum_layer.is_complete()
def test_product_layer_is_decomposable():
# creating scopes and nodes
scope1 = frozenset({0, 2, 3})
scope2 = frozenset({10, 9})
prod_node_1 = ProductNode(var_scope=scope1)
prod_node_2 = ProductNode(var_scope=scope2)
    # creating children manually (argh!)
for var in scope1:
prod_node_1.add_child(SumNode(var_scope=frozenset({var})))
for var in scope2:
prod_node_2.add_child(CategoricalSmoothedNode(var=var,
var_values=2))
# creating layer
prod_layer = ProductLayer(nodes=[prod_node_1, prod_node_2])
assert prod_layer.is_decomposable()
# making it not decomposable anymore
scope3 = frozenset({2})
prod_node_1.add_child(SumNode(var_scope=scope3))
assert not prod_layer.is_decomposable()
def test_categorical_clt_input_layer_eval():
#
    # just a little test to see how mixed-node layers
    # dispatch eval
s_data = numpy.array([[1, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 0],
[1, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0]])
features = [0, 2, 3]
feature_vals = [2, 2, 2]
clt_node = CLTreeNode(data=s_data[:, features],
vars=features,
var_values=feature_vals,
alpha=0.0)
#
# creating a categorical smoothed node
cs_node = CategoricalSmoothedNode(var=1,
var_values=2,
alpha=0.,
data=s_data[:, [1]])
clti_layer = CategoricalCLInputLayer(nodes=[cs_node, clt_node])
nico_cltree_subtree = numpy.array([-1, 0, 1])
nico_cltree_sublls = numpy.array([-1.09861228867,
-0.69314718056,
-0.69314718056,
-1.79175946923,
-1.09861228867,
-0.69314718056])
assert_array_equal(numpy.array(features), clt_node.vars)
assert_array_equal(nico_cltree_subtree,
clt_node._cltree._tree)
s_log_prob = numpy.log(0.5)
#
# evaluating the layer
for i, instance in enumerate(s_data):
clti_layer.eval(instance)
for node in clti_layer.nodes():
if isinstance(node, CLTreeNode):
assert_almost_equal(nico_cltree_sublls[i], node.log_val)
else:
                # in the other case it is log(0.5)
assert_almost_equal(s_log_prob, node.log_val)
| 27,427
| 27.422798
| 78
|
py
|
spyn-repr
|
spyn-repr-master/spn/linked/tests/test_weight_learning.py
|
import numpy
from numpy.testing import assert_array_almost_equal
from ..weight_learning import evaluate_indicator_node
from ..weight_learning import evaluate_categorical_node
from ..weight_learning import evaluate_sum_node
from ..weight_learning import evaluate_product_node
from ..weight_learning import ml_evaluation
from ..nodes import SumNode
from ..nodes import ProductNode
from ..nodes import CategoricalIndicatorNode
from ..nodes import CategoricalSmoothedNode
from ..layers import SumLayer
from ..layers import ProductLayer
from ..layers import CategoricalIndicatorLayer
from ..layers import CategoricalInputLayer
from ..spn import Spn
from spn import LOG_ZERO
from spn import MARG_IND
data = numpy.array([[1, 0, 1, 1, 1, 0],
[1, 0, 2, 1, 0, 1],
[0, 0, 2, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[1, 0, 1, 0, 1, 0],
[1, 0, 0, 1, 1, 1],
[0, 0, 2, 0, 0, 0],
[1, 0, 1, 1, 1, 1]])
n_instances = data.shape[0]
n_features = data.shape[1]
feature_vals = [2, 2, 3, 2, 2, 2]
def test_evaluate_indicator_node():
ind_node = CategoricalIndicatorNode(var=0, var_val=1)
node_eval = evaluate_indicator_node(ind_node, data)
print(node_eval)
assert len(node_eval) == n_instances
node_true_eval_1 = numpy.array([0., 0., LOG_ZERO, LOG_ZERO, 0., 0., LOG_ZERO, 0.])
assert_array_almost_equal(node_eval, node_true_eval_1)
ind_node = CategoricalIndicatorNode(var=2, var_val=2)
node_eval = evaluate_indicator_node(ind_node, data)
print(node_eval)
assert len(node_eval) == n_instances
node_true_eval_2 = numpy.array([LOG_ZERO, 0., 0., LOG_ZERO, LOG_ZERO, LOG_ZERO, 0., LOG_ZERO])
assert_array_almost_equal(node_eval, node_true_eval_2)
def test_evaluate_categorical_node():
var = 1
cat_node = CategoricalSmoothedNode(var=var,
var_values=2,
alpha=0.0,
data=data[:, var][:, numpy.newaxis])
node_eval = evaluate_categorical_node(cat_node, data)
print(node_eval)
assert len(node_eval) == n_instances
assert_array_almost_equal(node_eval, numpy.zeros(n_instances))
var = 0
cat_node = CategoricalSmoothedNode(var=var,
var_values=2,
alpha=0.0,
data=data[:, var][:, numpy.newaxis])
node_eval = evaluate_categorical_node(cat_node, data)
print(node_eval)
assert len(node_eval) == n_instances
one_prob_val = numpy.sum(data[:, var]) / n_instances
zero_prob_val = 1 - one_prob_val
prob_vals = [one_prob_val,
one_prob_val,
zero_prob_val,
zero_prob_val,
one_prob_val,
one_prob_val,
zero_prob_val,
one_prob_val]
assert_array_almost_equal(node_eval, numpy.log(prob_vals))
def test_evaluate_product_node():
child_1 = SumNode()
child_2 = SumNode()
child_3 = SumNode()
child_1_eval = numpy.random.rand(n_instances)
child_2_eval = numpy.random.rand(n_instances)
child_3_eval = numpy.random.rand(n_instances)
assoc = {child_1: child_1_eval,
child_2: child_2_eval,
child_3: child_3_eval}
prod_node = ProductNode()
prod_node.add_child(child_1)
prod_node.add_child(child_2)
prod_node.add_child(child_3)
node_eval = evaluate_product_node(prod_node, assoc, n_instances)
print(node_eval)
sum_child_array = child_1_eval + child_2_eval + child_3_eval
assert_array_almost_equal(node_eval, sum_child_array)
def test_evaluate_sum_node():
child_1 = ProductNode()
child_2 = ProductNode()
child_3 = ProductNode()
child_1_eval = numpy.random.rand(n_instances)
child_2_eval = numpy.random.rand(n_instances)
child_3_eval = numpy.random.rand(n_instances)
# child_1_eval = numpy.array([1, 1, 1, 1, 1, 1, 1, 1])
# child_2_eval = numpy.array([1, 1, 1, 1, 1, 1, 1, 1])
# child_3_eval = numpy.array([1, 1, 1, 1, 1, 1, 1, 1])
assoc = {child_1: child_1_eval,
child_2: child_2_eval,
child_3: child_3_eval}
weights = numpy.random.rand(len(assoc))
sum_node = SumNode()
for i, child in enumerate(assoc):
sum_node.add_child(child, weights[i])
assert_array_almost_equal(sum_node.log_weights, numpy.log(weights))
node_eval = evaluate_sum_node(sum_node, assoc, n_instances)
print(node_eval)
weighted_sum_child_array = numpy.zeros(n_instances)
for i, child in enumerate(assoc):
weighted_sum_child_array += weights[i] * numpy.exp(assoc[child])
assert_array_almost_equal(node_eval, numpy.log(weighted_sum_child_array))
def test_ml_evaluation():
input_vec = numpy.array([[0., 0., 0.],
[0., 0., 0.],
[0., 1., 1.],
[1., 1., 1.]]).T
ind_node_1 = CategoricalIndicatorNode(var=0, var_val=0)
ind_node_2 = CategoricalIndicatorNode(var=0, var_val=1)
ind_node_3 = CategoricalIndicatorNode(var=1, var_val=0)
ind_node_4 = CategoricalIndicatorNode(var=1, var_val=1)
ind_node_5 = CategoricalIndicatorNode(var=2, var_val=0)
ind_node_6 = CategoricalIndicatorNode(var=2, var_val=1)
input_layer = CategoricalInputLayer(nodes=[ind_node_1,
ind_node_2,
ind_node_3,
ind_node_4,
ind_node_5,
ind_node_6])
n_nodes_layer_1 = 6
layer_1_sum_nodes = [SumNode() for i in range(n_nodes_layer_1)]
layer_1_sum_nodes[0].add_child(ind_node_1, 0.6)
layer_1_sum_nodes[0].add_child(ind_node_2, 0.4)
layer_1_sum_nodes[1].add_child(ind_node_1, 0.3)
layer_1_sum_nodes[1].add_child(ind_node_2, 0.7)
layer_1_sum_nodes[2].add_child(ind_node_3, 0.1)
layer_1_sum_nodes[2].add_child(ind_node_4, 0.9)
layer_1_sum_nodes[3].add_child(ind_node_3, 0.7)
layer_1_sum_nodes[3].add_child(ind_node_4, 0.3)
layer_1_sum_nodes[4].add_child(ind_node_5, 0.5)
layer_1_sum_nodes[4].add_child(ind_node_6, 0.5)
layer_1_sum_nodes[5].add_child(ind_node_5, 0.2)
layer_1_sum_nodes[5].add_child(ind_node_6, 0.8)
layer_1 = SumLayer(layer_1_sum_nodes)
n_nodes_layer_2 = 4
layer_2_prod_nodes = [ProductNode() for i in range(n_nodes_layer_2)]
layer_2_prod_nodes[0].add_child(layer_1_sum_nodes[0])
layer_2_prod_nodes[0].add_child(layer_1_sum_nodes[2])
layer_2_prod_nodes[0].add_child(layer_1_sum_nodes[4])
layer_2_prod_nodes[1].add_child(layer_1_sum_nodes[1])
layer_2_prod_nodes[1].add_child(layer_1_sum_nodes[3])
layer_2_prod_nodes[1].add_child(layer_1_sum_nodes[5])
layer_2_prod_nodes[2].add_child(layer_1_sum_nodes[0])
layer_2_prod_nodes[2].add_child(layer_1_sum_nodes[2])
layer_2_prod_nodes[2].add_child(layer_1_sum_nodes[5])
layer_2_prod_nodes[3].add_child(layer_1_sum_nodes[1])
layer_2_prod_nodes[3].add_child(layer_1_sum_nodes[3])
layer_2_prod_nodes[3].add_child(layer_1_sum_nodes[4])
layer_2 = ProductLayer(layer_2_prod_nodes)
root = SumNode()
root.add_child(layer_2_prod_nodes[0], 0.2)
root.add_child(layer_2_prod_nodes[1], 0.4)
root.add_child(layer_2_prod_nodes[2], 0.15)
root.add_child(layer_2_prod_nodes[3], 0.25)
layer_3 = SumLayer([root])
spn = Spn(input_layer=input_layer,
layers=[layer_1, layer_2, layer_3])
print(spn)
res = spn.eval(input_vec)
print('First evaluation')
print(res)
root = spn.root()
node_evals = ml_evaluation(spn, input_vec.T, nodes_to_eval={root})
print(node_evals)
res_vec = numpy.array(res).flatten()
print(res_vec)
assert_array_almost_equal(node_evals[root], res_vec)
#
# store previous sum nodes weights
old_weights = [node.weights for node in layer_1_sum_nodes]
nodes_to_evaluate = set(layer_1_sum_nodes)
nodes_to_evaluate.add(root)
node_evals = ml_evaluation(spn, input_vec.T,
nodes_to_eval=None,
# nodes_to_skip_updating=nodes_to_skip,
update_weights=True)
# assert_array_almost_equal(node_evals[root], res_vec)
print(node_evals)
assert node_evals == {}
print(node_evals)
print(spn)
#
# evaluating and changing only the root
node_evals = ml_evaluation(spn, input_vec.T,
nodes_to_eval={root},
update_weights=True)
print(node_evals)
print(spn)
new_weights = [node.weights for node in layer_1_sum_nodes]
for old_w, new_w in zip(old_weights, new_weights):
assert_array_almost_equal(old_w, new_w)
node_evals = ml_evaluation(spn, input_vec.T,
nodes_to_eval=nodes_to_evaluate,
update_weights=True)
print(node_evals)
print(spn)
assert_array_almost_equal(root.weights,
numpy.array([0.25 for i in range(len(layer_2_prod_nodes))]))
| 9,356
| 34.988462
| 98
|
py
|
spyn-repr
|
spyn-repr-master/spn/linked/tests/test_representation.py
|
import dataset
from spn import MARG_IND
from ..spn import Spn
from ..spn import evaluate_on_dataset
from ..layers import SumLayer
from ..layers import ProductLayer
from ..layers import CategoricalIndicatorLayer
from ..nodes import SumNode
from ..nodes import ProductNode
from ..nodes import CategoricalIndicatorNode
from ..representation import extract_features_nodes_mpe
from ..representation import node_in_path_feature
from ..representation import acc_node_in_path_feature
from ..representation import filter_non_sum_nodes
from ..representation import max_child_id_feature
from ..representation import max_hidden_var_feature, filter_hidden_var_nodes
from ..representation import extract_features_nodes
from ..representation import max_hidden_var_val, max_hidden_var_log_val
from ..representation import hidden_var_val, hidden_var_log_val
from ..representation import filter_all_nodes
from ..representation import var_log_val
from ..representation import node_mpe_instantiation
from ..representation import random_feature_mask
from ..representation import random_rectangular_feature_mask
from ..representation import extract_feature_marginalization_from_masks
from ..representation import extract_features_marginalization_rand
from ..representation import extract_features_marginalization_rectangles
from ..representation import extract_feature_marginalization_from_masks_opt_unique
from ..representation import extract_instances_groups
from ..representation import load_feature_info
from ..representation import store_feature_info
from ..representation import filter_features_by_layer
from ..representation import filter_features_by_scope_length
from ..representation import feature_mask_from_info
from ..representation import filter_features_by_node_type
from ..representation import feature_mask_to_marg
from ..representation import extract_feature_marginalization_from_masks_theanok
from ..representation import extract_feature_marginalization_from_masks_theanok_opt_unique
from ..representation import save_features_to_file
from ..representation import load_features_from_file
from ..representation import feature_mask_scope
from ..representation import all_single_marginals
from ..representation import extract_features_all_marginals
import numpy
from numpy.testing import assert_array_almost_equal
def test_extract_features_sum_nodes():
#
# creating an SPN
ind_x_00 = CategoricalIndicatorNode(0, 0)
ind_x_01 = CategoricalIndicatorNode(0, 1)
ind_x_10 = CategoricalIndicatorNode(1, 0)
ind_x_11 = CategoricalIndicatorNode(1, 1)
ind_x_20 = CategoricalIndicatorNode(2, 0)
ind_x_21 = CategoricalIndicatorNode(2, 1)
input_layer = CategoricalIndicatorLayer([ind_x_00, ind_x_01,
ind_x_10, ind_x_11,
ind_x_20, ind_x_21])
#
# sum layer
#
sum_node_1 = SumNode()
sum_node_1.add_child(ind_x_00, 0.1)
sum_node_1.add_child(ind_x_01, 0.9)
sum_node_2 = SumNode()
sum_node_2.add_child(ind_x_00, 0.4)
sum_node_2.add_child(ind_x_01, 0.6)
sum_node_3 = SumNode()
sum_node_3.add_child(ind_x_10, 0.3)
sum_node_3.add_child(ind_x_11, 0.7)
sum_node_4 = SumNode()
sum_node_4.add_child(ind_x_10, 0.6)
sum_node_4.add_child(ind_x_11, 0.4)
sum_node_5 = SumNode()
sum_node_5.add_child(ind_x_20, 0.5)
sum_node_5.add_child(ind_x_21, 0.5)
sum_node_6 = SumNode()
sum_node_6.add_child(ind_x_20, 0.2)
sum_node_6.add_child(ind_x_21, 0.8)
sum_layer = SumLayer([sum_node_1, sum_node_2,
sum_node_3, sum_node_4,
sum_node_5, sum_node_6])
#
# prod layer
#
prod_node_1 = ProductNode()
prod_node_1.add_child(sum_node_1)
prod_node_1.add_child(sum_node_3)
prod_node_1.add_child(sum_node_5)
prod_node_2 = ProductNode()
prod_node_2.add_child(sum_node_2)
prod_node_2.add_child(sum_node_4)
prod_node_2.add_child(sum_node_6)
prod_node_3 = ProductNode()
prod_node_3.add_child(sum_node_1)
prod_node_3.add_child(sum_node_4)
prod_node_3.add_child(sum_node_6)
prod_node_4 = ProductNode()
prod_node_4.add_child(sum_node_2)
prod_node_4.add_child(sum_node_3)
prod_node_4.add_child(sum_node_5)
prod_layer = ProductLayer([prod_node_1, prod_node_2,
prod_node_3, prod_node_4])
root = SumNode()
root.add_child(prod_node_1, 0.1)
root.add_child(prod_node_2, 0.2)
root.add_child(prod_node_3, 0.25)
root.add_child(prod_node_4, 0.45)
root_layer = SumLayer([root])
spn = Spn(input_layer, [sum_layer,
prod_layer,
root_layer])
print(spn)
ind_data = numpy.array([[0, 1, 0, 1, 1, 0],
[1, 0, 0, 1, 0, 1],
[0, 1, 1, 0, 1, 0],
[0, 1, 1, 0, 0, 1],
[1, 0, 1, 0, 1, 0]])
data = numpy.array([[1, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[0, 0, 0]])
for instance in data:
res = spn.single_eval(instance)
print([node.log_val for node in input_layer.nodes()])
print(numpy.exp([node.log_val for node in input_layer.nodes()]))
print([node.log_val for node in sum_layer.nodes()])
print(numpy.exp([node.log_val for node in sum_layer.nodes()]))
print([node.log_val for node in prod_layer.nodes()])
print(numpy.exp([node.log_val for node in prod_layer.nodes()]))
print(res)
print(numpy.exp(res))
print('\n')
# ret_func = node_in_path_feature
print('MPE, max Hidden var only, id')
ret_func = max_hidden_var_feature
filter_func = filter_hidden_var_nodes
new_data = extract_features_nodes_mpe(spn,
data,
filter_node_func=filter_func,
retrieve_func=ret_func,
remove_zero_features=False,
verbose=True)
print(new_data)
print('Without empty features')
new_data = extract_features_nodes_mpe(spn,
data,
filter_node_func=filter_func,
retrieve_func=ret_func,
remove_zero_features=True,
verbose=True)
print(new_data)
print('MPE, max Hidden var only, val')
ret_func = max_hidden_var_val
filter_func = filter_hidden_var_nodes
new_data = extract_features_nodes_mpe(spn,
data,
filter_node_func=filter_func,
retrieve_func=ret_func,
remove_zero_features=False,
dtype=float,
verbose=True)
print(new_data)
print('Hidden max var only, val')
ret_func = max_hidden_var_val
filter_func = filter_hidden_var_nodes
new_data = extract_features_nodes(spn,
data,
filter_node_func=filter_func,
retrieve_func=ret_func,
remove_zero_features=False,
dtype=float,
verbose=True)
print(new_data)
print('Hidden var only, val')
ret_func = hidden_var_val
filter_func = filter_hidden_var_nodes
new_data = extract_features_nodes(spn,
data,
filter_node_func=filter_func,
retrieve_func=ret_func,
remove_zero_features=False,
dtype=float,
verbose=True)
print(new_data)
def build_test_mini_spn():
#
# creating an SPN
ind_x_00 = CategoricalIndicatorNode(0, 0)
ind_x_01 = CategoricalIndicatorNode(0, 1)
ind_x_10 = CategoricalIndicatorNode(1, 0)
ind_x_11 = CategoricalIndicatorNode(1, 1)
ind_x_20 = CategoricalIndicatorNode(2, 0)
ind_x_21 = CategoricalIndicatorNode(2, 1)
input_layer = CategoricalIndicatorLayer([ind_x_00, ind_x_01,
ind_x_10, ind_x_11,
ind_x_20, ind_x_21])
#
# sum layer
#
sum_node_1 = SumNode(var_scope=frozenset([0]))
sum_node_1.add_child(ind_x_00, 0.1)
sum_node_1.add_child(ind_x_01, 0.9)
sum_node_2 = SumNode(var_scope=frozenset([0]))
sum_node_2.add_child(ind_x_00, 0.4)
sum_node_2.add_child(ind_x_01, 0.6)
sum_node_3 = SumNode(var_scope=frozenset([1]))
sum_node_3.add_child(ind_x_10, 0.3)
sum_node_3.add_child(ind_x_11, 0.7)
sum_node_4 = SumNode(var_scope=frozenset([1]))
sum_node_4.add_child(ind_x_10, 0.6)
sum_node_4.add_child(ind_x_11, 0.4)
sum_node_5 = SumNode(var_scope=frozenset([2]))
sum_node_5.add_child(ind_x_20, 0.5)
sum_node_5.add_child(ind_x_21, 0.5)
sum_node_6 = SumNode(var_scope=frozenset([2]))
sum_node_6.add_child(ind_x_20, 0.2)
sum_node_6.add_child(ind_x_21, 0.8)
sum_layer = SumLayer([sum_node_1, sum_node_2,
sum_node_3, sum_node_4,
sum_node_5, sum_node_6])
#
# prod layer
#
prod_node_1 = ProductNode(var_scope=frozenset([0, 1, 2]))
prod_node_1.add_child(sum_node_1)
prod_node_1.add_child(sum_node_3)
prod_node_1.add_child(sum_node_5)
prod_node_2 = ProductNode(var_scope=frozenset([0, 1, 2]))
prod_node_2.add_child(sum_node_2)
prod_node_2.add_child(sum_node_4)
prod_node_2.add_child(sum_node_6)
prod_node_3 = ProductNode(var_scope=frozenset([0, 1, 2]))
prod_node_3.add_child(sum_node_1)
prod_node_3.add_child(sum_node_4)
prod_node_3.add_child(sum_node_6)
prod_node_4 = ProductNode(var_scope=frozenset([0, 1, 2]))
prod_node_4.add_child(sum_node_2)
prod_node_4.add_child(sum_node_3)
prod_node_4.add_child(sum_node_5)
prod_layer = ProductLayer([prod_node_1, prod_node_2,
prod_node_3, prod_node_4])
root = SumNode(var_scope=frozenset([0, 1, 2]))
root.add_child(prod_node_1, 0.1)
root.add_child(prod_node_2, 0.2)
root.add_child(prod_node_3, 0.25)
root.add_child(prod_node_4, 0.45)
root_layer = SumLayer([root])
spn = Spn(input_layer, [sum_layer,
prod_layer,
root_layer])
print(spn)
layers = [input_layer, sum_layer, prod_layer, root_layer]
nodes = [node for layer in layers for node in layer.nodes()]
return spn, layers, nodes
def test_extract_features_all():
data = numpy.array([[1, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[0, 0, 0]])
spn, layers, nodes = build_test_mini_spn()
feature_info_file = 'test_extract_features_all.feature.info'
ret_func = var_log_val
filter_func = filter_all_nodes
new_data = extract_features_nodes(spn,
data,
filter_node_func=filter_func,
retrieve_func=ret_func,
remove_zero_features=False,
dtype=float,
output_feature_info=feature_info_file,
verbose=True)
print('Repre shape {}'.format(new_data.shape))
print(new_data)
def test_extract_features_all_extract_info():
data = numpy.array([[1, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[0, 0, 0]])
spn, layers, nodes = build_test_mini_spn()
feature_info_file = 'test_extract_features_all.feature.info'
ret_func = var_log_val
filter_func = filter_all_nodes
new_data = extract_features_nodes(spn,
data,
filter_node_func=filter_func,
retrieve_func=ret_func,
remove_zero_features=False,
dtype=float,
output_feature_info=feature_info_file,
verbose=True)
print('Repre shape {}'.format(new_data.shape))
print(new_data)
print('Loading feature info back')
feature_info = load_feature_info(feature_info_file)
print(feature_info)
n_features = len(feature_info)
all_feature_mask = feature_mask_from_info(feature_info, n_features)
assert_array_almost_equal(all_feature_mask, numpy.ones(n_features, dtype=bool))
#
# saving again
feature_info_file_2 = 'test_extract_features_all_2.feature.info'
store_feature_info(feature_info, feature_info_file_2)
feature_info_2 = load_feature_info(feature_info_file_2)
print(feature_info_2)
for i_1, i_2 in zip(feature_info, feature_info_2):
assert i_1 == i_2
print('Extracting all features from different levels')
n_layers = spn.n_layers()
for i in range(n_layers):
print('\tlayer: {}'.format(i))
filtered_feature_info = filter_features_by_layer(feature_info, i)
feature_mask = feature_mask_from_info(filtered_feature_info, n_features)
print(feature_mask)
print('Extracting all features from different scopes')
scope_lengths = set()
for node in nodes:
scope = None
if hasattr(node, 'var_scope'):
scope = node.var_scope
elif hasattr(node, 'var'):
scope = node.var
scope_lengths.add(scope)
n_scope_lengths = len(scope_lengths)
for i in range(1, n_scope_lengths):
print('\tscope length {}'.format(i))
filtered_feature_info = filter_features_by_scope_length(feature_info, i)
feature_mask = feature_mask_from_info(filtered_feature_info, n_features)
if i == 2:
assert not any(feature_mask)
print(feature_mask)
print('Extracting all features from different node types')
node_types = ('SumNode', 'ProductNode')
for node_type in node_types:
    print('\tnode type {}'.format(node_type))
    filtered_feature_info = filter_features_by_node_type(feature_info, node_type)
feature_mask = feature_mask_from_info(filtered_feature_info, n_features)
print(feature_mask)
def test_node_mpe_instantiation():
spn, layers, nodes = build_test_mini_spn()
input_layer, sum_layer, prod_layer, root_layer = layers
ind_data = numpy.array([[0, 1, 0, 1, 1, 0],
[1, 0, 0, 1, 0, 1],
[0, 1, 1, 0, 1, 0],
[0, 1, 1, 0, 0, 1],
[1, 0, 1, 0, 1, 0]])
data = numpy.array([[1, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[0, 0, 0]])
for instance in data:
res = spn.single_eval(instance)
print([node.log_val for node in input_layer.nodes()])
print(numpy.exp([node.log_val for node in input_layer.nodes()]))
print([node.log_val for node in sum_layer.nodes()])
print(numpy.exp([node.log_val for node in sum_layer.nodes()]))
print([node.log_val for node in prod_layer.nodes()])
print(numpy.exp([node.log_val for node in prod_layer.nodes()]))
print(res)
print(numpy.exp(res))
print('\n')
#
# mpe bottom up pass
for instance in data:
res = spn.single_mpe_eval(instance)
instances = node_mpe_instantiation(spn.root(), 3)
print('MPE instances:\n {}'.format(instances))
def test_random_feature_mask():
n_features = 20
feature_mask = numpy.zeros(n_features, dtype=bool)
assert sum(feature_mask) == 0
n_rand_features = 5
feature_mask = random_feature_mask(feature_mask, n_rand_features)
assert sum(feature_mask) == n_rand_features
def test_random_rectangular_feature_mask():
n_trials = 20
rand_gen = numpy.random.RandomState(1337)
for i in range(n_trials):
n_features = 49
feature_mask = numpy.zeros(n_features, dtype=bool)
assert sum(feature_mask) == 0
n_rows = int(numpy.sqrt(n_features))
n_cols = int(numpy.sqrt(n_features))
n_min_rows = 3
n_min_cols = 3
n_max_rows = 4
n_max_cols = 4
feature_mask = random_rectangular_feature_mask(feature_mask,
n_rows, n_cols,
n_min_rows, n_min_cols,
n_max_rows, n_max_cols,
rand_gen=rand_gen)
#
# reshaping
feature_mask_rect = feature_mask.reshape(n_rows, n_cols)
print(feature_mask_rect)
assert sum(feature_mask) >= n_min_rows * n_min_cols
assert sum(feature_mask) <= n_max_rows * n_max_cols
def test_extract_feature_marginalization():
n_instances = 10
n_features = 3
#
# creating some data
rand_gen = numpy.random.RandomState(1337)
data = rand_gen.binomial(1, 0.5, size=(n_instances, n_features))
rand_gen = numpy.random.RandomState(1337)
print(data)
#
# generating a set of feature masks randomly
n_masks = 10
masks = []
for i in range(n_masks):
feature_mask = numpy.zeros(n_features, dtype=bool)
assert sum(feature_mask) == 0
n_rand_features = 2
feature_mask = random_feature_mask(feature_mask, n_rand_features, rand_gen=rand_gen)
assert sum(feature_mask) == n_rand_features
print(feature_mask)
masks.append(feature_mask)
spn, _layers, _nodes = build_test_mini_spn()
repr_data = extract_feature_marginalization_from_masks(spn, data, masks, rand_gen=rand_gen)
assert repr_data.shape[0] == n_instances
assert repr_data.shape[1] == len(masks)
print(repr_data)
#
# evaluating the spn by hand
for i, mask in enumerate(masks):
masked_data = numpy.array(data, copy=True)
inv_mask = numpy.logical_not(mask)
print('inv mask', inv_mask)
masked_data[:, inv_mask] = MARG_IND
# print('{}\n{}'.format(i, masked_data))
preds = evaluate_on_dataset(spn, masked_data)
assert_array_almost_equal(preds, repr_data[:, i])
#
# with the complete method
print('Calling extract_features_marginalization_rand')
rand_gen = numpy.random.RandomState(1337)
feature_sizes = [10]
n_rand_sizes = [2]
feature_masks = extract_features_marginalization_rand(n_features,
feature_sizes,
n_rand_sizes,
rand_gen=rand_gen)
repr_data_2 = extract_feature_marginalization_from_masks(spn,
data,
feature_masks,
marg_value=MARG_IND,
rand_gen=rand_gen,
dtype=float)
assert_array_almost_equal(repr_data, repr_data_2)
print('Results are reproducible by setting the seeds')
def test_extract_feature_marginalization_opt():
n_instances = 100
n_features = 3
#
# creating some data
rand_gen = numpy.random.RandomState(1337)
data = rand_gen.binomial(1, 0.5, size=(n_instances, n_features))
rand_gen = numpy.random.RandomState(1337)
print(data)
#
# generating a set of feature masks randomly
n_masks = 10
masks = []
for i in range(n_masks):
feature_mask = numpy.zeros(n_features, dtype=bool)
assert sum(feature_mask) == 0
n_rand_features = 2
feature_mask = random_feature_mask(feature_mask, n_rand_features, rand_gen=rand_gen)
assert sum(feature_mask) == n_rand_features
print(feature_mask)
masks.append(feature_mask)
spn, _layers, _nodes = build_test_mini_spn()
repr_data = extract_feature_marginalization_from_masks(spn, data, masks)
assert repr_data.shape[0] == n_instances
assert repr_data.shape[1] == len(masks)
print(repr_data)
#
# evaluating the spn by hand
for i, mask in enumerate(masks):
masked_data = numpy.array(data, copy=True)
inv_mask = numpy.logical_not(mask)
print('inv mask', inv_mask)
masked_data[:, inv_mask] = MARG_IND
# print('{}\n{}'.format(i, masked_data))
preds = evaluate_on_dataset(spn, masked_data)
assert_array_almost_equal(preds, repr_data[:, i])
#
# now with the optimization on unique values
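# (presumably the _opt_unique variant evaluates only the distinct masked
# rows and scatters the results back to all duplicates, so it must agree
# with the naive per-row evaluation above)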
repr_data_2 = extract_feature_marginalization_from_masks_opt_unique(spn,
data,
masks)
assert repr_data_2.shape[0] == n_instances
assert repr_data_2.shape[1] == len(masks)
print(repr_data_2)
assert_array_almost_equal(repr_data, repr_data_2)
def test_extract_instances_groups():
data = numpy.array([[1, 0, 0, 1],
[1, 0, 0, 1],
[1, 1, 0, 1],
[0, 0, 0, 1],
[1, 0, 1, 0],
[0, 0, 0, 0],
[1, 0, 1, 0],
[0, 0, 0, 0]])
n_instances = data.shape[0]
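# each instance is presumably mapped to a one-hot indicator of its
# duplicate group: identical rows (0-1, 4-6, 5-7) get identical encodings
# and the whole matrix sums to n_instances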
repr_data = extract_instances_groups(data)
print(repr_data)
assert numpy.sum(repr_data) == n_instances
assert numpy.allclose(repr_data[0], repr_data[1])
assert numpy.allclose(repr_data[5], repr_data[7])
assert numpy.allclose(repr_data[4], repr_data[6])
def test_feature_mask_to_marg():
feature_vals = [2, 2, 2, 2]
feature_mask = numpy.array([False, True, True, False], dtype=bool)
n_ohe_features = numpy.sum(feature_vals)
ohe_feature_mask = feature_mask_to_marg(feature_mask, n_ohe_features, feature_vals)
print(ohe_feature_mask)
true_mask = numpy.array([False, False, True, True, True, True, False, False], dtype=bool)
assert_array_almost_equal(ohe_feature_mask, true_mask)
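#
# a minimal reference sketch of the expected semantics, inferred from the
# assert above (not the library implementation): the mask bit for feature
# i is simply repeated feature_vals[i] times in the one-hot mask
ref_mask = numpy.concatenate([numpy.repeat(m, v)
                              for m, v in zip(feature_mask, feature_vals)])
assert_array_almost_equal(ohe_feature_mask, ref_mask)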
feature_vals = [3, 2, 2, 2, 4]
feature_mask = numpy.array([True, False, True, True, False], dtype=bool)
n_ohe_features = numpy.sum(feature_vals)
ohe_feature_mask = feature_mask_to_marg(feature_mask, n_ohe_features, feature_vals)
print(ohe_feature_mask)
true_mask = numpy.array([True, True, True,
False, False,
True, True,
True, True,
False, False, False, False], dtype=bool)
assert_array_almost_equal(ohe_feature_mask, true_mask)
from spn.factory import build_theanok_spn_from_block_linked
def test_extract_feature_marginalization_from_masks_theanok():
n_features = 3
n_instances = 20
feature_vals = [2, 2, 2]
#
# generate some masks
print('Calling extract_features_marginalization_rand')
rand_gen = numpy.random.RandomState(1337)
feature_sizes = [10]
n_rand_sizes = [2]
feature_masks = extract_features_marginalization_rand(n_features,
feature_sizes,
n_rand_sizes,
rand_gen=rand_gen)
#
# creating some data
rand_gen = numpy.random.RandomState(1337)
data = rand_gen.binomial(1, 0.5, size=(n_instances, n_features))
ind_data = dataset.one_hot_encoding(data, feature_vals)
rand_gen = numpy.random.RandomState(1337)
print(data)
spn, _layers, _nodes = build_test_mini_spn()
repr_data = extract_feature_marginalization_from_masks(spn,
data,
feature_masks,
rand_gen=rand_gen)
#
# now doing the same for theano
theano_spn = build_theanok_spn_from_block_linked(spn, ind_data.shape[1], feature_vals)
print(theano_spn)
theano_repr_data = extract_feature_marginalization_from_masks_theanok(theano_spn,
ind_data,
feature_masks,
feature_vals,
rand_gen=rand_gen)
print(theano_repr_data)
assert_array_almost_equal(repr_data, theano_repr_data)
#
# now doing that by "hand"
for i, mask in enumerate(feature_masks):
marg_data = numpy.zeros((n_instances, n_features), dtype=data.dtype)
marg_data.fill(MARG_IND)
marg_data[:, mask] = data[:, mask]
ind_marg_data = dataset.one_hot_encoding(marg_data, feature_vals)
preds = theano_spn.evaluate(ind_marg_data.astype(numpy.float32))
assert_array_almost_equal(theano_repr_data[:, i], preds.flatten())
def test_extract_feature_marginalization_from_masks_theanok_opt():
n_features = 3
n_instances = 100
feature_vals = [2, 2, 2]
#
# generate some masks
print('Calling extract_features_marginalization_rand')
rand_gen = numpy.random.RandomState(1337)
feature_sizes = [10]
n_rand_sizes = [2]
feature_masks = extract_features_marginalization_rand(n_features,
feature_sizes,
n_rand_sizes,
rand_gen=rand_gen)
#
# creating some data
rand_gen = numpy.random.RandomState(1337)
data = rand_gen.binomial(1, 0.5, size=(n_instances, n_features))
ind_data = dataset.one_hot_encoding(data, feature_vals)
rand_gen = numpy.random.RandomState(1337)
print(data)
spn, _layers, _nodes = build_test_mini_spn()
repr_data = extract_feature_marginalization_from_masks(spn,
data,
feature_masks)
#
# now doing the same for theano
theano_spn = build_theanok_spn_from_block_linked(spn, ind_data.shape[1], feature_vals)
print(theano_spn)
theano_repr_data = extract_feature_marginalization_from_masks_theanok(theano_spn,
ind_data,
feature_masks,
feature_vals)
print(theano_repr_data)
assert_array_almost_equal(repr_data, theano_repr_data)
#
# now doing that by "hand"
for i, mask in enumerate(feature_masks):
marg_data = numpy.zeros((n_instances, n_features), dtype=data.dtype)
marg_data.fill(MARG_IND)
marg_data[:, mask] = data[:, mask]
ind_marg_data = dataset.one_hot_encoding(marg_data, feature_vals)
preds = theano_spn.evaluate(ind_marg_data.astype(numpy.float32))
assert_array_almost_equal(theano_repr_data[:, i], preds.flatten())
#
# and now with optimization
theano_repr_data_2 = extract_feature_marginalization_from_masks_theanok_opt_unique(theano_spn,
ind_data,
feature_masks,
feature_vals)
print(theano_repr_data_2)
assert_array_almost_equal(theano_repr_data, theano_repr_data_2)
def test_load_save_features_masks_file():
#
# creating some feature_masks
n_masks = 100
n_features = 10
n_rand_features = 4
feature_masks = []
rand_gen = numpy.random.RandomState(1337)
print('Creating features')
for i in range(n_masks):
feature_mask = numpy.zeros(n_features, dtype=bool)
feature_mask = random_feature_mask(feature_mask, n_rand_features, rand_gen=rand_gen)
print(feature_mask)
feature_masks.append(feature_mask)
#
# saving them to file
file_path = 'test.features'
save_features_to_file(feature_masks, file_path)
#
# now loading them back
print('Loading them back')
rec_feature_masks = load_features_from_file(file_path)
for i in range(rec_feature_masks.shape[0]):
print(rec_feature_masks[i])
#
# asserting equality
assert_array_almost_equal(numpy.array(feature_masks), rec_feature_masks)
def test_feature_mask_scope():
n_rand_features = 4
n_features = 10
rand_gen = numpy.random.RandomState(1337)
feature_mask = numpy.zeros(n_features, dtype=bool)
feature_mask = random_feature_mask(feature_mask, n_rand_features, rand_gen=rand_gen)
n_true_features = numpy.sum(feature_mask)
print(feature_mask)
scope = feature_mask_scope(feature_mask)
print(scope)
assert n_true_features == len(scope)
scope_list = []
for i, f in enumerate(feature_mask):
if f:
scope_list.append(i)
assert_array_almost_equal(scope, numpy.array(scope_list))
def test_all_single_marginals():
n_features = 3
n_instances = 100
feature_vals = [2, 2, 2]
#
# creating some data
rand_gen = numpy.random.RandomState(1337)
data = rand_gen.binomial(1, 0.5, size=(n_instances, n_features))
spn, _layers, _nodes = build_test_mini_spn()
all_marginals = all_single_marginals(spn, feature_vals)
#
# computing them by hand
marginals = []
for i in range(n_features):
for j in range(feature_vals[i]):
marg_instance = numpy.zeros(n_features, dtype=data.dtype)
marg_instance.fill(MARG_IND)
marg_instance[i] = j
print(marg_instance)
marg_res, = spn.single_eval(marg_instance)
marginals.append(marg_res)
print('Computed', all_marginals)
print('Expected', marginals)
assert_array_almost_equal(numpy.array(marginals), all_marginals)
def test_extract_features_all_marginals():
n_features = 3
n_instances = 100
feature_vals = [2, 2, 2]
#
# creating some data
rand_gen = numpy.random.RandomState(1337)
data = rand_gen.binomial(1, 0.5, size=(n_instances, n_features))
spn, _layers, _nodes = build_test_mini_spn()
repr_data = extract_features_all_marginals(spn,
data,
feature_vals)
#
# doing by hand
#
# computing them by hand
marginals = []
for i in range(n_features):
for j in range(feature_vals[i]):
marg_instance = numpy.zeros(n_features, dtype=data.dtype)
marg_instance.fill(MARG_IND)
marg_instance[i] = j
print(marg_instance)
marg_res, = spn.single_eval(marg_instance)
marginals.append(marg_res)
marginals = numpy.array(marginals)
repr_data_2 = numpy.zeros(data.shape)
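# repr_data_2[i, j] = P(X_j = data[i, j]); sum(feature_vals[:j]) is the
# offset of variable j's block in the flattened marginals vector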
for i in range(n_instances):
for j in range(n_features):
f_id = numpy.sum(feature_vals[:j]) + data[i, j]
repr_data_2[i, j] = marginals[f_id]
print('Computed', repr_data)
print('Expected', repr_data_2)
assert_array_almost_equal(repr_data, repr_data_2)
| 32,624
| 34.655738
| 101
|
py
|
spyn-repr
|
spyn-repr-master/spn/linked/tests/test_spn.py
|
from spn.linked.spn import Spn
from spn.linked.layers import SumLayer
from spn.linked.layers import ProductLayer
from spn.linked.layers import CategoricalIndicatorLayer
from spn.linked.layers import CategoricalSmoothedLayer
from spn.linked.layers import CategoricalInputLayer
from ..nodes import SumNode
from ..nodes import ProductNode
from ..nodes import Node
from ..nodes import CategoricalIndicatorNode
from ..nodes import CategoricalSmoothedNode
from spn import MARG_IND
from spn import LOG_ZERO
from spn import IS_LOG_ZERO
from spn.tests import logify
from spn.tests import assert_log_array_almost_equal
import numpy
from math import log
from nose.tools import assert_almost_equal
from time import perf_counter
# the SPN build is on 4 binary vars X_1, ..., X_4
vars = numpy.array([2, 2, 2, 2])
alpha = 0.
dicts = [{'var': 0, 'freqs': [1, 1]},
{'var': 1, 'freqs': [1, 9]},
{'var': 2, 'freqs': [3, 7]},
{'var': 3, 'freqs': [6, 4]}]
# the input layer is made of 8 indicator vars for X_1, ..., X_4:
# lambda_{X_1}, lambda_{not X_1} ... lambda_{X_4}, lambda_{not X_4} =
# ind1, ind2, ..., ind7, ind8
# then there is a sum layer of 4 nodes
# sum1 -> 0.5 ind1, 0.5 ind2
# sum2 -> 0.1 ind3, 0.9 ind4
# sum3 -> 0.3 ind5, 0.7 ind6
# sum4 -> 0.6 ind7, 0.4 ind8
# on top a prod layer with 3 nodes:
# prod1 -> sum1, sum2
# prod2 -> sum2, sum3
# prod3 -> sum3, sum4
def build_spn_indicator_layer(the_vars):
input_layer = CategoricalIndicatorLayer(vars=the_vars)
return input_layer
def build_spn_smoothed_layer(the_vars, node_dicts, the_alpha):
input_layer = CategoricalSmoothedLayer(vars=the_vars,
node_dicts=node_dicts,
alpha=the_alpha)
# print('FREQS')
# print([node._var_freqs for node in input_layer._nodes])
# print('PROBS')
# print([node._var_probs for node in input_layer._nodes])
return input_layer
def build_spn_layers(input_layer):
# this is ugly... TODO try to beautify this process
ind1 = input_layer._nodes[0]
ind2 = input_layer._nodes[1]
ind3 = input_layer._nodes[2]
ind4 = input_layer._nodes[3]
ind5 = input_layer._nodes[4]
ind6 = input_layer._nodes[5]
ind7 = input_layer._nodes[6]
ind8 = input_layer._nodes[7]
# creating sum nodes
sum_node1 = SumNode()
sum_node2 = SumNode()
sum_node3 = SumNode()
sum_node4 = SumNode()
# linking them with nodes
sum_node1.add_child(ind1, 0.5)
sum_node1.add_child(ind2, 0.5)
sum_node2.add_child(ind3, 0.1)
sum_node2.add_child(ind4, 0.9)
sum_node3.add_child(ind5, 0.3)
sum_node3.add_child(ind6, 0.7)
sum_node4.add_child(ind7, 0.6)
sum_node4.add_child(ind8, 0.4)
# creating sumlayer
sum_layer = SumLayer([sum_node1,
sum_node2,
sum_node3,
sum_node4])
# creating product nodes
prod_node1 = ProductNode()
prod_node2 = ProductNode()
prod_node3 = ProductNode()
# linking them to sum nodes
prod_node1.add_child(sum_node1)
prod_node1.add_child(sum_node2)
prod_node2.add_child(sum_node2)
prod_node2.add_child(sum_node3)
prod_node3.add_child(sum_node3)
prod_node3.add_child(sum_node4)
# creating a product layer
prod_layer = ProductLayer([prod_node1,
prod_node2,
prod_node3])
return sum_layer, prod_layer
# when a smoothed layer is the input layer
# then there is no sum layer
def build_spn_layers_II(input_layer):
# this is ugly... TODO try to beautify this process
ind1 = input_layer._nodes[0]
ind2 = input_layer._nodes[1]
ind3 = input_layer._nodes[2]
ind4 = input_layer._nodes[3]
# creating product nodes
prod_node1 = ProductNode()
prod_node2 = ProductNode()
prod_node3 = ProductNode()
# linking them to sum nodes
prod_node1.add_child(ind1)
prod_node1.add_child(ind2)
prod_node2.add_child(ind2)
prod_node2.add_child(ind3)
prod_node3.add_child(ind3)
prod_node3.add_child(ind4)
# creating a product layer
prod_layer = ProductLayer([prod_node1,
prod_node2,
prod_node3])
return prod_layer
# creating the input matrix for the tests
I = numpy.array([[MARG_IND, MARG_IND, MARG_IND, MARG_IND],
[0, 1, MARG_IND, 0],
[0, 0, 0, MARG_IND],
[1, 0, 0, 0]]).T
# evaluating S(I) shall lead to:
root_vals = numpy.array([[1., 1., 1.],
[0.45, 0.9, 0.6],
[0.05, 0.03, 0.3],
[0.05, 0.03, 0.18]])
# whose logs are
logify(root_vals)
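# as a worked check, take the second instance [0, 1, MARG_IND, 0]:
# sum1 = 0.5*1 + 0.5*0 = 0.5, sum2 = 0.1*0 + 0.9*1 = 0.9,
# sum3 = 0.3*1 + 0.7*1 = 1.0 (X_3 marginalized), sum4 = 0.6*1 + 0.4*0 = 0.6,
# hence prod1 = 0.5*0.9 = 0.45, prod2 = 0.9*1.0 = 0.9, prod3 = 1.0*0.6 = 0.6,
# which is exactly the second row of root_vals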
def test_spn_construction_by_add_and_evaluation():
spn = Spn()
# building the same levels
input_layer = build_spn_indicator_layer(vars)
sum_layer, prod_layer = build_spn_layers(input_layer)
# adding all layers to the spn
spn.set_input_layer(input_layer)
spn.add_layer(sum_layer)
spn.add_layer(prod_layer)
res = spn.eval(I)
print('First evaluation')
print(res)
assert_log_array_almost_equal(root_vals, res)
def test_spn_construction_by_add_and_evaluation_II():
spn = Spn()
# print('empty spn')
# print(spn)
input_layer = build_spn_smoothed_layer(vars, dicts, alpha)
prod_layer = build_spn_layers_II(input_layer)
# adding all layers to the spn
spn.set_input_layer(input_layer)
spn.add_layer(prod_layer)
# print('created spn')
# print(spn)
res = spn.eval(I)
print('First smoothed evaluation')
print(res)
assert_log_array_almost_equal(root_vals, res)
def test_spn_construction_by_init_and_evaluation():
# building the same levels
input_layer = build_spn_indicator_layer(vars)
sum_layer, prod_layer = build_spn_layers(input_layer)
spn = Spn(input_layer=input_layer, layers=[sum_layer, prod_layer])
res = spn.eval(I)
print('First evaluation')
print(res)
assert_log_array_almost_equal(root_vals, res)
def test_spn_backprop():
# create initial layer
node1 = Node()
node2 = Node()
node3 = Node()
node4 = Node()
node5 = Node()
input_layer = CategoricalInputLayer([node1, node2,
node3, node4,
node5])
# top layer made by 3 sum nodes
sum1 = SumNode()
sum2 = SumNode()
sum3 = SumNode()
# linking to input nodes
weight11 = 0.3
sum1.add_child(node1, weight11)
weight12 = 0.3
sum1.add_child(node2, weight12)
weight13 = 0.4
sum1.add_child(node3, weight13)
weight22 = 0.15
sum2.add_child(node2, weight22)
weight23 = 0.15
sum2.add_child(node3, weight23)
weight24 = 0.7
sum2.add_child(node4, weight24)
weight33 = 0.4
sum3.add_child(node3, weight33)
weight34 = 0.25
sum3.add_child(node4, weight34)
weight35 = 0.35
sum3.add_child(node5, weight35)
sum_layer = SumLayer([sum1, sum2, sum3])
# another layer with two product nodes
prod1 = ProductNode()
prod2 = ProductNode()
prod1.add_child(sum1)
prod1.add_child(sum2)
prod2.add_child(sum2)
prod2.add_child(sum3)
prod_layer = ProductLayer([prod1, prod2])
# root layer, double sum
root1 = SumNode()
root2 = SumNode()
weightr11 = 0.5
root1.add_child(prod1, weightr11)
weightr12 = 0.5
root1.add_child(prod2, weightr12)
weightr21 = 0.9
root2.add_child(prod1, weightr21)
weightr22 = 0.1
root2.add_child(prod2, weightr22)
root_layer = SumLayer([root1, root2])
# root_layer = SumLayer([root1])
# create the spn
spn = Spn(input_layer=input_layer,
layers=[sum_layer, prod_layer, root_layer])
# setting the input values
val1 = 0.0
node1.set_val(val1)
val2 = 0.5
node2.set_val(val2)
val3 = 0.3
node3.set_val(val3)
val4 = 1.0
node4.set_val(val4)
val5 = 0.0
node5.set_val(val5)
# evaluating the spn
res = spn.test_eval()
print('spn eval\'d', res)
# backprop
spn.backprop()
# computing derivatives by hand
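# (chain rule on the derivatives: for a sum node parent p with weight w
# to child c, dS/dc accumulates dS/dp * w; for a product node parent,
# dS/dc accumulates dS/dp times the product of the other children's values)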
# topdown: root layer
root_der = 1.0
log_root_der = log(root_der)
# print('root ders', root1.log_der, root2.log_der)
print('root ders', root1.log_der)
assert_almost_equal(log_root_der, root1.log_der)
assert_almost_equal(log_root_der, root2.log_der)
# product layer
prod_der1 = (root_der * weightr11 +
root_der * weightr21)
prod_der2 = (root_der * weightr12 +
root_der * weightr22)
# prod_der1 = (root_der * weightr11)
# prod_der2 = (root_der * weightr12)
log_prod_der1 = log(prod_der1) if prod_der1 > 0.0 else LOG_ZERO
log_prod_der2 = log(prod_der2) if prod_der2 > 0.0 else LOG_ZERO
print('found prod ders', prod1.log_der, prod2.log_der)
print('expect prod ders', log_prod_der1, log_prod_der2)
if IS_LOG_ZERO(log_prod_der1):
assert IS_LOG_ZERO(prod1.log_der)
else:
assert_almost_equal(log_prod_der1, prod1.log_der)
if IS_LOG_ZERO(log_prod_der2):
assert IS_LOG_ZERO(prod2.log_der)
else:
assert_almost_equal(log_prod_der2, prod2.log_der)
# sum layer
sum_der1 = (
prod_der1 * (weight22 * val2 +
weight23 * val3 +
weight24 * val4))
log_sum_der1 = log(sum_der1) if sum_der1 > 0.0 else LOG_ZERO
sum_der2 = (prod_der1 * (weight11 * val1 +
weight12 * val2 +
weight13 * val3) +
prod_der2 * (weight33 * val3 +
weight34 * val4 +
weight35 * val5))
log_sum_der2 = log(sum_der2) if sum_der2 > 0.0 else LOG_ZERO
sum_der3 = (prod_der2 * (weight22 * val2 +
weight23 * val3 +
weight24 * val4))
log_sum_der3 = log(sum_der3) if sum_der3 > 0.0 else LOG_ZERO
print('expected sum ders', log_sum_der1,
log_sum_der2,
log_sum_der3)
print('found sum ders', sum1.log_der,
sum2.log_der,
sum3.log_der)
if IS_LOG_ZERO(log_sum_der1):
assert IS_LOG_ZERO(sum1.log_der)
else:
assert_almost_equal(log_sum_der1, sum1.log_der)
if IS_LOG_ZERO(log_sum_der2):
assert IS_LOG_ZERO(sum2.log_der)
else:
assert_almost_equal(log_sum_der2, sum2.log_der)
if IS_LOG_ZERO(log_sum_der3):
assert IS_LOG_ZERO(sum3.log_der)
else:
assert_almost_equal(log_sum_der3, sum3.log_der)
# final level, the first one
try:
log_der1 = log(sum_der1 * weight11)
except ValueError:
log_der1 = LOG_ZERO
try:
log_der2 = log(sum_der1 * weight12 +
sum_der2 * weight22)
except ValueError:
log_der2 = LOG_ZERO
try:
log_der3 = log(sum_der1 * weight13 +
sum_der2 * weight23 +
sum_der3 * weight33)
except ValueError:
log_der3 = LOG_ZERO
try:
log_der4 = log(sum_der2 * weight24 +
sum_der3 * weight34)
except ValueError:
log_der4 = LOG_ZERO
try:
log_der5 = log(sum_der3 * weight35)
except ValueError:
log_der5 = LOG_ZERO
# printing, just in case
print('child log der', node1.log_der, node2.log_der,
node3.log_der, node4.log_der, node5.log_der)
print('exact log der', log_der1, log_der2, log_der3,
log_der4, log_der5)
if IS_LOG_ZERO(log_der1):
assert IS_LOG_ZERO(node1.log_der)
else:
assert_almost_equal(log_der1, node1.log_der, 15)
if IS_LOG_ZERO(log_der2):
assert IS_LOG_ZERO(node2.log_der)
else:
assert_almost_equal(log_der2, node2.log_der, 15)
if IS_LOG_ZERO(log_der3):
assert IS_LOG_ZERO(node3.log_der)
else:
assert_almost_equal(log_der3, node3.log_der, 15)
if IS_LOG_ZERO(log_der4):
assert IS_LOG_ZERO(node4.log_der)
else:
assert_almost_equal(log_der4, node4.log_der, 15)
if IS_LOG_ZERO(log_der5):
assert IS_LOG_ZERO(node5.log_der)
else:
assert_almost_equal(log_der5, node5.log_der, 15)
def test_spn_mpe_eval_and_traversal():
# create initial layer
node1 = Node()
node2 = Node()
node3 = Node()
node4 = Node()
node5 = Node()
input_layer = CategoricalInputLayer([node1, node2,
node3, node4,
node5])
# top layer made by 3 sum nodes
sum1 = SumNode()
sum2 = SumNode()
sum3 = SumNode()
# linking to input nodes
weight11 = 0.3
sum1.add_child(node1, weight11)
weight12 = 0.3
sum1.add_child(node2, weight12)
weight13 = 0.4
sum1.add_child(node3, weight13)
weight22 = 0.15
sum2.add_child(node2, weight22)
weight23 = 0.15
sum2.add_child(node3, weight23)
weight24 = 0.7
sum2.add_child(node4, weight24)
weight33 = 0.4
sum3.add_child(node3, weight33)
weight34 = 0.25
sum3.add_child(node4, weight34)
weight35 = 0.35
sum3.add_child(node5, weight35)
sum_layer = SumLayer([sum1, sum2, sum3])
# another layer with two product nodes
prod1 = ProductNode()
prod2 = ProductNode()
prod1.add_child(sum1)
prod1.add_child(sum2)
prod2.add_child(sum2)
prod2.add_child(sum3)
prod_layer = ProductLayer([prod1, prod2])
# root layer, double sum
root1 = SumNode()
root2 = SumNode()
weightr11 = 0.5
root1.add_child(prod1, weightr11)
weightr12 = 0.5
root1.add_child(prod2, weightr12)
weightr21 = 0.9
root2.add_child(prod1, weightr21)
weightr22 = 0.1
root2.add_child(prod2, weightr22)
root_layer = SumLayer([root1, root2])
# create the spn
spn = Spn(input_layer=input_layer,
layers=[sum_layer, prod_layer, root_layer])
print('===================')
print(spn)
print('===================')
# setting the input values
val1 = 0.0
node1.set_val(val1)
val2 = 0.5
node2.set_val(val2)
val3 = 0.3
node3.set_val(val3)
val4 = 1.0
node4.set_val(val4)
val5 = 0.0
node5.set_val(val5)
# evaluating the spn with MPE inference
res = spn.test_mpe_eval()
print('spn eval\'d', res)
# testing it
#
# testing the max layer
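# in MPE inference each sum node takes the max of its weighted children
# instead of their weighted sum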
max1 = max(val1 * weight11,
val2 * weight12,
val3 * weight13)
max2 = max(val2 * weight22,
val3 * weight23,
val4 * weight24)
max3 = max(val3 * weight33,
val4 * weight34,
val5 * weight35)
log_max1 = log(max1) if not numpy.isclose(max1, 0) else LOG_ZERO
log_max2 = log(max2) if not numpy.isclose(max2, 0) else LOG_ZERO
log_max3 = log(max3) if not numpy.isclose(max3, 0) else LOG_ZERO
print('expected max vals {0}, {1}, {2}'.format(log_max1,
log_max2,
log_max3))
print('found max vals {0}, {1}, {2}'.format(sum1.log_val,
sum2.log_val,
sum3.log_val))
if IS_LOG_ZERO(log_max1):
assert IS_LOG_ZERO(sum1.log_val)
else:
assert_almost_equal(log_max1, sum1.log_val)
if IS_LOG_ZERO(log_max2):
assert IS_LOG_ZERO(sum2.log_val)
else:
assert_almost_equal(log_max2, sum2.log_val)
if IS_LOG_ZERO(log_max3):
assert IS_LOG_ZERO(sum3.log_val)
else:
assert_almost_equal(log_max3, sum3.log_val)
# product layer is assumed to be fine, but let's check
# it anyway
prod_val1 = max1 * max2
prod_val2 = max2 * max3
prod_log_val1 = log_max1 + log_max2
prod_log_val2 = log_max2 + log_max3
print('exp prod vals {0}, {1}'.format(prod_log_val1,
prod_log_val2))
print('rea prod vals {0}, {1}'.format(prod1.log_val,
prod2.log_val))
if IS_LOG_ZERO(prod_log_val1):
assert IS_LOG_ZERO(prod1.log_val)
else:
assert_almost_equal(prod_log_val1, prod1.log_val)
if IS_LOG_ZERO(prod_log_val2):
assert IS_LOG_ZERO(prod2.log_val)
else:
assert_almost_equal(prod_log_val2, prod2.log_val)
# root layer, again a sum layer
root_val1 = max(prod_val1 * weightr11,
prod_val2 * weightr12)
root_val2 = max(prod_val1 * weightr21,
prod_val2 * weightr22)
root_log_val1 = log(root_val1) if not numpy.isclose(
root_val1, 0) else LOG_ZERO
root_log_val2 = log(root_val2) if not numpy.isclose(
root_val2, 0) else LOG_ZERO
print('exp root vals {0}, {1}'.format(root_log_val1,
root_log_val2))
print('found ro vals {0}, {1}'.format(root1.log_val,
root2.log_val))
if IS_LOG_ZERO(root_log_val1):
assert IS_LOG_ZERO(root1.log_val)
else:
assert_almost_equal(root_log_val1, root1.log_val)
if IS_LOG_ZERO(root_log_val2):
assert IS_LOG_ZERO(root2.log_val)
else:
assert_almost_equal(root_log_val2, root2.log_val)
# now we are traversing top down the net
print('mpe traversing')
for i, j, k in spn.mpe_traversal():
print(i, j, k)
#
# this is very UGLY
sum_node_2 = None
prod_node_6 = None
scope_1 = None
input_layer = None
def create_valid_toy_spn():
    global sum_node_2, prod_node_6, scope_1, input_layer
# root layer
whole_scope = frozenset({0, 1, 2, 3})
root_node = SumNode(var_scope=whole_scope)
root_layer = SumLayer([root_node])
# prod layer
prod_node_1 = ProductNode(var_scope=whole_scope)
prod_node_2 = ProductNode(var_scope=whole_scope)
prod_layer_1 = ProductLayer([prod_node_1, prod_node_2])
root_node.add_child(prod_node_1, 0.5)
root_node.add_child(prod_node_2, 0.5)
# sum layer
scope_1 = frozenset({0, 1})
scope_2 = frozenset({2})
scope_3 = frozenset({3})
scope_4 = frozenset({2, 3})
sum_node_1 = SumNode(var_scope=scope_1)
sum_node_2 = SumNode(var_scope=scope_2)
sum_node_3 = SumNode(var_scope=scope_3)
sum_node_4 = SumNode(var_scope=scope_4)
prod_node_1.add_child(sum_node_1)
prod_node_1.add_child(sum_node_2)
prod_node_1.add_child(sum_node_3)
prod_node_2.add_child(sum_node_1)
prod_node_2.add_child(sum_node_4)
sum_layer_1 = SumLayer([sum_node_1, sum_node_2,
sum_node_3, sum_node_4])
# another product layer
prod_node_3 = ProductNode(var_scope=scope_1)
prod_node_4 = ProductNode(var_scope=scope_1)
prod_node_5 = ProductNode(var_scope=scope_4)
prod_node_6 = ProductNode(var_scope=scope_4)
sum_node_1.add_child(prod_node_3, 0.5)
sum_node_1.add_child(prod_node_4, 0.5)
sum_node_4.add_child(prod_node_5, 0.5)
sum_node_4.add_child(prod_node_6, 0.5)
prod_layer_2 = ProductLayer([prod_node_3, prod_node_4,
prod_node_5, prod_node_6])
# last sum one
scope_5 = frozenset({0})
scope_6 = frozenset({1})
sum_node_5 = SumNode(var_scope=scope_5)
sum_node_6 = SumNode(var_scope=scope_6)
sum_node_7 = SumNode(var_scope=scope_5)
sum_node_8 = SumNode(var_scope=scope_6)
sum_node_9 = SumNode(var_scope=scope_2)
sum_node_10 = SumNode(var_scope=scope_3)
sum_node_11 = SumNode(var_scope=scope_2)
sum_node_12 = SumNode(var_scope=scope_3)
prod_node_3.add_child(sum_node_5)
prod_node_3.add_child(sum_node_6)
prod_node_4.add_child(sum_node_7)
prod_node_4.add_child(sum_node_8)
prod_node_5.add_child(sum_node_9)
prod_node_5.add_child(sum_node_10)
prod_node_6.add_child(sum_node_11)
prod_node_6.add_child(sum_node_12)
sum_layer_2 = SumLayer([sum_node_5, sum_node_6,
sum_node_7, sum_node_8,
sum_node_9, sum_node_10,
sum_node_11, sum_node_12])
# input layer
vars = [2, 3, 2, 2]
input_layer = CategoricalIndicatorLayer(vars=vars)
last_sum_nodes = [sum_node_2, sum_node_3,
sum_node_5, sum_node_6,
sum_node_7, sum_node_8,
sum_node_9, sum_node_10,
sum_node_11, sum_node_12]
for sum_node in last_sum_nodes:
(var_scope,) = sum_node.var_scope
for input_node in input_layer.nodes():
if input_node.var == var_scope:
sum_node.add_child(input_node, 1.0)
spn = Spn(input_layer=input_layer,
layers=[sum_layer_2, prod_layer_2,
sum_layer_1, prod_layer_1,
root_layer])
# print(spn)
return spn
def test_spn_is_valid():
# create an SPN
spn = create_valid_toy_spn()
assert spn.is_complete()
assert spn.is_decomposable()
assert spn.is_valid()
# now changing completeness
sum_node_2.add_child(input_layer._nodes[0], 1.0)
assert not spn.is_complete()
assert spn.is_decomposable()
assert not spn.is_valid()
# now even decomposability
prod_node_6.var_scope = scope_1
assert not spn.is_complete()
assert not spn.is_decomposable()
# assert not spn.is_valid()
# weights for two sum layers (top down): one root node with 2 weights, then 3 sum nodes
weights_ds = [[[0.2, 0.8]],
[[0.15, 0.85],
[0.5, 0.25, 0.25],
[0.1, 0.9]]]
def test_spn_set_get_weights():
# create a simple spn
root_node = SumNode()
root_layer = SumLayer([root_node])
prod_node_1 = ProductNode()
prod_node_2 = ProductNode()
root_node.add_child(prod_node_1, 0.5)
root_node.add_child(prod_node_2, 0.5)
prod_layer = ProductLayer([prod_node_1,
prod_node_2])
sum_node_1 = SumNode()
sum_node_2 = SumNode()
sum_node_3 = SumNode()
prod_node_1.add_child(sum_node_1)
prod_node_1.add_child(sum_node_2)
prod_node_2.add_child(sum_node_2)
prod_node_2.add_child(sum_node_3)
sum_layer = SumLayer([sum_node_1, sum_node_2,
sum_node_3])
ind_node_1 = CategoricalIndicatorNode(var=0, var_val=1)
ind_node_2 = CategoricalIndicatorNode(var=0, var_val=1)
ind_node_3 = CategoricalIndicatorNode(var=0, var_val=1)
ind_node_4 = CategoricalIndicatorNode(var=0, var_val=1)
ind_node_5 = CategoricalIndicatorNode(var=0, var_val=1)
input_layer = CategoricalInputLayer(nodes=[ind_node_1,
ind_node_2,
ind_node_3,
ind_node_4,
ind_node_5])
sum_node_1.add_child(ind_node_1, 0.2)
sum_node_1.add_child(ind_node_2, 0.2)
sum_node_2.add_child(ind_node_2, 0.2)
sum_node_2.add_child(ind_node_3, 0.2)
sum_node_2.add_child(ind_node_4, 0.2)
sum_node_3.add_child(ind_node_4, 0.2)
sum_node_3.add_child(ind_node_5, 0.2)
spn = Spn(input_layer=input_layer,
layers=[sum_layer, prod_layer, root_layer])
print(spn)
# storing these weights
curr_weights = spn.get_weights()
# setting the new weights
spn.set_weights(weights_ds)
# getting them again
new_weights = spn.get_weights()
# comparing them
assert new_weights == weights_ds
# now setting back the previous one
spn.set_weights(curr_weights)
# getting them back again
old_weights = spn.get_weights()
# and checking
assert old_weights == curr_weights
def test_to_text():
spn = create_valid_toy_spn()
spn.to_text('test.spn')
def test_toy_spn_numpy_linked():
input_vec = numpy.array([[0., 0., 0.],
[0., 0., 0.],
[0., 1., 1.],
[MARG_IND, MARG_IND, MARG_IND]]).T
ind_node_1 = CategoricalIndicatorNode(var=0, var_val=0)
ind_node_2 = CategoricalIndicatorNode(var=0, var_val=1)
ind_node_3 = CategoricalIndicatorNode(var=1, var_val=0)
ind_node_4 = CategoricalIndicatorNode(var=1, var_val=1)
ind_node_5 = CategoricalIndicatorNode(var=2, var_val=0)
ind_node_6 = CategoricalIndicatorNode(var=2, var_val=1)
input_layer = CategoricalInputLayer(nodes=[ind_node_1,
ind_node_2,
ind_node_3,
ind_node_4,
ind_node_5,
ind_node_6])
n_nodes_layer_1 = 6
layer_1_sum_nodes = [SumNode() for i in range(n_nodes_layer_1)]
layer_1_sum_nodes[0].add_child(ind_node_1, 0.6)
layer_1_sum_nodes[0].add_child(ind_node_2, 0.4)
layer_1_sum_nodes[1].add_child(ind_node_1, 0.3)
layer_1_sum_nodes[1].add_child(ind_node_2, 0.7)
layer_1_sum_nodes[2].add_child(ind_node_3, 0.1)
layer_1_sum_nodes[2].add_child(ind_node_4, 0.9)
layer_1_sum_nodes[3].add_child(ind_node_3, 0.7)
layer_1_sum_nodes[3].add_child(ind_node_4, 0.3)
layer_1_sum_nodes[4].add_child(ind_node_5, 0.5)
layer_1_sum_nodes[4].add_child(ind_node_6, 0.5)
layer_1_sum_nodes[5].add_child(ind_node_5, 0.2)
layer_1_sum_nodes[5].add_child(ind_node_6, 0.8)
layer_1 = SumLayer(layer_1_sum_nodes)
n_nodes_layer_2 = 4
layer_2_prod_nodes = [ProductNode() for i in range(n_nodes_layer_2)]
layer_2_prod_nodes[0].add_child(layer_1_sum_nodes[0])
layer_2_prod_nodes[0].add_child(layer_1_sum_nodes[2])
layer_2_prod_nodes[0].add_child(layer_1_sum_nodes[4])
layer_2_prod_nodes[1].add_child(layer_1_sum_nodes[1])
layer_2_prod_nodes[1].add_child(layer_1_sum_nodes[3])
layer_2_prod_nodes[1].add_child(layer_1_sum_nodes[5])
layer_2_prod_nodes[2].add_child(layer_1_sum_nodes[0])
layer_2_prod_nodes[2].add_child(layer_1_sum_nodes[2])
layer_2_prod_nodes[2].add_child(layer_1_sum_nodes[5])
layer_2_prod_nodes[3].add_child(layer_1_sum_nodes[1])
layer_2_prod_nodes[3].add_child(layer_1_sum_nodes[3])
layer_2_prod_nodes[3].add_child(layer_1_sum_nodes[4])
layer_2 = ProductLayer(layer_2_prod_nodes)
root = SumNode()
root.add_child(layer_2_prod_nodes[0], 0.2)
root.add_child(layer_2_prod_nodes[1], 0.4)
root.add_child(layer_2_prod_nodes[2], 0.15)
root.add_child(layer_2_prod_nodes[3], 0.25)
layer_3 = SumLayer([root])
spn = Spn(input_layer=input_layer,
layers=[layer_1, layer_2, layer_3])
res = spn.eval(input_vec)
print('First evaluation')
print(res)
def test_spn_sampling():
from collections import Counter
from spn.factory import linked_categorical_input_to_indicators
#
# building a small mixture model
features = [2, 2, 2, 2]
n_features = len(features)
#
# different categorical vars groups as leaves
input_nodes_1 = [CategoricalSmoothedNode(i, features[i], alpha=0.0,
freqs=[0, 1]) for i in range(n_features)]
input_nodes_2 = [CategoricalSmoothedNode(i, features[i], alpha=0.0,
freqs=[1, 0]) for i in range(n_features)]
input_nodes_3 = [CategoricalSmoothedNode(i, features[i], alpha=0.0,
freqs=[1, 0]) for i in range(n_features // 2)] + \
[CategoricalSmoothedNode(i, features[i], alpha=0.0,
freqs=[0, 1]) for i in range(n_features // 2, n_features)]
input_nodes_4 = [CategoricalSmoothedNode(i, features[i], alpha=0.0,
freqs=[0, 1]) for i in range(n_features // 2)] + \
[CategoricalSmoothedNode(i, features[i], alpha=0.0,
freqs=[1, 0]) for i in range(n_features // 2, n_features)]
input_layer = CategoricalSmoothedLayer(nodes=input_nodes_1 +
input_nodes_2 +
input_nodes_3 +
input_nodes_4)
#
# one product node for each group
prod_node_1 = ProductNode()
for leaf in input_nodes_1:
prod_node_1.add_child(leaf)
prod_node_2 = ProductNode()
for leaf in input_nodes_2:
prod_node_2.add_child(leaf)
prod_node_3 = ProductNode()
for leaf in input_nodes_3:
prod_node_3.add_child(leaf)
prod_node_4 = ProductNode()
for leaf in input_nodes_4:
prod_node_4.add_child(leaf)
prod_layer = ProductLayer(nodes=[prod_node_1, prod_node_2, prod_node_3, prod_node_4])
#
# one root as a mixture
root = SumNode()
root.add_child(prod_node_1, 0.5)
root.add_child(prod_node_2, 0.1)
root.add_child(prod_node_3, 0.2)
root.add_child(prod_node_4, 0.2)
root_layer = SumLayer(nodes=[root])
spn = Spn(input_layer=input_layer,
layers=[prod_layer, root_layer])
print(spn)
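# with alpha=0 every leaf is deterministic, so the samples should
# concentrate on the four patterns (1,1,1,1), (0,0,0,0), (0,0,1,1)
# and (1,1,0,0) with frequencies roughly 0.5 / 0.1 / 0.2 / 0.2
# (the root mixture weights)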
n_instances = 1000
#
# sampling some instances
sample_start_t = perf_counter()
samples = spn.sample(n_instances=n_instances, verbose=False)
sample_end_t = perf_counter()
print('Sampled in {} secs'.format(sample_end_t - sample_start_t))
if n_instances < 20:
print(samples)
#
# some statistics
tuple_samples = [tuple(s) for s in samples]
if n_instances < 20:
print(tuple_samples)
sample_counter = Counter(tuple_samples)
print(sample_counter)
#
# transforming into an spn with indicator nodes
print('Into indicator nodes')
ind_start_t = perf_counter()
spn = linked_categorical_input_to_indicators(spn)
ind_end_t = perf_counter()
print('Done in ', ind_end_t - ind_start_t)
sample_start_t = perf_counter()
samples = spn.sample(n_instances=n_instances, verbose=False, one_hot_encoding=True)
sample_end_t = perf_counter()
print('Sampled in {} secs'.format(sample_end_t - sample_start_t))
if n_instances < 20:
print(samples)
#
# some statistics
tuple_samples = [tuple(s) for s in samples]
if n_instances < 20:
print(tuple_samples)
sample_counter = Counter(tuple_samples)
print(sample_counter)
| 30,480
| 28.679649
| 95
|
py
|
spyn-repr
|
spyn-repr-master/spn/linked/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
spyn-repr
|
spyn-repr-master/spn/linked/tests/test_nodes.py
|
from spn.linked.nodes import Node
from spn.linked.nodes import SumNode
from spn.linked.nodes import ProductNode
from spn.linked.nodes import CategoricalIndicatorNode
from spn.linked.nodes import CategoricalSmoothedNode
from spn.linked.nodes import CLTreeNode
from spn.tests import compute_smoothed_ll
from spn import LOG_ZERO
from spn import MARG_IND
from spn import IS_LOG_ZERO
import numpy
from math import log
from spn.tests import assert_log_array_almost_equal
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
def test_node_set_val():
node = Node()
# asserting log(0) == LOG_ZERO
node.set_val(0)
assert node.log_val == LOG_ZERO
# any other value shall get to its log value
half_truth = 0.5
node.set_val(half_truth)
assert node.log_val == log(half_truth)
# value 1 maps to log(1) = 0
truth = 1.
node.set_val(truth)
assert node.log_val == log(truth)
print(node)
def test_sum_node_create_and_eval():
# create child nodes
child1 = Node()
val1 = 1.
child1.set_val(val1)
child2 = Node()
val2 = 1.
child2.set_val(val2)
# create sum node and adding children to it
sum_node = SumNode()
weight1 = 0.8
weight2 = 0.2
sum_node.add_child(child1, weight1)
sum_node.add_child(child2, weight2)
assert len(sum_node.children) == 2
assert len(sum_node.weights) == 2
assert len(sum_node.log_weights) == 2
log_weights = [log(weight1), log(weight2)]
assert log_weights == sum_node.log_weights
print(sum_node)
# evaluating
sum_node.eval()
print(sum_node.log_val)
assert_almost_equal(sum_node.log_val,
log(val1 * weight1 + val2 * weight2),
places=15)
# changing values 1,0
val1 = 1.
child1.set_val(val1)
val2 = 0.
child2.set_val(val2)
# evaluating
sum_node.eval()
print(sum_node.log_val)
assert_almost_equal(sum_node.log_val,
log(val1 * weight1 + val2 * weight2),
places=15)
# changing values 0,0 -> LOG_ZERO
val1 = 0.
child1.set_val(val1)
val2 = 0.
child2.set_val(val2)
# evaluating
sum_node.eval()
print(sum_node.log_val)
assert_almost_equal(sum_node.log_val,
LOG_ZERO,
places=15)
def test_sum_node_backprop():
# create child nodes
child1 = Node()
val1 = 1.
child1.set_val(val1)
child2 = Node()
val2 = 1.
child2.set_val(val2)
# create sum node and adding children to it
sum_node1 = SumNode()
weight11 = 0.8
weight12 = 0.2
sum_node1.add_child(child1, weight11)
sum_node1.add_child(child2, weight12)
# adding a coparent
sum_node2 = SumNode()
weight21 = 0.6
weight22 = 0.4
sum_node2.add_child(child1, weight21)
sum_node2.add_child(child2, weight22)
# evaluating
sum_node1.eval()
sum_node2.eval()
# setting the log derivatives to the parents
sum_node_der1 = 1.0
sum_node1.log_der = log(sum_node_der1)
sum_node1.backprop()
sum_node_der2 = 1.0
sum_node2.log_der = log(sum_node_der2)
sum_node2.backprop()
# checking for correctness
log_der1 = log(weight11 * sum_node_der1 +
weight21 * sum_node_der2)
log_der2 = log(weight12 * sum_node_der1 +
weight22 * sum_node_der2)
print('log ders 1:{lgd1} 2:{lgd2}'.format(lgd1=log_der1,
lgd2=log_der2))
assert_almost_equal(log_der1, child1.log_der, 15)
assert_almost_equal(log_der2, child2.log_der, 15)
# resetting
child1.log_der = LOG_ZERO
child2.log_der = LOG_ZERO
# now changing the initial der values
sum_node_der1 = 0.5
sum_node1.log_der = log(sum_node_der1)
sum_node1.backprop()
sum_node_der2 = 0.0
sum_node2.log_der = LOG_ZERO
sum_node2.backprop()
# checking for correctness
log_der1 = log(weight11 * sum_node_der1 +
weight21 * sum_node_der2)
log_der2 = log(weight12 * sum_node_der1 +
weight22 * sum_node_der2)
print('log ders 1:{lgd1} 2:{lgd2}'.format(lgd1=log_der1,
lgd2=log_der2))
assert_almost_equal(log_der1, child1.log_der, 15)
assert_almost_equal(log_der2, child2.log_der, 15)
def test_product_node_create_and_eval():
# create child nodes
child1 = Node()
val1 = 1.
child1.set_val(val1)
child2 = Node()
val2 = 1.
child2.set_val(val2)
# create product node and add children
prod_node = ProductNode()
prod_node.add_child(child1)
prod_node.add_child(child2)
assert len(prod_node.children) == 2
print(prod_node)
# evaluation
prod_node.eval()
print(prod_node.log_val)
assert_almost_equal(prod_node.log_val,
log(val1 * val2),
places=15)
# changing values 0,1 -> LOG_ZERO
val1 = 0.
child1.set_val(val1)
val2 = 1.
child2.set_val(val2)
prod_node.eval()
print(prod_node.log_val)
assert_almost_equal(prod_node.log_val,
LOG_ZERO,
places=15)
# changing values 0,0 -> LOG_ZERO
val1 = 0.
child1.set_val(val1)
val2 = 0.
child2.set_val(val2)
prod_node.eval()
print(prod_node.log_val)
# now testing with macro since -1000 + -1000 != -1000
assert IS_LOG_ZERO(prod_node.log_val) is True
def test_product_node_backprop():
# create child nodes
child1 = Node()
val1 = 1.
child1.set_val(val1)
child2 = Node()
val2 = 1.
child2.set_val(val2)
child3 = Node()
val3 = 0.0
child3.set_val(val3)
# create a product node and add children
prod_node1 = ProductNode()
prod_node1.add_child(child1)
prod_node1.add_child(child2)
# create a second node on all children
prod_node2 = ProductNode()
prod_node2.add_child(child1)
prod_node2.add_child(child2)
prod_node2.add_child(child3)
# eval
prod_node1.eval()
prod_node2.eval()
# set der and backprop
prod_node_der1 = 1.0
prod_node1.log_der = log(prod_node_der1)
prod_node1.backprop()
prod_node_der2 = 1.0
prod_node2.log_der = log(prod_node_der2)
prod_node2.backprop()
# check for correctness
log_der1 = log(prod_node_der1 * val2 +
prod_node_der2 * val2 * val3)
log_der2 = log(prod_node_der1 * val1 +
prod_node_der2 * val1 * val3)
log_der3 = log(prod_node_der2 * val1 * val2)
print('log ders 1:{lgd1} 2:{lgd2} 3:{lgd3}'.format(lgd1=log_der1,
lgd2=log_der2,
lgd3=log_der3))
assert_almost_equal(log_der1, child1.log_der, 15)
assert_almost_equal(log_der2, child2.log_der, 15)
assert_almost_equal(log_der3, child3.log_der, 15)
# setting different values for children
val1 = 0.
child1.set_val(val1)
val2 = 0.
child2.set_val(val2)
val3 = 1.
child3.set_val(val3)
# eval
prod_node1.eval()
prod_node2.eval()
child1.log_der = LOG_ZERO
child2.log_der = LOG_ZERO
child3.log_der = LOG_ZERO
# set der and backprop
prod_node_der1 = 0.5
prod_node1.log_der = log(prod_node_der1)
prod_node1.backprop()
prod_node_der2 = 0.1
prod_node2.log_der = log(prod_node_der2)
prod_node2.backprop()
# check for correctness
try:
log_der1 = log(prod_node_der1 * val2 +
prod_node_der2 * val2 * val3)
except ValueError:
log_der1 = LOG_ZERO
try:
log_der2 = log(prod_node_der1 * val1 +
prod_node_der2 * val1 * val3)
except ValueError:
log_der2 = LOG_ZERO
try:
log_der3 = log(prod_node_der2 * val1 * val2)
except ValueError:
log_der3 = LOG_ZERO
print('log ders 1:{lgd1} 2:{lgd2} 3:{lgd3}'.format(lgd1=log_der1,
lgd2=log_der2,
lgd3=log_der3))
print('log ders 1:{lgd1} 2:{lgd2} 3:{lgd3}'.format(lgd1=child1.log_der,
lgd2=child2.log_der,
lgd3=child3.log_der))
if IS_LOG_ZERO(log_der1):
assert IS_LOG_ZERO(child1.log_der)
else:
assert_almost_equal(log_der1, child1.log_der, 15)
if IS_LOG_ZERO(log_der2):
assert IS_LOG_ZERO(child2.log_der)
else:
assert_almost_equal(log_der2, child2.log_der, 15)
if IS_LOG_ZERO(log_der3):
assert IS_LOG_ZERO(child3.log_der)
else:
assert_almost_equal(log_der3, child3.log_der, 15)
# setting different values for children
val1 = 0.
child1.set_val(val1)
val2 = 0.2
child2.set_val(val2)
val3 = 1.
child3.set_val(val3)
# eval
prod_node1.eval()
prod_node2.eval()
child1.log_der = LOG_ZERO
child2.log_der = LOG_ZERO
child3.log_der = LOG_ZERO
# set der and backprop
prod_node_der1 = 0.5
prod_node1.log_der = log(prod_node_der1)
prod_node1.backprop()
prod_node_der2 = 0.1
prod_node2.log_der = log(prod_node_der2)
prod_node2.backprop()
# check for correctness
try:
log_der1 = log(prod_node_der1 * val2 +
prod_node_der2 * val2 * val3)
except ValueError:
log_der1 = LOG_ZERO
try:
log_der2 = log(prod_node_der1 * val1 +
prod_node_der2 * val1 * val3)
except ValueError:
log_der2 = LOG_ZERO
try:
log_der3 = log(prod_node_der2 * val1 * val2)
except ValueError:
log_der3 = LOG_ZERO
print('log ders 1:{lgd1} 2:{lgd2} 3:{lgd3}'.format(lgd1=log_der1,
lgd2=log_der2,
lgd3=log_der3))
print('log ders 1:{lgd1} 2:{lgd2} 3:{lgd3}'.format(lgd1=child1.log_der,
lgd2=child2.log_der,
lgd3=child3.log_der))
if IS_LOG_ZERO(log_der1):
assert IS_LOG_ZERO(child1.log_der)
else:
assert_almost_equal(log_der1, child1.log_der, 15)
if IS_LOG_ZERO(log_der2):
assert IS_LOG_ZERO(child2.log_der)
else:
assert_almost_equal(log_der2, child2.log_der, 15)
if IS_LOG_ZERO(log_der3):
assert IS_LOG_ZERO(child3.log_der)
else:
assert_almost_equal(log_der3, child3.log_der, 15)
def test_sum_node_normalize():
# create child nodes
child1 = Node()
val1 = 1.
child1.set_val(val1)
child2 = Node()
val2 = 1.
child2.set_val(val2)
# create sum node and adding children to it
sum_node = SumNode()
weight1 = 1.
weight2 = 0.2
weights = [weight1, weight2]
sum_node.add_child(child1, weight1)
sum_node.add_child(child2, weight2)
un_sum = sum(weights)
# normalizing
sum_node.normalize()
assert len(sum_node.children) == 2
assert len(sum_node.weights) == 2
assert len(sum_node.log_weights) == 2
# checking weight sum
w_sum = sum(sum_node.weights)
assert w_sum == 1.
# and check the correct values
normal_sum = [weight / un_sum for weight in weights]
print(normal_sum)
assert normal_sum == sum_node.weights
# checking log_weights
log_weights = [log(weight) for weight in normal_sum]
print(log_weights)
assert log_weights == sum_node.log_weights
def test_categorical_indicator_node_create_and_eval():
# created a node on the first var and its first value
ind = CategoricalIndicatorNode(0, 0)
# seen x0 = 0 -> 1.
ind.eval(0)
assert ind.log_val == 0.
# this indicator is not fired
ind.eval(1)
assert ind.log_val == LOG_ZERO
# all indicators for that var are fired
ind.eval(MARG_IND)
assert ind.log_val == 0.
# the var has only 2 values, but the node does not know!
ind.eval(2)
assert ind.log_val == LOG_ZERO
# list of var values (var = position in the list)
vars = [2, 2, 3, 4]
freqs = [[1, 2],
[5, 5],
[1, 0, 2],
None]
# observed values for the 4 vars
obs = [0, MARG_IND, 1, 2]
# smoothing (alpha) levels to test for each variable
alphas = [0., 0.1, 1., 10.]
def test_categorical_smoothed_node_create_and_eval():
for alpha in alphas:
for i, var in enumerate(vars):
var_freq = freqs[i]
smo = CategoricalSmoothedNode(i, var, alpha, var_freq)
smo.eval(obs[i])
print('smo values')
print(smo.log_val)
ll = compute_smoothed_ll(obs[i], var_freq, var, alpha)
print('log values')
print(ll)
assert_almost_equal(ll, smo.log_val, 15)
def test_categorical_smoothed_node_resmooth():
for i, var in enumerate(vars):
alpha = alphas[0]
var_freq = freqs[i]
smo = CategoricalSmoothedNode(i, var, alpha, var_freq)
smo.eval(obs[i])
print('smo values')
print(smo.log_val)
# checking the right value
ll = compute_smoothed_ll(obs[i], var_freq, var, alpha)
print('log values')
print(ll)
assert_almost_equal(ll, smo.log_val, 15)
# now setting another alpha
print('Changing smooth level')
for alpha_new in alphas:
smo.smooth_probs(alpha_new)
smo.eval(obs[i])
print('smo values')
print(smo.log_val)
ll = compute_smoothed_ll(obs[i], var_freq, var, alpha_new)
print('log values')
print(ll)
assert_almost_equal(ll, smo.log_val, 15)
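# Minimal sketch of the Laplace-smoothing formula the assertions above rely on,
# assuming compute_smoothed_ll follows (freq + alpha) / (tot + alpha * n_values)
# (an assumption for illustration, not a reimplementation):
def _example_laplace_smoothing():
    freq, tot, n_values, alpha = 1, 3, 2, 0.1
    smoothed_prob = (freq + alpha) / (tot + alpha * n_values)
    assert_almost_equal(smoothed_prob, 1.1 / 3.2)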
def test_sum_node_is_complete():
# create a sum node with a scope
scope = frozenset({0, 2, 7, 13})
sum_node = SumNode(var_scope=scope)
# creating children with same scope
children = [ProductNode(var_scope=scope) for i in range(4)]
for prod_node in children:
sum_node.add_child(prod_node, 1.0)
assert sum_node.is_complete()
# now altering one child's scope with one less var
children[0].var_scope = frozenset({0, 7, 13})
assert sum_node.is_complete() is False
# now adding one more
children[0].var_scope = scope
children[3].var_scope = frozenset({0, 2, 7, 13, 3})
assert not sum_node.is_complete()
# now checking with indicator input nodes
var = 4
sum_node = SumNode(var_scope=frozenset({var}))
children = [CategoricalIndicatorNode(var=var, var_val=i)
for i in range(4)]
for input_node in children:
sum_node.add_child(input_node, 1.0)
assert sum_node.is_complete()
def test_product_node_is_decomposable():
# create a prod node with a scope
scope = frozenset({0, 2, 7, 13})
# creating sub scopes
sub_scope_1 = frozenset({0})
sub_scope_2 = frozenset({0, 2})
sub_scope_3 = frozenset({7})
sub_scope_4 = frozenset({17})
sub_scope_5 = frozenset({7, 13})
# now with decomposable children
child1 = SumNode(var_scope=sub_scope_2)
child2 = SumNode(var_scope=sub_scope_5)
child3 = SumNode(var_scope=sub_scope_2)
child4 = SumNode(var_scope=sub_scope_1)
prod_node = ProductNode(var_scope=scope)
prod_node.add_child(child1)
prod_node.add_child(child2)
assert prod_node.is_decomposable()
prod_node = ProductNode(var_scope=scope)
prod_node.add_child(child4)
prod_node.add_child(child1)
prod_node.add_child(child2)
assert not prod_node.is_decomposable()
prod_node = ProductNode(var_scope=scope)
prod_node.add_child(child4)
prod_node.add_child(child2)
assert not prod_node.is_decomposable()
# now with input nodes
child5 = CategoricalSmoothedNode(var=0, var_values=2)
child6 = CategoricalSmoothedNode(var=2, var_values=2)
child7 = CategoricalSmoothedNode(var=7, var_values=2)
child8 = CategoricalSmoothedNode(var=13, var_values=2)
child9 = CategoricalSmoothedNode(var=17, var_values=2)
prod_node = ProductNode(var_scope=scope)
prod_node.add_child(child5)
prod_node.add_child(child6)
prod_node.add_child(child7)
prod_node.add_child(child8)
assert prod_node.is_decomposable()
prod_node = ProductNode(var_scope=scope)
prod_node.add_child(child5)
prod_node.add_child(child6)
prod_node.add_child(child7)
prod_node.add_child(child9)
assert not prod_node.is_decomposable()
prod_node = ProductNode(var_scope=scope)
prod_node.add_child(child5)
prod_node.add_child(child6)
prod_node.add_child(child8)
assert not prod_node.is_decomposable()
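# A from-scratch sketch of the decomposability property checked above, using
# plain frozensets (illustrative only; ProductNode.is_decomposable is the
# authoritative implementation): children scopes must be pairwise disjoint and
# their union must equal the product node's scope.
def _example_is_decomposable(scope, child_scopes):
    union = frozenset().union(*child_scopes)
    pairwise_disjoint = sum(len(s) for s in child_scopes) == len(union)
    return pairwise_disjoint and union == scope
# e.g. _example_is_decomposable(frozenset({0, 2}), [frozenset({0}), frozenset({2})]) is True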
def test_categorical_smoothed_node_data_smooth():
data_1 = numpy.array([[1],
[0],
[1],
[0],
[1]])
data_2 = numpy.array([[1, 0],
[0, 1],
[1, 1],
[0, 1],
[1, 0]])
alpha = 0
freqs = CategoricalSmoothedNode.smooth_freq_from_data(data_1, alpha)
print('freqs', freqs)
exp_freqs = CategoricalSmoothedNode.smooth_ll([2 / 5, 3 / 5], alpha)
print('exp freqs', exp_freqs)
assert_array_almost_equal(exp_freqs, freqs)
# now create a node
input_node = CategoricalSmoothedNode(var=0,
var_values=2,
instances={0, 2, 4})
input_node.smooth_probs(alpha, data=data_1)
exp_probs = CategoricalSmoothedNode.smooth_ll([0, 1], alpha)
print('exp probs', exp_probs)
print('probs', input_node._var_probs)
assert_log_array_almost_equal(exp_probs,
input_node._var_probs)
input_node.smooth_probs(alpha, data=data_2)
assert_log_array_almost_equal(exp_probs,
input_node._var_probs)
# TODO: check that data_2 raises an exception
def test_cltree_node_init_and_eval():
s_data = numpy.array([[1, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 0],
[1, 0, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0]])
features = [0, 1, 2, 3]
feature_vals = [2, 2, 2, 2]
clt_node = CLTreeNode(data=s_data,
vars=features,
var_values=feature_vals,
alpha=0.0)
assert_array_equal(numpy.array(features), clt_node.vars)
assert_almost_equal(clt_node._alpha, 0.0)
assert_array_almost_equal(s_data, clt_node._data)
nico_cltree_tree = numpy.array([-1, 2, 0, 2])
nico_cltree_lls = numpy.array([-2.01490302054,
-1.20397280433,
-1.20397280433,
-1.79175946923,
-1.60943791243,
-1.60943791243])
assert_array_equal(nico_cltree_tree, clt_node._cltree._tree)
print('Created node')
print(clt_node)
#
# evaluating
for i, instance in enumerate(s_data):
clt_node.eval(instance)
assert_almost_equal(nico_cltree_lls[i], clt_node.log_val)
print(clt_node.log_val, nico_cltree_lls[i])
#
# creating now with a subset from data
features = [0, 2, 3]
feature_vals = [2, 2, 2]
clt_node = CLTreeNode(data=s_data[:, features],
vars=features,
var_values=feature_vals,
alpha=0.0)
nico_cltree_subtree = numpy.array([-1, 0, 1])
nico_cltree_sublls = numpy.array([-1.09861228867,
-0.69314718056,
-0.69314718056,
-1.79175946923,
-1.09861228867,
-0.69314718056])
assert_array_equal(numpy.array(features), clt_node.vars)
assert_array_equal(nico_cltree_subtree,
clt_node._cltree._tree)
for i, instance in enumerate(s_data):
clt_node.eval(instance)
assert_almost_equal(nico_cltree_sublls[i], clt_node.log_val)
print(clt_node.log_val, nico_cltree_sublls[i])
def test_cltree_node_eval():
#
# testing for the correctness of data masking while evaluating
# shall I move this test to CLTree?
s_data = numpy.random.binomial(n=1, p=0.5, size=(1000, 100))
print(s_data)
random_features = numpy.random.choice(s_data.shape[1], 20)
sub_s_data = s_data[:, random_features]
clt_node = CLTreeNode(vars=random_features,
data=sub_s_data,
var_values=numpy.array([2 for i in
range(sub_s_data.shape[1])]))
# evaluating on all s_data
lls = []
for instance in s_data:
clt_node.eval(instance)
lls.append(clt_node.log_val)
#
# now do one on the only sub
clt_node = CLTreeNode(vars=[i for i in range(sub_s_data.shape[1])],
data=sub_s_data,
var_values=numpy.array([2 for i in
range(sub_s_data.shape[1])]))
lls_s = []
for instance in sub_s_data:
clt_node.eval(instance)
lls_s.append(clt_node.log_val)
# print(lls)
# print(lls_s)
assert_array_almost_equal(numpy.array(lls), numpy.array(lls_s))
| 21,756
| 27.515072
| 79
|
py
|
spyn-repr
|
spyn-repr-master/spn/linked/tests/test_learning.py
|
from spn.linked.spn import Spn
from spn.linked.layers import CategoricalIndicatorLayer
from spn.linked.layers import SumLayer
from spn.linked.layers import ProductLayer
from spn.linked.nodes import SumNode
from spn.linked.nodes import ProductNode
import numpy
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_almost_equal
import dataset
from spn.factory import SpnFactory
syn_train_data = numpy.array([[0., 1., 1., 0.],
[0., 1., 0., 0.],
[1., 0., 0., 1.],
[0., 1., 1., 1.],
[1., 1., 0., 0.],
[1., 1., 0., 0.],
[1., 1., 1., 0.],
[0., 1., 0., 1.],
[1., 0., 1., 1.],
[1., 0., 0., 1.]])
syn_val_data = numpy.array([[1., 1., 1., 0.],
[1., 1., 1., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 1.],
[1., 0., 0., 1.]])
def test_mini_spn_fit_em():
vars = numpy.array([2, 2, 2, 2])
input_layer = CategoricalIndicatorLayer(vars=vars)
print(input_layer)
ind1 = input_layer._nodes[0]
ind2 = input_layer._nodes[1]
ind3 = input_layer._nodes[2]
ind4 = input_layer._nodes[3]
ind5 = input_layer._nodes[4]
ind6 = input_layer._nodes[5]
ind7 = input_layer._nodes[6]
ind8 = input_layer._nodes[7]
# creating a sum layer of 4 nodes
sum1 = SumNode()
sum2 = SumNode()
sum3 = SumNode()
sum4 = SumNode()
sum1.add_child(ind1, 0.6)
sum1.add_child(ind2, 0.4)
sum2.add_child(ind3, 0.5)
sum2.add_child(ind4, 0.5)
sum3.add_child(ind5, 0.7)
sum3.add_child(ind6, 0.3)
sum4.add_child(ind7, 0.4)
sum4.add_child(ind8, 0.6)
sum_layer = SumLayer(nodes=[sum1, sum2,
sum3, sum4])
# and a top layer of 3 products
prod1 = ProductNode()
prod2 = ProductNode()
prod3 = ProductNode()
prod1.add_child(sum1)
prod1.add_child(sum2)
prod2.add_child(sum2)
prod2.add_child(sum3)
prod3.add_child(sum3)
prod3.add_child(sum4)
prod_layer = ProductLayer(nodes=[prod1, prod2, prod3])
# root layer
root = SumNode()
root.add_child(prod1, 0.4)
root.add_child(prod2, 0.25)
root.add_child(prod3, 0.35)
root_layer = SumLayer(nodes=[root])
spn = Spn(input_layer=input_layer,
layers=[sum_layer, prod_layer, root_layer])
print(spn)
# training on obs
spn.fit_em(train=syn_train_data,
valid=syn_val_data,
test=None,
hard=True)
def atest_nltcs_em_fit():
print('Loading datasets')
train, valid, test = dataset.load_train_val_test_csvs('nltcs')
n_instances = train.shape[0]
# estimating the frequencies for the features
print('Estimating features')
freqs, features = dataset.data_2_freqs(train)
print('Build kernel density estimation')
spn = SpnFactory.linked_kernel_density_estimation(n_instances,
features)
print('EM training')
spn.fit_em(train, valid, test,
hard=True,
epochs=2)
def profiling():
print('Loading datasets')
train, valid, test = dataset.load_train_val_test_csvs('nltcs')
n_instances = train.shape[0]
# estimating the frequencies for the features
print('Estimating features')
freqs, features = dataset.data_2_freqs(train)
print('Build kernel density estimation')
spn = SpnFactory.linked_kernel_density_estimation(n_instances,
features)
# print(spn)
print('EM training')
spn.fit_em(train, valid, test,
hard=True,
epochs=2)
def test_em():
from pycallgraph import PyCallGraph
from pycallgraph.output import GraphvizOutput
with PyCallGraph(output=GraphvizOutput()):
profiling()
def test_sgd():
print('Loading datasets')
train, valid, test = dataset.load_train_val_test_csvs('nltcs')
n_instances = train.shape[0]
n_test_instances = test.shape[0]
# estimating the frequencies for the features
print('Estimating features')
freqs, features = dataset.data_2_freqs(train)
print('Build kernel density estimation')
spn = SpnFactory.linked_kernel_density_estimation(
n_instances,
features)
print('Created SPN with\n' + spn.stats())
print('Starting SGD')
spn.fit_sgd(train, valid, test,
learning_rate=0.1,
n_epochs=20,
batch_size=1,
hard=False)
import random
def test_random_spn_sgd():
print('Loading datasets')
train, valid, test = dataset.load_train_val_test_csvs('nltcs')
n_instances = train.shape[0]
n_test_instances = test.shape[0]
# estimating the frequencies for the features
print('Estimating features')
freqs, features = dataset.data_2_freqs(train)
n_layers = 1
n_max_children = 2000
n_scope_children = 2000
max_scope_split = -1
merge_prob = 0.5
seed = 1337
rand_gen = random.Random(seed)
print('Build random spn')
spn = SpnFactory.linked_random_spn_top_down(features,
n_layers,
n_max_children,
n_scope_children,
max_scope_split,
merge_prob,
rand_gen=rand_gen)
assert spn.is_valid()
print('Stats\n')
print(spn.stats())
np_rand_gen = numpy.random.RandomState(seed)
spn.fit_sgd(train, valid, test,
learning_rate=0.2,
n_epochs=10,
batch_size=1,
grad_method=1,
validation_frequency=100,
rand_gen=np_rand_gen,
hard=False)
def test_random_spn_em():
print('Loading datasets')
train, valid, test = dataset.load_train_val_test_csvs('nltcs')
n_instances = train.shape[0]
n_test_instances = test.shape[0]
# estimating the frequencies for the features
print('Estimating features')
freqs, features = dataset.data_2_freqs(train)
n_layers = 2
n_max_children = 4
n_scope_children = 5
max_scope_split = 3
merge_prob = 0.5
print('Build random spn')
spn = SpnFactory.linked_random_spn_top_down(features,
n_layers,
n_max_children,
n_scope_children,
max_scope_split,
merge_prob)
assert spn.is_valid()
print('Stats\n')
print(spn.stats())
spn.fit_em(train, valid, test,
hard=False,
n_epochs=10)
from spn.linked.learning import SpectralStructureLearner
from spn.linked.learning import CoClusteringStructureLearner
def test_spectral_structure_learner_sim():
data = numpy.array([[1, 0, 1, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 1, 0, 1]], dtype='int8')
learner = SpectralStructureLearner(sigma=2.0)
sim_gauss_1 = learner.compute_similarity_matrix(data,
metric='gaussian')
sim_gauss_2 = learner.compute_similarity_matrix_pair(data,
metric=learner.gaussian_kernel)
print(sim_gauss_1)
print(sim_gauss_2)
sim_gtest_2 = learner.compute_similarity_matrix_pair(data,
metric=learner.g_test)
print(sim_gtest_2)
sim_gtest_1 = learner.compute_similarity_matrix(data,
metric='gtest')
print(sim_gtest_1)
def test_spectral_structure_learner_diag_sum():
# create a similarity matrix
W = numpy.array([[1.0, 0.2, 0.3],
[0.2, 1.0, 0.5],
[0.3, 0.5, 1.0]])
learner = SpectralStructureLearner()
D = learner.diag_sum(W)
print('Diagonal sum matrix:', D)
assert_almost_equal(D[0, 0], 1.5)
assert_almost_equal(D[1, 1], 1.7)
assert_almost_equal(D[2, 2], 1.8)
# must check for all other cells to be 0
def test_spectral_structure_learner_cut_val():
# create a similarity matrix
W = numpy.array([[1.0, 0.2, 0.3],
[0.2, 1.0, 0.5],
[0.3, 0.5, 1.0]])
learner = SpectralStructureLearner()
D = learner.diag_sum(W)
print('Diagonal sum matrix:', D)
# create a clustering [0,2][1]
f = numpy.array([1, -1, 1])
vol_f = learner.vol(W, f, 1)
vol_minus_f = learner.vol(W, f, -1)
print('vol_f', vol_f)
print('vol_m_f', vol_minus_f)
assert_almost_equal(D[0, 0] + D[2, 2], vol_f)
assert_almost_equal(D[1, 1], vol_minus_f)
f_clu = learner.f_clu(W, f)
print('f clu', f_clu)
cut_f = learner.cut_val_f(W, f_clu)
print('cut val:', cut_f)
cut_w = learner.cut_val_w(W, f)
print('cut val w:', cut_w)
ncut_f = learner.ncut_val(W, f_clu)
print('ncut val:', ncut_f)
ncut_hand = ((W[0, 1] + W[2, 1]) / vol_f +
(W[0, 1] + W[2, 1]) / vol_minus_f)
print('ncut by hand', ncut_hand)
ncut_s = learner.ncut(W, f)
print('ncut simple', ncut_s)
assert_almost_equal(ncut_hand, ncut_s)
assert_almost_equal(ncut_hand, ncut_f)
# assert_almost_equal(W[0, 1] + W[2, 1], cut_f)
clustering = [[0, 2], [1]]
f_s = learner.f_assignment_from_clusters(W, clustering)
print('f from clustering', f_s)
assert_array_almost_equal(f_clu, f_s)
def test_spectral_structure_learner_labels():
labels = [0, 1, 1, 0, 1, 2, 1, 0, 2, 2]
ids = [12, 11, 1, 0, 56, 107, 12, 9, 70, 8]
learner = SpectralStructureLearner()
clustering = learner.from_labels_to_clustering(labels, ids)
print(clustering)
assert clustering == [[12, 0, 9], [11, 1, 56, 12], [107, 70, 8]]
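# Hypothetical helper mirroring the (labels, ids) -> clustering contract tested
# above; a minimal sketch, not the learner's implementation:
def _example_labels_to_clustering(labels, ids):
    clusters = {}
    for label, id_ in zip(labels, ids):
        clusters.setdefault(label, []).append(id_)
    # one list of ids per label, in label order
    return [clusters[label] for label in sorted(clusters)]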
#
# found a very old bug:
# synth_data = numpy.random.binomial(100, 0.5, (200, 15))
synth_data = numpy.random.binomial(1, 0.5, (200, 15))
synth_feats = numpy.zeros(15, dtype='int8')
synth_feats.fill(2)
def test_compare_spectral_performance():
print('Loading datasets')
train, valid, test = dataset.load_train_val_test_csvs('nltcs')
learner = SpectralStructureLearner()
k = 5
ids = [i for i in range(train.shape[1])]
labels, clusters, valid = \
learner.spectral_clustering(train.T, ids, k,
affinity_metric='gtest',
pair=True)
def test_spectral_clustering():
print('Loading datasets')
train, valid, test = dataset.load_train_val_test_csvs('nltcs')
learner = SpectralStructureLearner(sigma=4.0)
k = 5
ids = [i for i in range(train.shape[0])]
labels, clusters, valid = learner.spectral_clustering(train, ids, k)
print('labels:{0}\nclusters:{1}'.format(labels, clusters))
def test_spectral_cluster_learner():
print('Loading datasets')
train, valid, test = dataset.load_train_val_test_csvs('nltcs')
print('features', synth_feats)
learner = SpectralStructureLearner()
k = 5
spn = learner.fit_structure(synth_data,
synth_feats,
k_row_clusters=k,
min_instances_slice=2,
pairwise=True)
print(spn.stats())
def test_greedy_split_features():
print('Loading datasets')
train, valid, test = dataset.load_train_val_test_csvs('nltcs')
learner = SpectralStructureLearner()
k = 2
ids = [i for i in range(train.shape[1])]
g_factor = 9
seed = 1337
rand_gen = numpy.random.RandomState(seed)
data_slice = train[:100, :]
# splitting on the features
clustering = learner.greedy_split_features(data_slice.T,
ids,
g_factor,
rand_gen)
print(clustering)
labels, clustering, valid = \
learner.spectral_clustering(data_slice.T,
ids,
k,
affinity_metric='gtest',
validity_check=True,
threshold=0.8,
rand_gen=rand_gen)
print(clustering)
# some constants for the next tests
clusters_test = [[1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1],
[2, 2, 2, 1, 1, 0, 2, 3, 2, 1, 3, 1, 2, 0, 3, 2],
[4, 5, 5, 3, 3, 0, 5, 6, 5, 3, 7, 2, 5, 1, 8, 4],
[6, 9, 9, 3, 4, 0, 9, 10, 8, 5, 11, 2, 8, 1, 12, 7],
[6, 10, 10, 3, 4, 0, 11, 12, 8, 5, 13, 2, 9, 1, 14, 7],
[6, 10, 11, 3, 4, 0, 12, 13, 8, 5, 14, 2, 9, 1, 15, 7]]
n_features = 16
feature_sizes = [i for i in range(n_features)]
def test_coc_read_hierarchy_from_file():
# specify test file
filename = 'spn/linked/tests/coc/co_cluster.col'
# create learner
learner = CoClusteringStructureLearner()
cluster_ass = learner.read_hierarchy_from_file(filename)
print(cluster_ass)
assert clusters_test == cluster_ass
def test_coc_build_linked_hierarchy():
# specify test file
filename = 'spn/linked/tests/coc/co_cluster.col'
# create learner
learner = CoClusteringStructureLearner()
cluster_ass = learner.read_hierarchy_from_file(filename)
# building the linked representation
linked_hier = learner.build_linked_hierarchy(cluster_ass)
print(linked_hier)
def test_coc_build_spn_from_co_clusters():
# specify test file
col_filename = 'spn/linked/tests/coc/cc_test.col'
row_filename = 'spn/linked/tests/coc/cc_test.row'
data_filename = 'spn/linked/tests/coc/data_test.csv'
# create learner
learner = CoClusteringStructureLearner()
# build from file
cluster_ass_col = learner.read_hierarchy_from_file(col_filename)
cluster_ass_row = learner.read_hierarchy_from_file(row_filename)
data = dataset.csv_2_numpy(data_filename, path='')
row_h = learner.build_linked_hierarchy(cluster_ass_row)
col_h = learner.build_linked_hierarchy(cluster_ass_col)
print(row_h)
print(col_h)
spn = learner.build_spn_from_co_clusters(row_h,
col_h,
data,
feature_sizes,
min_instances_slice=1,
max_depth=10)
print(spn)
print(spn.stats())
import cProfile
import re
if __name__ == '__main__':
# print('ndrangheta')
# cProfile.run('profiling()')
profiling()
| 15,315
| 30.841996
| 88
|
py
|
normalizing_flows
|
normalizing_flows-master/test.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from torch.utils.data import DataLoader, Dataset
import unittest
from unittest.mock import MagicMock
from maf import MADE, MADEMOG, MAF, MAFMOG, RealNVP, BatchNorm, LinearMaskedCoupling, train
from glow import Actnorm, Invertible1x1Conv, AffineCoupling, Squeeze, Split, FlowStep, FlowLevel, Glow, train_epoch
from data import fetch_dataloaders
args = MagicMock()
args.input_size = 1000
args.batch_size = 100
args.device = torch.device('cpu')
NORM_TOL = 1e-4 # tolerance for difference in vector norms
torch.manual_seed(1)
# --------------------
# Test invertibility and log dets of individual layers
# --------------------
def test_layer(l, input_dims, cond_label_size=None, norm_tol=NORM_TOL):
x = torch.randn(input_dims)
batch_size = input_dims[0]
labels = None
if cond_label_size is not None: # make one hot labels
labels = torch.eye(cond_label_size).repeat(batch_size // cond_label_size + 1, 1)[:batch_size]
u, logd = l(x) if labels is None else l(x, labels)
recon_x, inv_logd = l.inverse(u) if labels is None else l.inverse(u, labels)
d_data, d_logd = (recon_x - x).norm(), (logd + inv_logd).norm()
assert d_data < norm_tol, 'Data reconstruction fail - norm of difference = {}.'.format(d_data)
assert d_logd < norm_tol, 'Log determinant inversion fail. - norm of difference = {}'.format(d_logd)
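# Example usage of the generic check above: any layer exposing forward/inverse
# methods that return (output, log_det) pairs can be plugged in, e.g.
#   test_layer(BatchNorm(args.input_size), input_dims=(args.batch_size, args.input_size))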
class TestFlowLayers(unittest.TestCase):
def test_batch_norm(self):
l = BatchNorm(args.input_size)
l.train()
for i in range(3):
with self.subTest(train_loop_iter=i):
test_layer(l, input_dims=(args.batch_size, args.input_size))
l.eval()
for i in range(2):
with self.subTest(eval_loop_iter=i):
test_layer(l, input_dims=(args.batch_size, args.input_size))
def test_linear_coupling(self):
mask = torch.arange(args.input_size).float() % 2
# unconditional
test_layer(LinearMaskedCoupling(args.input_size, hidden_size=10, n_hidden=1, mask=mask), input_dims=(args.batch_size, args.input_size))
test_layer(LinearMaskedCoupling(args.input_size, hidden_size=10, n_hidden=2, mask=mask), input_dims=(args.batch_size, args.input_size))
# conditional
cond_label_size = 10
test_layer(LinearMaskedCoupling(args.input_size, hidden_size=10, n_hidden=1, mask=mask, cond_label_size=cond_label_size),
input_dims=(args.batch_size, args.input_size), cond_label_size=cond_label_size)
test_layer(LinearMaskedCoupling(args.input_size, hidden_size=10, n_hidden=2, mask=mask, cond_label_size=cond_label_size),
input_dims=(args.batch_size, args.input_size), cond_label_size=cond_label_size)
def test_made(self):
test_layer(MADE(args.input_size, hidden_size=10, n_hidden=3), input_dims=(args.batch_size, args.input_size))
def test_actnorm(self):
test_layer(Actnorm(param_dim=(1,3,1,1)), input_dims=(args.batch_size, 3, 50, 50))
def test_invertible1x1conv(self):
test_layer(Invertible1x1Conv(n_channels=24), input_dims=(args.batch_size, 24, 50, 50), norm_tol=1e-3)
test_layer(Invertible1x1Conv(n_channels=12, lu_factorize=True), input_dims=(args.batch_size, 12, 50, 50), norm_tol=1e-3)
def test_affinecoupling(self):
test_layer(AffineCoupling(n_channels=4, width=12), input_dims=(args.batch_size, 4, 50, 50), norm_tol=5e-4)
def test_squeeze(self):
net = Squeeze()
x = torch.rand(args.batch_size, 12, 20, 30)
recon_x = net.inverse(net(x))
y = net(net.inverse(x))
assert torch.allclose(x, recon_x), 'Data reconstruction failed.'
assert torch.allclose(x, y)
def test_split(self):
net = Split(n_channels=10)
x = torch.randn(args.batch_size, 10, 20, 30)
x1, z2, logd = net(x)
recon_x, inv_logd = net.inverse(x1, z2)
d_data, d_logd = (recon_x - x).norm(), (logd + inv_logd).norm()
assert d_data < 1e-4, 'Data reconstruction fail - norm of difference = {}.'.format(d_data)
assert d_logd < 1e-4, 'Log determinant inversion fail. - norm of difference = {}'.format(d_logd)
def test_flowstep(self):
test_layer(FlowStep(n_channels=4, width=12), input_dims=(args.batch_size, 4, 50, 50), norm_tol=1e-3)
def test_flowlevel(self):
net = FlowLevel(n_channels=3, width=12, depth=2)
x = torch.randn(args.batch_size, 3, 32, 32)
x1, z2, logd = net(x)
recon_x, inv_logd = net.inverse(x1, z2)
d_data, d_logd = (recon_x - x).norm(), (logd + inv_logd).norm()
assert d_data < 5e-4, 'Data reconstruction fail - norm of difference = {}.'.format(d_data)
assert d_logd < 5e-4, 'Log determinant inversion fail. - norm of difference = {}'.format(d_logd)
def test_glow(self):
net = Glow(width=12, depth=3, n_levels=3)
x = torch.randn(args.batch_size, 3, 32, 32)
zs, logd = net(x)
recon_x, inv_logd = net.inverse(zs)
y, _ = net.inverse(batch_size=args.batch_size)
d_data, d_data_y, d_logd = (recon_x - x).norm(), (x - y).norm(), (logd + inv_logd).norm()
assert d_data < 1e-3, 'Data reconstruction fail - norm of difference = {}.'.format(d_data)
# assert d_data_y < 1e-3, 'Data reconstruction (inv > base > inv) fail - norm of difference = {}.'.format(d_data_y)
assert d_logd < 1e-3, 'Log determinant inversion fail. - norm of difference = {}'.format(d_logd)
# --------------------
# Test MAF
# --------------------
# Test flow invertibility (KL=0) at initialization
@torch.no_grad()
def test_untrained_model(model, cond_label_size=None):
# 1. sample Gaussian data;
# 2. run model forward and reverse;
# 3. reconstruct data;
# 4. measure KL between Gaussian fitted to the data and the base distribution
n_samples = 1000
data = model.base_dist.sample((n_samples,))
labels = None
if cond_label_size is not None: # make one hot labels
labels = torch.eye(cond_label_size).repeat(n_samples // cond_label_size + 1, 1)[:n_samples]
u, logd = model(data, labels)
recon_data, _ = model.inverse(u, labels)
recon_dist = D.Normal(recon_data.mean(0), recon_data.var(0).sqrt())
kl = D.kl.kl_divergence(recon_dist, model.base_dist).sum(-1)
print('KL (q || p) = {:.4f}'.format(kl))
# Test that the flow can be trained so the random numbers driving it are N(0,1), i.e. KL(u || base distribution) = 0
@torch.no_grad()
def test_trained_model(model, dl, cond_label_size=None):
# 1. sample toy data;
# 2. run model forward and generate random numbers driving the model;
# 3. measure KL between Gaussian fitted to random numbers driving the model and the base distribution
data, labels = next(iter(dl))
n_samples = data.shape[0]  # n_samples was previously undefined in this scope
labels = None
if cond_label_size is not None:  # make one hot labels
    labels = torch.eye(cond_label_size).repeat(n_samples // cond_label_size + 1, 1)[:n_samples]
u, logd = model(data, labels)
u_dist = D.Normal(u.mean(0), u.std(0))
kl = D.kl.kl_divergence(u_dist, model.base_dist).sum()
print('KL (u || p) = {:.4f}'.format(kl))
class TestMAFUntrained(unittest.TestCase):
def setUp(self):
self.cond_label_size = 2
def test_made_1_hidden(self):
test_untrained_model(MADE(input_size=2, hidden_size=10, n_hidden=1, cond_label_size=None, activation='relu', input_order='sequential'))
def test_made_1_hidden_conditional(self):
test_untrained_model(MADE(input_size=2, hidden_size=10, n_hidden=1, cond_label_size=self.cond_label_size, activation='relu',
input_order='sequential'), self.cond_label_size)
def test_made_2_hidden(self):
test_untrained_model(MADE(input_size=2, hidden_size=10, n_hidden=2, cond_label_size=None, activation='relu', input_order='sequential'))
def test_made_2_hidden_conditional(self):
test_untrained_model(MADE(input_size=2, hidden_size=10, n_hidden=2, cond_label_size=self.cond_label_size, activation='relu',
input_order='sequential'), self.cond_label_size)
def test_made_200_inputs_random_mask(self):
test_untrained_model(MADE(input_size=200, hidden_size=10, n_hidden=2, cond_label_size=None, activation='relu', input_order='random'))
def test_maf_1_blocks_no_bn(self):
test_untrained_model(MAF(n_blocks=1, input_size=2, hidden_size=10, n_hidden=1, cond_label_size=None,
activation='relu', input_order='sequential', batch_norm=False))
def test_maf_1_blocks_bn(self):
test_untrained_model(MAF(n_blocks=1, input_size=2, hidden_size=10, n_hidden=1, cond_label_size=None,
activation='relu', input_order='sequential', batch_norm=True))
def test_maf_2_blocks(self):
test_untrained_model(MAF(n_blocks=2, input_size=2, hidden_size=10, n_hidden=1, cond_label_size=None,
activation='relu', input_order='sequential', batch_norm=True))
def test_maf_1_blocks_conditional(self):
test_untrained_model(MAF(n_blocks=1, input_size=2, hidden_size=10, n_hidden=1, cond_label_size=self.cond_label_size,
activation='relu', input_order='sequential', batch_norm=True), self.cond_label_size)
def test_maf_2_blocks_conditional(self):
test_untrained_model(MAF(n_blocks=2, input_size=2, hidden_size=10, n_hidden=1, cond_label_size=self.cond_label_size,
activation='relu', input_order='sequential', batch_norm=True), self.cond_label_size)
def test_realnvp_1_block_200_inputs(self):
test_untrained_model(RealNVP(n_blocks=1, input_size=200, hidden_size=10, n_hidden=2, cond_label_size=None))
def test_realnvp_2_block_200_inputs(self):
test_untrained_model(RealNVP(n_blocks=2, input_size=200, hidden_size=10, n_hidden=2, cond_label_size=None))
def test_realnvp_2_blocks_conditional(self):
test_untrained_model(RealNVP(n_blocks=2, input_size=200, hidden_size=10, n_hidden=2, cond_label_size=self.cond_label_size),
self.cond_label_size)
def test_mademog_1_comp(self):
test_untrained_model(MADEMOG(n_components=1, input_size=10, hidden_size=10, n_hidden=2, cond_label_size=None, activation='relu',
input_order='sequential'))
def test_mademog_10_comp(self):
test_untrained_model(MADEMOG(n_components=10, input_size=200, hidden_size=10, n_hidden=2, cond_label_size=None, activation='relu',
input_order='sequential'))
def test_mafmog_1_block_1_comp(self):
test_untrained_model(MAFMOG(n_blocks=1, n_components=1, input_size=2, hidden_size=10, n_hidden=1, cond_label_size=self.cond_label_size,
activation='relu', input_order='sequential', batch_norm=True), self.cond_label_size)
def test_mafmog_2_blocks_10_comp_conditional(self):
test_untrained_model(MAFMOG(n_blocks=2, n_components=10, input_size=2, hidden_size=10, n_hidden=1, cond_label_size=self.cond_label_size,
activation='relu', input_order='sequential', batch_norm=True), self.cond_label_size)
class TestMAFTrained(unittest.TestCase):
def setUp(self):
args = MagicMock()
args.cond_label_size = None
args.batch_size = 100
args.device = torch.device('cpu')
dl, _ = fetch_dataloaders('TOY', args.batch_size, args.device, flip_toy_var_order=False)
def _train(model, n_steps):
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-6)
print('Untrained: ')
test_trained_model(model, dl)
for _ in range(n_steps):
train(model, dl, optimizer, 0, args)
print('Trained: ')
test_trained_model(model, dl)
self._train = _train
def test_made(self):
model = MADE(input_size=2, hidden_size=100, n_hidden=1, cond_label_size=None, activation='relu', input_order='sequential')
self._train(model, 10)
def test_mademog(self):
model = MADEMOG(n_components=10, input_size=2, hidden_size=100, n_hidden=1, cond_label_size=None, activation='relu', input_order='sequential')
self._train(model, 5)
def test_maf_5(self):
model = MAF(n_blocks=5, input_size=2, hidden_size=100, n_hidden=1, cond_label_size=None,
activation='relu', input_order='sequential', batch_norm=True)
self._train(model, 1)
def test_mafmog_5_comp_1(self):
model = MAFMOG(n_blocks=5, n_components=1, input_size=2, hidden_size=100, n_hidden=1,
cond_label_size=None, activation='relu', input_order='sequential', batch_norm=True)
self._train(model, 5)
def test_mafmog_5_comp_10(self):
model = MAFMOG(n_blocks=5, n_components=10, input_size=2, hidden_size=100, n_hidden=1,
cond_label_size=None, activation='relu', input_order='sequential', batch_norm=True)
self._train(model, 10)
def test_train_realnvp_5(self):
model = RealNVP(n_blocks=5, input_size=2, hidden_size=100, n_hidden=1, cond_label_size=None, batch_norm=True)
self._train(model, 5)
# --------------------
# Test Glow
# --------------------
# Generate a dataset from a 2-dim Gaussian distribution and expand to `image size` of (3,32,32)
class ToyDistribution(D.Distribution):
def __init__(self, flip_var_order):
super().__init__()
self.flip_var_order = flip_var_order
self.p_x2 = D.Normal(0, 4)
self.p_x1 = lambda x2: D.Normal(0.25 * x2**2, 1)
def rsample(self, sample_shape=torch.Size()):
x2 = self.p_x2.sample(sample_shape)
x1 = self.p_x1(x2).sample()
if self.flip_var_order:
return torch.stack((x2, x1), dim=-1).expand(3,-1,-1)
else:
return torch.stack((x1, x2), dim=0).repeat(16,1).expand(3,-1,-1)
def log_prob(self, value):
if self.flip_var_order:
value = value.flip(1)
return self.p_x1(value[:,1]).log_prob(value[:,0]) + self.p_x2.log_prob(value[:,1])
class TOY(Dataset):
def __init__(self, dataset_size=2500, flip_var_order=False):
self.input_size = 32
self.label_size = 1
self.dataset_size = dataset_size
self.base_dist = ToyDistribution(flip_var_order)
def __len__(self):
return self.dataset_size
def __getitem__(self, i):
return self.base_dist.sample((32,)), torch.zeros(self.label_size)
class TestGlowUntrained(unittest.TestCase):
def setUp(self):
def test_kl(model):
n_samples = 1000
data = model.base_dist.sample((n_samples,3,32,32)).squeeze()
zs, logd = model(data)
recon_data, _ = model.inverse(zs)
recon_dist = D.Normal(recon_data.mean(0), recon_data.var(0).sqrt())
kl = D.kl.kl_divergence(recon_dist, model.base_dist).mean()
print('Model: depth {}, levels {}; Avg per pixel KL (q||p) = {:.4f}'.format(
len(model.flowstep), len(model.flowlevels), kl))
self.test_kl = test_kl
def test_glow_depth_1_levels_1(self):
# 1. sample data; 2. run model forward and reverse; 3. reconstruct data; 4. measure KL between Gaussian fitted to the data and the base distribution
self.test_kl(Glow(width=12, depth=1, n_levels=1))
def test_glow_depth_2_levels_2(self):
# 1. sample data; 2. run model forward and reverse; 3. reconstruct data; 4. measure KL between Gaussian fitted to the data and the base distribution
self.test_kl(Glow(width=12, depth=2, n_levels=2))
class TestGlowTrained(unittest.TestCase):
def setUp(self):
args = MagicMock()
args.device = torch.device('cpu')
dl = DataLoader(TOY(), batch_size=100)
@torch.no_grad()
def _test_trained_model(model, dl, cond_label_size=None):
data, _ = next(iter(dl))
zs, logd = model(data)
zs = torch.cat([z.flatten(1) for z in zs], dim=1) # flatten the z's and concat
zs_dist = D.Normal(zs.mean(0), zs.std(0))
kl = D.kl.kl_divergence(zs_dist, model.base_dist).mean()
print('Mean per pixel KL (zs || N(0,1)) = {:.4f}'.format(kl))
print('Mean data bits per pixel: {:.4f}'.format(-model.log_prob(data, bits_per_pixel=True).mean(0)))
def _train(model, n_steps):
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-6)
print('Model: depth {}, levels {}. Untrained:'.format(len(model.flowstep), len(model.flowlevels)))
_test_trained_model(model, dl)
for _ in range(n_steps):
train_epoch(model, dl, optimizer, 0, args)
print('Trained: ')
_test_trained_model(model, dl)
self._train = _train
def test_glow_1_1(self):
model = Glow(width=12, depth=1, n_levels=1)
self._train(model, 3)
def test_glow_3_3(self):
model = Glow(width=24, depth=3, n_levels=3)
self._train(model, 3)
if __name__ == '__main__':
unittest.main()
| 17,209
| 45.016043
| 156
|
py
|
normalizing_flows
|
normalizing_flows-master/data.py
|
from functools import partial
import numpy as np
import torch
import torchvision.transforms as T
from torch.utils.data import DataLoader, TensorDataset
import datasets
# --------------------
# Helper functions
# --------------------
def logit(x, eps=1e-5):
x.clamp_(eps, 1 - eps)
return x.log() - (1 - x).log()
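# logit is the inverse of the sigmoid on the clamped interval; a quick sketch
# (note logit mutates its input in place via clamp_, hence the clone):
#   x = torch.rand(5).clamp(1e-5, 1 - 1e-5)
#   assert torch.allclose(torch.sigmoid(logit(x.clone())), x, atol=1e-4)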
def one_hot(x, label_size):
out = torch.zeros(len(x), label_size).to(x.device)
out[torch.arange(len(x)), x] = 1
return out
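# e.g. one_hot(torch.tensor([0, 2]), label_size=3) ->
#   tensor([[1., 0., 0.],
#           [0., 0., 1.]])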
def load_dataset(name):
exec('from datasets.{} import {}'.format(name.lower(), name))
return locals()[name]
# --------------------
# Dataloaders
# --------------------
def fetch_dataloaders(dataset_name, batch_size, device, flip_toy_var_order=False, toy_train_size=25000, toy_test_size=5000):
# grab datasets
if dataset_name in ['GAS', 'POWER', 'HEPMASS', 'MINIBOONE', 'BSDS300']: # use the constructors by MAF authors
dataset = load_dataset(dataset_name)()
# join train and val data again
train_data = np.concatenate((dataset.trn.x, dataset.val.x), axis=0)
# construct datasets
train_dataset = TensorDataset(torch.from_numpy(train_data.astype(np.float32)))
test_dataset = TensorDataset(torch.from_numpy(dataset.tst.x.astype(np.float32)))
input_dims = dataset.n_dims
label_size = None
lam = None
elif dataset_name in ['MNIST']:
dataset = load_dataset(dataset_name)()
# join train and val data again
train_x = np.concatenate((dataset.trn.x, dataset.val.x), axis=0).astype(np.float32)
train_y = np.concatenate((dataset.trn.y, dataset.val.y), axis=0).astype(np.float32)
# construct datasets
train_dataset = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
test_dataset = TensorDataset(torch.from_numpy(dataset.tst.x.astype(np.float32)),
torch.from_numpy(dataset.tst.y.astype(np.float32)))
input_dims = dataset.n_dims
label_size = 10
lam = dataset.alpha
elif dataset_name in ['TOY', 'MOONS']: # use own constructors
train_dataset = load_dataset(dataset_name)(toy_train_size, flip_toy_var_order)
test_dataset = load_dataset(dataset_name)(toy_test_size, flip_toy_var_order)
input_dims = train_dataset.input_size
label_size = train_dataset.label_size
lam = None
# imaging dataset pulled from torchvision
elif dataset_name in ['CIFAR10']:
label_size = 10
# MAF logit transform parameter (cf. MAF paper section 4.3);
# this branch only handles CIFAR10, so the MNIST value (1e-6) never applies here
lam = 5e-2
# MAF paper converts image data to logit space via transform described in section 4.3
image_transforms = T.Compose([T.ToTensor(),
T.Lambda(lambda x: x + torch.rand(*x.shape) / 256.), # dequantize (cf MAF paper)
T.Lambda(lambda x: logit(lam + (1 - 2 * lam) * x))]) # to logit space (cf MAF paper)
target_transforms = T.Lambda(lambda x: partial(one_hot, label_size=label_size)(x))
train_dataset = load_dataset(dataset_name)(root=datasets.root, train=True, transform=image_transforms, target_transform=target_transforms)
test_dataset = load_dataset(dataset_name)(root=datasets.root, train=False, transform=image_transforms, target_transform=target_transforms)  # use the held-out split
input_dims = train_dataset[0][0].shape
else:
raise ValueError('Unrecognized dataset.')
# keep input dims, input size and label size
train_dataset.input_dims = input_dims
train_dataset.input_size = int(np.prod(input_dims))
train_dataset.label_size = label_size
train_dataset.lam = lam
test_dataset.input_dims = input_dims
test_dataset.input_size = int(np.prod(input_dims))
test_dataset.label_size = label_size
test_dataset.lam = lam
# construct dataloaders
kwargs = {'num_workers': 1, 'pin_memory': True} if device.type == 'cuda' else {}  # '==' for string comparison, not 'is'
train_loader = DataLoader(train_dataset, batch_size, shuffle=True, **kwargs)
test_loader = DataLoader(test_dataset, batch_size, shuffle=False, **kwargs)
return train_loader, test_loader
| 4,218
| 36.336283
| 146
|
py
|
normalizing_flows
|
normalizing_flows-master/glow.py
|
"""
Glow: Generative Flow with Invertible 1x1 Convolutions
arXiv:1807.03039v2
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
import torchvision.transforms as T
from torchvision.utils import save_image, make_grid
from torch.utils.data import DataLoader
from torch.utils.checkpoint import checkpoint
from torchvision.datasets import MNIST
from datasets.celeba import CelebA
import numpy as np
from tensorboardX import SummaryWriter
import os
import time
import math
import argparse
import pprint
parser = argparse.ArgumentParser()
# action
parser.add_argument('--train', action='store_true', help='Train a flow.')
parser.add_argument('--evaluate', action='store_true', help='Evaluate a flow.')
parser.add_argument('--generate', action='store_true', help='Generate samples from a model.')
parser.add_argument('--visualize', action='store_true', help='Visualize manipulated attributes.')
parser.add_argument('--restore_file', type=str, help='Path to model to restore.')
parser.add_argument('--seed', type=int, help='Random seed to use.')
# paths and reporting
parser.add_argument('--data_dir', default='/mnt/disks/data/', help='Location of datasets.')
parser.add_argument('--output_dir', default='./results/{}'.format(os.path.splitext(__file__)[0]))
parser.add_argument('--results_file', default='results.txt', help='Filename where to store settings and test results.')
parser.add_argument('--log_interval', type=int, default=2, help='How often to show loss statistics and save samples.')
parser.add_argument('--save_interval', type=int, default=50, help='How often to save during training.')
parser.add_argument('--eval_interval', type=int, default=1, help='Number of epochs to eval model and save model checkpoint.')
# data
parser.add_argument('--dataset', type=str, help='Which dataset to use.')
# model parameters
parser.add_argument('--depth', type=int, default=32, help='Depth of the network (cf Glow figure 2).')
parser.add_argument('--n_levels', type=int, default=3, help='Number of levels of the network (cf Glow figure 2).')
parser.add_argument('--width', type=int, default=512, help='Dimension of the hidden layers.')
parser.add_argument('--z_std', type=float, help='Pass specific standard deviation during generation/sampling.')
# training params
parser.add_argument('--batch_size', type=int, default=16, help='Training batch size.')
parser.add_argument('--batch_size_init', type=int, default=256, help='Batch size for the data dependent initialization.')
parser.add_argument('--n_epochs', type=int, default=10, help='Number of epochs to train.')
parser.add_argument('--n_epochs_warmup', type=int, default=2, help='Number of warmup epochs for linear learning rate annealing.')
parser.add_argument('--start_epoch', default=0, help='Starting epoch (for logging; to be overwritten when restoring a file).')
parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate.')
parser.add_argument('--mini_data_size', type=int, default=None, help='Train only on this number of datapoints.')
parser.add_argument('--grad_norm_clip', default=50, type=float, help='Clip gradients during training.')
parser.add_argument('--checkpoint_grads', action='store_true', default=False, help='Whether to use gradient checkpointing in forward pass.')
parser.add_argument('--n_bits', default=5, type=int, help='Number of bits for input images.')
# distributed training params
parser.add_argument('--distributed', action='store_true', default=False, help='Whether to use DistributedDataParallels on multiple machines and GPUs.')
parser.add_argument('--world_size', type=int, default=1, help='Number of nodes for distributed training.')
parser.add_argument('--local_rank', type=int, help='When provided, run model on this cuda device. When None, used by torch.distributed.launch utility to manage multi-GPU training.')
# visualize
parser.add_argument('--vis_img', type=str, help='Path to image file to manipulate attributes and visualize.')
parser.add_argument('--vis_attrs', nargs='+', type=int, help='Which attribute to manipulate.')
parser.add_argument('--vis_alphas', nargs='+', type=float, help='Step size on the manipulation direction.')
best_eval_logprob = float('-inf')
# --------------------
# Data
# --------------------
def fetch_dataloader(args, train=True, data_dependent_init=False):
args.input_dims = {'mnist': (3,32,32), 'celeba': (3,64,64)}[args.dataset]
transforms = {'mnist': T.Compose([T.Pad(2), # image to 32x32 same as CIFAR
T.RandomAffine(degrees=0, translate=(0.1, 0.1)), # random shifts to fill the padded pixels
T.ToTensor(),
T.Lambda(lambda t: t + torch.rand_like(t)/2**8), # dequantize
T.Lambda(lambda t: t.expand(3,-1,-1))]), # expand to 3 channels
'celeba': T.Compose([T.CenterCrop(148), # RealNVP preprocessing
T.Resize(64),
T.Lambda(lambda im: np.array(im, dtype=np.float32)), # to numpy
T.Lambda(lambda x: np.floor(x / 2**(8 - args.n_bits)) / 2**args.n_bits), # lower bits
T.ToTensor(), # note: if input to this transform is uint8, it divides by 255 and returns float
T.Lambda(lambda t: t + torch.rand_like(t) / 2**args.n_bits)]) # dequantize
}[args.dataset]
dataset = {'mnist': MNIST, 'celeba': CelebA}[args.dataset]
# load the specific dataset
dataset = dataset(root=args.data_dir, train=train, transform=transforms)
if args.mini_data_size:
dataset.data = dataset.data[:args.mini_data_size]
# load sampler and dataloader
if args.distributed and train is True and not data_dependent_init: # distributed training; but exclude initialization
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
sampler = None
batch_size = args.batch_size_init if data_dependent_init else args.batch_size # if data dependent init use init batch size
kwargs = {'num_workers': 1, 'pin_memory': True} if args.device.type == 'cuda' else {}  # '==' for string comparison, not 'is'
return DataLoader(dataset, batch_size=batch_size, shuffle=(sampler is None), drop_last=True, sampler=sampler, **kwargs)
# --------------------
# Model component layers
# --------------------
class Actnorm(nn.Module):
""" Actnorm layer; cf Glow section 3.1 """
def __init__(self, param_dim=(1,3,1,1)):
super().__init__()
self.scale = nn.Parameter(torch.ones(param_dim))
self.bias = nn.Parameter(torch.zeros(param_dim))
self.register_buffer('initialized', torch.tensor(0).byte())
def forward(self, x):
if not self.initialized:
# per channel mean and variance where x.shape = (B, C, H, W)
self.bias.data.copy_(x.transpose(0,1).flatten(1).mean(1).view_as(self.bias))
self.scale.data.copy_((x.transpose(0,1).flatten(1).std(1, False) + 1e-6).view_as(self.scale))
self.initialized += 1
z = (x - self.bias) / self.scale
logdet = - self.scale.abs().log().sum() * x.shape[2] * x.shape[3]
return z, logdet
def inverse(self, z):
return z * self.scale + self.bias, self.scale.abs().log().sum() * z.shape[2] * z.shape[3]
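# Data-dependent initialization sketch (a usage illustration of the contract
# above, not part of the original module): after the first forward pass the
# outputs should be roughly zero-mean, unit-std per channel.
#   actnorm = Actnorm(param_dim=(1, 3, 1, 1))
#   z, _ = actnorm(torch.randn(64, 3, 8, 8) * 5. + 2.)
#   # z.transpose(0, 1).flatten(1).mean(1) ~ 0 and .std(1) ~ 1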
class Invertible1x1Conv(nn.Module):
""" Invertible 1x1 convolution layer; cf Glow section 3.2 """
def __init__(self, n_channels=3, lu_factorize=False):
super().__init__()
self.lu_factorize = lu_factorize
# initialize a 1x1 convolution weight matrix
w = torch.randn(n_channels, n_channels)
w = torch.qr(w)[0] # note: nn.init.orthogonal_ returns orth matrices with dets +/- 1 which complicates the inverse call below
if lu_factorize:
# compute LU factorization
p, l, u = torch.btriunpack(*w.unsqueeze(0).btrifact())
# initialize model parameters
self.p, self.l, self.u = nn.Parameter(p.squeeze()), nn.Parameter(l.squeeze()), nn.Parameter(u.squeeze())
s = self.u.diag()
self.log_s = nn.Parameter(s.abs().log())
self.register_buffer('sign_s', s.sign()) # note: not optimizing the sign; det W remains the same sign
self.register_buffer('l_mask', torch.tril(torch.ones_like(self.l), -1)) # store mask to compute LU in forward/inverse pass
else:
self.w = nn.Parameter(w)
def forward(self, x):
B,C,H,W = x.shape
if self.lu_factorize:
l = self.l * self.l_mask + torch.eye(C).to(self.l.device)
u = self.u * self.l_mask.t() + torch.diag(self.sign_s * self.log_s.exp())
self.w = self.p @ l @ u
logdet = self.log_s.sum() * H * W
else:
logdet = torch.slogdet(self.w)[-1] * H * W
return F.conv2d(x, self.w.view(C,C,1,1)), logdet
def inverse(self, z):
B,C,H,W = z.shape
if self.lu_factorize:
l = torch.inverse(self.l * self.l_mask + torch.eye(C).to(self.l.device))
u = torch.inverse(self.u * self.l_mask.t() + torch.diag(self.sign_s * self.log_s.exp()))
w_inv = u @ l @ self.p.inverse()
logdet = - self.log_s.sum() * H * W
else:
w_inv = self.w.inverse()
logdet = - torch.slogdet(self.w)[-1] * H * W
return F.conv2d(z, w_inv.view(C,C,1,1)), logdet
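# Design note (an observation, not original documentation): with lu_factorize
# the log|det W| is simply log_s.sum() at O(C) cost, whereas the plain
# parametrization pays an O(C^3) torch.slogdet on every forward/inverse call.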
class AffineCoupling(nn.Module):
""" Affine coupling layer; cf Glow section 3.3; RealNVP figure 2 """
def __init__(self, n_channels, width):
super().__init__()
# network layers;
# per realnvp, network splits input, operates on half of it, and returns shift and scale of dim = half the input channels
self.conv1 = nn.Conv2d(n_channels//2, width, kernel_size=3, padding=1, bias=False) # input is split along channel dim
self.actnorm1 = Actnorm(param_dim=(1, width, 1, 1))
self.conv2 = nn.Conv2d(width, width, kernel_size=1, padding=1, bias=False)
self.actnorm2 = Actnorm(param_dim=(1, width, 1, 1))
self.conv3 = nn.Conv2d(width, n_channels, kernel_size=3) # output is split into scale and shift components
self.log_scale_factor = nn.Parameter(torch.zeros(n_channels,1,1))   # learned scale (cf RealNVP sec 4.1 / Glow official code)
# initialize last convolution with zeros, such that each affine coupling layer performs an identity function
self.conv3.weight.data.zero_()
self.conv3.bias.data.zero_()
def forward(self, x):
x_a, x_b = x.chunk(2, 1) # split along channel dim
h = F.relu(self.actnorm1(self.conv1(x_b))[0])
h = F.relu(self.actnorm2(self.conv2(h))[0])
h = self.conv3(h) * self.log_scale_factor.exp()
t = h[:,0::2,:,:] # shift; take even channels
s = h[:,1::2,:,:] # scale; take odd channels
s = torch.sigmoid(s + 2.)  # at initialization, s is 0 and sigmoid(2) is near identity
z_a = s * x_a + t
z_b = x_b
z = torch.cat([z_a, z_b], dim=1) # concat along channel dim
logdet = s.log().sum([1, 2, 3])
return z, logdet
def inverse(self, z):
z_a, z_b = z.chunk(2, 1) # split along channel dim
h = F.relu(self.actnorm1(self.conv1(z_b))[0])
h = F.relu(self.actnorm2(self.conv2(h))[0])
h = self.conv3(h) * self.log_scale_factor.exp()
t = h[:,0::2,:,:] # shift; take even channels
s = h[:,1::2,:,:] # scale; take odd channels
s = torch.sigmoid(s + 2.)
x_a = (z_a - t) / s
x_b = z_b
x = torch.cat([x_a, x_b], dim=1) # concat along channel dim
logdet = - s.log().sum([1, 2, 3])
return x, logdet
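# Shape note (an observation, not original documentation): conv1 (3x3, pad 1)
# preserves HxW, conv2 (1x1, pad 1) grows it to (H+2)x(W+2), and conv3
# (3x3, no pad) shrinks it back, so z and x match spatially.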
class Squeeze(nn.Module):
""" RealNVP squeezing operation layer (cf RealNVP section 3.6; Glow figure 2b):
For each channel, it divides the image into subsquares of shape 2 × 2 × c, then reshapes them into subsquares of
shape 1 × 1 × 4c. The squeezing operation transforms an s × s × c tensor into an s × s × 4c tensor """
def __init__(self):
super().__init__()
def forward(self, x):
B,C,H,W = x.shape
x = x.reshape(B, C, H//2, 2, W//2, 2) # factor spatial dim
x = x.permute(0, 1, 3, 5, 2, 4) # transpose to (B, C, 2, 2, H//2, W//2)
x = x.reshape(B, 4*C, H//2, W//2) # aggregate spatial dim factors into channels
return x
def inverse(self, x):
B,C,H,W = x.shape
x = x.reshape(B, C//4, 2, 2, H, W) # factor channel dim
x = x.permute(0, 1, 4, 2, 5, 3) # transpose to (B, C//4, H, 2, W, 2)
x = x.reshape(B, C//4, 2*H, 2*W) # aggregate channel dim factors into spatial dims
return x
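# Shape sketch: Squeeze()(torch.zeros(1, 3, 32, 32)).shape == (1, 12, 16, 16)
# and inverse() restores (1, 3, 32, 32); the permute keeps this a lossless
# rearrangement of pixels into channels rather than a plain reshape.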
class Split(nn.Module):
""" Split layer; cf Glow figure 2 / RealNVP figure 4b
Based on RealNVP multi-scale architecture: splits an input in half along the channel dim; half the vars are
directly modeled as Gaussians while the other half undergo further transformations (cf RealNVP figure 4b).
"""
def __init__(self, n_channels):
super().__init__()
self.gaussianize = Gaussianize(n_channels//2)
def forward(self, x):
x1, x2 = x.chunk(2, dim=1) # split input along channel dim
z2, logdet = self.gaussianize(x1, x2)
return x1, z2, logdet
def inverse(self, x1, z2):
x2, logdet = self.gaussianize.inverse(x1, z2)
x = torch.cat([x1, x2], dim=1) # cat along channel dim
return x, logdet
class Gaussianize(nn.Module):
""" Gaussianization per ReanNVP sec 3.6 / fig 4b -- at each step half the variables are directly modeled as Gaussians.
Model as Gaussians:
x2 = z2 * exp(logs) + mu, so x2 ~ N(mu, exp(logs)^2) where mu, logs = f(x1)
then to recover the random numbers z driving the model:
z2 = (x2 - mu) * exp(-logs)
Here f(x1) is a conv layer initialized to identity.
"""
def __init__(self, n_channels):
super().__init__()
self.net = nn.Conv2d(n_channels, 2*n_channels, kernel_size=3, padding=1) # computes the parameters of Gaussian
self.log_scale_factor = nn.Parameter(torch.zeros(2*n_channels,1,1))  # learned scale (cf RealNVP sec 4.1 / Glow official code)
# initialize to identity
self.net.weight.data.zero_()
self.net.bias.data.zero_()
def forward(self, x1, x2):
h = self.net(x1) * self.log_scale_factor.exp() # use x1 to model x2 as Gaussians; learnable scale
m, logs = h[:,0::2,:,:], h[:,1::2,:,:] # split along channel dims
z2 = (x2 - m) * torch.exp(-logs) # center and scale; log prob is computed at the model forward
logdet = - logs.sum([1,2,3])
return z2, logdet
def inverse(self, x1, z2):
h = self.net(x1) * self.log_scale_factor.exp()
m, logs = h[:,0::2,:,:], h[:,1::2,:,:]
x2 = m + z2 * torch.exp(logs)
logdet = logs.sum([1,2,3])
return x2, logdet
class Preprocess(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
logdet = - math.log(256) * x[0].numel()  # volume change of mapping each image dim from [0, 255] to [0, 1] (cf RealNVP sec 4.1)
return x - 0.5, logdet  # center x at 0
def inverse(self, x):
logdet = math.log(256) * x[0].numel()
return x + 0.5, logdet
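# Worked example of the logdet above: for a 3x32x32 image, mapping each of the
# 3*32*32 = 3072 pixel dims from [0, 255] to [0, 1] contributes
# logdet = -3072 * log(256) to the change of variables.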
# --------------------
# Container layers
# --------------------
class FlowSequential(nn.Sequential):
""" Container for layers of a normalizing flow """
def __init__(self, *args, **kwargs):
self.checkpoint_grads = kwargs.pop('checkpoint_grads', None)
super().__init__(*args, **kwargs)
def forward(self, x):
sum_logdets = 0.
for module in self:
x, logdet = module(x) if not self.checkpoint_grads else checkpoint(module, x)
sum_logdets = sum_logdets + logdet
return x, sum_logdets
def inverse(self, z):
sum_logdets = 0.
for module in reversed(self):
z, logdet = module.inverse(z)
sum_logdets = sum_logdets + logdet
return z, sum_logdets
class FlowStep(FlowSequential):
""" One step of Glow flow (Actnorm -> Invertible 1x1 conv -> Affine coupling); cf Glow Figure 2a """
def __init__(self, n_channels, width, lu_factorize=False):
super().__init__(Actnorm(param_dim=(1,n_channels,1,1)),
Invertible1x1Conv(n_channels, lu_factorize),
AffineCoupling(n_channels, width))
class FlowLevel(nn.Module):
""" One depth level of Glow flow (Squeeze -> FlowStep x K -> Split); cf Glow figure 2b """
def __init__(self, n_channels, width, depth, checkpoint_grads=False, lu_factorize=False):
super().__init__()
# network layers
self.squeeze = Squeeze()
self.flowsteps = FlowSequential(*[FlowStep(4*n_channels, width, lu_factorize) for _ in range(depth)], checkpoint_grads=checkpoint_grads)
self.split = Split(4*n_channels)
def forward(self, x):
x = self.squeeze(x)
x, logdet_flowsteps = self.flowsteps(x)
x1, z2, logdet_split = self.split(x)
logdet = logdet_flowsteps + logdet_split
return x1, z2, logdet
def inverse(self, x1, z2):
x, logdet_split = self.split.inverse(x1, z2)
x, logdet_flowsteps = self.flowsteps.inverse(x)
x = self.squeeze.inverse(x)
logdet = logdet_flowsteps + logdet_split
return x, logdet
# --------------------
# Model
# --------------------
class Glow(nn.Module):
""" Glow multi-scale architecture with depth of flow K and number of levels L; cf Glow figure 2; section 3"""
def __init__(self, width, depth, n_levels, input_dims=(3,32,32), checkpoint_grads=False, lu_factorize=False):
super().__init__()
# calculate output dims
in_channels, H, W = input_dims
out_channels = int(in_channels * 4**(n_levels+1) / 2**n_levels) # each Squeeze results in 4x in_channels (cf RealNVP section 3.6); each Split in 1/2x in_channels
out_HW = int(H / 2**(n_levels+1)) # each Squeeze is 1/2x HW dim (cf RealNVP section 3.6)
self.output_dims = out_channels, out_HW, out_HW
# preprocess images
self.preprocess = Preprocess()
# network layers cf Glow figure 2b: (Squeeze -> FlowStep x depth -> Split) x n_levels -> Squeeze -> FlowStep x depth
self.flowlevels = nn.ModuleList([FlowLevel(in_channels * 2**i, width, depth, checkpoint_grads, lu_factorize) for i in range(n_levels)])
self.squeeze = Squeeze()
self.flowstep = FlowSequential(*[FlowStep(out_channels, width, lu_factorize) for _ in range(depth)], checkpoint_grads=checkpoint_grads)
# gaussianize the final z output; initialize to identity
self.gaussianize = Gaussianize(out_channels)
# base distribution of the flow
self.register_buffer('base_dist_mean', torch.zeros(1))
self.register_buffer('base_dist_var', torch.ones(1))
def forward(self, x):
x, sum_logdets = self.preprocess(x)
# pass through flow
zs = []
for m in self.flowlevels:
x, z, logdet = m(x)
sum_logdets = sum_logdets + logdet
zs.append(z)
x = self.squeeze(x)
z, logdet = self.flowstep(x)
sum_logdets = sum_logdets + logdet
# gaussianize the final z
z, logdet = self.gaussianize(torch.zeros_like(z), z)
sum_logdets = sum_logdets + logdet
zs.append(z)
return zs, sum_logdets
def inverse(self, zs=None, batch_size=None, z_std=1.):
if zs is None: # if no random numbers are passed, generate new from the base distribution
assert batch_size is not None, 'Must either specify batch_size or pass a batch of z random numbers.'
zs = [z_std * self.base_dist.sample((batch_size, *self.output_dims)).squeeze()]
# pass through inverse flow
z, sum_logdets = self.gaussianize.inverse(torch.zeros_like(zs[-1]), zs[-1])
x, logdet = self.flowstep.inverse(z)
sum_logdets = sum_logdets + logdet
x = self.squeeze.inverse(x)
for i, m in enumerate(reversed(self.flowlevels)):
z = z_std * (self.base_dist.sample(x.shape).squeeze() if len(zs)==1 else zs[-i-2]) # if no z's are passed, generate new random numbers from the base dist
x, logdet = m.inverse(x, z)
sum_logdets = sum_logdets + logdet
# postprocess
x, logdet = self.preprocess.inverse(x)
sum_logdets = sum_logdets + logdet
return x, sum_logdets
@property
def base_dist(self):
return D.Normal(self.base_dist_mean, self.base_dist_var)
def log_prob(self, x, bits_per_pixel=False):
zs, logdet = self.forward(x)
log_prob = sum(self.base_dist.log_prob(z).sum([1,2,3]) for z in zs) + logdet
if bits_per_pixel:
log_prob /= (math.log(2) * x[0].numel())
return log_prob
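# Bits-per-pixel conversion used above, written out: bpd = -log_prob / (D * ln 2)
# with D = x[0].numel(); e.g. for (B, 3, 32, 32) inputs, D = 3 * 32 * 32 = 3072
# (callers such as train_epoch take the negative of the returned value).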
# --------------------
# Train and evaluate
# --------------------
@torch.no_grad()
def data_dependent_init(model, args):
# set up an iterator with batch size = batch_size_init and run through model
dataloader = fetch_dataloader(args, train=True, data_dependent_init=True)
model(next(iter(dataloader))[0].requires_grad_(True if args.checkpoint_grads else False).to(args.device))
del dataloader
return True
def train_epoch(model, dataloader, optimizer, writer, epoch, args):
model.train()
tic = time.time()
for i, (x,y) in enumerate(dataloader):
args.step += args.world_size
# warmup learning rate
if epoch <= args.n_epochs_warmup:
optimizer.param_groups[0]['lr'] = args.lr * min(1, args.step / (len(dataloader) * args.world_size * args.n_epochs_warmup))
x = x.requires_grad_(True if args.checkpoint_grads else False).to(args.device) # requires_grad needed for checkpointing
loss = - model.log_prob(x, bits_per_pixel=True).mean(0)
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.grad_norm_clip)
optimizer.step()
# report stats
if i % args.log_interval == 0:
# compute KL divergence between base and each of the z's that the model produces
with torch.no_grad():
zs, _ = model(x)
kls = [D.kl.kl_divergence(D.Normal(z.mean(), z.std()), model.base_dist) for z in zs]
# write stats
if args.on_main_process:
et = time.time() - tic # elapsed time
tt = len(dataloader) * et / (i+1) # total time per epoch
print('Epoch: [{}/{}][{}/{}]\tStep: {}\tTime: elapsed {:.0f}m{:02.0f}s / total {:.0f}m{:02.0f}s\tLoss {:.4f}\t'.format(
epoch, args.start_epoch + args.n_epochs, i+1, len(dataloader), args.step, et//60, et%60, tt//60, tt%60, loss.item()))
# update writer
for j, kl in enumerate(kls):
writer.add_scalar('kl_level_{}'.format(j), kl.item(), args.step)
writer.add_scalar('train_bits_x', loss.item(), args.step)
# save and generate
if i % args.save_interval == 0:
# generate samples
samples = generate(model, n_samples=4, z_stds=[0., 0.25, 0.7, 1.0])
images = make_grid(samples.cpu(), nrow=4, pad_value=1)
# write stats and save checkpoints
if args.on_main_process:
save_image(images, os.path.join(args.output_dir, 'generated_sample_{}.png'.format(args.step)))
# save training checkpoint
torch.save({'epoch': epoch,
'global_step': args.step,
'state_dict': model.state_dict()},
os.path.join(args.output_dir, 'checkpoint.pt'))
torch.save(optimizer.state_dict(), os.path.join(args.output_dir, 'optim_checkpoint.pt'))
@torch.no_grad()
def evaluate(model, dataloader, args):
model.eval()
print('Evaluating ...', end='\r')
logprobs = []
for x,y in dataloader:
x = x.to(args.device)
logprobs.append(model.log_prob(x, bits_per_pixel=True))
logprobs = torch.cat(logprobs, dim=0).to(args.device)
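# the +/- term below is two standard errors of the mean, i.e. an approximate 95% confidence interval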
logprob_mean, logprob_std = logprobs.mean(0), 2 * logprobs.std(0) / math.sqrt(len(dataloader.dataset))
return logprob_mean, logprob_std
@torch.no_grad()
def generate(model, n_samples, z_stds):
model.eval()
print('Generating ...', end='\r')
samples = []
for z_std in z_stds:
sample, _ = model.inverse(batch_size=n_samples, z_std=z_std)
log_probs = model.log_prob(sample, bits_per_pixel=True)
samples.append(sample[log_probs.argsort().flip(0)]) # sort by log_prob; flip high (left) to low (right)
return torch.cat(samples,0)
def train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, writer, args):
global best_eval_logprob
for epoch in range(args.start_epoch, args.start_epoch + args.n_epochs):
if args.distributed:
train_dataloader.sampler.set_epoch(epoch)
train_epoch(model, train_dataloader, optimizer, writer, epoch, args)
# evaluate
if epoch % args.eval_interval == 0:
eval_logprob_mean, eval_logprob_std = evaluate(model, test_dataloader, args)
print('Evaluate at epoch {}: bits_x = {:.3f} +/- {:.3f}'.format(epoch, eval_logprob_mean, eval_logprob_std))
# save best state
if args.on_main_process and eval_logprob_mean > best_eval_logprob:
best_eval_logprob = eval_logprob_mean
torch.save({'epoch': epoch,
'global_step': args.step,
'state_dict': model.state_dict()},
os.path.join(args.output_dir, 'best_model_checkpoint.pt'))
# --------------------
# Visualizations
# --------------------
def encode_dataset(model, dataloader):
model.eval()
zs = []
attrs = []
for i, (x,y) in enumerate(dataloader):
print('Encoding [{}/{}]'.format(i+1, len(dataloader)), end='\r')
x = x.to(args.device)
zs_i, _ = model(x)
zs.append(torch.cat([z.flatten(1) for z in zs_i], dim=1))
attrs.append(y)
zs = torch.cat(zs, dim=0)
attrs = torch.cat(attrs, dim=0)
print('Encoding completed.')
return zs, attrs
def compute_dz(zs, attrs, idx):
""" for a given attribute idx, compute the mean for all encoded z's corresponding to the positive and negative attribute """
z_pos = [zs[i] for i in range(len(zs)) if attrs[i][idx] == +1]
z_neg = [zs[i] for i in range(len(zs)) if attrs[i][idx] == -1]
# dz = z_pos - z_neg; where z_pos is mean of all encoded datapoints where attr is present;
return torch.stack(z_pos).mean(0) - torch.stack(z_neg).mean(0) # out tensor of shape (flattened zs dim,)
def get_manipulators(zs, attrs):
""" compute dz (= z_pos - z_neg) for each attribute """
print('Extracting manipulators...', end=' ')
dzs = 1.6 * torch.stack([compute_dz(zs, attrs, i) for i in range(attrs.shape[1])], dim=0) # compute dz for each attribute; the official Glow code scales by 1.6 here
print('Completed.')
return dzs # out (n_attributes, flattened zs dim)
def manipulate(model, z, dz, z_std, alpha):
# 1. record incoming shapes
z_dims = [z_.squeeze().shape for z_ in z]
z_numels = [z_.numel() for z_ in z]
# 2. flatten z into a vector and manipulate by alpha in the direction of dz
z = torch.cat([z_.flatten(1) for z_ in z], dim=1).to(dz.device)
z = z + dz * torch.tensor(alpha).float().view(-1,1).to(dz.device) # out (n_alphas, flattened zs dim)
# 3. reshape back to z shapes from each level of the model
zs = [z_.view((len(alpha), *dim)) for z_, dim in zip(z.split(z_numels, dim=1), z_dims)]
# 4. decode
return model.inverse(zs, z_std=z_std)[0]
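# Usage sketch (illustrative; assumes a trained `model` and a precomputed `z_manipulate` tensor,
# and 15 is a hypothetical attribute index):
# z, _ = model(x.unsqueeze(0)) # encode a single image into the list of per-level z's
# out = manipulate(model, z, z_manipulate[15].unsqueeze(0), z_std=0.6, alpha=[-2,-1,0,1,2])
# # out has shape (n_alphas, 3, H, W): the image decoded at 5 points along the attribute direction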
def load_manipulators(model, args):
# construct dataloader with limited number of images
args.mini_data_size = 30000
# load z manipulators for each attribute
if os.path.exists(os.path.join(args.output_dir, 'z_manipulate.pt')):
z_manipulate = torch.load(os.path.join(args.output_dir, 'z_manipulate.pt'), map_location=args.device)
else:
# encode dataset, compute manipulators, store zs, attributes, and dzs
dataloader = fetch_dataloader(args, train=True)
zs, attrs = encode_dataset(model, dataloader)
z_manipulate = get_manipulators(zs, attrs)
torch.save(zs, os.path.join(args.output_dir, 'zs.pt'))
torch.save(attrs, os.path.join(args.output_dir, 'attrs.pt'))
torch.save(z_manipulate, os.path.join(args.output_dir, 'z_manipulate.pt'))
return z_manipulate
@torch.no_grad()
def visualize(model, args, attrs=None, alphas=None, img_path=None, n_examples=1):
""" manipulate an input image along a given attribute """
dataset = fetch_dataloader(args, train=False).dataset # pull the dataset to access transforms and attrs
# if no attrs passed, manipulate all of them
if not attrs:
attrs = list(range(len(dataset.attr_names)))
# if image is passed, manipulate only the image
if img_path:
from PIL import Image
img = Image.open(img_path)
x = dataset.transform(img) # transform image to tensor and encode
else: # take the first example from the dataset
x, _ = dataset[0]
z, _ = model(x.unsqueeze(0).to(args.device))
# get manipulators
z_manipulate = load_manipulators(model, args)
# decode the varied attributes
dec_x = []
for attr_idx in attrs:
dec_x.append(manipulate(model, z, z_manipulate[attr_idx].unsqueeze(0), args.z_std, alphas))
return torch.stack(dec_x).cpu()
# --------------------
# Main
# --------------------
if __name__ == '__main__':
args = parser.parse_args()
args.step = 0 # global step
best_eval_logprob = float('-inf') # referenced via `global` in train_and_evaluate; must be initialized before training
args.output_dir = os.path.dirname(args.restore_file) if args.restore_file else os.path.join(args.output_dir, time.strftime('%Y-%m-%d_%H-%M-%S', time.gmtime()))
writer = None # init as None in case of multiprocessing; only main process performs write ops
# setup device and distributed training
if args.distributed:
torch.cuda.set_device(args.local_rank)
args.device = torch.device('cuda:{}'.format(args.local_rank))
# initialize
torch.distributed.init_process_group(backend='nccl', init_method='env://')
# compute total world size (used to keep track of global step)
args.world_size = int(os.environ['WORLD_SIZE']) # torch.distributed.launch sets this to nproc_per_node * nnodes
else:
if torch.cuda.is_available(): args.local_rank = 0
args.device = torch.device('cuda:{}'.format(args.local_rank) if args.local_rank is not None else 'cpu')
# write ops only when on_main_process
# NOTE: local_rank is unique only within a machine, so exactly one process per node is on_main_process;
# on a shared file system, args.local_rank below should be replaced by the global rank, e.g. torch.distributed.get_rank()
args.on_main_process = (args.distributed and args.local_rank == 0) or not args.distributed
# setup seed
if args.seed:
torch.manual_seed(args.seed)
if args.device.type == 'cuda': torch.cuda.manual_seed(args.seed)
# load data; sets args.input_dims needed for setting up the model
train_dataloader = fetch_dataloader(args, train=True)
test_dataloader = fetch_dataloader(args, train=False)
# load model
model = Glow(args.width, args.depth, args.n_levels, args.input_dims, args.checkpoint_grads).to(args.device)
if args.distributed:
# NOTE: DistributedDataParallel will divide and allocate batch_size to all available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)
else:
# for compatibility of saving/loading models, wrap non-distributed cpu/gpu model as well;
# i.e. the state dict is keyed by model.module.layer, which now matches between distributed training and later local runs
model = torch.nn.parallel.DataParallel(model)
# DataParallel and DistributedDataParallel are wrappers around the model; expose the wrapped model's functions directly
model.base_dist = model.module.base_dist
model.log_prob = model.module.log_prob
model.inverse = model.module.inverse
# load optimizers
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
# load checkpoint if provided
if args.restore_file:
model_checkpoint = torch.load(args.restore_file, map_location=args.device)
model.load_state_dict(model_checkpoint['state_dict'])
optimizer.load_state_dict(torch.load(os.path.dirname(args.restore_file) + '/optim_checkpoint.pt', map_location=args.device))
args.start_epoch = model_checkpoint['epoch']
args.step = model_checkpoint['global_step']
# setup writer and outputs
if args.on_main_process:
writer = SummaryWriter(log_dir = args.output_dir)
# save settings
config = 'Parsed args:\n{}\n\n'.format(pprint.pformat(args.__dict__)) + \
'Num trainable params: {:,.0f}\n\n'.format(sum(p.numel() for p in model.parameters())) + \
'Model:\n{}'.format(model)
config_path = os.path.join(args.output_dir, 'config.txt')
writer.add_text('model_config', config)
if not os.path.exists(config_path):
with open(config_path, 'a') as f:
print(config, file=f)
if args.train:
# run data dependent init and train
data_dependent_init(model, args)
train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, writer, args)
if args.evaluate:
logprob_mean, logprob_std = evaluate(model, test_dataloader, args)
print('Evaluate: bits_x = {:.3f} +/- {:.3f}'.format(logprob_mean, logprob_std))
if args.generate:
n_samples = 4
z_std = [0., 0.25, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] if not args.z_std else n_samples * [args.z_std]
samples = generate(model, n_samples, z_std)
images = make_grid(samples.cpu(), nrow=n_samples, pad_value=1)
save_image(images, os.path.join(args.output_dir,
'generated_samples_at_z_std_{}.png'.format('range' if args.z_std is None else args.z_std)))
if args.visualize:
if not args.z_std: args.z_std = 0.6
if not args.vis_alphas: args.vis_alphas = [-2,-1,0,1,2]
dec_x = visualize(model, args, args.vis_attrs, args.vis_alphas, args.vis_img) # output (n_attr, n_alpha, 3, H, W)
filename = 'manipulated_sample' if not args.vis_img else \
'manipulated_img_{}'.format(os.path.basename(args.vis_img).split('.')[0])
if args.vis_attrs:
filename += '_attr_' + ','.join(map(str, args.vis_attrs))
save_image(dec_x.view(-1, *args.input_dims), os.path.join(args.output_dir, filename + '.png'), nrow=dec_x.shape[1])
if args.on_main_process:
writer.close()
| 35,698 | 45.302205 | 181 | py | normalizing_flows | normalizing_flows-master/bnaf.py |
"""
Implementation of Block Neural Autoregressive Flow
http://arxiv.org/abs/1904.04676
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from torch.utils.data import DataLoader, TensorDataset
import math
import os
import time
import argparse
import pprint
from functools import partial
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tqdm import tqdm
parser = argparse.ArgumentParser()
# action
parser.add_argument('--train', action='store_true', help='Train a flow.')
parser.add_argument('--plot', action='store_true', help='Plot a flow and target density.')
parser.add_argument('--restore_file', type=str, help='Path to model to restore.')
parser.add_argument('--output_dir', default='./results/{}'.format(os.path.splitext(__file__)[0]))
parser.add_argument('--cuda', type=int, help='Which GPU to run on.')
parser.add_argument('--seed', type=int, default=0, help='Random seed.')
# target density
parser.add_argument('--dataset', type=str, help='Which potential function to approximate.')
# model parameters
parser.add_argument('--data_dim', type=int, default=2, help='Dimension of the data.')
parser.add_argument('--hidden_dim', type=int, default=100, help='Dimensions of hidden layers.')
parser.add_argument('--n_hidden', type=int, default=3, help='Number of hidden layers.')
# training parameters
parser.add_argument('--step', type=int, default=0, help='Current step of training (number of minibatches processed).')
parser.add_argument('--n_steps', type=int, default=1, help='Number of steps to train.')
parser.add_argument('--batch_size', type=int, default=200, help='Training batch size.')
parser.add_argument('--lr', type=float, default=1e-1, help='Initial learning rate.')
parser.add_argument('--lr_decay', type=float, default=0.5, help='Learning rate decay.')
parser.add_argument('--lr_patience', type=float, default=2000, help='Number of steps before decaying learning rate.')
parser.add_argument('--log_interval', type=int, default=50, help='How often to save model and samples.')
# --------------------
# Data
# --------------------
def potential_fn(dataset):
# NF paper table 1 energy functions
w1 = lambda z: torch.sin(2 * math.pi * z[:,0] / 4)
w2 = lambda z: 3 * torch.exp(-0.5 * ((z[:,0] - 1)/0.6)**2)
w3 = lambda z: 3 * torch.sigmoid((z[:,0] - 1) / 0.3)
if dataset == 'u1':
return lambda z: 0.5 * ((torch.norm(z, p=2, dim=1) - 2) / 0.4)**2 - \
torch.log(torch.exp(-0.5*((z[:,0] - 2) / 0.6)**2) + \
torch.exp(-0.5*((z[:,0] + 2) / 0.6)**2) + 1e-10)
elif dataset == 'u2':
return lambda z: 0.5 * ((z[:,1] - w1(z)) / 0.4)**2
elif dataset == 'u3':
return lambda z: - torch.log(torch.exp(-0.5*((z[:,1] - w1(z))/0.35)**2) + \
torch.exp(-0.5*((z[:,1] - w1(z) + w2(z))/0.35)**2) + 1e-10)
elif dataset == 'u4':
return lambda z: - torch.log(torch.exp(-0.5*((z[:,1] - w1(z))/0.4)**2) + \
torch.exp(-0.5*((z[:,1] - w1(z) + w3(z))/0.35)**2) + 1e-10)
else:
raise RuntimeError('Invalid potential name to sample from.')
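# Note (illustrative): each potential U defines an unnormalized density p(z) ∝ exp(-U(z)); e.g. for
# u1 = potential_fn('u1'), u1(torch.tensor([[2., 0.]])) is near zero since (2, 0) lies on the
# radius-2 ring and under one of the two Gaussian bumps, i.e. in a high-density region.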
def sample_2d_data(dataset, n_samples):
z = torch.randn(n_samples, 2)
if dataset == '8gaussians':
scale = 4
sq2 = 1/math.sqrt(2)
centers = [(1,0), (-1,0), (0,1), (0,-1), (sq2,sq2), (-sq2,sq2), (sq2,-sq2), (-sq2,-sq2)]
centers = torch.tensor([(scale * x, scale * y) for x,y in centers])
return sq2 * (0.5 * z + centers[torch.randint(len(centers), size=(n_samples,))])
elif dataset == '2spirals':
n = torch.sqrt(torch.rand(n_samples // 2)) * 540 * (2 * math.pi) / 360
d1x = - torch.cos(n) * n + torch.rand(n_samples // 2) * 0.5
d1y = torch.sin(n) * n + torch.rand(n_samples // 2) * 0.5
x = torch.cat([torch.stack([ d1x, d1y], dim=1),
torch.stack([-d1x, -d1y], dim=1)], dim=0) / 3
return x + 0.1*z
elif dataset == 'checkerboard':
x1 = torch.rand(n_samples) * 4 - 2
x2_ = torch.rand(n_samples) - torch.randint(0, 2, (n_samples,), dtype=torch.float) * 2
x2 = x2_ + x1.floor() % 2
return torch.stack([x1, x2], dim=1) * 2
elif dataset == 'rings':
n_samples4 = n_samples3 = n_samples2 = n_samples // 4
n_samples1 = n_samples - n_samples4 - n_samples3 - n_samples2
# to avoid the first point coinciding with the last (numpy's endpoint=False), generate one extra point and drop it
linspace4 = torch.linspace(0, 2 * math.pi, n_samples4 + 1)[:-1]
linspace3 = torch.linspace(0, 2 * math.pi, n_samples3 + 1)[:-1]
linspace2 = torch.linspace(0, 2 * math.pi, n_samples2 + 1)[:-1]
linspace1 = torch.linspace(0, 2 * math.pi, n_samples1 + 1)[:-1]
circ4_x = torch.cos(linspace4)
circ4_y = torch.sin(linspace4)
circ3_x = torch.cos(linspace3) * 0.75
circ3_y = torch.sin(linspace3) * 0.75
circ2_x = torch.cos(linspace2) * 0.5
circ2_y = torch.sin(linspace2) * 0.5
circ1_x = torch.cos(linspace1) * 0.25
circ1_y = torch.sin(linspace1) * 0.25
x = torch.stack([torch.cat([circ4_x, circ3_x, circ2_x, circ1_x]),
torch.cat([circ4_y, circ3_y, circ2_y, circ1_y])], dim=1) * 3.0
# random sample
x = x[torch.randint(0, n_samples, size=(n_samples,))]
# Add noise
return x + torch.normal(mean=torch.zeros_like(x), std=0.08*torch.ones_like(x))
else:
raise RuntimeError('Invalid `dataset` to sample from.')
# --------------------
# Model components
# --------------------
class MaskedLinear(nn.Module):
def __init__(self, in_features, out_features, data_dim):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.data_dim = data_dim
# Notation:
# BNAF weight calculation for (eq 8): W = g(W) * M_d + W * M_o
# where W is block lower triangular so model is autoregressive,
# g = exp function; M_d is block diagonal mask; M_o is block off-diagonal mask.
# Weight Normalization (Salimans & Kingma, eq 2): w = g * v / ||v||
# where g is scalar, v is k-dim vector, ||v|| is Euclidean norm
# ------
# Here: pre-weight norm matrix is v; then: v = exp(weight) * mask_d + weight * mask_o
# weight-norm scalar is g: an out_features-dimensional vector (logg is stored instead to avoid taking logs in the logdet calculation)
# then weight-normed weight matrix is w = g * v / ||v||
#
# log det jacobian of block lower triangular is taking block diagonal mask of
# log(g*v/||v||) = log(g) + log(v) - log(||v||)
# = log(g) + weight - log(||v||) since v = exp(weight) * mask_d + weight * mask_o
weight = torch.zeros(out_features, in_features)
mask_d = torch.zeros_like(weight)
mask_o = torch.zeros_like(weight)
for i in range(data_dim):
# select block slices
h = slice(i * out_features // data_dim, (i+1) * out_features // data_dim)
w = slice(i * in_features // data_dim, (i+1) * in_features // data_dim)
w_row = slice(0, (i+1) * in_features // data_dim)
# initialize block-lower-triangular weight and construct block diagonal mask_d and lower triangular mask_o
nn.init.kaiming_uniform_(weight[h,w_row], a=math.sqrt(5)) # default nn.Linear weight init only block-wise
mask_d[h,w] = 1
mask_o[h,w_row] = 1
mask_o = mask_o - mask_d # remove diagonal so mask_o is lower triangular 1-off the diagonal
self.weight = nn.Parameter(weight) # pre-mask, pre-weight-norm
self.logg = nn.Parameter(torch.rand(out_features, 1).log()) # weight-norm parameter
self.bias = nn.Parameter(nn.init.uniform_(torch.rand(out_features), -1/math.sqrt(in_features), 1/math.sqrt(in_features))) # default nn.Linear bias init
self.register_buffer('mask_d', mask_d)
self.register_buffer('mask_o', mask_o)
def forward(self, x, sum_logdets):
# 1. compute BNAF masked weight eq 8
v = self.weight.exp() * self.mask_d + self.weight * self.mask_o
# 2. weight normalization
v_norm = v.norm(p=2, dim=1, keepdim=True)
w = self.logg.exp() * v / v_norm
# 3. compute output and logdet of the layer
out = F.linear(x, w, self.bias)
logdet = self.logg + self.weight - 0.5 * v_norm.pow(2).log()
logdet = logdet[self.mask_d.bool()]
logdet = logdet.view(1, self.data_dim, out.shape[1]//self.data_dim, x.shape[1]//self.data_dim) \
.expand(x.shape[0],-1,-1,-1) # output (B, data_dim, out_dim // data_dim, in_dim // data_dim)
# 4. sum with sum_logdets from layers before (BNAF section 3.3)
# Compute log det jacobian of the flow (eq 9, 10, 11) using log-matrix multiplication of the different layers.
# Specifically for two successive MaskedLinear layers A -> B with logdets A and B of shapes
# logdet A is (B, data_dim, outA_dim, inA_dim)
# logdet B is (B, data_dim, outB_dim, inB_dim) where outA_dim = inB_dim
#
# Note -- in the first layer, inA_dim = in_features//data_dim = 1 since in_features == data_dim.
# thus logdet A is (B, data_dim, outA_dim, 1)
#
# Then:
# logsumexp(A.transpose(2,3) + B) = logsumexp( (B, data_dim, 1, outA_dim) + (B, data_dim, outB_dim, inB_dim) , dim=-1)
# = logsumexp( (B, data_dim, 1, outA_dim) + (B, data_dim, outB_dim, outA_dim), dim=-1)
# = logsumexp( (B, data_dim, outB_dim, outA_dim), dim=-1) where dim2 of tensor1 is broadcasted
# = (B, data_dim, outB_dim, 1)
sum_logdets = torch.logsumexp(sum_logdets.transpose(2,3) + logdet, dim=-1, keepdim=True)
return out, sum_logdets
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
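# Illustrative helper (a sketch, not part of the original file): the logsumexp composition in
# forward() is a matrix product in log space. For positive-entry Jacobians A (out_A, in_A) and
# B (out_B, out_A), log(B @ A)[i, j] = logsumexp_k(log B[i, k] + log A[k, j]).
def _example_log_matmul(logA, logB):
    # logA: (..., out_A, in_A); logB: (..., out_B, out_A); returns log(B @ A)
    return torch.logsumexp(logB.unsqueeze(-1) + logA.unsqueeze(-3), dim=-2)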
class Tanh(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, sum_logdets):
# derivation of logdet:
# d/dx tanh = 1 / cosh^2; cosh = (1 + exp(-2x)) / (2*exp(-x))
# log d/dx tanh = - 2 * log cosh = -2 * (x - log 2 + log(1 + exp(-2x)))
logdet = -2 * (x - math.log(2) + F.softplus(-2*x))
sum_logdets = sum_logdets + logdet.view_as(sum_logdets)
return x.tanh(), sum_logdets
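# Numerical check (a sketch, not part of the original file): log d/dx tanh(x) = log(1 - tanh(x)**2),
# and the numerically stable form used above agrees with it:
def _example_tanh_logdet_check(x=torch.tensor([0., 1., -2.])):
    direct = torch.log(1 - x.tanh()**2) # analytic derivative of tanh
    stable = -2 * (x - math.log(2) + F.softplus(-2 * x)) # stable form from Tanh.forward
    return torch.allclose(direct, stable, atol=1e-6) # True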
class FlowSequential(nn.Sequential):
""" Container for layers of a normalizing flow """
def forward(self, x):
sum_logdets = torch.zeros(1, x.shape[1], 1, 1, device=x.device)
for module in self:
x, sum_logdets = module(x, sum_logdets)
return x, sum_logdets.squeeze()
# --------------------
# Model
# --------------------
class BNAF(nn.Module):
def __init__(self, data_dim, n_hidden, hidden_dim):
super().__init__()
# base distribution for calculation of log prob under the model
self.register_buffer('base_dist_mean', torch.zeros(data_dim))
self.register_buffer('base_dist_var', torch.ones(data_dim))
# construct model
modules = []
modules += [MaskedLinear(data_dim, hidden_dim, data_dim), Tanh()]
for _ in range(n_hidden):
modules += [MaskedLinear(hidden_dim, hidden_dim, data_dim), Tanh()]
modules += [MaskedLinear(hidden_dim, data_dim, data_dim)]
self.net = FlowSequential(*modules)
# TODO -- add permutation
# add residual gate
# add stack of flows
@property
def base_dist(self):
return D.Normal(self.base_dist_mean, self.base_dist_var)
def forward(self, x):
return self.net(x)
def compute_kl_qp_loss(model, target_potential_fn, batch_size):
""" Compute BNAF eq 3 & 20:
KL(q_inv||p) where q_inv is the inverse flow transform (log_q_inv = log_q_base - logdet), p is the target distribution (energy potential)
Returns the minimization objective for density matching. """
z = model.base_dist.sample((batch_size,))
q_log_prob = model.base_dist.log_prob(z)
zk, logdet = model(z)
p_log_prob = - target_potential_fn(zk) # p = exp(-potential) => log_p = - potential
return q_log_prob.sum(1) - logdet.sum(1) - p_log_prob # BNAF eq 20
def compute_kl_pq_loss(model, sample_2d_data_fn, batch_size):
""" Compute BNAF eq 2 & 16:
KL(p||q_fwd) where q_fwd is the forward flow transform (log_q_fwd = log_q_base + logdet), p is the target distribution.
Returns the minimization objective for density estimation (NLL under the flow since the entropy of the target dist is fixed wrt the optimization) """
sample = sample_2d_data_fn(batch_size).to(model.base_dist.loc.device)
z, logdet = model(sample)
return - torch.sum(model.base_dist.log_prob(z) + logdet, dim=1)
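# Usage sketch (illustrative; mirrors the selection logic in __main__ below):
# loss = compute_kl_pq_loss(model, partial(sample_2d_data, '8gaussians'), 256).mean() # density estimation from samples
# loss = compute_kl_qp_loss(model, potential_fn('u1'), 256).mean() # density matching to an energy potential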
# --------------------
# Training
# --------------------
def train_flow(model, potential_or_sampling_fn, loss_fn, optimizer, scheduler, args):
model.train()
with tqdm(total=args.n_steps, desc='Start step {}; Training for {} steps'.format(args.step, args.n_steps)) as pbar:
for _ in range(args.n_steps):
args.step += 1
loss = loss_fn(model, potential_or_sampling_fn, args.batch_size).mean(0)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step(loss)
pbar.set_postfix(loss = '{:.3f}'.format(loss.item()))
pbar.update()
if args.step % args.log_interval == 0:
# save model
torch.save({'step': args.step,
'state_dict': model.state_dict()},
os.path.join(args.output_dir, 'checkpoint.pt'))
torch.save({'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict()},
os.path.join(args.output_dir, 'optim_checkpoint.pt'))
# plot and save results
plot(model, potential_or_sampling_fn, args)
# --------------------
# Plotting
# --------------------
@torch.no_grad()
def plot(model, potential_or_sampling_fn, args):
n_pts = 1000
range_lim = 4
# construct test points
test_grid = setup_grid(range_lim, n_pts, args)
# plot
if args.samples:
fig, axs = plt.subplots(1, 2, figsize=(8,4), subplot_kw={'aspect': 'equal'})
plot_samples(potential_or_sampling_fn, axs[0], range_lim, n_pts)
plot_fwd_flow_density(model, axs[1], test_grid, n_pts, args.batch_size)
else:
fig, axs = plt.subplots(1, 3, figsize=(12,4.3), subplot_kw={'aspect': 'equal'})
plot_potential(potential_or_sampling_fn, axs[0], test_grid, n_pts)
plot_inv_flow_density(model, axs[1], test_grid, n_pts, args.batch_size)
plot_flow_samples(model, axs[2], n_pts, args.batch_size)
# format
for ax in plt.gcf().axes: format_ax(ax, range_lim)
plt.tight_layout()
# save
plt.savefig(os.path.join(args.output_dir, 'vis_step_{}.png'.format(args.step)))
plt.close()
def setup_grid(range_lim, n_pts, args):
x = torch.linspace(-range_lim, range_lim, n_pts)
xx, yy = torch.meshgrid((x, x))
zz = torch.stack((xx.flatten(), yy.flatten()), dim=1)
return xx, yy, zz.to(args.device)
def format_ax(ax, range_lim):
ax.set_xlim(-range_lim, range_lim)
ax.set_ylim(-range_lim, range_lim)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.invert_yaxis()
def plot_potential(potential_fn, ax, test_grid, n_pts):
xx, yy, zz = test_grid
ax.pcolormesh(xx, yy, torch.exp(-potential_fn(zz)).view(n_pts,n_pts).cpu().data, cmap=plt.cm.jet)
ax.set_title('Target density')
def plot_samples(samples_fn, ax, range_lim, n_pts):
samples = samples_fn(n_pts**2).numpy()
ax.hist2d(samples[:,0], samples[:,1], range=[[-range_lim, range_lim], [-range_lim, range_lim]], bins=n_pts, cmap=plt.cm.jet)
ax.set_title('Target samples')
def plot_flow_samples(model, ax, n_pts, batch_size):
z = model.base_dist.sample((n_pts**2,))
zk = torch.cat([model(z_)[0] for z_ in z.split(batch_size, dim=0)], 0)
zk = zk.cpu().numpy()
# plot
ax.hist2d(zk[:,0], zk[:,1], bins=n_pts, cmap=plt.cm.jet)
ax.set_facecolor(plt.cm.jet(0.))
ax.set_title('Flow samples')
def plot_fwd_flow_density(model, ax, test_grid, n_pts, batch_size):
""" plots square grid and flow density; where density under the flow is exp(log_flow_base_dist + logdet) """
xx, yy, zz = test_grid
# compute posterior approx density
zzk, logdets = [], []
for zz_i in zz.split(batch_size, dim=0):
zzk_i, logdets_i = model(zz_i)
zzk += [zzk_i]
logdets += [logdets_i]
zzk, logdets = torch.cat(zzk, 0), torch.cat(logdets, 0)
log_prob = model.base_dist.log_prob(zzk) + logdets
prob = log_prob.sum(1).exp().cpu()
# plot
ax.pcolormesh(xx, yy, prob.view(n_pts,n_pts), cmap=plt.cm.jet)
ax.set_facecolor(plt.cm.jet(0.))
ax.set_title('Flow density')
def plot_inv_flow_density(model, ax, test_grid, n_pts, batch_size):
""" plots transformed grid and density; where density is exp(loq_flow_base_dist - logdet) """
xx, yy, zz = test_grid
# compute posterior approx density
zzk, logdets = [], []
for zz_i in zz.split(batch_size, dim=0):
zzk_i, logdets_i = model(zz_i)
zzk += [zzk_i]
logdets += [logdets_i]
zzk, logdets = torch.cat(zzk, 0), torch.cat(logdets, 0)
log_q0 = model.base_dist.log_prob(zz)
log_qk = log_q0 - logdets
qk = log_qk.sum(1).exp().cpu()
zzk = zzk.cpu()
# plot
ax.pcolormesh(zzk[:,0].view(n_pts,n_pts), zzk[:,1].view(n_pts,n_pts), qk.view(n_pts,n_pts), cmap=plt.cm.jet)
ax.set_facecolor(plt.cm.jet(0.))
ax.set_title('Flow density')
if __name__ == '__main__':
args = parser.parse_args()
args.output_dir = os.path.dirname(args.restore_file) if args.restore_file else os.path.join(args.output_dir, time.strftime('%Y-%m-%d_%H-%M-%S', time.gmtime()))
if not os.path.isdir(args.output_dir): os.makedirs(args.output_dir)
args.device = torch.device('cuda:{}'.format(args.cuda) if args.cuda is not None and torch.cuda.is_available() else 'cpu')
torch.manual_seed(args.seed)
if args.device.type == 'cuda': torch.cuda.manual_seed(args.seed)
model = BNAF(args.data_dim, args.n_hidden, args.hidden_dim).to(args.device)
if args.restore_file:
model_checkpoint = torch.load(args.restore_file, map_location=args.device)
model.load_state_dict(model_checkpoint['state_dict'])
args.step = model_checkpoint['step']
# save settings
config = 'Parsed args:\n{}\n\n'.format(pprint.pformat(args.__dict__)) + \
'Num trainable params: {:,.0f}\n\n'.format(sum(p.numel() for p in model.parameters())) + \
'Model:\n{}'.format(model)
config_path = os.path.join(args.output_dir, 'config.txt')
if not os.path.exists(config_path):
with open(config_path, 'a') as f:
print(config, file=f)
# setup data -- density to estimate/match
args.samples = not (args.dataset.startswith('u') and len(args.dataset) == 2)
if args.samples:
# target is density to estimate
potential_or_sampling_fn = partial(sample_2d_data, args.dataset)
loss_fn = compute_kl_pq_loss
else:
# target is energy potential to match
potential_or_sampling_fn = potential_fn(args.dataset)
loss_fn = compute_kl_qp_loss
if args.train:
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=args.lr_decay, patience=args.lr_patience, verbose=True)
if args.restore_file:
optim_checkpoint = torch.load(os.path.dirname(args.restore_file) + '/optim_checkpoint.pt', map_location=args.device)
optimizer.load_state_dict(optim_checkpoint['optimizer'])
scheduler.load_state_dict(optim_checkpoint['scheduler'])
train_flow(model, potential_or_sampling_fn, loss_fn, optimizer, scheduler, args)
if args.plot:
plot(model, potential_or_sampling_fn, args)
| 20,690 | 42.836864 | 163 | py | normalizing_flows | normalizing_flows-master/maf.py |
"""
Masked Autoregressive Flow for Density Estimation
arXiv:1705.07057v4
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
import torchvision.transforms as T
from torchvision.utils import save_image
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import math
import argparse
import pprint
import copy
from data import fetch_dataloaders
parser = argparse.ArgumentParser()
# action
parser.add_argument('--train', action='store_true', help='Train a flow.')
parser.add_argument('--evaluate', action='store_true', help='Evaluate a flow.')
parser.add_argument('--restore_file', type=str, help='Path to model to restore.')
parser.add_argument('--generate', action='store_true', help='Generate samples from a model.')
parser.add_argument('--data_dir', default='./data/', help='Location of datasets.')
parser.add_argument('--output_dir', default='./results/{}'.format(os.path.splitext(__file__)[0]))
parser.add_argument('--results_file', default='results.txt', help='Filename where to store settings and test results.')
parser.add_argument('--no_cuda', action='store_true', help='Do not use cuda.')
# data
parser.add_argument('--dataset', default='toy', help='Which dataset to use.')
parser.add_argument('--flip_toy_var_order', action='store_true', help='Whether to flip the toy dataset variable order to (x2, x1).')
parser.add_argument('--seed', type=int, default=1, help='Random seed to use.')
# model
parser.add_argument('--model', default='maf', help='Which model to use: made, maf.')
# made parameters
parser.add_argument('--n_blocks', type=int, default=5, help='Number of blocks to stack in a model (MADE in MAF; Coupling+BN in RealNVP).')
parser.add_argument('--n_components', type=int, default=1, help='Number of Gaussian clusters for mixture of gaussians models.')
parser.add_argument('--hidden_size', type=int, default=100, help='Hidden layer size for MADE (and each MADE block in an MAF).')
parser.add_argument('--n_hidden', type=int, default=1, help='Number of hidden layers in each MADE.')
parser.add_argument('--activation_fn', type=str, default='relu', help='What activation function to use in the MADEs.')
parser.add_argument('--input_order', type=str, default='sequential', help='What input order to use (sequential | random).')
parser.add_argument('--conditional', default=False, action='store_true', help='Whether to use a conditional model.')
parser.add_argument('--no_batch_norm', action='store_true')
# training params
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--n_epochs', type=int, default=50)
parser.add_argument('--start_epoch', type=int, default=0, help='Starting epoch (for logging; overwritten when restoring from a checkpoint).')
parser.add_argument('--lr', type=float, default=1e-4, help='Learning rate.')
parser.add_argument('--log_interval', type=int, default=1000, help='How often to show loss statistics and save samples.')
# --------------------
# Model layers and helpers
# --------------------
def create_masks(input_size, hidden_size, n_hidden, input_order='sequential', input_degrees=None):
# MADE paper sec 4:
# degrees of connections between layers -- ensure at most in_degree - 1 connections
degrees = []
# set input degrees to what is provided in args (the flipped order of the previous layer in a stack of mades);
# else init input degrees based on strategy in input_order (sequential or random)
if input_order == 'sequential':
degrees += [torch.arange(input_size)] if input_degrees is None else [input_degrees]
for _ in range(n_hidden + 1):
degrees += [torch.arange(hidden_size) % (input_size - 1)]
degrees += [torch.arange(input_size) % input_size - 1] if input_degrees is None else [input_degrees % input_size - 1]
elif input_order == 'random':
degrees += [torch.randperm(input_size)] if input_degrees is None else [input_degrees]
for _ in range(n_hidden + 1):
min_prev_degree = min(degrees[-1].min().item(), input_size - 1)
degrees += [torch.randint(min_prev_degree, input_size, (hidden_size,))]
min_prev_degree = min(degrees[-1].min().item(), input_size - 1)
degrees += [torch.randint(min_prev_degree, input_size, (input_size,)) - 1] if input_degrees is None else [input_degrees - 1]
# construct masks
masks = []
for (d0, d1) in zip(degrees[:-1], degrees[1:]):
masks += [(d1.unsqueeze(-1) >= d0.unsqueeze(0)).float()]
return masks, degrees[0]
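# Worked example (illustrative): for input_size=3, hidden_size=4, n_hidden=1 with sequential order,
# the degrees are: input [0, 1, 2]; hidden [0, 1, 0, 1] (arange(4) % (3 - 1), per hidden layer);
# output [-1, 0, 1] (arange(3) % 3 - 1). Each mask is (d_out.unsqueeze(-1) >= d_in.unsqueeze(0)),
# so every unit only receives inputs of lower or equal degree, enforcing the autoregressive property.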
class MaskedLinear(nn.Linear):
""" MADE building block layer """
def __init__(self, input_size, n_outputs, mask, cond_label_size=None):
super().__init__(input_size, n_outputs)
self.register_buffer('mask', mask)
self.cond_label_size = cond_label_size
if cond_label_size is not None:
self.cond_weight = nn.Parameter(torch.rand(n_outputs, cond_label_size) / math.sqrt(cond_label_size))
def forward(self, x, y=None):
out = F.linear(x, self.weight * self.mask, self.bias)
if y is not None:
out = out + F.linear(y, self.cond_weight)
return out
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
) + (self.cond_label_size != None) * ', cond_features={}'.format(self.cond_label_size)
class LinearMaskedCoupling(nn.Module):
""" Modified RealNVP Coupling Layers per the MAF paper """
def __init__(self, input_size, hidden_size, n_hidden, mask, cond_label_size=None):
super().__init__()
self.register_buffer('mask', mask)
# scale function
s_net = [nn.Linear(input_size + (cond_label_size if cond_label_size is not None else 0), hidden_size)]
for _ in range(n_hidden):
s_net += [nn.Tanh(), nn.Linear(hidden_size, hidden_size)]
s_net += [nn.Tanh(), nn.Linear(hidden_size, input_size)]
self.s_net = nn.Sequential(*s_net)
# translation function
self.t_net = copy.deepcopy(self.s_net)
# replace Tanh with ReLU's per MAF paper
for i in range(len(self.t_net)):
if not isinstance(self.t_net[i], nn.Linear): self.t_net[i] = nn.ReLU()
def forward(self, x, y=None):
# apply mask
mx = x * self.mask
# run through model
s = self.s_net(mx if y is None else torch.cat([y, mx], dim=1))
t = self.t_net(mx if y is None else torch.cat([y, mx], dim=1))
u = mx + (1 - self.mask) * (x - t) * torch.exp(-s) # cf RealNVP eq 8 where u corresponds to x (here we're modeling u)
log_abs_det_jacobian = - (1 - self.mask) * s # log det du/dx; cf RealNVP 8 and 6; note, sum over input_size done at model log_prob
return u, log_abs_det_jacobian
def inverse(self, u, y=None):
# apply mask
mu = u * self.mask
# run through model
s = self.s_net(mu if y is None else torch.cat([y, mu], dim=1))
t = self.t_net(mu if y is None else torch.cat([y, mu], dim=1))
x = mu + (1 - self.mask) * (u * s.exp() + t) # cf RealNVP eq 7
log_abs_det_jacobian = (1 - self.mask) * s # log det dx/du
return x, log_abs_det_jacobian
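# Sanity sketch (illustrative): forward and inverse are exact inverses and their log-dets cancel:
# coupling = LinearMaskedCoupling(input_size=2, hidden_size=64, n_hidden=1, mask=torch.tensor([1., 0.]))
# u, ldj = coupling(x); x2, ldj_inv = coupling.inverse(u) # x2 == x and ldj_inv == -ldj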
class BatchNorm(nn.Module):
""" RealNVP BatchNorm layer """
def __init__(self, input_size, momentum=0.9, eps=1e-5):
super().__init__()
self.momentum = momentum
self.eps = eps
self.log_gamma = nn.Parameter(torch.zeros(input_size))
self.beta = nn.Parameter(torch.zeros(input_size))
self.register_buffer('running_mean', torch.zeros(input_size))
self.register_buffer('running_var', torch.ones(input_size))
def forward(self, x, cond_y=None):
if self.training:
self.batch_mean = x.mean(0)
self.batch_var = x.var(0) # note MAF paper uses biased variance estimate; ie x.var(0, unbiased=False)
# update running mean
self.running_mean.mul_(self.momentum).add_(self.batch_mean.data * (1 - self.momentum))
self.running_var.mul_(self.momentum).add_(self.batch_var.data * (1 - self.momentum))
mean = self.batch_mean
var = self.batch_var
else:
mean = self.running_mean
var = self.running_var
# compute normalized input (cf original batch norm paper algo 1)
x_hat = (x - mean) / torch.sqrt(var + self.eps)
y = self.log_gamma.exp() * x_hat + self.beta
# compute log_abs_det_jacobian (cf RealNVP paper)
log_abs_det_jacobian = self.log_gamma - 0.5 * torch.log(var + self.eps)
# print('in sum log var {:6.3f} ; out sum log var {:6.3f}; sum log det {:8.3f}; mean log_gamma {:5.3f}; mean beta {:5.3f}'.format(
# (var + self.eps).log().sum().data.numpy(), y.var(0).log().sum().data.numpy(), log_abs_det_jacobian.mean(0).item(), self.log_gamma.mean(), self.beta.mean()))
return y, log_abs_det_jacobian.expand_as(x)
def inverse(self, y, cond_y=None):
if self.training:
mean = self.batch_mean
var = self.batch_var
else:
mean = self.running_mean
var = self.running_var
x_hat = (y - self.beta) * torch.exp(-self.log_gamma)
x = x_hat * torch.sqrt(var + self.eps) + mean
log_abs_det_jacobian = 0.5 * torch.log(var + self.eps) - self.log_gamma
return x, log_abs_det_jacobian.expand_as(x)
class FlowSequential(nn.Sequential):
""" Container for layers of a normalizing flow """
def forward(self, x, y):
sum_log_abs_det_jacobians = 0
for module in self:
x, log_abs_det_jacobian = module(x, y)
sum_log_abs_det_jacobians = sum_log_abs_det_jacobians + log_abs_det_jacobian
return x, sum_log_abs_det_jacobians
def inverse(self, u, y):
sum_log_abs_det_jacobians = 0
for module in reversed(self):
u, log_abs_det_jacobian = module.inverse(u, y)
sum_log_abs_det_jacobians = sum_log_abs_det_jacobians + log_abs_det_jacobian
return u, sum_log_abs_det_jacobians
# --------------------
# Models
# --------------------
class MADE(nn.Module):
def __init__(self, input_size, hidden_size, n_hidden, cond_label_size=None, activation='relu', input_order='sequential', input_degrees=None):
"""
Args:
input_size -- scalar; dim of inputs
hidden_size -- scalar; dim of hidden layers
n_hidden -- scalar; number of hidden layers
activation -- str; activation function to use
input_order -- str or tensor; variable order for creating the autoregressive masks (sequential|random)
or the order flipped from the previous layer in a stack of mades
cond_label_size -- int or None; size of the conditioning label (None for an unconditional model)
"""
super().__init__()
# base distribution for calculation of log prob under the model
self.register_buffer('base_dist_mean', torch.zeros(input_size))
self.register_buffer('base_dist_var', torch.ones(input_size))
# create masks
masks, self.input_degrees = create_masks(input_size, hidden_size, n_hidden, input_order, input_degrees)
# setup activation
if activation == 'relu':
activation_fn = nn.ReLU()
elif activation == 'tanh':
activation_fn = nn.Tanh()
else:
raise ValueError('Check activation function.')
# construct model
self.net_input = MaskedLinear(input_size, hidden_size, masks[0], cond_label_size)
self.net = []
for m in masks[1:-1]:
self.net += [activation_fn, MaskedLinear(hidden_size, hidden_size, m)]
self.net += [activation_fn, MaskedLinear(hidden_size, 2 * input_size, masks[-1].repeat(2,1))]
self.net = nn.Sequential(*self.net)
@property
def base_dist(self):
return D.Normal(self.base_dist_mean, self.base_dist_var)
def forward(self, x, y=None):
# MAF eq 4 -- return mean and log std
m, loga = self.net(self.net_input(x, y)).chunk(chunks=2, dim=1)
u = (x - m) * torch.exp(-loga)
# MAF eq 5
log_abs_det_jacobian = - loga
return u, log_abs_det_jacobian
def inverse(self, u, y=None, sum_log_abs_det_jacobians=None):
# MAF eq 3
D = u.shape[1]
x = torch.zeros_like(u)
# run through reverse model
for i in self.input_degrees:
m, loga = self.net(self.net_input(x, y)).chunk(chunks=2, dim=1)
x[:,i] = u[:,i] * torch.exp(loga[:,i]) + m[:,i]
log_abs_det_jacobian = loga
return x, log_abs_det_jacobian
def log_prob(self, x, y=None):
u, log_abs_det_jacobian = self.forward(x, y)
return torch.sum(self.base_dist.log_prob(u) + log_abs_det_jacobian, dim=1)
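# Usage sketch (illustrative): density estimation with a single MADE on 2-d data,
# made = MADE(input_size=2, hidden_size=64, n_hidden=1)
# nll = - made.log_prob(torch.randn(16, 2)).mean() # negative log-likelihood of a batch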
class MADEMOG(nn.Module):
""" Mixture of Gaussians MADE """
def __init__(self, n_components, input_size, hidden_size, n_hidden, cond_label_size=None, activation='relu', input_order='sequential', input_degrees=None):
"""
Args:
n_components -- scalar; number of gaussian components in the mixture
input_size -- scalar; dim of inputs
hidden_size -- scalar; dim of hidden layers
n_hidden -- scalar; number of hidden layers
activation -- str; activation function to use
input_order -- str or tensor; variable order for creating the autoregressive masks (sequential|random)
or the order flipped from the previous layer in a stack of mades
cond_label_size -- int or None; size of the conditioning label (None for an unconditional model)
"""
super().__init__()
self.n_components = n_components
# base distribution for calculation of log prob under the model
self.register_buffer('base_dist_mean', torch.zeros(input_size))
self.register_buffer('base_dist_var', torch.ones(input_size))
# create masks
masks, self.input_degrees = create_masks(input_size, hidden_size, n_hidden, input_order, input_degrees)
# setup activation
if activation == 'relu':
activation_fn = nn.ReLU()
elif activation == 'tanh':
activation_fn = nn.Tanh()
else:
raise ValueError('Check activation function.')
# construct model
self.net_input = MaskedLinear(input_size, hidden_size, masks[0], cond_label_size)
self.net = []
for m in masks[1:-1]:
self.net += [activation_fn, MaskedLinear(hidden_size, hidden_size, m)]
self.net += [activation_fn, MaskedLinear(hidden_size, n_components * 3 * input_size, masks[-1].repeat(n_components * 3,1))]
self.net = nn.Sequential(*self.net)
@property
def base_dist(self):
return D.Normal(self.base_dist_mean, self.base_dist_var)
def forward(self, x, y=None):
# shapes
N, L = x.shape
C = self.n_components
# MAF eq 2 -- parameters of Gaussians - mean, logsigma, log unnormalized cluster probabilities
m, loga, logr = self.net(self.net_input(x, y)).view(N, C, 3 * L).chunk(chunks=3, dim=-1) # out 3 x (N, C, L)
# MAF eq 4
x = x.repeat(1, C).view(N, C, L) # out (N, C, L)
u = (x - m) * torch.exp(-loga) # out (N, C, L)
# MAF eq 5
log_abs_det_jacobian = - loga # out (N, C, L)
# normalize cluster responsibilities
self.logr = logr - logr.logsumexp(1, keepdim=True) # out (N, C, L)
return u, log_abs_det_jacobian
def inverse(self, u, y=None, sum_log_abs_det_jacobians=None):
# shapes
N, C, L = u.shape
# init output
x = torch.zeros(N, L).to(u.device)
# MAF eq 3
# run through reverse model along each L
for i in self.input_degrees:
m, loga, logr = self.net(self.net_input(x, y)).view(N, C, 3 * L).chunk(chunks=3, dim=-1) # out 3 x (N, C, L)
# normalize cluster responsibilities and sample cluster assignments from a categorical dist
logr = logr - logr.logsumexp(1, keepdim=True) # out (N, C, L)
z = D.Categorical(logits=logr[:,:,i]).sample().unsqueeze(-1) # out (N, 1)
u_z = torch.gather(u[:,:,i], 1, z).squeeze() # out (N,)
m_z = torch.gather(m[:,:,i], 1, z).squeeze() # out (N,)
loga_z = torch.gather(loga[:,:,i], 1, z).squeeze() # out (N,)
x[:,i] = u_z * torch.exp(loga_z) + m_z
log_abs_det_jacobian = loga
return x, log_abs_det_jacobian
def log_prob(self, x, y=None):
u, log_abs_det_jacobian = self.forward(x, y) # u = (N,C,L); log_abs_det_jacobian = (N,C,L)
# marginalize cluster probs
log_probs = torch.logsumexp(self.logr + self.base_dist.log_prob(u) + log_abs_det_jacobian, dim=1) # sum over C; out (N, L)
return log_probs.sum(1) # sum over L; out (N,)
class MAF(nn.Module):
def __init__(self, n_blocks, input_size, hidden_size, n_hidden, cond_label_size=None, activation='relu', input_order='sequential', batch_norm=True):
super().__init__()
# base distribution for calculation of log prob under the model
self.register_buffer('base_dist_mean', torch.zeros(input_size))
self.register_buffer('base_dist_var', torch.ones(input_size))
# construct model
modules = []
self.input_degrees = None
for i in range(n_blocks):
modules += [MADE(input_size, hidden_size, n_hidden, cond_label_size, activation, input_order, self.input_degrees)]
self.input_degrees = modules[-1].input_degrees.flip(0)
modules += batch_norm * [BatchNorm(input_size)]
self.net = FlowSequential(*modules)
@property
def base_dist(self):
return D.Normal(self.base_dist_mean, self.base_dist_var)
def forward(self, x, y=None):
return self.net(x, y)
def inverse(self, u, y=None):
return self.net.inverse(u, y)
def log_prob(self, x, y=None):
u, sum_log_abs_det_jacobians = self.forward(x, y)
return torch.sum(self.base_dist.log_prob(u) + sum_log_abs_det_jacobians, dim=1)
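# Usage sketch (illustrative): stack 5 MADEs (with BatchNorm between blocks) into an MAF and
# sample by inverting the flow,
# maf = MAF(n_blocks=5, input_size=2, hidden_size=64, n_hidden=1)
# x, _ = maf.inverse(maf.base_dist.sample((16,))) # 16 samples from the learned density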
class MAFMOG(nn.Module):
""" MAF on mixture of gaussian MADE """
def __init__(self, n_blocks, n_components, input_size, hidden_size, n_hidden, cond_label_size=None, activation='relu',
input_order='sequential', batch_norm=True):
super().__init__()
# base distribution for calculation of log prob under the model
self.register_buffer('base_dist_mean', torch.zeros(input_size))
self.register_buffer('base_dist_var', torch.ones(input_size))
self.maf = MAF(n_blocks, input_size, hidden_size, n_hidden, cond_label_size, activation, input_order, batch_norm)
# get the reversed input order from the last layer (in the MAF model, input_degrees are already flipped in the constructor loop)
input_degrees = self.maf.input_degrees # already flipped, so no extra .flip(0) is needed here
self.mademog = MADEMOG(n_components, input_size, hidden_size, n_hidden, cond_label_size, activation, input_order, input_degrees)
@property
def base_dist(self):
return D.Normal(self.base_dist_mean, self.base_dist_var)
def forward(self, x, y=None):
u, maf_log_abs_dets = self.maf(x, y)
u, made_log_abs_dets = self.mademog(u, y)
sum_log_abs_det_jacobians = maf_log_abs_dets.unsqueeze(1) + made_log_abs_dets
return u, sum_log_abs_det_jacobians
def inverse(self, u, y=None):
x, made_log_abs_dets = self.mademog.inverse(u, y)
x, maf_log_abs_dets = self.maf.inverse(x, y)
sum_log_abs_det_jacobians = maf_log_abs_dets.unsqueeze(1) + made_log_abs_dets
return x, sum_log_abs_det_jacobians
def log_prob(self, x, y=None):
u, log_abs_det_jacobian = self.forward(x, y) # u = (N,C,L); log_abs_det_jacobian = (N,C,L)
# marginalize cluster probs
log_probs = torch.logsumexp(self.mademog.logr + self.base_dist.log_prob(u) + log_abs_det_jacobian, dim=1) # out (N, L)
return log_probs.sum(1) # out (N,)
class RealNVP(nn.Module):
def __init__(self, n_blocks, input_size, hidden_size, n_hidden, cond_label_size=None, batch_norm=True):
super().__init__()
# base distribution for calculation of log prob under the model
self.register_buffer('base_dist_mean', torch.zeros(input_size))
self.register_buffer('base_dist_var', torch.ones(input_size))
# construct model
modules = []
mask = torch.arange(input_size).float() % 2
for i in range(n_blocks):
modules += [LinearMaskedCoupling(input_size, hidden_size, n_hidden, mask, cond_label_size)]
mask = 1 - mask
modules += batch_norm * [BatchNorm(input_size)]
self.net = FlowSequential(*modules)
@property
def base_dist(self):
return D.Normal(self.base_dist_mean, self.base_dist_var)
def forward(self, x, y=None):
return self.net(x, y)
def inverse(self, u, y=None):
return self.net.inverse(u, y)
def log_prob(self, x, y=None):
u, sum_log_abs_det_jacobians = self.forward(x, y)
return torch.sum(self.base_dist.log_prob(u) + sum_log_abs_det_jacobians, dim=1)
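# Usage sketch (illustrative): RealNVP alternates the binary mask between coupling layers so every
# dimension gets transformed; sampling mirrors MAF,
# nvp = RealNVP(n_blocks=4, input_size=2, hidden_size=64, n_hidden=1)
# x, _ = nvp.inverse(nvp.base_dist.sample((16,)))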
# --------------------
# Train and evaluate
# --------------------
def train(model, dataloader, optimizer, epoch, args):
for i, data in enumerate(dataloader):
model.train()
# check if labeled dataset
if len(data) == 1:
x, y = data[0], None
else:
x, y = data
y = y.to(args.device)
x = x.view(x.shape[0], -1).to(args.device)
loss = - model.log_prob(x, y if args.cond_label_size else None).mean(0)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % args.log_interval == 0:
print('epoch {:3d} / {}, step {:4d} / {}; loss {:.4f}'.format(
epoch, args.start_epoch + args.n_epochs, i, len(dataloader), loss.item()))
@torch.no_grad()
def evaluate(model, dataloader, epoch, args):
model.eval()
# conditional model
if args.cond_label_size is not None:
logprior = torch.tensor(1 / args.cond_label_size).log().to(args.device)
loglike = [[] for _ in range(args.cond_label_size)]
for i in range(args.cond_label_size):
# make one-hot labels
labels = torch.zeros(args.batch_size, args.cond_label_size).to(args.device)
labels[:,i] = 1
for x, y in dataloader:
x = x.view(x.shape[0], -1).to(args.device)
loglike[i].append(model.log_prob(x, labels))
loglike[i] = torch.cat(loglike[i], dim=0) # cat along data dim under this label
loglike = torch.stack(loglike, dim=1) # cat all data along label dim
# log p(x) = log ∑_y p(x,y) = log ∑_y p(x|y)p(y)
# with a uniform prior this is log p(y) + log ∑_y p(x|y)
logprobs = logprior + loglike.logsumexp(dim=1)
# TODO -- measure accuracy as argmax of the loglike
# unconditional model
else:
logprobs = []
for data in dataloader:
x = data[0].view(data[0].shape[0], -1).to(args.device)
logprobs.append(model.log_prob(x))
logprobs = torch.cat(logprobs, dim=0).to(args.device)
logprob_mean, logprob_std = logprobs.mean(0), 2 * logprobs.var(0).sqrt() / math.sqrt(len(dataloader.dataset))
output = 'Evaluate ' + (epoch != None)*'(epoch {}) -- '.format(epoch) + 'logp(x) = {:.3f} +/- {:.3f}'.format(logprob_mean, logprob_std)
print(output)
print(output, file=open(args.results_file, 'a'))
return logprob_mean, logprob_std
@torch.no_grad()
def generate(model, dataset_lam, args, step=None, n_row=10):
model.eval()
# conditional model
if args.cond_label_size:
samples = []
labels = torch.eye(args.cond_label_size).to(args.device)
for i in range(args.cond_label_size):
# sample model base distribution and run through inverse model to sample data space
u = model.base_dist.sample((n_row, args.n_components)).squeeze()
labels_i = labels[i].expand(n_row, -1)
sample, _ = model.inverse(u, labels_i)
log_probs = model.log_prob(sample, labels_i).sort(0)[1].flip(0) # sort by log_prob; take argsort idxs; flip high to low
samples.append(sample[log_probs])
samples = torch.cat(samples, dim=0)
# unconditional model
else:
u = model.base_dist.sample((n_row**2, args.n_components)).squeeze()
samples, _ = model.inverse(u)
log_probs = model.log_prob(samples).sort(0)[1].flip(0) # sort by log_prob; take argsort idxs; flip high to low
samples = samples[log_probs]
# convert and save images
samples = samples.view(samples.shape[0], *args.input_dims)
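# undo the dataset's logit preprocessing: sigmoid maps back into (0,1), then rescale out of the
# [lam, 1 - lam] dequantization interval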
samples = (torch.sigmoid(samples) - dataset_lam) / (1 - 2 * dataset_lam)
filename = 'generated_samples' + (step != None)*'_epoch_{}'.format(step) + '.png'
save_image(samples, os.path.join(args.output_dir, filename), nrow=n_row, normalize=True)
def train_and_evaluate(model, train_loader, test_loader, optimizer, args):
best_eval_logprob = float('-inf')
for i in range(args.start_epoch, args.start_epoch + args.n_epochs):
train(model, train_loader, optimizer, i, args)
eval_logprob, _ = evaluate(model, test_loader, i, args)
# save training checkpoint
torch.save({'epoch': i,
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict()},
os.path.join(args.output_dir, 'model_checkpoint.pt'))
# save model only
torch.save(model.state_dict(), os.path.join(args.output_dir, 'model_state.pt'))
# save best state
if eval_logprob > best_eval_logprob:
best_eval_logprob = eval_logprob
torch.save({'epoch': i,
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict()},
os.path.join(args.output_dir, 'best_model_checkpoint.pt'))
# plot sample
if args.dataset == 'TOY':
plot_sample_and_density(model, train_loader.dataset.base_dist, args, step=i)
if args.dataset == 'MNIST':
generate(model, train_loader.dataset.lam, args, step=i)
# --------------------
# Plot
# --------------------
def plot_density(dist, ax, ranges, flip_var_order=False):
(xmin, xmax), (ymin, ymax) = ranges
# sample uniform grid
n = 200
xx1 = torch.linspace(xmin, xmax, n)
xx2 = torch.linspace(ymin, ymax, n)
xx, yy = torch.meshgrid(xx1, xx2)
xy = torch.stack((xx.flatten(), yy.flatten()), dim=-1).squeeze()
if flip_var_order:
xy = xy.flip(1)
# run uniform grid through model and plot
density = dist.log_prob(xy).exp()
ax.contour(xx, yy, density.view(n,n).data.numpy())
# format
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_xticks([xmin, xmax])
ax.set_yticks([ymin, ymax])
def plot_dist_sample(data, ax, ranges):
ax.scatter(data[:,0].data.numpy(), data[:,1].data.numpy(), s=10, alpha=0.4)
# format
(xmin, xmax), (ymin, ymax) = ranges
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_xticks([xmin, xmax])
ax.set_yticks([ymin, ymax])
def plot_sample_and_density(model, target_dist, args, ranges_density=[[-5,20],[-10,10]], ranges_sample=[[-4,4],[-4,4]], step=None):
model.eval()
fig, axs = plt.subplots(1, 2, figsize=(6,3))
# sample target distribution and pass through model
data = target_dist.sample((2000,))
u, _ = model(data)
# plot density and sample
plot_density(model, axs[0], ranges_density, args.flip_var_order)
plot_dist_sample(u, axs[1], ranges_sample)
# format and save
matplotlib.rcParams.update({'xtick.labelsize': 'xx-small', 'ytick.labelsize': 'xx-small'})
plt.tight_layout()
plt.savefig(os.path.join(args.output_dir, 'sample' + (step != None)*'_epoch_{}'.format(step) + '.png'))
plt.close()
# --------------------
# Run
# --------------------
if __name__ == '__main__':
args = parser.parse_args()
# setup file ops
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
# setup device
args.device = torch.device('cuda:0' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
torch.manual_seed(args.seed)
if args.device.type == 'cuda': torch.cuda.manual_seed(args.seed)
# load data
if args.conditional: assert args.dataset in ['MNIST', 'CIFAR10'], 'Conditional inputs only available for labeled datasets MNIST and CIFAR10.'
train_dataloader, test_dataloader = fetch_dataloaders(args.dataset, args.batch_size, args.device, args.flip_toy_var_order)
args.input_size = train_dataloader.dataset.input_size
args.input_dims = train_dataloader.dataset.input_dims
args.cond_label_size = train_dataloader.dataset.label_size if args.conditional else None
# model
if args.model == 'made':
model = MADE(args.input_size, args.hidden_size, args.n_hidden, args.cond_label_size,
args.activation_fn, args.input_order)
elif args.model == 'mademog':
assert args.n_components > 1, 'Specify more than 1 component for mixture of gaussians models.'
model = MADEMOG(args.n_components, args.input_size, args.hidden_size, args.n_hidden, args.cond_label_size,
args.activation_fn, args.input_order)
elif args.model == 'maf':
model = MAF(args.n_blocks, args.input_size, args.hidden_size, args.n_hidden, args.cond_label_size,
args.activation_fn, args.input_order, batch_norm=not args.no_batch_norm)
elif args.model == 'mafmog':
assert args.n_components > 1, 'Specify more than 1 component for mixture of gaussians models.'
model = MAFMOG(args.n_blocks, args.n_components, args.input_size, args.hidden_size, args.n_hidden, args.cond_label_size,
args.activation_fn, args.input_order, batch_norm=not args.no_batch_norm)
elif args.model =='realnvp':
model = RealNVP(args.n_blocks, args.input_size, args.hidden_size, args.n_hidden, args.cond_label_size,
batch_norm=not args.no_batch_norm)
else:
raise ValueError('Unrecognized model.')
model = model.to(args.device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-6)
if args.restore_file:
# load model and optimizer states
state = torch.load(args.restore_file, map_location=args.device)
model.load_state_dict(state['model_state'])
optimizer.load_state_dict(state['optimizer_state'])
args.start_epoch = state['epoch'] + 1
# set up paths
args.output_dir = os.path.dirname(args.restore_file)
args.results_file = os.path.join(args.output_dir, args.results_file)
print('Loaded settings and model:')
print(pprint.pformat(args.__dict__))
print(model)
with open(args.results_file, 'a') as f:  # avoid leaking file handles from inline open() calls
print(pprint.pformat(args.__dict__), file=f)
print(model, file=f)
if args.train:
train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, args)
if args.evaluate:
evaluate(model, test_dataloader, None, args)
if args.generate:
if args.dataset == 'TOY':
base_dist = train_dataloader.dataset.base_dist
plot_sample_and_density(model, base_dist, args, ranges_density=[[-15,4],[-3,3]], ranges_sample=[[-1.5,1.5],[-3,3]])
elif args.dataset == 'MNIST':
generate(model, train_dataloader.dataset.lam, args)
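# --------------------
# Usage sketch (added): example invocations inferred from the args attributes
# used above; the script filename is hypothetical, and the parser defined at
# the top of this file is the authoritative flag list.
#   python maf.py --model maf --dataset TOY --train
#   python maf.py --model maf --dataset TOY --generate --restore_file <checkpoint.pt>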
| 31,985
| 41.762032
| 169
|
py
|
normalizing_flows
|
normalizing_flows-master/planar_flow.py
|
"""
Variational Inference with Normalizing Flows
arXiv:1505.05770v6
"""
import torch
import torch.nn as nn
import torch.distributions as D
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import argparse
parser = argparse.ArgumentParser()
# action
parser.add_argument('--train', action='store_true', help='Train a flow.')
parser.add_argument('--evaluate', action='store_true', help='Evaluate a flow.')
parser.add_argument('--plot', action='store_true', help='Plot a flow and target density.')
parser.add_argument('--restore_file', type=str, help='Path to model to restore.')
parser.add_argument('--output_dir', default='.', help='Path to output folder.')
parser.add_argument('--no_cuda', action='store_true', help='Do not use cuda.')
# target potential
parser.add_argument('--target_potential', choices=['u_z1', 'u_z2', 'u_z3', 'u_z4'], help='Which potential function to approximate (only u_z1..u_z4 are defined below).')
# flow params
parser.add_argument('--base_sigma', type=float, default=4, help='Std of the base isotropic 0-mean Gaussian distribution.')
parser.add_argument('--learn_base', default=False, action='store_true', help='Whether to learn a mu-sigma affine transform of the base distribution.')
parser.add_argument('--flow_length', type=int, default=2, help='Length of the flow.')
# training params
parser.add_argument('--init_sigma', type=float, default=1, help='Initialization std for the trainable flow parameters.')
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--start_step', type=int, default=0, help='Starting step (overwritten from the restored checkpoint when resuming training).')
parser.add_argument('--n_steps', type=int, default=1000000, help='Optimization steps.')
parser.add_argument('--lr', type=float, default=1e-5, help='Learning rate.')
parser.add_argument('--weight_decay', type=float, default=1e-3, help='Weight decay.')
parser.add_argument('--beta', type=float, default=1, help='Multiplier for the target potential loss.')
parser.add_argument('--seed', type=int, default=2, help='Random seed.')
# --------------------
# Flow
# --------------------
class PlanarTransform(nn.Module):
def __init__(self, init_sigma=0.01):
super().__init__()
self.u = nn.Parameter(torch.empty(1, 2).normal_(0, init_sigma))
self.w = nn.Parameter(torch.empty(1, 2).normal_(0, init_sigma))
self.b = nn.Parameter(torch.zeros(1))
def forward(self, x, normalize_u=True):
# allow for a single forward pass over all the transforms in the flows with a Sequential container
if isinstance(x, tuple):
z, sum_log_abs_det_jacobians = x
else:
z, sum_log_abs_det_jacobians = x, 0
# normalize u s.t. w @ u >= -1; sufficient condition for invertibility
u_hat = self.u
if normalize_u:
wtu = (self.w @ self.u.t()).squeeze()
m_wtu = -1 + torch.log1p(wtu.exp())  # m(w.u) = -1 + softplus(w.u) >= -1
u_hat = self.u + (m_wtu - wtu) * self.w / (self.w @ self.w.t())  # shift u along w so that w.u_hat = m(w.u) >= -1
# compute transform
f_z = z + u_hat * torch.tanh(z @ self.w.t() + self.b)
# compute log_abs_det_jacobian
psi = (1 - torch.tanh(z @ self.w.t() + self.b)**2) @ self.w
det = 1 + psi @ u_hat.t()
log_abs_det_jacobian = torch.log(torch.abs(det) + 1e-6).squeeze()
sum_log_abs_det_jacobians = sum_log_abs_det_jacobians + log_abs_det_jacobian
return f_z, sum_log_abs_det_jacobians
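# Added sanity-check sketch (not part of the original script): compares the
# analytic log|det J| of PlanarTransform against a brute-force autograd
# Jacobian on a single 2-D point; the helper name is illustrative only.
def _check_planar_logdet():
    torch.manual_seed(0)
    t = PlanarTransform(init_sigma=0.1)
    z = torch.randn(1, 2, requires_grad=True)
    f_z, log_det = t(z)
    # assemble the 2x2 Jacobian row by row via autograd
    rows = [torch.autograd.grad(f_z[0, i], z, retain_graph=True)[0][0] for i in range(2)]
    J = torch.stack(rows)
    assert torch.allclose(J.det().abs().log(), log_det, atol=1e-4)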
class AffineTransform(nn.Module):
def __init__(self, learnable=False):
super().__init__()
self.mu = nn.Parameter(torch.zeros(2)).requires_grad_(learnable)
self.logsigma = nn.Parameter(torch.zeros(2)).requires_grad_(learnable)
def forward(self, x):
z = self.mu + self.logsigma.exp() * x
sum_log_abs_det_jacobians = self.logsigma.sum()
return z, sum_log_abs_det_jacobians
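# Added note (sketch of the math the (z, sum_log_abs_det_jacobians) tuples
# implement): composing K transforms under the change of variables gives
#   log q_K(z_K) = log q_0(z_0) - sum_{k=1..K} log|det dz_k/dz_{k-1}|
# which is why every module above returns its running log-det sum alongside z.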
# --------------------
# Test energy functions -- NF paper table 1
# --------------------
w1 = lambda z: torch.sin(2 * math.pi * z[:,0] / 4)
w2 = lambda z: 3 * torch.exp(-0.5 * ((z[:,0] - 1)/0.6)**2)
w3 = lambda z: 3 * torch.sigmoid((z[:,0] - 1) / 0.3)
u_z1 = lambda z: 0.5 * ((torch.norm(z, p=2, dim=1) - 2) / 0.4)**2 - \
torch.log(torch.exp(-0.5*((z[:,0] - 2) / 0.6)**2) + torch.exp(-0.5*((z[:,0] + 2) / 0.6)**2) + 1e-10)
u_z2 = lambda z: 0.5 * ((z[:,1] - w1(z)) / 0.4)**2
u_z3 = lambda z: - torch.log(torch.exp(-0.5*((z[:,1] - w1(z))/0.35)**2) + torch.exp(-0.5*((z[:,1] - w1(z) + w2(z))/0.35)**2) + 1e-10)
u_z4 = lambda z: - torch.log(torch.exp(-0.5*((z[:,1] - w1(z))/0.4)**2) + torch.exp(-0.5*((z[:,1] - w1(z) + w3(z))/0.35)**2) + 1e-10)
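# Added spot-check (sketch): u_z2 vanishes on the sine ridge z2 = sin(2*pi*z1/4);
# e.g. at z = (1, sin(pi/2)) = (1, 1):
#   u_z2(torch.tensor([[1., 1.]])) == 0.5 * ((1 - 1) / 0.4)**2 == 0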
# --------------------
# Training
# --------------------
def optimize_flow(base_dist, flow, target_energy_potential, optimizer, args):
# anneal rate for free energy
temp = lambda i: min(1, 0.01 + i/10000)
for i in range(args.start_step, args.n_steps):
# sample base dist
z = base_dist.sample((args.batch_size, )).to(args.device)
# pass through flow:
# 1. compute expected log_prob of data under base dist -- nothing tied to parameters here so irrelevant to grads
base_log_prob = base_dist.log_prob(z)
# 2. compute sum of log_abs_det_jacobian through the flow
zk, sum_log_abs_det_jacobians = flow(z)
# 3. compute expected log_prob of z_k under the target energy potential
p_log_prob = - temp(i) * target_energy_potential(zk) # p = exp(-potential) ==> p_log_prob = - potential
loss = base_log_prob - sum_log_abs_det_jacobians - args.beta * p_log_prob
loss = loss.mean(0)
# compute loss and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 10000 == 0:
# display loss
log_qk = base_dist.log_prob(z) - sum_log_abs_det_jacobians
print('{}: step {:5d} / {}; loss {:.3f}; base_log_prob {:.3f}, sum log dets {:.3f}, p_log_prob {:.3f}, max base = {:.3f}; max qk = {:.3f} \
zk_mean {}, zk_sigma {}; base_mu {}, base_log_sigma {}'.format(
args.target_potential, i, args.n_steps, loss.item(), base_log_prob.mean(0).item(), sum_log_abs_det_jacobians.mean(0).item(),
p_log_prob.mean(0).item(), base_log_prob.exp().max().item(), log_qk.exp().max().item(),
zk.mean(0).cpu().data.numpy(), zk.var(0).sqrt().cpu().data.numpy(),
base_dist.loc.cpu().data.numpy() if not args.learn_base else flow[0].mu.cpu().data.numpy(),
base_dist.covariance_matrix.cpu().diag().data.numpy() if not args.learn_base else flow[0].logsigma.cpu().data.numpy()))
# save model
torch.save({'step': i,
'flow_state': flow.state_dict(),
'optimizer_state': optimizer.state_dict()},
os.path.join(args.output_dir, 'model_state_flow_length_{}.pt'.format(args.flow_length)))
# plot and save results
with torch.no_grad():
plot_flow(base_dist, flow, os.path.join(args.output_dir, 'approximating_flow_step{}.png'.format(i)), args)
# --------------------
# Plotting
# --------------------
def plot_flow(base_dist, flow, filename, args):
n = 200
lim = 4
fig, axs = plt.subplots(2, 2, subplot_kw={'aspect': 'equal'})
# plot target density we're trying to approx
plot_target_density(u_z, axs[0,0], lim, n)
# plot posterior approx density
plot_flow_density(base_dist, flow, axs[0,1], lim, n)
# plot flow-transformed base dist sample and histogram
z = base_dist.sample((10000,))
zk, _ = flow(z)
zk = zk.cpu().data.numpy()
axs[1,0].scatter(zk[:,0], zk[:,1], s=10, alpha=0.4)
axs[1,1].hist2d(zk[:,0], zk[:,1], bins=lim*50, cmap=plt.cm.jet)
for ax in plt.gcf().axes:
ax.get_xaxis().set_visible(True)
ax.get_yaxis().set_visible(True)
ax.invert_yaxis()
plt.tight_layout()
plt.savefig(filename)
plt.close()
def plot_target_density(u_z, ax, range_lim=4, n=200, output_dir=None):
x = torch.linspace(-range_lim, range_lim, n)
xx, yy = torch.meshgrid((x, x))
zz = torch.stack((xx.flatten(), yy.flatten()), dim=-1).squeeze().to(args.device)
ax.pcolormesh(xx, yy, torch.exp(-u_z(zz)).view(n,n).data, cmap=plt.cm.jet)
for ax in plt.gcf().axes:
ax.set_xlim(-range_lim, range_lim)
ax.set_ylim(-range_lim, range_lim)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.invert_yaxis()
if output_dir:
plt.tight_layout()
plt.savefig(os.path.join(output_dir, 'target_potential_density.png'))
plt.close()
def plot_flow_density(base_dist, flow, ax, range_lim=4, n=200, output_dir=None):
x = torch.linspace(-range_lim, range_lim, n)
xx, yy = torch.meshgrid((x, x))
zz = torch.stack((xx.flatten(), yy.flatten()), dim=-1).squeeze().to(args.device)
# plot posterior approx density
zzk, sum_log_abs_det_jacobians = flow(zz)
log_q0 = base_dist.log_prob(zz)
log_qk = log_q0 - sum_log_abs_det_jacobians
qk = log_qk.exp().cpu()
zzk = zzk.cpu()
ax.pcolormesh(zzk[:,0].view(n,n).data, zzk[:,1].view(n,n).data, qk.view(n,n).data, cmap=plt.cm.jet)
ax.set_facecolor(plt.cm.jet(0.))
for ax in plt.gcf().axes:
ax.set_xlim(-range_lim, range_lim)
ax.set_ylim(-range_lim, range_lim)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.invert_yaxis()
if output_dir:
plt.tight_layout()
plt.savefig(os.path.join(output_dir, 'flow_k{}_density.png'.format(len(flow)-1)))
plt.close()
# --------------------
# Run
# --------------------
if __name__ == '__main__':
args = parser.parse_args()
args.device = torch.device('cuda:0' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
torch.manual_seed(args.seed)
if args.device.type == 'cuda': torch.cuda.manual_seed(args.seed)
# setup flow
flow = nn.Sequential(AffineTransform(args.learn_base), *[PlanarTransform(args.init_sigma) for _ in range(args.flow_length)]).to(args.device)
# setup target potential to approx
u_z = vars()[args.target_potential]
# setup base distribution
base_dist = D.MultivariateNormal(torch.zeros(2).to(args.device), args.base_sigma * torch.eye(2).to(args.device))
if args.restore_file:
# get filename
filename = os.path.basename(args.restore_file)
args.flow_length = int(filename.partition('length_')[-1].rpartition('.')[0])
# reset output dir
args.output_dir = os.path.dirname(args.restore_file)
# load state
state = torch.load(args.restore_file, map_location=args.device)
# compatibility code;
# 1/ earlier models did not include step and optimizer checkpoints;
try:
flow_state = state['flow_state']
optimizer_state = state['optimizer_state']
args.start_step = state['step']
except KeyError:
# if state is not a dict, load just the model state
flow_state = state
optimizer_state = None
# 2/ some saved checkpoints may not have a first affine layer
try:
flow_state['0.mu']
except KeyError:
# if no first affine layer, reload a flow model without one
flow = nn.Sequential(*[PlanarTransform(args.init_sigma) for _ in range(args.flow_length)]).to(args.device)
flow.load_state_dict(flow_state)
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
if args.train:
optimizer = torch.optim.RMSprop(flow.parameters(), lr=args.lr, momentum=0.9, alpha=0.90, eps=1e-6, weight_decay=args.weight_decay)
if args.restore_file and optimizer_state:
optimizer.load_state_dict(optimizer_state)
args.n_steps = args.start_step + args.n_steps
optimize_flow(base_dist, flow, u_z, optimizer, args)
if args.evaluate:
plot_flow(base_dist, flow, os.path.join(args.output_dir, 'approximating_flow.png'), args)
if args.plot:
plot_target_density(u_z, plt.gca(), output_dir=args.output_dir)
plot_flow_density(base_dist, flow, plt.gca(), output_dir=args.output_dir)
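# Usage sketch (added): the flags below are the ones defined by the parser at
# the top of this file; paths are illustrative.
#   python planar_flow.py --train --target_potential u_z1 --flow_length 32 --output_dir results/u_z1
#   python planar_flow.py --plot --target_potential u_z1 --restore_file results/u_z1/model_state_flow_length_32.pt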
| 12,324
| 39.811258
| 151
|
py
|
normalizing_flows
|
normalizing_flows-master/datasets/power.py
|
import numpy as np
import matplotlib.pyplot as plt
import datasets
import datasets.util
class POWER:
class Data:
def __init__(self, data):
self.x = data.astype(np.float32)
self.N = self.x.shape[0]
def __init__(self):
trn, val, tst = load_data_normalised()
self.trn = self.Data(trn)
self.val = self.Data(val)
self.tst = self.Data(tst)
self.n_dims = self.trn.x.shape[1]
def show_histograms(self, split):
data_split = getattr(self, split, None)
if data_split is None:
raise ValueError('Invalid data split')
datasets.util.plot_hist_marginals(data_split.x)
plt.show()
def load_data():
return np.load(datasets.root + 'power/data.npy')
def load_data_split_with_noise():
rng = np.random.RandomState(42)
data = load_data()
rng.shuffle(data)
N = data.shape[0]
data = np.delete(data, 3, axis=1)
data = np.delete(data, 1, axis=1)
############################
# Add noise
############################
# global_intensity_noise = 0.1*rng.rand(N, 1)
voltage_noise = 0.01*rng.rand(N, 1)
# grp_noise = 0.001*rng.rand(N, 1)
gap_noise = 0.001*rng.rand(N, 1)
sm_noise = rng.rand(N, 3)
time_noise = np.zeros((N, 1))
# noise = np.hstack((gap_noise, grp_noise, voltage_noise, global_intensity_noise, sm_noise, time_noise))
# noise = np.hstack((gap_noise, grp_noise, voltage_noise, sm_noise, time_noise))
noise = np.hstack((gap_noise, voltage_noise, sm_noise, time_noise))
data = data + noise
N_test = int(0.1*data.shape[0])
data_test = data[-N_test:]
data = data[0:-N_test]
N_validate = int(0.1*data.shape[0])
data_validate = data[-N_validate:]
data_train = data[0:-N_validate]
return data_train, data_validate, data_test
def load_data_normalised():
data_train, data_validate, data_test = load_data_split_with_noise()
data = np.vstack((data_train, data_validate))
mu = data.mean(axis=0)
s = data.std(axis=0)
data_train = (data_train - mu)/s
data_validate = (data_validate - mu)/s
data_test = (data_test - mu)/s
return data_train, data_validate, data_test
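# Added usage sketch (assumes data/power/data.npy exists, as load_data expects):
#   power = POWER()
#   print(power.n_dims, power.trn.N)  # feature count and number of training rows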
| 2,216
| 24.77907
| 108
|
py
|
normalizing_flows
|
normalizing_flows-master/datasets/moons.py
|
import torch
import torch.distributions as D
from torch.utils.data import Dataset
from sklearn.datasets import make_moons
class MOONS(Dataset):
def __init__(self, dataset_size=25000, **kwargs):
self.x, self.y = make_moons(n_samples=dataset_size, shuffle=True, noise=0.05)
self.input_size = 2
self.label_size = 2
self.dataset_size = dataset_size
def __len__(self):
return self.dataset_size
def __getitem__(self, i):
return self.x[i], self.y[i]
| 512
| 20.375
| 85
|
py
|
normalizing_flows
|
normalizing_flows-master/datasets/hepmass.py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
from os.path import join
import datasets
import datasets.util
class HEPMASS:
"""
The HEPMASS data set.
http://archive.ics.uci.edu/ml/datasets/HEPMASS
"""
class Data:
def __init__(self, data):
self.x = data.astype(np.float32)
self.N = self.x.shape[0]
def __init__(self):
path = datasets.root + 'hepmass/'
trn, val, tst = load_data_no_discrete_normalised_as_array(path)
self.trn = self.Data(trn)
self.val = self.Data(val)
self.tst = self.Data(tst)
self.n_dims = self.trn.x.shape[1]
def show_histograms(self, split, vars):
data_split = getattr(self, split, None)
if data_split is None:
raise ValueError('Invalid data split')
datasets.util.plot_hist_marginals(data_split.x[:, vars])
plt.show()
def load_data(path):
data_train = pd.read_csv(filepath_or_buffer=join(path, "1000_train.csv"), index_col=False)
data_test = pd.read_csv(filepath_or_buffer=join(path, "1000_test.csv"), index_col=False)
return data_train, data_test
def load_data_no_discrete(path):
"""
Loads the positive class examples from the first 10 percent of the dataset.
"""
data_train, data_test = load_data(path)
# Gets rid of any background noise examples i.e. class label 0.
data_train = data_train[data_train[data_train.columns[0]] == 1]
data_train = data_train.drop(data_train.columns[0], axis=1)
data_test = data_test[data_test[data_test.columns[0]] == 1]
data_test = data_test.drop(data_test.columns[0], axis=1)
# The test file carries a spurious extra last column, so drop it.
data_test = data_test.drop(data_test.columns[-1], axis=1)
return data_train, data_test
def load_data_no_discrete_normalised(path):
data_train, data_test = load_data_no_discrete(path)
mu = data_train.mean()
s = data_train.std()
data_train = (data_train - mu)/s
data_test = (data_test - mu)/s
return data_train, data_test
def load_data_no_discrete_normalised_as_array(path):
data_train, data_test = load_data_no_discrete_normalised(path)
data_train, data_test = data_train.to_numpy(), data_test.to_numpy()
i = 0
# Remove any features that have too many re-occurring real values.
features_to_remove = []
for feature in data_train.T:
c = Counter(feature)
# NOTE: as in the original MAF preprocessing, this is the count of the
# smallest feature value (often a sentinel), not the true maximum count.
max_count = np.array([v for k, v in sorted(c.items())])[0]
if max_count > 5:
features_to_remove.append(i)
i += 1
data_train = data_train[:, np.array([i for i in range(data_train.shape[1]) if i not in features_to_remove])]
data_test = data_test[:, np.array([i for i in range(data_test.shape[1]) if i not in features_to_remove])]
N = data_train.shape[0]
N_validate = int(N*0.1)
data_validate = data_train[-N_validate:]
data_train = data_train[0:-N_validate]
return data_train, data_validate, data_test
| 3,029
| 28.134615
| 112
|
py
|
normalizing_flows
|
normalizing_flows-master/datasets/toy.py
|
import torch
import torch.distributions as D
from torch.utils.data import Dataset
class ToyDistribution(D.Distribution):
def __init__(self, flip_var_order):
super().__init__()
self.flip_var_order = flip_var_order
self.p_x2 = D.Normal(0, 4)
self.p_x1 = lambda x2: D.Normal(0.25 * x2**2, 1)
def rsample(self, sample_shape=torch.Size()):
x2 = self.p_x2.sample(sample_shape)
x1 = self.p_x1(x2).sample()
if self.flip_var_order:
return torch.stack((x2, x1), dim=-1).squeeze()
else:
return torch.stack((x1, x2), dim=-1).squeeze()
def log_prob(self, value):
if self.flip_var_order:
value = value.flip(1)
return self.p_x1(value[:,1]).log_prob(value[:,0]) + self.p_x2.log_prob(value[:,1])
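# Added spot-check sketch (names local to this sketch): log_prob above
# implements the factorization p(x1, x2) = p(x1 | x2) p(x2), which can be
# verified directly against the two conditionals.
def _check_toy_logprob():
    d = ToyDistribution(flip_var_order=False)
    x = d.sample((5,))
    ref = D.Normal(0.25 * x[:, 1]**2, 1).log_prob(x[:, 0]) + D.Normal(0, 4).log_prob(x[:, 1])
    assert torch.allclose(d.log_prob(x), ref)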
class TOY(Dataset):
def __init__(self, dataset_size=25000, flip_var_order=False):
self.input_size = 2
self.label_size = 1
self.dataset_size = dataset_size
self.base_dist = ToyDistribution(flip_var_order)
def __len__(self):
return self.dataset_size
def __getitem__(self, i):
return self.base_dist.sample(), torch.zeros(self.label_size)
| 1,214
| 27.255814
| 90
|
py
|
normalizing_flows
|
normalizing_flows-master/datasets/gas.py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datasets
import datasets.util
class GAS:
class Data:
def __init__(self, data):
self.x = data.astype(np.float32)
self.N = self.x.shape[0]
def __init__(self):
file = datasets.root + 'gas/ethylene_CO.pickle'
trn, val, tst = load_data_and_clean_and_split(file)
self.trn = self.Data(trn)
self.val = self.Data(val)
self.tst = self.Data(tst)
self.n_dims = self.trn.x.shape[1]
def show_histograms(self, split):
data_split = getattr(self, split, None)
if data_split is None:
raise ValueError('Invalid data split')
datasets.util.plot_hist_marginals(data_split.x)
plt.show()
def load_data(file):
data = pd.read_pickle(file)
# data = pd.read_pickle(file).sample(frac=0.25)
# data.to_pickle(file)
data.drop("Meth", axis=1, inplace=True)
data.drop("Eth", axis=1, inplace=True)
data.drop("Time", axis=1, inplace=True)
return data
def get_correlation_numbers(data):
C = data.corr()
A = C > 0.98
B = A.to_numpy().sum(axis=1)
return B
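# Added note: B[i] counts how many columns correlate with column i above 0.98
# (every column correlates with itself, so B >= 1); load_data_and_clean below
# drops one flagged column at a time until no pair exceeds the threshold.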
def load_data_and_clean(file):
data = load_data(file)
B = get_correlation_numbers(data)
while np.any(B > 1):
col_to_remove = np.where(B > 1)[0][0]
col_name = data.columns[col_to_remove]
data.drop(col_name, axis=1, inplace=True)
B = get_correlation_numbers(data)
# print(data.corr())
data = (data-data.mean())/data.std()
return data
def load_data_and_clean_and_split(file):
data = load_data_and_clean(file).to_numpy()
N_test = int(0.1*data.shape[0])
data_test = data[-N_test:]
data_train = data[0:-N_test]
N_validate = int(0.1*data_train.shape[0])
data_validate = data_train[-N_validate:]
data_train = data_train[0:-N_validate]
return data_train, data_validate, data_test
| 1,954
| 22.27381
| 59
|
py
|
normalizing_flows
|
normalizing_flows-master/datasets/bsds300.py
|
import numpy as np
import h5py
import matplotlib.pyplot as plt
import datasets
import datasets.util
class BSDS300:
"""
A dataset of patches from BSDS300.
"""
class Data:
"""
Constructs the dataset.
"""
def __init__(self, data):
self.x = data[:]
self.N = self.x.shape[0]
def __init__(self):
# load dataset
f = h5py.File(datasets.root + 'BSDS300/BSDS300.hdf5', 'r')
self.trn = self.Data(f['train'])
self.val = self.Data(f['validation'])
self.tst = self.Data(f['test'])
self.n_dims = self.trn.x.shape[1]
self.image_size = [int(np.sqrt(self.n_dims + 1))] * 2
f.close()
def show_pixel_histograms(self, split, pixel=None):
"""
Shows the histogram of pixel values, or of a specific pixel if given.
"""
data_split = getattr(self, split, None)
if data_split is None:
raise ValueError('Invalid data split')
if pixel is None:
data = data_split.x.flatten()
else:
row, col = pixel
idx = row * self.image_size[0] + col
data = data_split.x[:, idx]
n_bins = int(np.sqrt(data_split.N))
fig, ax = plt.subplots(1, 1)
ax.hist(data, n_bins, density=True)
plt.show()
def show_images(self, split):
"""
Displays the images in a given split.
:param split: string
"""
# get split
data_split = getattr(self, split, None)
if data_split is None:
raise ValueError('Invalid data split')
# add a pixel at the bottom right
last_pixel = -np.sum(data_split.x, axis=1)
images = np.hstack([data_split.x, last_pixel[:, np.newaxis]])
# display images
datasets.util.disp_imdata(images, self.image_size, [6, 10])
plt.show()
| 1,905
| 23.435897
| 77
|
py
|
normalizing_flows
|
normalizing_flows-master/datasets/download_celeba.py
|
""" Source -- https://github.com/nperraud/download-celebA-HQ/ """
import requests
import tarfile
import zipfile
import gzip
import os
import hashlib
import sys
from glob import glob
from urllib.request import urlretrieve
from subprocess import Popen
import argparse
from tqdm import tqdm
parser = argparse.ArgumentParser(description='Download celebA helper')
parser.add_argument('path', type=str)
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={'id': id}, stream=True)
token = get_confirm_token(response)
if token:
params = {'id': id, 'confirm': token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination, chunk_size=32 * 1024):
total_size = int(response.headers.get('content-length', 0))
# advance the progress bar in bytes written; iterating the bar over chunks directly would count chunks, not bytes
with open(destination, "wb") as f, tqdm(total=total_size, unit='B', unit_scale=True, desc=destination) as pbar:
for chunk in response.iter_content(chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
pbar.update(len(chunk))
def require_dir(path):
if not os.path.exists(path):
os.makedirs(path)
return None
def checksum(filename, method='sha1'):
data = open(filename, 'rb').read()
if method == 'sha1':
return hashlib.sha1(data).hexdigest()
elif method == 'md5':
return hashlib.md5(data).hexdigest()
else:
raise ValueError('Invalid method: %s' % method)
def download(url, target_dir, filename=None):
require_dir(target_dir)
if filename is None:
filename = url_filename(url)
filepath = os.path.join(target_dir, filename)
urlretrieve(url, filepath)
return filepath
def url_filename(url):
return url.split('/')[-1].split('#')[0].split('?')[0]
def archive_extract(filepath, target_dir):
target_dir = os.path.abspath(target_dir)
if tarfile.is_tarfile(filepath):
with tarfile.open(filepath, 'r') as tarf:
# Check that no files get extracted outside target_dir
for name in tarf.getnames():
abs_path = os.path.abspath(os.path.join(target_dir, name))
if not abs_path.startswith(target_dir):
raise RuntimeError('Archive tries to extract files '
'outside target_dir.')
tarf.extractall(target_dir)
elif zipfile.is_zipfile(filepath):
with zipfile.ZipFile(filepath, 'r') as zipf:
zipf.extractall(target_dir)
elif filepath[-3:].lower() == '.gz':
with gzip.open(filepath, 'rb') as gzipf:
with open(filepath[:-3], 'wb') as outf:
outf.write(gzipf.read())
elif '.7z' in filepath:
if os.name != 'posix':
raise NotImplementedError('Only Linux and Mac OS X support .7z '
'compression.')
print('Extracting with 7z...')
cmd = '7z x {} -o{}'.format(filepath, target_dir)
retval = Popen(cmd, shell=True).wait()
if retval != 0:
raise RuntimeError(
'Archive file extraction failed for {}.'.format(filepath))
elif filepath[-2:].lower() == '.z':
if os.name != 'posix':
raise NotImplementedError('Only Linux and Mac OS X support .Z '
'compression.')
cmd = 'gzip -d {}'.format(filepath)
retval = Popen(cmd, shell=True).wait()
if retval != 0:
raise RuntimeError(
'Archive file extraction failed for {}.'.format(filepath))
else:
raise ValueError('{} is not a supported archive file.'.format(filepath))
def download_and_check(drive_data, path):
save_paths = list()
n_files = len(drive_data["filenames"])
for i in range(n_files):
drive_id = drive_data["drive_ids"][i]
filename = drive_data["filenames"][i]
save_path = os.path.join(path, filename)
require_dir(os.path.dirname(save_path))
print('Downloading {} to {}'.format(filename, save_path))
download_file_from_google_drive(drive_id, save_path)
print('Done!')
if "sha1" in drive_data:
sha1 = drive_data["sha1"][i]
print('Check SHA1 {}'.format(save_path))
if sha1 != checksum(save_path, 'sha1'):
raise RuntimeError('Checksum mismatch for %s.' % save_path)
save_paths.append(save_path)
return save_paths
def download_celabA(dataset_dir):
_IMGS_DRIVE = dict(
filenames = [
'img_celeba.7z.001', 'img_celeba.7z.002', 'img_celeba.7z.003',
'img_celeba.7z.004', 'img_celeba.7z.005', 'img_celeba.7z.006',
'img_celeba.7z.007', 'img_celeba.7z.008', 'img_celeba.7z.009',
'img_celeba.7z.010', 'img_celeba.7z.011', 'img_celeba.7z.012',
'img_celeba.7z.013', 'img_celeba.7z.014'
],
drive_ids = [
'0B7EVK8r0v71pQy1YUGtHeUM2dUE', '0B7EVK8r0v71peFphOHpxODd5SjQ',
'0B7EVK8r0v71pMk5FeXRlOXcxVVU', '0B7EVK8r0v71peXc4WldxZGFUbk0',
'0B7EVK8r0v71pMktaV1hjZUJhLWM', '0B7EVK8r0v71pbWFfbGRDOVZxOUU',
'0B7EVK8r0v71pQlZrOENSOUhkQ3c', '0B7EVK8r0v71pLVltX2F6dzVwT0E',
'0B7EVK8r0v71pVlg5SmtLa1ZiU0k', '0B7EVK8r0v71pa09rcFF4THRmSFU',
'0B7EVK8r0v71pNU9BZVBEMF9KN28', '0B7EVK8r0v71pTVd3R2NpQ0FHaGM',
'0B7EVK8r0v71paXBad2lfSzlzSlk', '0B7EVK8r0v71pcTFwT1VFZzkzZk0'
]
)
_ALIGNED_IMGS_DRIVE = dict(
filenames = [
'img_align_celeba.zip'
],
drive_ids = [
'0B7EVK8r0v71pZjFTYXZWM3FlRnM'
],
sha1 = [
'b7e1990e1f046969bd4e49c6d804b93cd9be1646'
]
)
_PARTITIONS_DRIVE = dict(
filenames = [
'Eval/list_eval_partition.txt'
],
drive_ids = [
'0B7EVK8r0v71pY0NSMzRuSXJEVkk'
],
sha1 = [
'fb3d89825c49a2d389601eacb10d73815fd3c52d'
]
)
_ALIGNED_ATTRIBUTES_DRIVE = dict(
filenames = [
'Anno/list_attr_celeba.txt'
],
drive_ids = [
'0B7EVK8r0v71pblRyaVFSWGxPY0U'
],
sha1 = [
'225788ff6c9d0b96dc21144147456e0388195617'
]
)
_ATTRIBUTES_DRIVE = dict(
filenames = [
'Anno/list_landmarks_celeba.txt'
],
drive_ids = [
'0B7EVK8r0v71pTzJIdlJWdHczRlU'
],
sha1 = [
'ea255cd0ffe98ca88bff23767f7a5ece7710db57'
]
)
n_imgs = 202599
img_dir_align = os.path.join(dataset_dir, 'Img', 'img_align_celeba')
img_dir = os.path.join(dataset_dir, 'Img', 'img_celeba')
filepaths = download_and_check(_ALIGNED_IMGS_DRIVE, dataset_dir)
filepath = filepaths[0]
print('Extract archive {}'.format(filepath))
archive_extract(filepath, os.path.join(dataset_dir, 'Img'))
print('Done!')
os.remove(filepath)
n_imgsd = sum([1 for file in os.listdir(img_dir_align) if file[-4:] == '.jpg'])
assert (n_imgsd == n_imgs)
filepaths = download_and_check(_PARTITIONS_DRIVE, dataset_dir)
filepaths = download_and_check(_ATTRIBUTES_DRIVE, dataset_dir)
filepaths = download_and_check(_ALIGNED_ATTRIBUTES_DRIVE, dataset_dir)
filepaths = download_and_check(_IMGS_DRIVE, dataset_dir)
filepath = filepaths[0]
print('Extract archive {}'.format(filepath))
archive_extract(filepath, os.path.join(dataset_dir, 'Img'))
print('Done!')
for filepath in filepaths:
print('Remove: {}'.format(filepath))
os.remove(filepath)
n_imgsd = len(glob(os.path.join(img_dir, '*.jpg')))
assert (n_imgsd == n_imgs)
return True
if __name__ == '__main__':
args = parser.parse_args()
dirpath = args.path
dataset_dir = os.path.join(dirpath, 'CelebA')
download_celabA(dataset_dir)
| 8,603
| 32.478599
| 83
|
py
|
normalizing_flows
|
normalizing_flows-master/datasets/util.py
|
"""
Select dataset functions from MAF repo
https://github.com/gpapamak/maf/blob/master/util.py
"""
import numpy as np
import matplotlib.pyplot as plt
def plot_hist_marginals(data, lims=None, gt=None):
"""
Plots marginal histograms and pairwise scatter plots of a dataset.
"""
n_bins = int(np.sqrt(data.shape[0]))
if data.ndim == 1:
fig, ax = plt.subplots(1, 1)
ax.hist(data, n_bins, density=True)
ax.set_ylim([0, ax.get_ylim()[1]])
if lims is not None: ax.set_xlim(lims)
if gt is not None: ax.vlines(gt, 0, ax.get_ylim()[1], color='r')
else:
n_dim = data.shape[1]
fig, ax = plt.subplots(n_dim, n_dim)
ax = np.array([[ax]]) if n_dim == 1 else ax
if lims is not None:
lims = np.asarray(lims)
lims = np.tile(lims, [n_dim, 1]) if lims.ndim == 1 else lims
for i in range(n_dim):
for j in range(n_dim):
if i == j:
ax[i, j].hist(data[:, i], n_bins, density=True)
ax[i, j].set_ylim([0, ax[i, j].get_ylim()[1]])
if lims is not None: ax[i, j].set_xlim(lims[i])
if gt is not None: ax[i, j].vlines(gt[i], 0, ax[i, j].get_ylim()[1], color='r')
else:
ax[i, j].plot(data[:, i], data[:, j], 'k.', ms=2)
if lims is not None:
ax[i, j].set_xlim(lims[i])
ax[i, j].set_ylim(lims[j])
if gt is not None: ax[i, j].plot(gt[i], gt[j], 'r.', ms=8)
plt.show(block=False)
return fig, ax
def one_hot_encode(labels, n_labels):
"""
Transforms numeric labels to 1-hot encoded labels. Assumes numeric labels are in the range 0, 1, ..., n_labels-1.
"""
assert np.min(labels) >= 0 and np.max(labels) < n_labels
y = np.zeros([labels.size, n_labels])
y[range(labels.size), labels] = 1
return y
def logit(x):
"""
Elementwise logit (inverse logistic sigmoid).
:param x: numpy array
:return: numpy array
"""
return np.log(x / (1.0 - x))
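# Added usage sketch for the helpers above:
#   one_hot_encode(np.array([0, 2]), 3) -> [[1., 0., 0.], [0., 0., 1.]]
#   logit(0.5) -> 0.0, since logit is the inverse of the logistic sigmoid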
| 2,135
| 27.864865
| 117
|
py
|
normalizing_flows
|
normalizing_flows-master/datasets/miniboone.py
|
import numpy as np
import matplotlib.pyplot as plt
import datasets
import datasets.util
class MINIBOONE:
class Data:
def __init__(self, data):
self.x = data.astype(np.float32)
self.N = self.x.shape[0]
def __init__(self):
file = datasets.root + 'miniboone/data.npy'
trn, val, tst = load_data_normalised(file)
self.trn = self.Data(trn)
self.val = self.Data(val)
self.tst = self.Data(tst)
self.n_dims = self.trn.x.shape[1]
def show_histograms(self, split, vars):
data_split = getattr(self, split, None)
if data_split is None:
raise ValueError('Invalid data split')
datasets.util.plot_hist_marginals(data_split.x[:, vars])
plt.show()
def load_data(root_path):
# NOTE: To remember how the pre-processing was done.
# data = pd.read_csv(root_path, names=[str(x) for x in range(50)], delim_whitespace=True)
# print data.head()
# data = data.as_matrix()
# # Remove some random outliers
# indices = (data[:, 0] < -100)
# data = data[~indices]
#
# i = 0
# # Remove any features that have too many re-occuring real values.
# features_to_remove = []
# for feature in data.T:
# c = Counter(feature)
# max_count = np.array([v for k, v in sorted(c.iteritems())])[0]
# if max_count > 5:
# features_to_remove.append(i)
# i += 1
# data = data[:, np.array([i for i in range(data.shape[1]) if i not in features_to_remove])]
# np.save("~/data/miniboone/data.npy", data)
data = np.load(root_path)
N_test = int(0.1*data.shape[0])
data_test = data[-N_test:]
data = data[0:-N_test]
N_validate = int(0.1*data.shape[0])
data_validate = data[-N_validate:]
data_train = data[0:-N_validate]
return data_train, data_validate, data_test
def load_data_normalised(root_path):
data_train, data_validate, data_test = load_data(root_path)
data = np.vstack((data_train, data_validate))
mu = data.mean(axis=0)
s = data.std(axis=0)
data_train = (data_train - mu)/s
data_validate = (data_validate - mu)/s
data_test = (data_test - mu)/s
return data_train, data_validate, data_test
| 2,250
| 26.790123
| 96
|
py
|
normalizing_flows
|
normalizing_flows-master/datasets/__init__.py
|
root = 'data/'
#from .power import POWER
#from .gas import GAS
#from .hepmass import HEPMASS
#from .miniboone import MINIBOONE
#from .bsds300 import BSDS300
#from .toy import TOY
#from .moons import MOONS
#from .mnist import MNIST
#from torchvision.datasets import MNIST, CIFAR10
| 283
| 19.285714
| 48
|
py
|
normalizing_flows
|
normalizing_flows-master/datasets/celeba.py
|
import os
from PIL import Image
import numpy as np
import torch
from torch.utils.data import Dataset
class CelebA(Dataset):
processed_file = 'processed.pt'
partition_file = 'Eval/list_eval_partition.txt'
attr_file = 'Anno/list_attr_celeba.txt'
img_folder = 'Img/img_align_celeba'
attr_names = '5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair Bushy_Eyebrows Chubby Double_Chin Eyeglasses Goatee Gray_Hair Heavy_Makeup High_Cheekbones Male Mouth_Slightly_Open Mustache Narrow_Eyes No_Beard Oval_Face Pale_Skin Pointy_Nose Receding_Hairline Rosy_Cheeks Sideburns Smiling Straight_Hair Wavy_Hair Wearing_Earrings Wearing_Hat Wearing_Lipstick Wearing_Necklace Wearing_Necktie Young'.split()
def __init__(self, root, train=True, transform=None, mini_data_size=None):
self.root = os.path.join(os.path.expanduser(root), self.__class__.__name__)
self.transform = transform
# check if processed
if not os.path.exists(os.path.join(self.root, self.processed_file)):
self._process_and_save()
data = torch.load(os.path.join(self.root, self.processed_file))
if train:
self.data = data['train']
else:
self.data = data['val']
if mini_data_size is not None:
self.data = self.data[:mini_data_size]
def __getitem__(self, idx):
filename, attr = self.data[idx]
img = Image.open(os.path.join(self.root, self.img_folder, filename)) # loads in RGB mode
if self.transform is not None:
img = self.transform(img)
attr = torch.from_numpy(attr)
return img, attr
def __len__(self):
return len(self.data)
def _process_and_save(self):
if not os.path.exists(os.path.join(self.root, self.attr_file)):
raise RuntimeError('Dataset attributes file not found at {}.'.format(os.path.join(self.root, self.attr_file)))
if not os.path.exists(os.path.join(self.root, self.partition_file)):
raise RuntimeError('Dataset evaluation partitions file not found at {}.'.format(os.path.join(self.root, self.partition_file)))
if not os.path.isdir(os.path.join(self.root, self.img_folder)):
raise RuntimeError('Dataset image folder not found at {}.'.format(os.path.join(self.root, self.img_folder)))
# read attributes file: list_attr_celeba.txt
# First Row: number of images
# Second Row: attribute names
# Rest of the Rows: <image_id> <attribute_labels>
with open(os.path.join(self.root, self.attr_file), 'r') as f:
lines = f.readlines()
n_files = int(lines[0])
attr = [[l.split()[0], l.split()[1:]] for l in lines[2:]] # [image_id.jpg, <attr_labels>]
assert len(attr) == n_files, \
'Mismatch b/n num entries in attributes file {} and reported num files {}'.format(len(attr), n_files)
# read partition file: list_eval_partition.txt;
# All Rows: <image_id> <evaluation_status>
# "0" represents training image,
# "1" represents validation image,
# "2" represents testing image;
data = [[], [], []] # train, val, test
unmatched = 0
with open(os.path.join(self.root, self.partition_file), 'r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
fname, split = line.split()
if attr[i][0] != fname:
unmatched += 1
continue
data[int(split)].append([fname, np.array(attr[i][1], dtype=np.float32)]) # [image_id.jpg, <attr_labels>] by train/val/test
if unmatched > 0: print('Unmatched partition filenames to attribute filenames: ', unmatched)
assert sum(len(s) for s in data) == n_files, \
'Mismatch b/n num entries in partition {} and reported num files {}'.format(sum(len(s) for s in data), n_files)
# check image folder
filenames = os.listdir(os.path.join(self.root, self.img_folder))
assert len(filenames) == n_files, \
'Mismatch b/n num files in image folder {} and reported num files {}'.format(len(filenames), n_files)
# save
data = {'train': data[0], 'val': data[1], 'test': data[2]}
with open(os.path.join(self.root, self.processed_file), 'wb') as f:
torch.save(data, f)
if __name__ == '__main__':
d = CelebA('~/Data/')
print('Length: ', len(d))
print('Image: ', d[0][0])
print('Attr: ', d[0][1])
import timeit
t = timeit.timeit('d[np.random.randint(0,len(d))]', number=1000, globals=globals())
print('Retrieval time: ', t)
import torchvision.transforms as T
import matplotlib.pyplot as plt
n_bits = 5
t = T.Compose([T.CenterCrop(148), # RealNVP preprocessing
T.Resize(64),
T.Lambda(lambda im: np.array(im, dtype=np.float32)), # to numpy
T.Lambda(lambda x: np.floor(x / 2**(8 - n_bits)) / 2**n_bits), # lower bits
T.ToTensor(),
T.Lambda(lambda t: t + torch.rand(t.shape)/ 2**n_bits)]) # dequantize
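# Added note (sketch of the preprocessing math): floor(x / 2**(8 - n_bits)) / 2**n_bits
# maps 8-bit pixels onto 2**n_bits evenly spaced levels in [0, 1), and adding
# uniform(0, 1/2**n_bits) noise dequantizes them into a continuous density.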
d_ = CelebA('~/Data/', transform=t)
fig, axs = plt.subplots(1,2)
axs[0].imshow(np.array(d[0][0]))
axs[1].imshow(d_[0][0].numpy().transpose(1,2,0))
plt.show()
| 5,419
| 43.793388
| 490
|
py
|
normalizing_flows
|
normalizing_flows-master/datasets/mnist.py
|
import numpy as np
import gzip
import pickle
import matplotlib.pyplot as plt
import datasets
import datasets.util as util
class MNIST:
"""
The MNIST dataset of handwritten digits.
"""
alpha = 1.0e-6
class Data:
"""
Constructs the dataset.
"""
def __init__(self, data, logit, dequantize, rng):
x = self._dequantize(data[0], rng) if dequantize else data[0] # dequantize pixels
self.x = self._logit_transform(x) if logit else x # logit
self.labels = data[1] # numeric labels
self.y = util.one_hot_encode(self.labels, 10) # 1-hot encoded labels
self.N = self.x.shape[0] # number of datapoints
@staticmethod
def _dequantize(x, rng):
"""
Adds noise to pixels to dequantize them.
"""
return x + rng.rand(*x.shape) / 256.0
@staticmethod
def _logit_transform(x):
"""
Transforms pixel values with logit to be unconstrained.
"""
return util.logit(MNIST.alpha + (1 - 2*MNIST.alpha) * x)
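# Added note (sketch of the math): with alpha = 1e-6, pixels in [0, 1] are first
# squashed into [alpha, 1 - alpha], so y = logit(alpha + (1 - 2*alpha) * x) stays
# finite at x = 0 and x = 1; the inverse is x = (sigmoid(y) - alpha) / (1 - 2*alpha).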
def __init__(self, logit=True, dequantize=True):
# load dataset
f = gzip.open(datasets.root + 'uci_mnist/mnist.pkl.gz', 'rb')
trn, val, tst = pickle.load(f, encoding='latin1')
f.close()
rng = np.random.RandomState(42)
self.trn = self.Data(trn, logit, dequantize, rng)
self.val = self.Data(val, logit, dequantize, rng)
self.tst = self.Data(tst, logit, dequantize, rng)
im_dim = int(np.sqrt(self.trn.x.shape[1]))
self.n_dims = (1, im_dim, im_dim)
self.n_labels = self.trn.y.shape[1]
self.image_size = [im_dim, im_dim]
def show_pixel_histograms(self, split, pixel=None):
"""
Shows the histogram of pixel values, or of a specific pixel if given.
"""
data_split = getattr(self, split, None)
if data_split is None:
raise ValueError('Invalid data split')
if pixel is None:
data = data_split.x.flatten()
else:
row, col = pixel
idx = row * self.image_size[0] + col
data = data_split.x[:, idx]
n_bins = int(np.sqrt(data_split.N))
fig, ax = plt.subplots(1, 1)
ax.hist(data, n_bins, density=True)
plt.show()
def show_images(self, split):
"""
Displays the images in a given split.
:param split: string
"""
# get split
data_split = getattr(self, split, None)
if data_split is None:
raise ValueError('Invalid data split')
# display images
util.disp_imdata(data_split.x, self.image_size, [6, 10])
plt.show()
| 2,886
| 28.459184
| 97
|
py
|
SpinalNet
|
SpinalNet-master/Regression/Regression_NN_and_SpinalNet.py
|
# -*- coding: utf-8 -*-
"""
This script performs regression on toy datasets.
There exist several relations between inputs and output.
We investigate both a traditional feed-forward network and SpinalNet
on all of these input-output relations.
----------
Multiplication:
y = x1*x2*x3*x4*x5*x6*x7*x8 + 0.2*torch.rand(x1.size())
Spinal
Epoch [100/200], Loss: 0.0573, Minimum Loss 0.003966
Epoch [200/200], Loss: 0.0170, Minimum Loss 0.002217
Normal
Epoch [100/200], Loss: 0.0212, Minimum Loss 0.003875
Epoch [200/200], Loss: 0.0373, Minimum Loss 0.003875
Sine multiplication:
y = torch.sin(x1*x2*x3*x4*x5*x6*x7*x8) + 0.2*torch.rand(x1.size())
Spinal
Epoch [100/200], Loss: 0.0013, Minimum Loss 0.000910
Epoch [200/200], Loss: 0.0023, Minimum Loss 0.000910
Normal
Epoch [100/200], Loss: 0.0090, Minimum Loss 0.003403
Epoch [200/200], Loss: 0.0041, Minimum Loss 0.001554
Addition:
y = (x1+x2+x3+x4+x5+x6+x7+x8) + 0.2*torch.rand(x1.size())
Spinal
Epoch [100/200], Loss: 0.0038, Minimum Loss 0.001007
Epoch [200/200], Loss: 0.0022, Minimum Loss 0.000855
Normal
Epoch [100/200], Loss: 0.0024, Minimum Loss 0.001178
Epoch [200/200], Loss: 0.0021, Minimum Loss 0.000887
Sine Addition:
y = torch.sin(x1+x2+x3+x4+x5+x6+x7+x8) + 0.2*torch.rand(x1.size())
Spinal
Epoch [100/200], Loss: 0.0254, Minimum Loss 0.001912
Epoch [200/200], Loss: 0.0029, Minimum Loss 0.001219
Normal
Epoch [100/200], Loss: 0.0019, Minimum Loss 0.001918
Epoch [200/200], Loss: 0.0038, Minimum Loss 0.001086
@author: Dipu
"""
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torch.utils.data as Data
import matplotlib.pyplot as plt
import numpy as np
import copy
#import imageio
torch.manual_seed(0)
size_x=1000
x1 = torch.unsqueeze(torch.randn(size_x), dim=1)
x2 = torch.unsqueeze(torch.randn(size_x), dim=1)
x3 = torch.unsqueeze(torch.randn(size_x), dim=1)
x4 = torch.unsqueeze(torch.randn(size_x), dim=1)
x5 = torch.unsqueeze(torch.randn(size_x), dim=1)
x6 = torch.unsqueeze(torch.randn(size_x), dim=1)
x7 = torch.unsqueeze(torch.randn(size_x), dim=1)
x8 = torch.unsqueeze(torch.randn(size_x), dim=1)
half_in_size=4
y = (x1*x2*x3*x4*x5*x6*x7*x8) + 0.2*torch.rand(x1.size())
# noisy y data (tensor), shape=(size_x, 1); noise must match x1's shape or broadcasting blows y up to (size_x, size_x)
x=torch.cat([x1,x2,x3,x4,x5,x6,x7,x8], dim=1)
x, y = Variable(x), Variable(y)
# another way to define a network
net = torch.nn.Sequential(
torch.nn.Linear(half_in_size*2, 200),
torch.nn.LeakyReLU(),
torch.nn.Linear(200, 100),
torch.nn.LeakyReLU(),
torch.nn.Linear(100, 1),
)
import torch.nn as nn
first_HL = 50
class SpinalNet(nn.Module):
def __init__(self):
super(SpinalNet, self).__init__()
self.lru = nn.LeakyReLU()
self.fc1 = nn.Linear(half_in_size, first_HL)
self.fc2 = nn.Linear(half_in_size+first_HL, first_HL)
self.fc3 = nn.Linear(half_in_size+first_HL, first_HL)
self.fc4 = nn.Linear(half_in_size+first_HL, first_HL)
self.fc5 = nn.Linear(half_in_size+first_HL, first_HL)
self.fc6 = nn.Linear(half_in_size+first_HL, first_HL)
self.fcx = nn.Linear(first_HL*6, 1)
def forward(self, x):
x1 = x[:,0:half_in_size]
x1 = self.lru(self.fc1(x1))
x2= torch.cat([ x[:,half_in_size:half_in_size*2], x1], dim=1)
x2 = self.lru(self.fc2(x2))
x3= torch.cat([x[:,0:half_in_size], x2], dim=1)
x3 = self.lru(self.fc3(x3))
x4= torch.cat([x[:,half_in_size:half_in_size*2], x3], dim=1)
x4 = self.lru(self.fc4(x4))
x5 = torch.cat([x[:,0:half_in_size], x4], dim=1)
x5 = self.lru(self.fc5(x5))
x6 = torch.cat([x[:,half_in_size:half_in_size*2], x5], dim=1)
x6 = self.lru(self.fc6(x6))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = torch.cat([x, x5], dim=1)
x = torch.cat([x, x6], dim=1)
x = self.fcx(x)
return x
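# Added shape sketch (for the values set above, half_in_size = 4, first_HL = 50):
# each spinal layer consumes one half of the 8-dim input concatenated with the
# previous layer's output, i.e. Linear(4 + 50 -> 50) after the initial
# Linear(4 -> 50); the head concatenates all six 50-dim outputs into Linear(300 -> 1).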
#------------------------------------------------------------------------------
"""
Comment these two lines for traditional NN training.
"""
net = SpinalNet()
print('SpinalNet')
#------------------------------------------------------------------------------
optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
loss_func = torch.nn.MSELoss() # this is for regression mean squared loss
BATCH_SIZE = 64
EPOCH = 200
torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(
dataset=torch_dataset,
batch_size=BATCH_SIZE,
shuffle=True, num_workers=0,)
min_loss = 100
# start training
for epoch in range(EPOCH):
for step, (batch_x, batch_y) in enumerate(loader): # for each training step
b_x = Variable(batch_x)
b_y = Variable(batch_y)
prediction = net(b_x) # input x and predict based on x
loss = loss_func(prediction, b_y) # must be (1. nn output, 2. target)
optimizer.zero_grad() # clear gradients for next train
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
loss = loss.item()
if loss<min_loss:
min_loss = loss
net_opt = copy.deepcopy(net) # snapshot the best model; plain assignment would only alias the live net
if epoch%100 == 99:
print ("Epoch [{}/{}], Loss: {:.4f}, Minimum Loss {:.6f}" .format(epoch+1, EPOCH, loss, min_loss))
| 5,420
| 28.302703
| 105
|
py
|
SpinalNet
|
SpinalNet-master/Transfer Learning/Transfer_Learning_hymenoptera.py
|
'''
Most of the code and the dataset come from the PyTorch transfer-learning tutorial:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
data_dir = 'data/hymenoptera_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
shuffle=True, num_workers=0)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
#%%
half_in_size = 256
first_HL = 5
class SpinalNet(nn.Module):
def __init__(self):
super(SpinalNet, self).__init__()
self.lru = nn.LeakyReLU()
self.fc1 = nn.Linear(half_in_size, first_HL)
self.fc2 = nn.Linear(half_in_size+first_HL, first_HL)
self.fc3 = nn.Linear(half_in_size+first_HL, first_HL)
self.fc4 = nn.Linear(half_in_size+first_HL, first_HL)
self.fc5 = nn.Linear(half_in_size+first_HL, first_HL)
self.fc6 = nn.Linear(half_in_size+first_HL, first_HL)
self.fcx = nn.Linear(first_HL*6, 2)
def forward(self, x):
x1 = x[:,0:half_in_size]
x1 = self.lru(self.fc1(x1))
x2= torch.cat([ x[:,half_in_size:half_in_size*2], x1], dim=1)
x2 = self.lru(self.fc2(x2))
x3= torch.cat([x[:,0:half_in_size], x2], dim=1)
x3 = self.lru(self.fc3(x3))
x4= torch.cat([x[:,half_in_size:half_in_size*2], x3], dim=1)
x4 = self.lru(self.fc4(x4))
x5 = torch.cat([x[:,0:half_in_size], x4], dim=1)
x5 = self.lru(self.fc5(x5))
x6 = torch.cat([x[:,half_in_size:half_in_size*2], x5], dim=1)
x6 = self.lru(self.fc6(x6))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = torch.cat([x, x5], dim=1)
x = torch.cat([x, x6], dim=1)
x = self.fcx(x)
return x
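# Added note: same spinal wiring as in the regression script, here with
# half_in_size = 256 and first_HL = 5, i.e. Linear(256 -> 5), then five
# Linear(261 -> 5) layers, and a head Linear(30 -> 2) for the two classes.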
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
def visualize_model(model, num_images=6):
was_training = model.training
model.eval()
images_so_far = 0
fig = plt.figure()
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataloaders['val']):
inputs = inputs.to(device)
labels = labels.to(device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
for j in range(inputs.size()[0]):
images_so_far += 1
ax = plt.subplot(num_images//2, 2, images_so_far)
ax.axis('off')
ax.set_title('predicted: {}'.format(class_names[preds[j]]))
imshow(inputs.cpu().data[j])
if images_so_far == num_images:
model.train(mode=was_training)
return
model.train(mode=was_training)
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
'''
Changing the fully connected layer to SpinalNet
'''
#model_ft.fc = nn.Linear(num_ftrs, 2)
model_ft.fc = SpinalNet()
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=25)
| 7,504
| 29.384615
| 78
|
py
|
SpinalNet
|
SpinalNet-master/Transfer Learning/Transfer_Learning_STL10.py
|
'''
We wrote this code with the help of the PyTorch transfer-learning tutorial:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
Data is downloaded via PyTorch and divided into folders
using the script 'Pytorch_data_to_folders.py'.
Setup and results:
transforms.Resize((272,272)),
transforms.RandomRotation(15,),
transforms.RandomCrop(256),
transforms.RandomHorizontalFlip(),
wide_resnet101_2 Spinal FC gives 98.23% test accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((272,272)),
transforms.RandomRotation(15,),
transforms.RandomCrop(256),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
data_dir = 'data/stl10'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=24,
shuffle=True, num_workers=0)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
# model_ft = models.vgg19_bn(pretrained=True)
# num_ftrs = model_ft.classifier[0].in_features
model_ft = models.wide_resnet101_2(pretrained=True)
num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 512
Num_class=10
class SpinalNet(nn.Module):
def __init__(self):
super(SpinalNet, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
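# Added shape sketch (assuming wide_resnet101_2, whose fc expects 2048 features,
# so half_in_size = 1024 and layer_width = 512): the first spinal layer is
# Linear(1024 -> 512), the rest are Linear(1024 + 512 -> 512), and the head is
# Linear(4 * 512 -> Num_class).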
net_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
print('Time from Start {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
'''
Changing the fully connected layer to SpinalNet
'''
#model_ft.fc = nn.Linear(num_ftrs, 10)
model_ft.fc = SpinalNet()
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=20)
| 7,704
| 30.068548
| 93
|
py
|
SpinalNet
|
SpinalNet-master/Transfer Learning/Transfer_Learning_CIFAR100.py
|
'''
We wrote this code with the help of the PyTorch transfer learning demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
The dataset is arranged into folders with the following script:
https://au.mathworks.com/matlabcentral/answers/329597-save-cifar-100-images
Performance:
Data augmentation:
transforms.Resize((136,136)),
transforms.RandomRotation(10,),
transforms.RandomCrop(128),
Cifar-10
resnet101 Spinal FC (1024*4 neurons) gives 97.03% Accuracy
Cifar-100
resnet101 Spinal FC (512*4 neurons) gives 84.04% Accuracy
Data augmentation:
transforms.Resize((272,272)),
transforms.RandomRotation(15,),
transforms.RandomCrop(256),
transforms.RandomHorizontalFlip(),
Cifar-10
wide_resnet101_2 Spinal FC (512*4 neurons) gives 98.12% Accuracy
Cifar-100
wide_resnet101_2 Spinal FC (512*4 neurons) gives 88.34% Accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((272,272)),
transforms.RandomRotation(15,),
transforms.RandomCrop(256),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
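# ---- Added sketch (not in the original script): the Normalize constants above
# are commonly quoted CIFAR statistics; under that assumption they can be
# recomputed from any equally-sized image dataset with a helper like this.
def _channel_stats(dataset, batch_size=512):
    """Per-channel mean/std of a dataset yielding ToTensor'd images of one size."""
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
    n = 0
    mean = torch.zeros(3)
    sq = torch.zeros(3)
    for imgs, _ in loader:
        b = imgs.size(0)
        flat = imgs.view(b, 3, -1)            # (batch, channel, pixels)
        mean += flat.mean(dim=2).sum(dim=0)   # sum of per-image channel means
        sq += flat.pow(2).mean(dim=2).sum(dim=0)
        n += b
    mean /= n
    std = (sq / n - mean.pow(2)).sqrt()       # sqrt(E[x^2] - E[x]^2)
    return mean, std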
data_dir = 'data/cifar10'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=24,
shuffle=True, num_workers=0)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = 'cuda'#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
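    # (added note) these are ImageNet statistics; the dataloaders above normalize
    # with different constants, so this preview de-normalization is approximate.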
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
# model_ft = models.vgg19_bn(pretrained=True)
# num_ftrs = model_ft.classifier[0].in_features
model_ft = models.wide_resnet101_2(pretrained=True)
num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 512
Num_class=100
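# (added note) the backbone's pooled feature vector (num_ftrs values) is split
# into two halves: spinal layers 1 and 3 read the first half, layers 2 and 4 the
# second, and every layer after the first also receives the previous layer's output.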
class SpinalNet(nn.Module):
def __init__(self):
super(SpinalNet, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
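# ---- Added sanity check (a minimal sketch, not part of the original script) ----
# Layer 1 reads half_in_size features; layers 2-4 each read half_in_size +
# layer_width (one input half plus the previous layer's output); the four
# outputs concatenate to layer_width*4 before fc_out.
def _check_spinal_shapes(batch_size=4):
    head = SpinalNet()
    dummy = torch.randn(batch_size, 2 * half_in_size)  # fake pooled backbone features
    assert head(dummy).shape == (batch_size, Num_class)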
net_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
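# (added note) net_fc mirrors the classic VGG classifier head; in this script it
# appears to be an unused baseline (it is never attached to model_ft), and its
# 512-feature input would not match the 2048 features of wide_resnet101_2.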
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
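                # (added note) CrossEntropyLoss defaults to reduction='mean', so
                # multiplying by the batch size accumulates the summed loss; the
                # division by dataset_sizes[phase] below then gives the epoch mean.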
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
print('Time from Start {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
'''
Changing the fully connected layer to SpinalNet
'''
#model_ft.fc = nn.Linear(num_ftrs, 10)
model_ft.fc = SpinalNet()
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=50)
| 8,199
| 28.818182
| 93
|
py
|
SpinalNet
|
SpinalNet-master/Transfer Learning/Transfer_Learning_CIFAR10.py
|
'''
We wrote this code with the help of the PyTorch transfer learning demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
The dataset is downloaded from https://www.kaggle.com/swaroopkml/cifar10-pngs-in-folders
Performance:
Data augmentation:
transforms.Resize((272,272)),
transforms.RandomRotation(15,),
transforms.RandomCrop(256),
transforms.RandomHorizontalFlip(),
Cifar-10
wide_resnet101_2 gives 98.22% Accuracy
wide_resnet101_2 Spinal FC (20*4 neurons dropout_bn) gives 98.12% Accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((272,272)),
transforms.RandomRotation(15,),
transforms.RandomCrop(256),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
data_dir = 'data/cifar10'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=28,
shuffle=True, num_workers=0)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = 'cuda'#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
model_ft = models.vgg19_bn(pretrained=True)
num_ftrs = model_ft.classifier[0].in_features
# model_ft = models.wide_resnet101_2(pretrained=True)
# num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 20 #Small for Resnet, large for VGG
Num_class=10
class SpinalNet_ResNet(nn.Module):
def __init__(self):
super(SpinalNet_ResNet, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
class SpinalNet_VGG(nn.Module):
def __init__(self):
super(SpinalNet_VGG, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
VGG_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
'''
Changing the fully connected layer to SpinalNet or VGG or ResNet
'''
#model_ft.classifier = nn.Linear(num_ftrs, 10)
model_ft.classifier = SpinalNet_VGG()  # VGG keeps its head in .classifier, not .fc
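# (added note) torchvision's VGG models keep their classification head in
# .classifier (ResNets use .fc); assigning to .fc on a VGG would create an
# attribute the forward pass never calls, silently training the original head.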
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
print('Time from Start {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=50)
| 9,644
| 30.314935
| 93
|
py
|
SpinalNet
|
SpinalNet-master/Transfer Learning/Transfer_Learning_SVHN.py
|
'''
The data is downloaded via torchvision and divided into folders
using the script 'Pytorch_data_to_folders.py'.
Results with the following augmentation:
transforms.Resize((272,320)),
transforms.RandomRotation(15,),
transforms.CenterCrop(272),
transforms.RandomCrop(256),
transforms.ToTensor(),
wide_resnet101_2 Spinal ResNet FC gives 97.87% test accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((272,320)),
transforms.RandomRotation(15,),
transforms.CenterCrop(272),
transforms.RandomCrop(256),
        transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'val': transforms.Compose([
transforms.Resize((272,320)),
transforms.CenterCrop((256,256)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
data_dir = 'data/SVHN'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=28,
shuffle=True, num_workers=0)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = 'cuda'#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
model_ft = models.vgg19_bn(pretrained=True)
num_ftrs = model_ft.classifier[0].in_features
# model_ft = models.wide_resnet101_2(pretrained=True)
# num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 20 #Small for Resnet, large for VGG
Num_class=10
class SpinalNet_ResNet(nn.Module):
def __init__(self):
super(SpinalNet_ResNet, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
class SpinalNet_VGG(nn.Module):
def __init__(self):
super(SpinalNet_VGG, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
VGG_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
'''
Changing the fully connected layer to SpinalNet or VGG or ResNet
'''
#model_ft.classifier = nn.Linear(num_ftrs, 10)
model_ft.classifier = SpinalNet_VGG()  # VGG keeps its head in .classifier, not .fc
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
print('Time from Start {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=50)
| 9,514
| 30.611296
| 93
|
py
|
SpinalNet
|
SpinalNet-master/Transfer Learning/Transfer_Learning_CINIC10.py
|
'''
We wrote this code with the help of the PyTorch transfer learning demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
The dataset is downloaded from https://www.kaggle.com/mengcius/cinic10
Results with the following augmentation:
transforms.Resize((272,272)),
transforms.RandomRotation(15,),
transforms.RandomCrop(256),
transforms.RandomHorizontalFlip(),
wide_resnet101_2 Spinal FC gives 93.60% test accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((456,456)),
transforms.RandomRotation(15,),
transforms.RandomCrop(448),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.47889522, 0.47227842, 0.43047404], std=[0.24205776, 0.23828046, 0.25874835])
]),
'val': transforms.Compose([
transforms.Resize((448,448)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.47889522, 0.47227842, 0.43047404], std=[0.24205776, 0.23828046, 0.25874835])
]),
'test': transforms.Compose([
transforms.Resize((448,448)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.47889522, 0.47227842, 0.43047404], std=[0.24205776, 0.23828046, 0.25874835])
]),
}
data_dir = 'data/cinic10'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=9,
shuffle=True, num_workers=0)
for x in ['train', 'val', 'test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val', 'test']}
class_names = image_datasets['train'].classes
device = 'cuda'#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
# model_ft = models.vgg19_bn(pretrained=True)
# num_ftrs = model_ft.classifier[0].in_features
model_ft = models.wide_resnet101_2(pretrained=True)
num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width_vgg = 1024 #Small for Resnet, large for VGG
layer_width_res = 20 #Small for Resnet, large for VGG
Num_class=10
class SpinalNet_ResNet(nn.Module):
def __init__(self):
super(SpinalNet_ResNet, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size, layer_width_res),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width_res, layer_width_res),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width_res, layer_width_res),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width_res, layer_width_res),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(layer_width_res*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
class SpinalNet_VGG(nn.Module):
def __init__(self):
super(SpinalNet_VGG, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width_vgg),
nn.BatchNorm1d(layer_width_vgg), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width_vgg, layer_width_vgg),
nn.BatchNorm1d(layer_width_vgg),
nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width_vgg, layer_width_vgg),
nn.BatchNorm1d(layer_width_vgg),
nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width_vgg, layer_width_vgg),
nn.BatchNorm1d(layer_width_vgg),
nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(layer_width_vgg*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
VGG_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
'''
Changing the fully connected layer to SpinalNet or VGG or ResNet
'''
#model_ft.fc = nn.Linear(num_ftrs, Num_class)
#model_ft.fc = SpinalNet_VGG()
model_ft.fc = SpinalNet_ResNet()
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
test_token=0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val', 'test']:
'''
Test when a better validation result is found
'''
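            # (added note) test_token is set to 1 when validation improves; after
            # epoch 8 the test pass therefore runs only right after an improvement.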
if test_token ==0 and phase == 'test' and epoch>8:
continue
test_token =0
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
test_token =1
time_elapsed = time.time() - since
print('Time from start {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=20)
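# (added note) a second stage continues from the best weights restored inside
# train_model, repeating the schedule at a 10x lower starting learning rate.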
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=20)
| 10,712
| 31.761468
| 113
|
py
|
SpinalNet
|
SpinalNet-master/Transfer Learning/Transfer_Learning_Caltech101.py
|
'''
We wrote this code with the help of the PyTorch transfer learning demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
The dataset is downloaded from https://www.kaggle.com/huangruichu/caltech101/version/2
Results with the following augmentation:
transforms.Resize((230,230)),
transforms.RandomRotation(15,),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
wide_resnet101_2 provides 96.11% test accuracy
wide_resnet101_2 SpinalNet_ResNet provides 96.40% test accuracy
wide_resnet101_2 SpinalNet_VGG provides 96.87% test accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((230,230)),
transforms.RandomRotation(15,),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'val': transforms.Compose([
transforms.Resize((224,224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'test': transforms.Compose([
transforms.Resize((224,224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
data_dir = 'data/Caltech101'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=30,
shuffle=True, num_workers=0)
for x in ['train', 'val', 'test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val', 'test']}
class_names = image_datasets['train'].classes
device = 'cuda'#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
# model_ft = models.vgg19_bn(pretrained=True)
# num_ftrs = model_ft.classifier[0].in_features
model_ft = models.wide_resnet101_2(pretrained=True)
num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 1024 #Small for Resnet, large for VGG
Num_class=101
class SpinalNet_ResNet(nn.Module):
def __init__(self):
super(SpinalNet_ResNet, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
class SpinalNet_VGG(nn.Module):
def __init__(self):
super(SpinalNet_VGG, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
VGG_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
'''
Changing the fully connected layer to SpinalNet or VGG or ResNet
'''
model_ft.fc = nn.Linear(num_ftrs, 101)
#model_ft.fc = SpinalNet_ResNet() #SpinalNet_VGG
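# (added note) as committed, this script trains the plain Linear head; the
# SpinalNet accuracies quoted in the docstring presumably come from uncommenting
# the line above instead.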
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
test_token=0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val', 'test']:
'''
Test when a better validation result is found
'''
if test_token ==0 and phase == 'test':
continue
test_token =0
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
test_token =1
time_elapsed = time.time() - since
        print('Time from Start {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=20)
| 10,203
| 30.788162
| 93
|
py
|
SpinalNet
|
SpinalNet-master/Transfer Learning/Pytorch_data_to_folders.py
|
# -*- coding: utf-8 -*-
"""
We need to create the 'train' and 'val' folders manually before running this script.
@author: Dipu
"""
import torchvision
import matplotlib
import matplotlib.pyplot as plt
import numpy
import imageio
import os
data_train = torchvision.datasets.SVHN('./data', split='train', download=True)
folderlocation = './data/SVHN/'
for iter1 in range(10): # 10 = number of classes
path = folderlocation + 'train/'+str(iter1)
if not os.path.exists(path):
os.mkdir(path)
path = folderlocation + 'val/'+str(iter1)
if not os.path.exists(path):
os.mkdir(path)
for iter1 in range(len(data_train)):
x, a = data_train[iter1]
imageio.imwrite(folderlocation + 'train/'+str(a)+'/train'+str(iter1)+'.png', x)
data_test = torchvision.datasets.SVHN('./data', split='test', download=True)
for iter1 in range(len(data_test)):
x, a = data_test[iter1]
imageio.imwrite(folderlocation + 'val/'+str(a)+'/test'+str(iter1)+'.png', x)
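# ---- Added note (a sketch of the resulting layout, assuming default paths) ----
# ./data/SVHN/train/0 ... train/9 holds the 73,257 official training images and
# ./data/SVHN/val/0 ... val/9 the 26,032 official test images, so the official
# test split serves as 'val' in the ImageFolder layout the other scripts expect.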
| 1,195
| 28.170732
| 83
|
py
|
SpinalNet
|
SpinalNet-master/Transfer Learning/Transfer_Learning_Fruits360.py
|
'''
We wrote this code with the help of the PyTorch transfer learning demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
The dataset is downloaded from https://www.kaggle.com/moltean/fruits
Results with the following augmentation:
transforms.Resize((140,140)),
transforms.RandomRotation(15,),
transforms.RandomResizedCrop(128),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
Example output in Kaggle:
https://www.kaggle.com/dipuk0506/spinalnet-fruit360-99-99-accuracy
In that example, we got 99.99% accuracy.
In one training session, we got 100% accuracy.
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'Training': transforms.Compose([
transforms.Resize((140,140)),
transforms.RandomRotation(15,),
transforms.RandomResizedCrop(128),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'Test': transforms.Compose([
transforms.Resize(128),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
data_dir = '../input/fruits/fruits-360'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['Training', 'Test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=224,
shuffle=True, num_workers=0)
for x in ['Training', 'Test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['Training', 'Test']}
class_names = image_datasets['Training'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['Training']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
# model_ft = models.vgg19_bn(pretrained=True)
# num_ftrs = model_ft.classifier[0].in_features
model_ft = models.wide_resnet101_2(pretrained=True)
num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 131 #Small for Resnet, large for VGG
Num_class=131
class SpinalNet_ResNet(nn.Module):
def __init__(self):
super(SpinalNet_ResNet, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
class SpinalNet_VGG(nn.Module):
def __init__(self):
super(SpinalNet_VGG, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
VGG_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
'''
Changing the fully connected layer to SpinalNet or VGG or ResNet
'''
#model_ft.fc = nn.Linear(num_ftrs, 10)
model_ft.fc = SpinalNet_ResNet() #SpinalNet_VGG
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
# Each epoch has a training and validation phase
for phase in ['Training', 'Test']:
if phase == 'Training':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'Training'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'Training':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'Training':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'Test' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
print('Time from Start {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=20)
| 9,786
| 32.064189
| 93
|
py
|
SpinalNet
|
SpinalNet-master/Transfer Learning/Transfer_Learning_Stanford_Cars.py
|
'''
Stanford Cars
We wrote this code with the help of the PyTorch transfer learning demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
The dataset is downloaded from https://www.kaggle.com/jutrera/stanford-car-dataset-by-classes-folder?
Results with the following augmentation:
transforms.Resize((456,456)),
transforms.RandomRotation(15,),
transforms.RandomCrop(448),
transforms.RandomHorizontalFlip(),
196 classes
wide_resnet101_2 Spinal ResNet FC gives 93.35% test accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((456,456)),
transforms.RandomRotation(15,),
transforms.RandomCrop(448),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'val': transforms.Compose([
transforms.Resize((448,448)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
data_dir = 'data/car_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=9,
shuffle=True, num_workers=0)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = 'cuda'#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
# model_ft = models.vgg19_bn(pretrained=True)
# num_ftrs = model_ft.classifier[0].in_features
model_ft = models.wide_resnet101_2(pretrained=True)
num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 196 #Small for Resnet, large for VGG
Num_class=196
class SpinalNet_ResNet(nn.Module):
def __init__(self):
super(SpinalNet_ResNet, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
class SpinalNet_VGG(nn.Module):
def __init__(self):
super(SpinalNet_VGG, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
VGG_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
'''
Changing the fully connected layer to SpinalNet or VGG or ResNet
'''
#model_ft.fc = nn.Linear(num_ftrs, Num_class)
model_ft.fc = SpinalNet_ResNet() #SpinalNet_VGG
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
print('Time from Start {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=25)
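# (added note) fine-tuning then continues for another 25 epochs from the best
# restored weights, restarting the StepLR schedule at a 10x lower learning rate.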
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.0001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=25)
| 9,929
| 30.52381
| 97
|
py
|
SpinalNet
|
SpinalNet-master/Transfer Learning/Transfer_Learning_Oxford102flower.py
|
'''
We wrote this code with the help of the PyTorch transfer learning demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
The dataset is downloaded from https://www.kaggle.com/c/oxford-102-flower-pytorch/data
Results with the following augmentation:
transforms.Resize((464,464)),
transforms.RandomRotation(15,),
transforms.RandomCrop(448),
transforms.RandomHorizontalFlip(),
wide_resnet101_2 gives 99.39% validation accuracy
wide_resnet101_2 SpinalNet_VGG FC gives 99.14% validation accuracy
wide_resnet101_2 SpinalNet_ResNet FC gives 99.30% validation accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((464,464)),
transforms.RandomRotation(15,),
transforms.RandomCrop(448),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'val': transforms.Compose([
transforms.Resize((448,448)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
data_dir = 'data/Oxford_flower102'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=8,
shuffle=True, num_workers=0)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = 'cuda'#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
# model_ft = models.vgg19_bn(pretrained=True)
# num_ftrs = model_ft.classifier[0].in_features
model_ft = models.wide_resnet101_2(pretrained=True)
num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 102 #Small for Resnet, large for VGG
Num_class=102
class SpinalNet_ResNet(nn.Module):
def __init__(self):
super(SpinalNet_ResNet, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
#nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
#nn.Dropout(p = 0.5),
nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
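# Spinal head recap: the backbone's pooled feature vector is split into two halves of
# half_in_size; fc_spinal_layer1 sees the first half, and every later layer sees one half
# concatenated with the previous layer's output (hence half_in_size+layer_width inputs).
# The four outputs are concatenated (layer_width*4 wide) and mapped to Num_class logits.
# A minimal shape sanity-check (a sketch, assuming 2*half_in_size == num_ftrs):
#   head = SpinalNet_ResNet()
#   out = head(torch.randn(2, num_ftrs))  # expected shape: (2, Num_class)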
class SpinalNet_VGG(nn.Module):
def __init__(self):
super(SpinalNet_VGG, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width),
nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
VGG_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
'''
Changing the fully connected layer to SpinalNet or VGG or ResNet
'''
#model_ft.fc = nn.Linear(num_ftrs, Num_class)
model_ft.fc = SpinalNet_ResNet() #SpinalNet_VGG
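# Note: wide_resnet101_2 applies global average pooling and flattening before .fc, so the
# replacement head receives a (batch, num_ftrs) tensor with num_ftrs = 2048 here;
# half_in_size above must stay in sync with whichever backbone is selected.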
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
time_elapsed = time.time() - since
print('Time from Start {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=50)
| 9,691 | 30.986799 | 93 | py | SpinalNet | SpinalNet-master/Transfer Learning/Transfer_Learning_Bird225.py |
'''
We write this code with the help of PyTorch demo:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
Data Link:
https://www.kaggle.com/gpiosenka/100-bird-species
Version 30
Downloaded on 20/08/2020
Performances:
Data augmentation:
transforms.Resize((230,230)),
transforms.RandomRotation(15,),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
vgg19_bn Spinal FC (1024*4 neurons) gives 98.49% validation accuracy, 99.02% corresponding test accuracy
vgg19_bn FC of VGG (two 4096-unit layers) gives 98.49% validation accuracy, 98.67% corresponding test accuracy
wide_resnet101_2 Spinal FC (1024*4 neurons) gives 98.84% validation accuracy, 99.56% corresponding test accuracy
'''
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((230,230)),
transforms.RandomRotation(15,),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'val': transforms.Compose([
#transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'test': transforms.Compose([
#transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
data_dir = 'data/bird225'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=24,
shuffle=True, num_workers=0)
for x in ['train', 'val', 'test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val', 'test']}
class_names = image_datasets['train'].classes
device = 'cuda'#torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
#%%
# model_ft = models.vgg19_bn(pretrained=True)
# num_ftrs = model_ft.classifier[0].in_features
model_ft = models.wide_resnet101_2(pretrained=True)
num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 1024
Num_class=225
class SpinalNet(nn.Module):
def __init__(self):
super(SpinalNet, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
net_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
#%%
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
test_token=0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val', 'test']:
            '''
            Run the test phase only in epochs where a better validation result is found
            '''
            if test_token == 0 and phase == 'test':
                continue
            test_token = 0
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
test_token =1
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
'''
Changing the fully connected layer to SpinalNet
'''
#model_ft.fc = nn.Linear(num_ftrs, 10)
model_ft.fc = SpinalNet()
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=25)
| 8,387 | 28.850534 | 120 | py | SpinalNet | SpinalNet-master/Transfer Learning/Transfer_Learning_MNIST.py |
# Execution info: https://www.kaggle.com/dipuk0506/transfer-learning-on-mnist
from __future__ import print_function, division
import matplotlib
import imageio
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
folderlocation = './Data/'
path = folderlocation
if not os.path.exists(path):
os.mkdir(path)
path = folderlocation + 'train/'
if not os.path.exists(path):
os.mkdir(path)
path = folderlocation + 'valid/'
if not os.path.exists(path):
os.mkdir(path)
path = folderlocation + 'test/'
if not os.path.exists(path):
os.mkdir(path)
for iter1 in range(10): # 10 = number of classes
path = folderlocation + 'train/'+str(iter1)
if not os.path.exists(path):
os.mkdir(path)
path = folderlocation + 'valid/'+str(iter1)
if not os.path.exists(path):
os.mkdir(path)
path = folderlocation + 'test/'+str(iter1)
if not os.path.exists(path):
os.mkdir(path)
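# The loops above create the folder layout that torchvision's ImageFolder expects:
#   Data/{train,valid,test}/{0..9}/*.png  (one sub-folder per class label)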
data_train = torchvision.datasets.MNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
]))
for iter1 in range(len(data_train)):
x, a = data_train[iter1]
    if iter1 % 10 == 0:  # every 10th training image goes to the validation split (90/10)
imageio.imwrite(folderlocation + 'valid/'+str(a)+'/valid'+str(iter1)+'.png', x)
else:
imageio.imwrite(folderlocation + 'train/'+str(a)+'/train'+str(iter1)+'.png', x)
data_test = torchvision.datasets.MNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
]))
for iter1 in range(len(data_test)):
x, a = data_test[iter1]
imageio.imwrite(folderlocation + 'test/'+str(a)+'/test'+str(iter1)+'.png', x)
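# ImageFolder's default PIL loader converts every image to RGB, so the grayscale MNIST
# PNGs written above are read back as 3-channel tensors the pretrained VGG can consume.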
model_ft = models.vgg19_bn(pretrained=True)
num_ftrs = model_ft.classifier[0].in_features
#model_ft = models.wide_resnet101_2(pretrained=True)
#num_ftrs = model_ft.fc.in_features
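# For torchvision's vgg19_bn, classifier[0].in_features is 25088 (512*7*7): an
# AdaptiveAvgPool2d((7, 7)) sits between the conv stack and the classifier, which is why
# the 112x112 inputs used below still yield a feature vector of that size.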
plt.ion() # interactive mode
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.Resize((120,120)),
transforms.RandomRotation(10,),
transforms.RandomCrop(112),
transforms.RandomPerspective(),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]),
'valid': transforms.Compose([
transforms.Resize(112),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]),
'test': transforms.Compose([
transforms.Resize(112),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]),
}
data_dir = folderlocation
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'valid', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=128,
shuffle=True, num_workers=0)
for x in ['train', 'valid', 'test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'valid', 'test']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)#, title=[class_names[x] for x in classes])
half_in_size = round(num_ftrs/2)
layer_width = 1024
Num_class=10
class SpinalNet(nn.Module):
def __init__(self):
super(SpinalNet, self).__init__()
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(half_in_size+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(layer_width*4, Num_class),)
def forward(self, x):
x1 = self.fc_spinal_layer1(x[:, 0:half_in_size])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
class SpinalNet_Resnet(nn.Module):
def __init__(self):
super(SpinalNet_Resnet, self).__init__()
self.layer1 = nn.Linear(half_in_size, layer_width)
self.layer2 = nn.Linear(half_in_size+layer_width, layer_width)
self.layer3 = nn.Linear(half_in_size+layer_width, layer_width)
self.layer4 = nn.Linear(half_in_size+layer_width, layer_width)
self._out = nn.Linear(layer_width*4, Num_class)
def forward(self, x):
x1 = self.layer1(x[:, 0:half_in_size])
x2 = self.layer2(torch.cat([ x[:,half_in_size:2*half_in_size], x1], dim=1))
x3 = self.layer3(torch.cat([ x[:,0:half_in_size], x2], dim=1))
x4 = self.layer4(torch.cat([ x[:,half_in_size:2*half_in_size], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self._out(x)
return x
net_fc = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, Num_class)
)
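# Note: net_fc above is the classic VGG fully-connected head, kept for comparison; it is
# never attached below (model_ft.classifier is replaced with SpinalNet instead), and its
# 512-wide input would need to match num_ftrs before it could be swapped in.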
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
test_token=0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'valid', 'test']:
            '''
            Run the test phase only in epochs where a better validation result is found
            '''
            if test_token == 0 and phase == 'test':
                continue
            test_token = 0
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'valid' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
test_token =1
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
'''
Changing the fully connected layer to SpinalNet
'''
#model_ft.fc = nn.Linear(num_ftrs, 10)
model_ft.classifier = SpinalNet()
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=10)
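# Second pass: the same schedule with a 10x smaller base learning rate to fine-tune the best weights.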
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=10)
#Save model
# Specify a path
PATH = "state_dict_model.pt"
# Save
torch.save(model_ft.state_dict(), PATH)
## Load
#model = Net()
#model.load_state_dict(torch.load(PATH))
#model.eval()
import shutil
shutil.rmtree(folderlocation)
| 10,753 | 29.725714 | 93 | py | SpinalNet | SpinalNet-master/CIFAR-10/ResNet_default_and_SpinalFC_CIFAR10.py |
# -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal ResNet code for CIFAR-10.
This code trains both NNs as two different models.
There is an option of choosing ResNet18(), ResNet34(), SpinalResNet18(), or
SpinalResNet34().
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import random
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#device = 'cpu'
# Hyper-parameters
num_epochs = 160
learning_rate = 0.001
torch.manual_seed(0)
random.seed(0)
first_HL = 256
# Image preprocessing modules
# Normalize training set together with augmentation
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
])
# Normalize test set same as training set without augmentation
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
])
# CIFAR-10 dataset
trainset = torchvision.datasets.CIFAR10(root='./data',
train=True,
download=True,
transform=transform_train)
train_loader = torch.utils.data.DataLoader(
trainset, batch_size=200, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR10(root='./data',
train=False,
download=True,
transform=transform_test)
test_loader = torch.utils.data.DataLoader(
testset, batch_size=200, shuffle=False, num_workers=0)
def conv3x3(in_channels, out_channels, stride=1):
"""3x3 kernel size with padding convolutional layer in ResNet BasicBlock."""
return nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
class BasicBlock(nn.Module):
"""Basic Block of ReseNet."""
def __init__(self, in_channels, out_channels, stride=1, downsample=None):
"""Basic Block of ReseNet Builder."""
super(BasicBlock, self).__init__()
# First conv3x3 layer
self.conv1 = conv3x3(in_channels, out_channels, stride)
# Batch Normalization
self.bn1 = nn.BatchNorm2d(num_features=out_channels)
# ReLU Activation Function
self.relu = nn.ReLU(inplace=True)
# Second conv3x3 layer
self.conv2 = conv3x3(out_channels, out_channels)
# Batch Normalization
self.bn2 = nn.BatchNorm2d(num_features=out_channels)
# downsample for `residual`
self.downsample = downsample
self.stride = stride
def forward(self, x):
"""Forward Pass of Basic Block."""
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
return out
class SpinalResNet(nn.Module):
"""Residual Neural Network."""
def __init__(self, block, duplicates, num_classes=10):
"""Residual Neural Network Builder."""
super(SpinalResNet, self).__init__()
self.in_channels = 32
self.conv1 = conv3x3(in_channels=3, out_channels=32)
self.bn = nn.BatchNorm2d(num_features=32)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout2d(p=0.02)
# block of Basic Blocks
self.conv2_x = self._make_block(block, duplicates[0], out_channels=32)
self.conv3_x = self._make_block(block, duplicates[1], out_channels=64, stride=2)
self.conv4_x = self._make_block(block, duplicates[2], out_channels=128, stride=2)
self.conv5_x = self._make_block(block, duplicates[3], out_channels=256, stride=2)
self.maxpool = nn.MaxPool2d(kernel_size=4, stride=1)
self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
#self.fc_layer = nn.Linear(256, num_classes)
self.fc1 = nn.Linear(256, first_HL) #changed from 16 to 8
self.fc1_1 = nn.Linear(256 + first_HL, first_HL) #added
self.fc1_2 = nn.Linear(256 + first_HL, first_HL) #added
self.fc1_3 = nn.Linear(256 + first_HL, first_HL) #added
self.fc_layer = nn.Linear(first_HL*4, num_classes)
# initialize weights
# self.apply(initialize_weights)
for m in self.modules():
if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight.data, mode='fan_out')  # the non-underscore variant is deprecated
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_block(self, block, duplicates, out_channels, stride=1):
"""
Create Block in ResNet.
Args:
block: BasicBlock
duplicates: number of BasicBlock
out_channels: out channels of the block
Returns:
nn.Sequential(*layers)
"""
downsample = None
if (stride != 1) or (self.in_channels != out_channels):
downsample = nn.Sequential(
conv3x3(self.in_channels, out_channels, stride=stride),
nn.BatchNorm2d(num_features=out_channels)
)
layers = []
layers.append(
block(self.in_channels, out_channels, stride, downsample))
self.in_channels = out_channels
for _ in range(1, duplicates):
layers.append(block(out_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, x):
"""Forward pass of ResNet."""
out = self.conv1(x)
out = self.bn(out)
out = self.relu(out)
out = self.dropout(out)
# Stacked Basic Blocks
out = self.conv2_x(out)
out = self.conv3_x(out)
out = self.conv4_x(out)
out = self.conv5_x(out)
out1 = self.maxpool2(out)
        # out1 has shape (N, 256, 2, 2); its four spatial positions feed the spinal layers below
x1 = out1[:,:,0,0]
x1 = self.relu(self.fc1(x1))
x2= torch.cat([ out1[:,:,0,1], x1], dim=1)
x2 = self.relu(self.fc1_1(x2))
x3= torch.cat([ out1[:,:,1,0], x2], dim=1)
x3 = self.relu(self.fc1_2(x3))
x4= torch.cat([ out1[:,:,1,1], x3], dim=1)
x4 = self.relu(self.fc1_3(x4))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
out = torch.cat([x, x4], dim=1)
out = self.fc_layer(out)
return out
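# Unlike the flat-vector spinal heads used elsewhere in this repo, SpinalResNet feeds the
# four spatial positions of the 2x2 pooled map (out1[:,:,0,0], [:,:,0,1], [:,:,1,0],
# [:,:,1,1]) into successive spinal layers, so each layer sees a different spatial "view"
# of the image together with the previous layer's activations.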
class ResNet(nn.Module):
"""Residual Neural Network."""
def __init__(self, block, duplicates, num_classes=10):
"""Residual Neural Network Builder."""
super(ResNet, self).__init__()
self.in_channels = 32
self.conv1 = conv3x3(in_channels=3, out_channels=32)
self.bn = nn.BatchNorm2d(num_features=32)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout2d(p=0.02)
# block of Basic Blocks
self.conv2_x = self._make_block(block, duplicates[0], out_channels=32)
self.conv3_x = self._make_block(block, duplicates[1], out_channels=64, stride=2)
self.conv4_x = self._make_block(block, duplicates[2], out_channels=128, stride=2)
self.conv5_x = self._make_block(block, duplicates[3], out_channels=256, stride=2)
self.maxpool = nn.MaxPool2d(kernel_size=4, stride=1)
#self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=1)
self.fc_layer = nn.Linear(256, num_classes)
# initialize weights
# self.apply(initialize_weights)
for m in self.modules():
if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight.data, mode='fan_out')  # the non-underscore variant is deprecated
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_block(self, block, duplicates, out_channels, stride=1):
"""
Create Block in ResNet.
Args:
block: BasicBlock
duplicates: number of BasicBlock
out_channels: out channels of the block
Returns:
nn.Sequential(*layers)
"""
downsample = None
if (stride != 1) or (self.in_channels != out_channels):
downsample = nn.Sequential(
conv3x3(self.in_channels, out_channels, stride=stride),
nn.BatchNorm2d(num_features=out_channels)
)
layers = []
layers.append(
block(self.in_channels, out_channels, stride, downsample))
self.in_channels = out_channels
for _ in range(1, duplicates):
layers.append(block(out_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, x):
"""Forward pass of ResNet."""
out = self.conv1(x)
out = self.bn(out)
out = self.relu(out)
out = self.dropout(out)
# Stacked Basic Blocks
out = self.conv2_x(out)
out = self.conv3_x(out)
out = self.conv4_x(out)
out = self.conv5_x(out)
out = self.maxpool(out)
        out = out.view(out.size(0), -1)
out = self.fc_layer(out)
return out
model = ResNet(BasicBlock, [1,1,1,1]).to(device)  # unused; the trained models are built via the helpers below
def ResNet18():
return ResNet(BasicBlock, [2,2,2,2]).to(device)
def SpinalResNet18():
return SpinalResNet(BasicBlock, [2,2,2,2]).to(device)
def ResNet34():
return ResNet(BasicBlock, [3, 4, 6, 3]).to(device)
def SpinalResNet34():
return SpinalResNet(BasicBlock, [3, 4, 6, 3]).to(device)
# For updating learning rate
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Train the model
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate
model1 = ResNet18().to(device)
model2 = SpinalResNet18().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 =0
#%%
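# Training scheme for the loop below: both models are evaluated on the test set after
# every epoch; whenever a model fails to beat its best accuracy so far, its learning
# rate is resampled as learning_rate * U(0,1)**5, which is heavily biased toward small
# values -- a cheap random-restart alternative to a fixed decay schedule.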
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model1(images)
loss1 = criterion(outputs, labels)
# Backward and optimize
optimizer1.zero_grad()
loss1.backward()
optimizer1.step()
outputs = model2(images)
loss2 = criterion(outputs, labels)
# Backward and optimize
optimizer2.zero_grad()
loss2.backward()
optimizer2.step()
# Test the model
model1.eval()
model2.eval()
with torch.no_grad():
correct1 = 0
total1 = 0
correct2 = 0
total2 = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model1(images)
_, predicted = torch.max(outputs.data, 1)
total1 += labels.size(0)
correct1 += (predicted == labels).sum().item()
outputs = model2(images)
_, predicted = torch.max(outputs.data, 1)
total2 += labels.size(0)
correct2 += (predicted == labels).sum().item()
        if best_accuracy1 > correct1 / total1:
            # np.asscalar was removed from NumPy; rand()**5 biases the resampled LR toward small values
            curr_lr1 = learning_rate * np.random.rand()**5
            update_lr(optimizer1, curr_lr1)
print('Epoch :{} Accuracy NN: ({:.2f}%), Maximum Accuracy: {:.2f}%'.format(epoch,
100 * correct1 / total1, 100*best_accuracy1))
else:
best_accuracy1 = correct1 / total1
print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))
        if best_accuracy2 > correct2 / total2:
            curr_lr2 = learning_rate * np.random.rand()**5  # replaces the removed np.asscalar call
            update_lr(optimizer2, curr_lr2)
print('Epoch :{} Accuracy SpinalNet: ({:.2f}%), Maximum Accuracy: {:.2f}%'.format(epoch,
100 * correct2 / total2, 100*best_accuracy2))
else:
best_accuracy2 = correct2 / total2
print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))
model1.train()
model2.train()
| 13,289 | 29.906977 | 101 | py | SpinalNet | SpinalNet-master/CIFAR-10/VGG_default_and_SpinalFC_CIFAR10.py |
# -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal VGG code for CIFAR-10.
This code trains both NNs as two different models.
There is an option of choosing an NN among:
vgg11_bn(), vgg13_bn(), vgg16_bn(), vgg19_bn() and
Spinalvgg11_bn(), Spinalvgg13_bn(), Spinalvgg16_bn(), Spinalvgg19_bn()
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import random
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#device = 'cpu'
# Hyper-parameters
num_epochs = 200
learning_rate = 0.0001
Half_width =256
layer_width=512
torch.manual_seed(0)
random.seed(0)
# Image preprocessing modules
# Normalize training set together with augmentation
transform = transforms.Compose([
transforms.Pad(4),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32),
transforms.ToTensor()])
# CIFAR-10 dataset
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=trainset,
batch_size=100,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=testset,
batch_size=100,
shuffle=False)
def conv3x3(in_channels, out_channels, stride=1):
"""3x3 kernel size with padding convolutional layer in ResNet BasicBlock."""
return nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
cfg = {
'A' : [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B' : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D' : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E' : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']
}
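# cfg keys follow the VGG paper's column names: 'A' -> VGG11, 'B' -> VGG13, 'D' -> VGG16,
# 'E' -> VGG19; the integers are conv output channels and 'M' marks a 2x2 max-pool.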
class VGG(nn.Module):
def __init__(self, features, num_class=10):
super().__init__()
self.features = features
self.classifier = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, num_class)
)
def forward(self, x):
output = self.features(x)
output = output.view(output.size()[0], -1)
output = self.classifier(output)
return output
class SpinalVGG(nn.Module):
def __init__(self, features, num_class=10):
super().__init__()
self.features = features
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(), nn.Linear(Half_width, layer_width),
nn.ReLU(inplace=True),
)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(), nn.Linear(Half_width + layer_width, layer_width),
nn.ReLU(inplace=True),
)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(), nn.Linear(Half_width + layer_width, layer_width),
nn.ReLU(inplace=True),
)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(), nn.Linear(Half_width + layer_width, layer_width),
nn.ReLU(inplace=True),
)
self.fc_out = nn.Sequential(
nn.Dropout(), nn.Linear(layer_width*4, num_class)
)
def forward(self, x):
output = self.features(x)
output = output.view(output.size()[0], -1)
x = output
x1 = self.fc_spinal_layer1(x[:, 0:Half_width])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,Half_width:2*Half_width], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:Half_width], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,Half_width:2*Half_width], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
def make_layers(cfg, batch_norm=False):
layers = []
input_channel = 3
for l in cfg:
if l == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
continue
layers += [nn.Conv2d(input_channel, l, kernel_size=3, padding=1)]
if batch_norm:
layers += [nn.BatchNorm2d(l)]
layers += [nn.ReLU(inplace=True)]
input_channel = l
return nn.Sequential(*layers)
def vgg11_bn():
return VGG(make_layers(cfg['A'], batch_norm=True))
def vgg13_bn():
return VGG(make_layers(cfg['B'], batch_norm=True))
def vgg16_bn():
return VGG(make_layers(cfg['D'], batch_norm=True))
def vgg19_bn():
return VGG(make_layers(cfg['E'], batch_norm=True))
def Spinalvgg11_bn():
return SpinalVGG(make_layers(cfg['A'], batch_norm=True))
def Spinalvgg13_bn():
return SpinalVGG(make_layers(cfg['B'], batch_norm=True))
def Spinalvgg16_bn():
return SpinalVGG(make_layers(cfg['D'], batch_norm=True))
def Spinalvgg19_bn():
return SpinalVGG(make_layers(cfg['E'], batch_norm=True))
# For updating learning rate
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Train the model
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate
model1 = vgg19_bn().to(device)
model2 = Spinalvgg19_bn().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
best_accuracy1 = 0.0
best_accuracy2 =0.0
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model1(images)
loss1 = criterion(outputs, labels)
# Backward and optimize
optimizer1.zero_grad()
loss1.backward()
optimizer1.step()
outputs = model2(images)
loss2 = criterion(outputs, labels)
# Backward and optimize
optimizer2.zero_grad()
loss2.backward()
optimizer2.step()
if i == 249:
print ("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
print ("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss2.item()))
# Test the model
model1.eval()
model2.eval()
with torch.no_grad():
correct1 = 0
total1 = 0
correct2 = 0
total2 = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model1(images)
_, predicted = torch.max(outputs.data, 1)
total1 += labels.size(0)
correct1 += (predicted == labels).sum().item()
outputs = model2(images)
_, predicted = torch.max(outputs.data, 1)
total2 += labels.size(0)
correct2 += (predicted == labels).sum().item()
        if best_accuracy1 > correct1 / total1:
            curr_lr1 = learning_rate * np.random.rand()**3  # replaces the removed np.asscalar call
            update_lr(optimizer1, curr_lr1)
print('Test Accuracy of NN: {} % Best: {} %'.format(100 * correct1 / total1, 100*best_accuracy1))
else:
best_accuracy1 = correct1 / total1
            net_opt1 = model1  # note: this aliases the live model rather than snapshotting its weights
print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))
        if best_accuracy2 > correct2 / total2:
            curr_lr2 = learning_rate * np.random.rand()**3  # replaces the removed np.asscalar call
            update_lr(optimizer2, curr_lr2)
print('Test Accuracy of SpinalNet: {} % Best: {} %'.format(100 * correct2 / total2, 100*best_accuracy2))
else:
best_accuracy2 = correct2 / total2
            net_opt2 = model2  # aliases the live model (see note above)
print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))
model1.train()
model2.train()
| 8,991 | 28.578947 | 116 | py | SpinalNet | SpinalNet-master/CIFAR-10/CNN_dropout_CIFAR10.py |
# -*- coding: utf-8 -*-
"""
This Script contains the default CNN dropout code for comparison.
The code is collected and changed from:
https://zhenye-na.github.io/2018/09/28/pytorch-cnn-cifar10.html
@author: Dipu
"""
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import random
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
num_epochs = 160
learning_rate = 0.001
torch.manual_seed(0)
random.seed(0)
# Image preprocessing modules
transform = transforms.Compose([
transforms.Pad(4),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32),
transforms.ToTensor()])
# CIFAR-10 dataset
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=trainset,
batch_size=100,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=testset,
batch_size=100,
shuffle=False)
# 3x3 convolution
class CNN(nn.Module):
"""CNN."""
def __init__(self):
"""CNN Builder."""
super(CNN, self).__init__()
self.conv_layer = nn.Sequential(
# Conv Layer block 1
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
# Conv Layer block 2
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(p=0.05),
# Conv Layer block 3
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fc_layer = nn.Sequential(
nn.Dropout(p=0.1),
nn.Linear(4096, 1024),
nn.ReLU(inplace=True),
nn.Linear(1024, 512),
nn.ReLU(inplace=True),
nn.Dropout(p=0.1),
nn.Linear(512, 10)
)
def forward(self, x):
"""Perform forward."""
# conv layers
x = self.conv_layer(x)
# flatten
x = x.view(x.size(0), -1)
# fc layer
x = self.fc_layer(x)
return x
model = CNN().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# For updating learning rate
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Train the model
total_step = len(train_loader)
curr_lr = learning_rate
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 500 == 0:
print ("Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss.item()))
    # Decay learning rate: one early cut at epoch 1, then divide by 3 every epoch after 20
    if epoch == 1 or epoch > 20:
        curr_lr /= 3
        update_lr(optimizer, curr_lr)
# Test the model
model.eval()
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the model on the test images: {} %'.format(100 * correct / total))
model.train()
| 4,878 | 27.04023 | 97 | py | SpinalNet | SpinalNet-master/CIFAR-10/CNN_dropout_SpinalFC_CIFAR10.py |
# -*- coding: utf-8 -*-
"""
This Script contains the CNN dropout with Spinal fully-connected layer.
@author: Dipu
"""
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import random
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
num_epochs = 160
learning_rate = 0.001
torch.manual_seed(1)
random.seed(1)
Half_width =2048
layer_width = 128
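# The conv stack below ends with 256 channels on a 4x4 map, i.e. a 4096-wide flattened
# vector, so Half_width = 2048 splits it exactly into the two halves the spinal layers use.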
# Image preprocessing modules
transform = transforms.Compose([
transforms.Pad(4),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32),
transforms.ToTensor()])
# CIFAR-10 dataset
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=trainset,
batch_size=100,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=testset,
batch_size=100,
shuffle=False)
# 3x3 convolution
class SpinalCNN(nn.Module):
"""CNN."""
def __init__(self):
"""CNN Builder."""
super(SpinalCNN, self).__init__()
self.conv_layer = nn.Sequential(
# Conv Layer block 1
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
# Conv Layer block 2
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(p=0.05),
# Conv Layer block 3
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p=0.1), nn.Linear(Half_width, layer_width),
nn.ReLU(inplace=True),
)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p=0.1), nn.Linear(Half_width + layer_width, layer_width),
nn.ReLU(inplace=True),
)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p=0.1), nn.Linear(Half_width + layer_width, layer_width),
nn.ReLU(inplace=True),
)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p=0.1), nn.Linear(Half_width + layer_width, layer_width),
nn.ReLU(inplace=True),
)
self.fc_out = nn.Sequential(
nn.Dropout(p=0.1), nn.Linear(layer_width*4, 10)
)
def forward(self, x):
"""Perform forward."""
# conv layers
x = self.conv_layer(x)
# flatten
x = x.view(x.size(0), -1)
x1 = self.fc_spinal_layer1(x[:, 0:Half_width])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,Half_width:2*Half_width], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:Half_width], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,Half_width:2*Half_width], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
class CNN(nn.Module):
"""CNN."""
def __init__(self):
"""CNN Builder."""
super(CNN, self).__init__()
self.conv_layer = nn.Sequential(
# Conv Layer block 1
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
# Conv Layer block 2
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(p=0.05),
# Conv Layer block 3
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fc_layer = nn.Sequential(
nn.Dropout(p=0.1),
nn.Linear(4096, 1024),
nn.ReLU(inplace=True),
nn.Linear(1024, 512),
nn.ReLU(inplace=True),
nn.Dropout(p=0.1),
nn.Linear(512, 10)
)
def forward(self, x):
"""Perform forward."""
# conv layers
x = self.conv_layer(x)
# flatten
x = x.view(x.size(0), -1)
# fc layer
x = self.fc_layer(x)
return x
model = SpinalCNN().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# For updating learning rate
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Train the model
total_step = len(train_loader)
curr_lr = learning_rate
best_accuracy =0
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 500 == 0:
print ("Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss.item()))
model.eval()
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the model on the test images: {} %'.format(100 * correct / total))
        if best_accuracy > correct / total:
            curr_lr = curr_lr / 3
            update_lr(optimizer, curr_lr)
        else:
            best_accuracy = correct / total  # track the best accuracy so the LR decay can trigger
model.train()
| 7,532 | 29.746939 | 93 | py | SpinalNet | SpinalNet-master/MNIST_VGG/EMNIST_digits_VGG_and _SpinalVGG.py |
# -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal VGG code for EMNIST(Digits).
This code trains both NNs as two different models.
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torchvision
import torch.nn as nn
import math
import torch.nn.functional as F
import numpy as np
num_epochs = 200
batch_size_train = 100
batch_size_test = 1000
learning_rate = 0.005
momentum = 0.5
log_interval = 500
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.EMNIST('/files/', split='digits', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.RandomPerspective(),
torchvision.transforms.RandomRotation(10, fill=(0,)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.EMNIST('/files/', split='digits', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
class VGG(nn.Module):
"""
Based on - https://github.com/kkweon/mnist-competition
from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
"""
def two_conv_pool(self, in_channels, f1, f2):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def three_conv_pool(self,in_channels, f1, f2, f3):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def __init__(self, num_classes=10):
super(VGG, self).__init__()
self.l1 = self.two_conv_pool(1, 64, 64)
self.l2 = self.two_conv_pool(64, 128, 128)
self.l3 = self.three_conv_pool(128, 256, 256, 256)
self.l4 = self.three_conv_pool(256, 256, 256, 256)
self.classifier = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(256, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Dropout(p = 0.5),
nn.Linear(512, num_classes),
)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.l4(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return F.log_softmax(x, dim=1)
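# Caveat: this forward returns log-probabilities, yet the training loop below uses
# nn.CrossEntropyLoss, which applies log-softmax internally, so log-softmax ends up
# applied twice. Argmax predictions are unaffected; pairing log_softmax with nn.NLLLoss
# (or returning raw logits) would be the textbook combination.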
Half_width =128
layer_width =128
class SpinalVGG(nn.Module):
"""
Based on - https://github.com/kkweon/mnist-competition
from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
"""
def two_conv_pool(self, in_channels, f1, f2):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def three_conv_pool(self,in_channels, f1, f2, f3):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def __init__(self, num_classes=10):
super(SpinalVGG, self).__init__()
self.l1 = self.two_conv_pool(1, 64, 64)
self.l2 = self.two_conv_pool(64, 128, 128)
self.l3 = self.three_conv_pool(128, 256, 256, 256)
self.l4 = self.three_conv_pool(256, 256, 256, 256)
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(layer_width*4, num_classes),)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.l4(x)
x = x.view(x.size(0), -1)
x1 = self.fc_spinal_layer1(x[:, 0:Half_width])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,Half_width:2*Half_width], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:Half_width], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,Half_width:2*Half_width], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return F.log_softmax(x, dim=1)
device = 'cuda'
# For updating learning rate
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
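# The loop below perturbs the learning rate whenever test accuracy fails to
# improve: lr is reset to learning_rate * u**3 with u ~ Uniform(0, 1), which
# biases restarts toward small steps (E[u**3] = 1/4).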
# Train the model
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate
model1 = VGG().to(device)
model2 = SpinalVGG().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 = 0
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model1(images)
loss1 = criterion(outputs, labels)
# Backward and optimize
optimizer1.zero_grad()
loss1.backward()
optimizer1.step()
outputs = model2(images)
loss2 = criterion(outputs, labels)
# Backward and optimize
optimizer2.zero_grad()
loss2.backward()
optimizer2.step()
if i == 499:
print ("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
print ("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss2.item()))
# Test the model
model1.eval()
model2.eval()
with torch.no_grad():
correct1 = 0
total1 = 0
correct2 = 0
total2 = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model1(images)
_, predicted = torch.max(outputs.data, 1)
total1 += labels.size(0)
correct1 += (predicted == labels).sum().item()
outputs = model2(images)
_, predicted = torch.max(outputs.data, 1)
total2 += labels.size(0)
correct2 += (predicted == labels).sum().item()
        if best_accuracy1 >= correct1 / total1:
            # np.asscalar was removed in NumPy 1.23; use a plain Python float.
            curr_lr1 = learning_rate * np.random.rand() ** 3
update_lr(optimizer1, curr_lr1)
print('Test Accuracy of NN: {} % Best: {} %'.format(100 * correct1 / total1, 100*best_accuracy1))
else:
best_accuracy1 = correct1 / total1
net_opt1 = model1
print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))
        if best_accuracy2 >= correct2 / total2:
            curr_lr2 = learning_rate * np.random.rand() ** 3
update_lr(optimizer2, curr_lr2)
print('Test Accuracy of SpinalNet: {} % Best: {} %'.format(100 * correct2 / total2, 100*best_accuracy2))
else:
best_accuracy2 = correct2 / total2
net_opt2 = model2
print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))
model1.train()
model2.train()
| 11,675 | 32.551724 | 116 | py |
| SpinalNet | SpinalNet-master/MNIST_VGG/KMNIST_VGG_and_SpinalVGG.py |
# -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal VGG code for kMNIST.
This code trains both NNs as two different models.
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torchvision
import torch.nn as nn
import math
import torch.nn.functional as F
import numpy as np
num_epochs = 200
batch_size_train = 100
batch_size_test = 1000
learning_rate = 0.005
momentum = 0.5
log_interval = 500
random_seed = 1
torch.backends.cudnn.enabled = False
torch.manual_seed(random_seed)
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.KMNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.RandomPerspective(),
torchvision.transforms.RandomRotation(10, fill=(0,)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.KMNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(train_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
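# With batch_size_train = 100 and 28x28 grayscale KMNIST images, this prints
# torch.Size([100, 1, 28, 28]).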
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
class VGG(nn.Module):
"""
Based on - https://github.com/kkweon/mnist-competition
from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
"""
def two_conv_pool(self, in_channels, f1, f2):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def three_conv_pool(self,in_channels, f1, f2, f3):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def __init__(self, num_classes=10):
super(VGG, self).__init__()
self.l1 = self.two_conv_pool(1, 64, 64)
self.l2 = self.two_conv_pool(64, 128, 128)
self.l3 = self.three_conv_pool(128, 256, 256, 256)
self.l4 = self.three_conv_pool(256, 256, 256, 256)
self.classifier = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(256, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Dropout(p = 0.5),
nn.Linear(512, num_classes),
)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.l4(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return F.log_softmax(x, dim=1)
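# The classifier's Linear(256, 512) matches the backbone: 28x28 inputs shrink
# to 1x1x256 after the four max-pooling stages (28 -> 14 -> 7 -> 3 -> 1).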
Half_width = 128
layer_width = 128
class SpinalVGG(nn.Module):
"""
Based on - https://github.com/kkweon/mnist-competition
from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
"""
def two_conv_pool(self, in_channels, f1, f2):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def three_conv_pool(self,in_channels, f1, f2, f3):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def __init__(self, num_classes=10):
super(SpinalVGG, self).__init__()
self.l1 = self.two_conv_pool(1, 64, 64)
self.l2 = self.two_conv_pool(64, 128, 128)
self.l3 = self.three_conv_pool(128, 256, 256, 256)
self.l4 = self.three_conv_pool(256, 256, 256, 256)
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(layer_width*4, num_classes),)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.l4(x)
x = x.view(x.size(0), -1)
x1 = self.fc_spinal_layer1(x[:, 0:Half_width])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,Half_width:2*Half_width], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:Half_width], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,Half_width:2*Half_width], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return F.log_softmax(x, dim=1)
device = 'cuda'
# For updating learning rate
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Train the model
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate
model1 = VGG().to(device)
model2 = SpinalVGG().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 = 0
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model1(images)
loss1 = criterion(outputs, labels)
# Backward and optimize
optimizer1.zero_grad()
loss1.backward()
optimizer1.step()
outputs = model2(images)
loss2 = criterion(outputs, labels)
# Backward and optimize
optimizer2.zero_grad()
loss2.backward()
optimizer2.step()
if i == 499:
print ("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
print ("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss2.item()))
# Test the model
model1.eval()
model2.eval()
with torch.no_grad():
correct1 = 0
total1 = 0
correct2 = 0
total2 = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model1(images)
_, predicted = torch.max(outputs.data, 1)
total1 += labels.size(0)
correct1 += (predicted == labels).sum().item()
outputs = model2(images)
_, predicted = torch.max(outputs.data, 1)
total2 += labels.size(0)
correct2 += (predicted == labels).sum().item()
        if best_accuracy1 >= correct1 / total1:
            # np.asscalar was removed in NumPy 1.23; use a plain Python float.
            curr_lr1 = learning_rate * np.random.rand() ** 3
update_lr(optimizer1, curr_lr1)
print('Test Accuracy of NN: {} % Best: {} %'.format(100 * correct1 / total1, 100*best_accuracy1))
else:
best_accuracy1 = correct1 / total1
net_opt1 = model1
print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))
        if best_accuracy2 >= correct2 / total2:
            curr_lr2 = learning_rate * np.random.rand() ** 3
update_lr(optimizer2, curr_lr2)
print('Test Accuracy of SpinalNet: {} % Best: {} %'.format(100 * correct2 / total2, 100*best_accuracy2))
else:
best_accuracy2 = correct2 / total2
net_opt2 = model2
print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))
model1.train()
model2.train()
| 11,721 | 32.301136 | 116 | py |
| SpinalNet | SpinalNet-master/MNIST_VGG/MNIST_VGG_and_SpinalVGG.py |
# -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal VGG code for MNIST.
This code trains both NNs as two different models.
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torchvision
import torch.nn as nn
import math
import torch.nn.functional as F
import numpy as np
num_epochs = 200
batch_size_train = 100
batch_size_test = 1000
learning_rate = 0.005
momentum = 0.5
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.RandomPerspective(),
torchvision.transforms.RandomRotation(10, fill=(0,)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
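# (0.1307,) and (0.3081,) are the standard per-channel mean and std of the
# MNIST training set, applied to both the train and test transforms above.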
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
class VGG(nn.Module):
"""
Based on - https://github.com/kkweon/mnist-competition
from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
"""
def two_conv_pool(self, in_channels, f1, f2):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def three_conv_pool(self,in_channels, f1, f2, f3):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def __init__(self, num_classes=10):
super(VGG, self).__init__()
self.l1 = self.two_conv_pool(1, 64, 64)
self.l2 = self.two_conv_pool(64, 128, 128)
self.l3 = self.three_conv_pool(128, 256, 256, 256)
self.l4 = self.three_conv_pool(256, 256, 256, 256)
self.classifier = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(256, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Dropout(p = 0.5),
nn.Linear(512, num_classes),
)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.l4(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return F.log_softmax(x, dim=1)
Half_width = 128
layer_width = 128
class SpinalVGG(nn.Module):
"""
Based on - https://github.com/kkweon/mnist-competition
from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
"""
def two_conv_pool(self, in_channels, f1, f2):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def three_conv_pool(self,in_channels, f1, f2, f3):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def __init__(self, num_classes=10):
super(SpinalVGG, self).__init__()
self.l1 = self.two_conv_pool(1, 64, 64)
self.l2 = self.two_conv_pool(64, 128, 128)
self.l3 = self.three_conv_pool(128, 256, 256, 256)
self.l4 = self.three_conv_pool(256, 256, 256, 256)
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(layer_width*4, num_classes),)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.l4(x)
x = x.view(x.size(0), -1)
x1 = self.fc_spinal_layer1(x[:, 0:Half_width])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,Half_width:2*Half_width], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:Half_width], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,Half_width:2*Half_width], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return F.log_softmax(x, dim=1)
device = 'cuda'
# For updating learning rate
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Train the model
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate
model1 = VGG().to(device)
model2 = SpinalVGG().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 = 0
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model1(images)
loss1 = criterion(outputs, labels)
# Backward and optimize
optimizer1.zero_grad()
loss1.backward()
optimizer1.step()
outputs = model2(images)
loss2 = criterion(outputs, labels)
# Backward and optimize
optimizer2.zero_grad()
loss2.backward()
optimizer2.step()
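        # Log once per epoch: 60,000 MNIST training images at batch size 100
        # give 600 steps per epoch, so i == 499 fires at step 500.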
if i == 499:
print ("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
print ("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss2.item()))
# Test the model
model1.eval()
model2.eval()
with torch.no_grad():
correct1 = 0
total1 = 0
correct2 = 0
total2 = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model1(images)
_, predicted = torch.max(outputs.data, 1)
total1 += labels.size(0)
correct1 += (predicted == labels).sum().item()
outputs = model2(images)
_, predicted = torch.max(outputs.data, 1)
total2 += labels.size(0)
correct2 += (predicted == labels).sum().item()
        if best_accuracy1 >= correct1 / total1:
            # np.asscalar was removed in NumPy 1.23; use a plain Python float.
            curr_lr1 = learning_rate * np.random.rand() ** 3
update_lr(optimizer1, curr_lr1)
print('Test Accuracy of NN: {} % Best: {} %'.format(100 * correct1 / total1, 100*best_accuracy1))
else:
best_accuracy1 = correct1 / total1
net_opt1 = model1
print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))
        if best_accuracy2 >= correct2 / total2:
            curr_lr2 = learning_rate * np.random.rand() ** 3
update_lr(optimizer2, curr_lr2)
print('Test Accuracy of SpinalNet: {} % Best: {} %'.format(100 * correct2 / total2, 100*best_accuracy2))
else:
best_accuracy2 = correct2 / total2
net_opt2 = model2
print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))
model1.train()
model2.train()
| 11,616 | 32.191429 | 116 | py |
| SpinalNet | SpinalNet-master/MNIST_VGG/FashionMNIST_VGG_and _SpinalVGG.py |
# -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal VGG code for Fashion-MNIST.
This code trains both NNs as two different models.
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torchvision
import torch.nn as nn
import math
import torch.nn.functional as F
import numpy as np
num_epochs = 200
batch_size_train = 100
batch_size_test = 1000
learning_rate = 0.005
momentum = 0.5
log_interval = 500
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.FashionMNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.RandomRotation(10, fill=(0,)),
torchvision.transforms.RandomResizedCrop(28, scale=(0.95,1)),
#torchvision.transforms.RandomCrop(28,2),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.5,), (0.5,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.FashionMNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.5,), (0.5,))
])),
batch_size=batch_size_test, shuffle=True)
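# Unlike the MNIST/KMNIST scripts, the Fashion-MNIST transforms normalize with
# mean 0.5 and std 0.5, mapping pixel values into [-1, 1].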
examples = enumerate(train_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
class VGG(nn.Module):
"""
Based on - https://github.com/kkweon/mnist-competition
from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
"""
def two_conv_pool(self, in_channels, f1, f2):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def three_conv_pool(self,in_channels, f1, f2, f3):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def __init__(self, num_classes=10):
super(VGG, self).__init__()
self.l1 = self.two_conv_pool(1, 64, 64)
self.l2 = self.two_conv_pool(64, 128, 128)
self.l3 = self.three_conv_pool(128, 256, 256, 256)
self.l4 = self.three_conv_pool(256, 256, 256, 256)
self.classifier = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(256, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Dropout(p = 0.5),
nn.Linear(512, num_classes),
)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.l4(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return F.log_softmax(x, dim=1)
Half_width = 128
layer_width = 128
class SpinalVGG(nn.Module):
"""
Based on - https://github.com/kkweon/mnist-competition
from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
"""
def two_conv_pool(self, in_channels, f1, f2):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def three_conv_pool(self,in_channels, f1, f2, f3):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def __init__(self, num_classes=10):
super(SpinalVGG, self).__init__()
self.l1 = self.two_conv_pool(1, 64, 64)
self.l2 = self.two_conv_pool(64, 128, 128)
self.l3 = self.three_conv_pool(128, 256, 256, 256)
self.l4 = self.three_conv_pool(256, 256, 256, 256)
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(layer_width*4, num_classes),)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.l4(x)
x = x.view(x.size(0), -1)
x1 = self.fc_spinal_layer1(x[:, 0:Half_width])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,Half_width:2*Half_width], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:Half_width], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,Half_width:2*Half_width], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return F.log_softmax(x, dim=1)
device = 'cuda'
# For updating learning rate
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Train the model
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate
model1 = VGG().to(device)
model2 = SpinalVGG().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 = 0
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model1(images)
loss1 = criterion(outputs, labels)
# Backward and optimize
optimizer1.zero_grad()
loss1.backward()
optimizer1.step()
outputs = model2(images)
loss2 = criterion(outputs, labels)
# Backward and optimize
optimizer2.zero_grad()
loss2.backward()
optimizer2.step()
if i == 499:
print ("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
print ("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss2.item()))
# Test the model
model1.eval()
model2.eval()
with torch.no_grad():
correct1 = 0
total1 = 0
correct2 = 0
total2 = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model1(images)
_, predicted = torch.max(outputs.data, 1)
total1 += labels.size(0)
correct1 += (predicted == labels).sum().item()
outputs = model2(images)
_, predicted = torch.max(outputs.data, 1)
total2 += labels.size(0)
correct2 += (predicted == labels).sum().item()
        if best_accuracy1 >= correct1 / total1:
            # np.asscalar was removed in NumPy 1.23; use a plain Python float.
            curr_lr1 = learning_rate * np.random.rand() ** 3
update_lr(optimizer1, curr_lr1)
print('Test Accuracy of NN: {} % Best: {} %'.format(100 * correct1 / total1, 100*best_accuracy1))
else:
best_accuracy1 = correct1 / total1
net_opt1 = model1
print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))
        if best_accuracy2 >= correct2 / total2:
            curr_lr2 = learning_rate * np.random.rand() ** 3
update_lr(optimizer2, curr_lr2)
print('Test Accuracy of SpinalNet: {} % Best: {} %'.format(100 * correct2 / total2, 100*best_accuracy2))
else:
best_accuracy2 = correct2 / total2
net_opt2 = model2
print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))
model1.train()
model2.train()
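# Not part of the original script: a minimal sketch (file names illustrative)
# for persisting the best models. Note that net_opt1 = model1 above stores a
# reference, not a snapshot, so a true checkpoint should save state_dict() at
# the moment of improvement, e.g.:
# torch.save(model1.state_dict(), 'vgg_fashion_mnist_best.pth')
# torch.save(model2.state_dict(), 'spinal_vgg_fashion_mnist_best.pth')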
| 11,734 | 32.528571 | 116 | py |
| SpinalNet | SpinalNet-master/MNIST_VGG/EMNIST_letters_VGG_and _SpinalVGG.py |
# -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal VGG code for EMNIST(Letters).
This code trains both NNs as two different models.
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torchvision
import torch.nn as nn
import math
import torch.nn.functional as F
import numpy as np
num_epochs = 200
batch_size_train = 100
batch_size_test = 1000
learning_rate = 0.005
momentum = 0.5
log_interval = 500
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.EMNIST('/files/', split='letters', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.RandomPerspective(),
torchvision.transforms.RandomRotation(10, fill=(0,)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.EMNIST('/files/', split='letters', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
class VGG(nn.Module):
"""
Based on - https://github.com/kkweon/mnist-competition
from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
"""
def two_conv_pool(self, in_channels, f1, f2):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def three_conv_pool(self,in_channels, f1, f2, f3):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def __init__(self, num_classes=62):
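        # Note: the EMNIST 'letters' split has 26 classes (labels 1-26); 62 is
        # the 'byclass' count. Training still works because the unused logits
        # never receive targets, but num_classes=27 would be the tight choice.
        # The same applies to SpinalVGG below.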
super(VGG, self).__init__()
self.l1 = self.two_conv_pool(1, 64, 64)
self.l2 = self.two_conv_pool(64, 128, 128)
self.l3 = self.three_conv_pool(128, 256, 256, 256)
self.l4 = self.three_conv_pool(256, 256, 256, 256)
self.classifier = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(256, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Dropout(p = 0.5),
nn.Linear(512, num_classes),
)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.l4(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return F.log_softmax(x, dim=1)
Half_width = 128
layer_width = 128
class SpinalVGG(nn.Module):
"""
Based on - https://github.com/kkweon/mnist-competition
from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
"""
def two_conv_pool(self, in_channels, f1, f2):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def three_conv_pool(self,in_channels, f1, f2, f3):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def __init__(self, num_classes=62):
super(SpinalVGG, self).__init__()
self.l1 = self.two_conv_pool(1, 64, 64)
self.l2 = self.two_conv_pool(64, 128, 128)
self.l3 = self.three_conv_pool(128, 256, 256, 256)
self.l4 = self.three_conv_pool(256, 256, 256, 256)
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(layer_width*4, num_classes),)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.l4(x)
x = x.view(x.size(0), -1)
x1 = self.fc_spinal_layer1(x[:, 0:Half_width])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,Half_width:2*Half_width], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:Half_width], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,Half_width:2*Half_width], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return F.log_softmax(x, dim=1)
device = 'cuda'
# For updating learning rate
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Train the model
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate
model1 = VGG().to(device)
model2 = SpinalVGG().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 = 0
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model1(images)
loss1 = criterion(outputs, labels)
# Backward and optimize
optimizer1.zero_grad()
loss1.backward()
optimizer1.step()
outputs = model2(images)
loss2 = criterion(outputs, labels)
# Backward and optimize
optimizer2.zero_grad()
loss2.backward()
optimizer2.step()
if i == 499:
print ("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
print ("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss2.item()))
# Test the model
model1.eval()
model2.eval()
with torch.no_grad():
correct1 = 0
total1 = 0
correct2 = 0
total2 = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model1(images)
_, predicted = torch.max(outputs.data, 1)
total1 += labels.size(0)
correct1 += (predicted == labels).sum().item()
outputs = model2(images)
_, predicted = torch.max(outputs.data, 1)
total2 += labels.size(0)
correct2 += (predicted == labels).sum().item()
        if best_accuracy1 >= correct1 / total1:
            # np.asscalar was removed in NumPy 1.23; use a plain Python float.
            curr_lr1 = learning_rate * np.random.rand() ** 3
update_lr(optimizer1, curr_lr1)
print('Test Accuracy of NN: {} % Best: {} %'.format(100 * correct1 / total1, 100*best_accuracy1))
else:
best_accuracy1 = correct1 / total1
net_opt1 = model1
print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))
        if best_accuracy2 >= correct2 / total2:
            curr_lr2 = learning_rate * np.random.rand() ** 3
update_lr(optimizer2, curr_lr2)
print('Test Accuracy of SpinalNet: {} % Best: {} %'.format(100 * correct2 / total2, 100*best_accuracy2))
else:
best_accuracy2 = correct2 / total2
net_opt2 = model2
print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))
model1.train()
model2.train()
| 11,677 | 32.751445 | 116 | py |
| SpinalNet | SpinalNet-master/MNIST_VGG/QMNIST_VGG_and _SpinalVGG.py |
# -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal VGG code for QMNIST.
This code trains both NNs as two different models.
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torchvision
import torch.nn as nn
import math
import torch.nn.functional as F
import numpy as np
num_epochs = 200
batch_size_train = 100
batch_size_test = 1000
learning_rate = 0.005
momentum = 0.5
log_interval = 500
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.QMNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.RandomPerspective(),
torchvision.transforms.RandomRotation(10, fill=(0,)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.QMNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
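# QMNIST reconstructs MNIST from the original NIST data; its test split holds
# 60,000 digits rather than MNIST's 10,000, so each evaluation pass runs over
# 60 batches of 1,000 images.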
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
class VGG(nn.Module):
"""
Based on - https://github.com/kkweon/mnist-competition
from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
"""
def two_conv_pool(self, in_channels, f1, f2):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def three_conv_pool(self,in_channels, f1, f2, f3):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def __init__(self, num_classes=10):
super(VGG, self).__init__()
self.l1 = self.two_conv_pool(1, 64, 64)
self.l2 = self.two_conv_pool(64, 128, 128)
self.l3 = self.three_conv_pool(128, 256, 256, 256)
self.l4 = self.three_conv_pool(256, 256, 256, 256)
self.classifier = nn.Sequential(
nn.Dropout(p = 0.5),
nn.Linear(256, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Dropout(p = 0.5),
nn.Linear(512, num_classes),
)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.l4(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return F.log_softmax(x, dim=1)
Half_width = 128
layer_width = 128
class SpinalVGG(nn.Module):
"""
Based on - https://github.com/kkweon/mnist-competition
from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
"""
def two_conv_pool(self, in_channels, f1, f2):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def three_conv_pool(self,in_channels, f1, f2, f3):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def __init__(self, num_classes=10):
super(SpinalVGG, self).__init__()
self.l1 = self.two_conv_pool(1, 64, 64)
self.l2 = self.two_conv_pool(64, 128, 128)
self.l3 = self.three_conv_pool(128, 256, 256, 256)
self.l4 = self.three_conv_pool(256, 256, 256, 256)
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(layer_width*4, num_classes),)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.l4(x)
x = x.view(x.size(0), -1)
x1 = self.fc_spinal_layer1(x[:, 0:Half_width])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,Half_width:2*Half_width], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:Half_width], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,Half_width:2*Half_width], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return F.log_softmax(x, dim=1)
device = 'cuda'
# For updating learning rate
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Train the model
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate
model1 = VGG().to(device)
model2 = SpinalVGG().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 = 0
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model1(images)
loss1 = criterion(outputs, labels)
# Backward and optimize
optimizer1.zero_grad()
loss1.backward()
optimizer1.step()
outputs = model2(images)
loss2 = criterion(outputs, labels)
# Backward and optimize
optimizer2.zero_grad()
loss2.backward()
optimizer2.step()
if i == 499:
print ("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
print ("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss2.item()))
# Test the model
model1.eval()
model2.eval()
with torch.no_grad():
correct1 = 0
total1 = 0
correct2 = 0
total2 = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model1(images)
_, predicted = torch.max(outputs.data, 1)
total1 += labels.size(0)
correct1 += (predicted == labels).sum().item()
outputs = model2(images)
_, predicted = torch.max(outputs.data, 1)
total2 += labels.size(0)
correct2 += (predicted == labels).sum().item()
        if best_accuracy1 >= correct1 / total1:
            # np.asscalar was removed in NumPy 1.23; use a plain Python float.
            curr_lr1 = learning_rate * np.random.rand() ** 3
update_lr(optimizer1, curr_lr1)
print('Test Accuracy of NN: {} % Best: {} %'.format(100 * correct1 / total1, 100*best_accuracy1))
else:
best_accuracy1 = correct1 / total1
net_opt1 = model1
print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))
        if best_accuracy2 >= correct2 / total2:
            curr_lr2 = learning_rate * np.random.rand() ** 3
update_lr(optimizer2, curr_lr2)
print('Test Accuracy of SpinalNet: {} % Best: {} %'.format(100 * correct2 / total2, 100*best_accuracy2))
else:
best_accuracy2 = correct2 / total2
net_opt2 = model2
print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))
model1.train()
model2.train()
| 11,636 | 32.34384 | 116 | py |
| SpinalNet | SpinalNet-master/MNIST/Arch2_Fashion_MNIST.py |
# -*- coding: utf-8 -*-
"""
This Script contains the SpinalNet Arch2 Fashion MNIST code.
@author: Dipu
"""
import torch
import torchvision
import numpy as np
n_epochs = 200
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.005
momentum = 0.5
log_interval = 500
first_HL = 300
max_accuracy = 0.0
random_seed = 1
torch.backends.cudnn.enabled = False
torch.manual_seed(random_seed)
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.FashionMNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.RandomRotation(10, fill=(0,)),
torchvision.transforms.RandomResizedCrop(28, scale=(0.95,1)),
#torchvision.transforms.RandomCrop(28,2),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.5,), (0.5,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.FashionMNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.5,), (0.5,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(train_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
#%%
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(160, first_HL)
self.fc1_1 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_2 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_3 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_4 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_5 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_6 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_7 = nn.Linear(160 + first_HL, first_HL) #added
self.fcp = nn.Linear(720, first_HL)
self.fcp_1 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_2 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_3 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_4 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_5 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_6 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_7 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp2 = nn.Linear(392, first_HL)
self.fcp2_1 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_2 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_3 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_4 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_5 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_6 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_7 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_8 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_9 = nn.Linear(392 + first_HL, first_HL) #added
self.fc2 = nn.Linear(first_HL*26, 50) # changed first_HL from second_HL
self.fc3 = nn.Linear(50, 10) # changed first_HL from second_HL
#self.fc1 = nn.Linear(320, 50)
#self.fc2 = nn.Linear(50, 10)
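        # Three spinal ladders are defined above: 8 layers over the 320-d conv2
        # features (fc1*), 8 over the 1440-d conv1 features (fcp*), and 10 over
        # the raw 784-d pixels (fcp2*). Their 26 outputs of width first_HL are
        # concatenated before fc2, hence nn.Linear(first_HL*26, 50).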
def forward(self, x):
x_real=x.view(-1, 28*28)
x = self.conv1(x)
x = F.relu(F.max_pool2d(x, 2))
x_conv1 = x.view(-1, 1440)
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
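        # Shape bookkeeping: conv1 (k=5) + 2x2 pooling gives 10x12x12 = 1440
        # features (x_conv1); conv2 (k=5) + pooling gives 20x4x4 = 320 (x).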
half_width = 160
x1 = x[:, 0:half_width]
x1 = F.relu(self.fc1(x1))
x2= torch.cat([ x[:,half_width:half_width*2], x1], dim=1)
x2 = F.relu(self.fc1_1(x2))
x3= torch.cat([ x[:,0:half_width], x2], dim=1)
x3 = F.relu(self.fc1_2(x3))
x4= torch.cat([ x[:,half_width:half_width*2], x3], dim=1)
x4 = F.relu(self.fc1_3(x4))
x5= torch.cat([ x[:,0:half_width], x4], dim=1)
x5 = F.relu(self.fc1_4(x5))
x6= torch.cat([ x[:,half_width:half_width*2], x5], dim=1)
x6 = F.relu(self.fc1_5(x6))
x7= torch.cat([ x[:,0:half_width], x6], dim=1)
x7 = F.relu(self.fc1_6(x7))
x8= torch.cat([ x[:,half_width:half_width*2], x7], dim=1)
x8 = F.relu(self.fc1_7(x8))
x0 = torch.cat([x1, x2], dim=1)
x0 = torch.cat([x0, x3], dim=1)
x0 = torch.cat([x0, x4], dim=1)
x0 = torch.cat([x0, x5], dim=1)
x0 = torch.cat([x0, x6], dim=1)
x0 = torch.cat([x0, x7], dim=1)
x0 = torch.cat([x0, x8], dim=1)
x = x_conv1
half_width =720
x1 = x[:, 0:half_width]
x1 = F.relu(self.fcp(x1))
x2= torch.cat([ x[:,half_width:half_width*2], x1], dim=1)
x2 = F.relu(self.fcp_1(x2))
x3= torch.cat([ x[:,0:half_width], x2], dim=1)
x3 = F.relu(self.fcp_2(x3))
x4= torch.cat([ x[:,half_width:half_width*2], x3], dim=1)
x4 = F.relu(self.fcp_3(x4))
x5= torch.cat([ x[:,0:half_width], x4], dim=1)
x5 = F.relu(self.fcp_4(x5))
x6= torch.cat([ x[:,half_width:half_width*2], x5], dim=1)
x6 = F.relu(self.fcp_5(x6))
x7= torch.cat([ x[:,0:half_width], x6], dim=1)
x7 = F.relu(self.fcp_6(x7))
x8= torch.cat([ x[:,half_width:half_width*2], x7], dim=1)
x8 = F.relu(self.fcp_7(x8))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = torch.cat([x, x5], dim=1)
x = torch.cat([x, x6], dim=1)
x = torch.cat([x, x7], dim=1)
x = torch.cat([x, x8], dim=1)
x0 = torch.cat([x, x0], dim=1)
x = x_real
half_width =392
x1 = x[:, 0:half_width]
x1 = F.relu(self.fcp2(x1))
x2= torch.cat([ x[:,half_width:half_width*2], x1], dim=1)
x2 = F.relu(self.fcp2_1(x2))
x3= torch.cat([ x[:,0:half_width], x2], dim=1)
x3 = F.relu(self.fcp2_2(x3))
x4= torch.cat([ x[:,half_width:half_width*2], x3], dim=1)
x4 = F.relu(self.fcp2_3(x4))
x5= torch.cat([ x[:,0:half_width], x4], dim=1)
x5 = F.relu(self.fcp2_4(x5))
x6= torch.cat([ x[:,half_width:half_width*2], x5], dim=1)
x6 = F.relu(self.fcp2_5(x6))
x7= torch.cat([ x[:,0:half_width], x6], dim=1)
x7 = F.relu(self.fcp2_6(x7))
x8= torch.cat([ x[:,half_width:half_width*2], x7], dim=1)
x8 = F.relu(self.fcp2_7(x8))
x9= torch.cat([ x[:,0:half_width], x8], dim=1)
x9 = F.relu(self.fcp2_8(x9))
x10= torch.cat([ x[:,half_width:half_width*2], x9], dim=1)
x10 = F.relu(self.fcp2_9(x10))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = torch.cat([x, x5], dim=1)
x = torch.cat([x, x6], dim=1)
x = torch.cat([x, x7], dim=1)
x = torch.cat([x, x8], dim=1)
x = torch.cat([x, x9], dim=1)
x = torch.cat([x, x10], dim=1)
x = torch.cat([x, x0], dim=1)
x = F.relu(self.fc2(x))
        x = F.log_softmax(self.fc3(x), dim=1)
return x
device = 'cuda'
network = Net().to(device)
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
momentum=momentum)
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
network.train()
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
data = data.to(device)
target = target.to(device)
output = network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
# print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
# epoch, batch_idx * len(data), len(train_loader.dataset),
# 100. * batch_idx / len(train_loader), loss.item()))
train_losses.append(loss.item())
train_counter.append(
(batch_idx*64) + ((epoch-1)*len(train_loader.dataset)))
def test(max_accuracy, epoch):
network.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data = data.to(device)
target = target.to(device)
output = network(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
test_losses.append(test_loss)
    accuracy_local = 100. * correct / len(test_loader.dataset)
if accuracy_local > max_accuracy:
max_accuracy = accuracy_local
print('Epoch :{} Avg. loss: {:.4f}, Accuracy: {}/{} ({:.2f}%), Maximum Accuracy: {:.2f}%'.format(
epoch, test_loss, correct, len(test_loader.dataset), accuracy_local, max_accuracy))
return max_accuracy
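# After epoch 50 the optimizer is re-created each epoch with a randomly
# shrunken learning rate (lr = 5 * learning_rate * u**3, u ~ Uniform(0, 1));
# re-creating it also discards the accumulated SGD momentum buffers.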
for epoch in range(1, n_epochs + 1):
train(epoch)
max_accuracy = test(max_accuracy, epoch)
    if epoch > 50:
        # np.asscalar was removed in NumPy 1.23; use a plain Python float.
        optimizer = optim.SGD(network.parameters(),
                              lr=learning_rate * 5 * np.random.rand() ** 3,
                              momentum=momentum)
| 10,213 | 34.099656 | 105 | py |
| SpinalNet | SpinalNet-master/MNIST/Arch2_KMNIST.py |
# -*- coding: utf-8 -*-
"""
This Script contains the SpinalNet Arch2 KMNIST code.
@author: Dipu
"""
import torch
import torchvision
n_epochs = 200
batch_size_train = 64
batch_size_test = 1000
momentum = 0.5
log_interval = 5000
first_HL = 50
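# This KMNIST variant uses a narrower spinal width (first_HL = 50) than the
# Fashion-MNIST script above (first_HL = 300); the layer layout is otherwise
# identical.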
random_seed = 1
torch.backends.cudnn.enabled = False
torch.manual_seed(random_seed)
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.KMNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.RandomPerspective(),
torchvision.transforms.RandomRotation(10, fill=(0,)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.KMNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
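# Note: (0.1307,), (0.3081,) are the standard MNIST mean/std; this script
# simply reuses them for KMNIST.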
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(160, first_HL)
self.fc1_1 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_2 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_3 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_4 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_5 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_6 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_7 = nn.Linear(160 + first_HL, first_HL) #added
self.fcp = nn.Linear(720, first_HL)
self.fcp_1 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_2 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_3 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_4 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_5 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_6 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_7 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp2 = nn.Linear(392, first_HL)
self.fcp2_1 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_2 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_3 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_4 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_5 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_6 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_7 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_8 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_9 = nn.Linear(392 + first_HL, first_HL) #added
self.fc2 = nn.Linear(first_HL*26, 50) # changed first_HL from second_HL
self.fc3 = nn.Linear(50, 10) # changed first_HL from second_HL
#self.fc1 = nn.Linear(320, 50)
#self.fc2 = nn.Linear(50, 10)
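    # forward() builds three "spinal" pathways over three feature scales:
    # conv2 features (320 values, halves of 160), conv1 features (1440 values,
    # halves of 720) and the raw 784-pixel image (halves of 392). Each hidden
    # layer sees one half plus the previous layer's output; the 8 + 8 + 10 = 26
    # outputs of width first_HL are concatenated, hence fc2's first_HL*26 input.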
def forward(self, x):
        x_real = x.view(-1, 28*28)
x = self.conv1(x)
x = F.relu(F.max_pool2d(x, 2))
x_conv1 = x.view(-1, 1440)
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
half_width = 160
x1 = x[:, 0:half_width]
x1 = F.relu(self.fc1(x1))
        x2 = torch.cat([x[:, half_width:half_width*2], x1], dim=1)
        x2 = F.relu(self.fc1_1(x2))
        x3 = torch.cat([x[:, 0:half_width], x2], dim=1)
        x3 = F.relu(self.fc1_2(x3))
        x4 = torch.cat([x[:, half_width:half_width*2], x3], dim=1)
        x4 = F.relu(self.fc1_3(x4))
        x5 = torch.cat([x[:, 0:half_width], x4], dim=1)
        x5 = F.relu(self.fc1_4(x5))
        x6 = torch.cat([x[:, half_width:half_width*2], x5], dim=1)
        x6 = F.relu(self.fc1_5(x6))
        x7 = torch.cat([x[:, 0:half_width], x6], dim=1)
        x7 = F.relu(self.fc1_6(x7))
        x8 = torch.cat([x[:, half_width:half_width*2], x7], dim=1)
        x8 = F.relu(self.fc1_7(x8))
        x0 = torch.cat([x1, x2, x3, x4, x5, x6, x7, x8], dim=1)
x = x_conv1
        half_width = 720
        x1 = x[:, 0:half_width]
        x1 = F.relu(self.fcp(x1))
        x2 = torch.cat([x[:, half_width:half_width*2], x1], dim=1)
        x2 = F.relu(self.fcp_1(x2))
        x3 = torch.cat([x[:, 0:half_width], x2], dim=1)
        x3 = F.relu(self.fcp_2(x3))
        x4 = torch.cat([x[:, half_width:half_width*2], x3], dim=1)
        x4 = F.relu(self.fcp_3(x4))
        x5 = torch.cat([x[:, 0:half_width], x4], dim=1)
        x5 = F.relu(self.fcp_4(x5))
        x6 = torch.cat([x[:, half_width:half_width*2], x5], dim=1)
        x6 = F.relu(self.fcp_5(x6))
        x7 = torch.cat([x[:, 0:half_width], x6], dim=1)
        x7 = F.relu(self.fcp_6(x7))
        x8 = torch.cat([x[:, half_width:half_width*2], x7], dim=1)
        x8 = F.relu(self.fcp_7(x8))
        x0 = torch.cat([x1, x2, x3, x4, x5, x6, x7, x8, x0], dim=1)
x = x_real
        half_width = 392
        x1 = x[:, 0:half_width]
        x1 = F.relu(self.fcp2(x1))
        x2 = torch.cat([x[:, half_width:half_width*2], x1], dim=1)
        x2 = F.relu(self.fcp2_1(x2))
        x3 = torch.cat([x[:, 0:half_width], x2], dim=1)
        x3 = F.relu(self.fcp2_2(x3))
        x4 = torch.cat([x[:, half_width:half_width*2], x3], dim=1)
        x4 = F.relu(self.fcp2_3(x4))
        x5 = torch.cat([x[:, 0:half_width], x4], dim=1)
        x5 = F.relu(self.fcp2_4(x5))
        x6 = torch.cat([x[:, half_width:half_width*2], x5], dim=1)
        x6 = F.relu(self.fcp2_5(x6))
        x7 = torch.cat([x[:, 0:half_width], x6], dim=1)
        x7 = F.relu(self.fcp2_6(x7))
        x8 = torch.cat([x[:, half_width:half_width*2], x7], dim=1)
        x8 = F.relu(self.fcp2_7(x8))
        x9 = torch.cat([x[:, 0:half_width], x8], dim=1)
        x9 = F.relu(self.fcp2_8(x9))
        x10 = torch.cat([x[:, half_width:half_width*2], x9], dim=1)
        x10 = F.relu(self.fcp2_9(x10))
        x = torch.cat([x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x0], dim=1)
        x = F.relu(self.fc2(x))
        x = F.log_softmax(self.fc3(x), dim=1)  # explicit dim avoids the implicit-dim deprecation
return x
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
network.train()
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
output = network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
def test(random_seed, epoch, max_accuracy):
network.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
output = network(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).sum()
test_loss /= len(test_loader.dataset)
test_losses.append(test_loss)
accuracy = 100. * correct / len(test_loader.dataset)
    if accuracy > max_accuracy:
        max_accuracy = accuracy
    if epoch % 5 == 0:
print('Seed: {:.0f}, Epoch: {:.0f}; Test: Avg. loss: {:.4f}, Accuracy: {}/{}, Max Accuracy = ({:.2f}%)'.format(
random_seed, epoch,
test_loss, correct, len(test_loader.dataset),
max_accuracy))
return max_accuracy
for random_seed in range(2):
max_accuracy = 0
    learning_rate = 0.1
torch.manual_seed(random_seed)
#test(random_seed)
network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
momentum=momentum)
for epoch in range(1, n_epochs + 1):
train(epoch)
        max_accuracy2 = test(random_seed, epoch, max_accuracy)
        if max_accuracy == max_accuracy2:
            # no improvement: draw a new, smaller learning rate and rebuild
            # the optimizer so the change actually takes effect
            learning_rate = 0.1 * float(torch.rand(1) ** 5)
            optimizer = optim.SGD(network.parameters(), lr=learning_rate,
                                  momentum=momentum)
        else:
            max_accuracy = max_accuracy2
file_length: 9,711 | avg_line_length: 32.839721 | max_line_length: 117 | extension_type: py

repo: SpinalNet | file: SpinalNet-master/MNIST/SpinalNet_MNIST.py
# -*- coding: utf-8 -*-
"""
This script contains the SpinalNet MNIST code.
It usually provides better performance for the same number of epochs.
The same code can also be used for KMNIST, QMNIST and FashionMNIST:
change torchvision.datasets.MNIST to torchvision.datasets.FashionMNIST
for FashionMNIST simulations.
@author: Dipu
"""
import torch
import torchvision
n_epochs = 8
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 100
first_HL = 8
torch.backends.cudnn.enabled = False
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(160, first_HL) #changed from 16 to 8
self.fc1_1 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_2 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_3 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_4 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_5 = nn.Linear(160 + first_HL, first_HL) #added
self.fc2 = nn.Linear(first_HL*6, 10) # changed first_HL from second_HL
#self.fc1 = nn.Linear(320, 50)
#self.fc2 = nn.Linear(50, 10)
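    # forward() splits the 320 conv features into halves of 160; each hidden
    # layer sees one half plus the previous layer's output, and the six
    # first_HL-wide outputs are concatenated, matching fc2's first_HL*6 input.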
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x1 = x[:, 0:160]
x1 = F.relu(self.fc1(x1))
        x2 = torch.cat([x[:, 160:320], x1], dim=1)
        x2 = F.relu(self.fc1_1(x2))
        x3 = torch.cat([x[:, 0:160], x2], dim=1)
        x3 = F.relu(self.fc1_2(x3))
        x4 = torch.cat([x[:, 160:320], x3], dim=1)
        x4 = F.relu(self.fc1_3(x4))
        x5 = torch.cat([x[:, 0:160], x4], dim=1)
        x5 = F.relu(self.fc1_4(x5))
        x6 = torch.cat([x[:, 160:320], x5], dim=1)
        x6 = F.relu(self.fc1_5(x6))
        x = torch.cat([x1, x2, x3, x4, x5, x6], dim=1)
x = self.fc2(x)
#x = F.relu(self.fc1(x))
#x = F.dropout(x, training=self.training)
#x = self.fc2(x)
        return F.log_softmax(x, dim=1)
network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
momentum=momentum)
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
network.train()
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
output = network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
train_losses.append(loss.item())
train_counter.append(
(batch_idx*64) + ((epoch-1)*len(train_loader.dataset)))
def test():
network.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
output = network(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).sum()
test_loss /= len(test_loader.dataset)
test_losses.append(test_loss)
print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
test()
for epoch in range(1, n_epochs + 1):
train(epoch)
test()
#%%
fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter, test_losses, color='red')
plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')
fig
file_length: 5,560 | avg_line_length: 29.387978 | max_line_length: 78 | extension_type: py

repo: SpinalNet | file: SpinalNet-master/MNIST/Arch2_QMNIST.py
# -*- coding: utf-8 -*-
"""
This script contains the SpinalNet Arch2 QMNIST code.
@author: Dipu
"""
import torch
import torchvision
n_epochs = 200
batch_size_train = 64
batch_size_test = 1000
momentum = 0.5
log_interval = 5000
first_HL = 50
prob = 0.5
random_seed = 1
torch.backends.cudnn.enabled = False
torch.manual_seed(random_seed)
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.QMNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.RandomPerspective(),
torchvision.transforms.RandomRotation(10, fill=(0,)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.QMNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(160, first_HL)
self.fc1_1 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_2 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_3 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_4 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_5 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_6 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_7 = nn.Linear(160 + first_HL, first_HL) #added
self.fcp = nn.Linear(720, first_HL)
self.fcp_1 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_2 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_3 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_4 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_5 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_6 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_7 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp2 = nn.Linear(392, first_HL)
self.fcp2_1 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_2 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_3 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_4 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_5 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_6 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_7 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_8 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_9 = nn.Linear(392 + first_HL, first_HL) #added
self.fc2 = nn.Linear(first_HL*26, 50) # changed first_HL from second_HL
self.fc3 = nn.Linear(50, 10) # changed first_HL from second_HL
#self.fc1 = nn.Linear(320, 50)
#self.fc2 = nn.Linear(50, 10)
def forward(self, x):
        x_real = x.view(-1, 28*28)
x = self.conv1(x)
x = F.relu(F.max_pool2d(x, 2))
x_conv1 = x.view(-1, 1440)
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
half_width = 160
x1 = x[:, 0:half_width]
x1 = F.relu(self.fc1(x1))
        x2 = torch.cat([x[:, half_width:half_width*2], x1], dim=1)
        x2 = F.relu(self.fc1_1(x2))
        x3 = torch.cat([x[:, 0:half_width], x2], dim=1)
        x3 = F.relu(self.fc1_2(x3))
        x4 = torch.cat([x[:, half_width:half_width*2], x3], dim=1)
        x4 = F.relu(self.fc1_3(x4))
        x5 = torch.cat([x[:, 0:half_width], x4], dim=1)
        x5 = F.relu(self.fc1_4(x5))
        x6 = torch.cat([x[:, half_width:half_width*2], x5], dim=1)
        x6 = F.relu(self.fc1_5(x6))
        x7 = torch.cat([x[:, 0:half_width], x6], dim=1)
        x7 = F.relu(self.fc1_6(x7))
        x8 = torch.cat([x[:, half_width:half_width*2], x7], dim=1)
        x8 = F.relu(self.fc1_7(x8))
        x0 = torch.cat([x1, x2, x3, x4, x5, x6, x7, x8], dim=1)
x = x_conv1
        half_width = 720
        x1 = x[:, 0:half_width]
        x1 = F.relu(self.fcp(x1))
        x2 = torch.cat([x[:, half_width:half_width*2], x1], dim=1)
        x2 = F.relu(self.fcp_1(x2))
        x3 = torch.cat([x[:, 0:half_width], x2], dim=1)
        x3 = F.relu(self.fcp_2(x3))
        x4 = torch.cat([x[:, half_width:half_width*2], x3], dim=1)
        x4 = F.relu(self.fcp_3(x4))
        x5 = torch.cat([x[:, 0:half_width], x4], dim=1)
        x5 = F.relu(self.fcp_4(x5))
        x6 = torch.cat([x[:, half_width:half_width*2], x5], dim=1)
        x6 = F.relu(self.fcp_5(x6))
        x7 = torch.cat([x[:, 0:half_width], x6], dim=1)
        x7 = F.relu(self.fcp_6(x7))
        x8 = torch.cat([x[:, half_width:half_width*2], x7], dim=1)
        x8 = F.relu(self.fcp_7(x8))
        x0 = torch.cat([x1, x2, x3, x4, x5, x6, x7, x8, x0], dim=1)
x = x_real
        half_width = 392
        x1 = x[:, 0:half_width]
        x1 = F.relu(self.fcp2(x1))
        x2 = torch.cat([x[:, half_width:half_width*2], x1], dim=1)
        x2 = F.relu(self.fcp2_1(x2))
        x3 = torch.cat([x[:, 0:half_width], x2], dim=1)
        x3 = F.relu(self.fcp2_2(x3))
        x4 = torch.cat([x[:, half_width:half_width*2], x3], dim=1)
        x4 = F.relu(self.fcp2_3(x4))
        x5 = torch.cat([x[:, 0:half_width], x4], dim=1)
        x5 = F.relu(self.fcp2_4(x5))
        x6 = torch.cat([x[:, half_width:half_width*2], x5], dim=1)
        x6 = F.relu(self.fcp2_5(x6))
        x7 = torch.cat([x[:, 0:half_width], x6], dim=1)
        x7 = F.relu(self.fcp2_6(x7))
        x8 = torch.cat([x[:, half_width:half_width*2], x7], dim=1)
        x8 = F.relu(self.fcp2_7(x8))
        x9 = torch.cat([x[:, 0:half_width], x8], dim=1)
        x9 = F.relu(self.fcp2_8(x9))
        x10 = torch.cat([x[:, half_width:half_width*2], x9], dim=1)
        x10 = F.relu(self.fcp2_9(x10))
        x = torch.cat([x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x0], dim=1)
        x = F.relu(self.fc2(x))
        x = F.log_softmax(self.fc3(x), dim=1)  # explicit dim avoids the implicit-dim deprecation
return x
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
network.train()
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
output = network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
def test(random_seed, epoch, max_accuracy):
network.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
output = network(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).sum()
test_loss /= len(test_loader.dataset)
test_losses.append(test_loss)
accuracy = 100. * correct / len(test_loader.dataset)
    if accuracy > max_accuracy:
        max_accuracy = accuracy
    if epoch % 5 == 0:
print('Seed: {:.0f}, Epoch: {:.0f}; Test: Avg. loss: {:.4f}, Accuracy: {}/{}, Max Accuracy = ({:.2f}%)'.format(
random_seed, epoch,
test_loss, correct, len(test_loader.dataset),
max_accuracy))
return max_accuracy
for random_seed in range(2):
max_accuracy = 0
    learning_rate = 0.1
torch.manual_seed(random_seed)
#test(random_seed)
network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
momentum=momentum)
for epoch in range(1, n_epochs + 1):
train(epoch)
        max_accuracy2 = test(random_seed, epoch, max_accuracy)
        if max_accuracy == max_accuracy2:
            # accuracy plateaued: decay the learning rate and rebuild the
            # optimizer so the new rate actually takes effect
            learning_rate = learning_rate * 0.9
            optimizer = optim.SGD(network.parameters(), lr=learning_rate,
                                  momentum=momentum)
        else:
            max_accuracy = max_accuracy2
#workbook.close()
file_length: 9,751 | avg_line_length: 32.512027 | max_line_length: 117 | extension_type: py

repo: SpinalNet | file: SpinalNet-master/MNIST/default_pytorch_EMNIST.py
# -*- coding: utf-8 -*-
"""
This script contains the default EMNIST code for comparison.
The code is collected from:
nextjournal.com/gkoehler/pytorch-mnist
Since EMNIST needs split='digits', it gets a separate file.
@author: Dipu
"""
import torch
import torchvision
n_epochs = 8
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 100
torch.backends.cudnn.enabled = False
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.EMNIST('/files/', split='digits', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.EMNIST('/files/', split='digits', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
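        # 320 = 20 channels * 4 * 4: 28 -> 24 (conv 5x5) -> 12 (pool) -> 8 (conv 5x5) -> 4 (pool)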
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
        return F.log_softmax(x, dim=1)
network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
momentum=momentum)
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
network.train()
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
output = network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
train_losses.append(loss.item())
train_counter.append(
(batch_idx*64) + ((epoch-1)*len(train_loader.dataset)))
def test():
network.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
output = network(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).sum()
test_loss /= len(test_loader.dataset)
test_losses.append(test_loss)
print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
test()
for epoch in range(1, n_epochs + 1):
train(epoch)
test()
#%%
fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter, test_losses, color='red')
plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')
fig
file_length: 4,274 | avg_line_length: 28.081633 | max_line_length: 84 | extension_type: py

repo: SpinalNet | file: SpinalNet-master/MNIST/Arch2_MNIST.py
# -*- coding: utf-8 -*-
"""
This script contains the SpinalNet Arch2 MNIST code.
@author: Dipu
"""
import torch
import torchvision
import numpy as np
n_epochs = 200
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 500
first_HL = 30
max_accuracy = 0.0
torch.backends.cudnn.enabled = False
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.RandomPerspective(),
torchvision.transforms.RandomRotation(10, fill=(0,)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(160, first_HL)
self.fc1_1 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_2 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_3 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_4 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_5 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_6 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_7 = nn.Linear(160 + first_HL, first_HL) #added
self.fcp = nn.Linear(720, first_HL)
self.fcp_1 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_2 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_3 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_4 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_5 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_6 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_7 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp2 = nn.Linear(392, first_HL)
self.fcp2_1 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_2 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_3 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_4 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_5 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_6 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_7 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_8 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_9 = nn.Linear(392 + first_HL, first_HL) #added
self.fc2 = nn.Linear(first_HL*26, 50) # changed first_HL from second_HL
self.fc3 = nn.Linear(50, 10) # changed first_HL from second_HL
#self.fc1 = nn.Linear(320, 50)
#self.fc2 = nn.Linear(50, 10)
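    # The three pathways in forward() (conv2 features, conv1 features, raw
    # pixels) contribute 8, 8 and 10 hidden outputs respectively, so fc2
    # receives 26 * first_HL concatenated features.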
def forward(self, x):
        x_real = x.view(-1, 28*28)
x = self.conv1(x)
x = F.relu(F.max_pool2d(x, 2))
x_conv1 = x.view(-1, 1440)
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
half_width = 160
x1 = x[:, 0:half_width]
x1 = F.relu(self.fc1(x1))
        x2 = torch.cat([x[:, half_width:half_width*2], x1], dim=1)
        x2 = F.relu(self.fc1_1(x2))
        x3 = torch.cat([x[:, 0:half_width], x2], dim=1)
        x3 = F.relu(self.fc1_2(x3))
        x4 = torch.cat([x[:, half_width:half_width*2], x3], dim=1)
        x4 = F.relu(self.fc1_3(x4))
        x5 = torch.cat([x[:, 0:half_width], x4], dim=1)
        x5 = F.relu(self.fc1_4(x5))
        x6 = torch.cat([x[:, half_width:half_width*2], x5], dim=1)
        x6 = F.relu(self.fc1_5(x6))
        x7 = torch.cat([x[:, 0:half_width], x6], dim=1)
        x7 = F.relu(self.fc1_6(x7))
        x8 = torch.cat([x[:, half_width:half_width*2], x7], dim=1)
        x8 = F.relu(self.fc1_7(x8))
        x0 = torch.cat([x1, x2, x3, x4, x5, x6, x7, x8], dim=1)
x = x_conv1
        half_width = 720
        x1 = x[:, 0:half_width]
        x1 = F.relu(self.fcp(x1))
        x2 = torch.cat([x[:, half_width:half_width*2], x1], dim=1)
        x2 = F.relu(self.fcp_1(x2))
        x3 = torch.cat([x[:, 0:half_width], x2], dim=1)
        x3 = F.relu(self.fcp_2(x3))
        x4 = torch.cat([x[:, half_width:half_width*2], x3], dim=1)
        x4 = F.relu(self.fcp_3(x4))
        x5 = torch.cat([x[:, 0:half_width], x4], dim=1)
        x5 = F.relu(self.fcp_4(x5))
        x6 = torch.cat([x[:, half_width:half_width*2], x5], dim=1)
        x6 = F.relu(self.fcp_5(x6))
        x7 = torch.cat([x[:, 0:half_width], x6], dim=1)
        x7 = F.relu(self.fcp_6(x7))
        x8 = torch.cat([x[:, half_width:half_width*2], x7], dim=1)
        x8 = F.relu(self.fcp_7(x8))
        x0 = torch.cat([x1, x2, x3, x4, x5, x6, x7, x8, x0], dim=1)
x = x_real
        half_width = 392
        x1 = x[:, 0:half_width]
        x1 = F.relu(self.fcp2(x1))
        x2 = torch.cat([x[:, half_width:half_width*2], x1], dim=1)
        x2 = F.relu(self.fcp2_1(x2))
        x3 = torch.cat([x[:, 0:half_width], x2], dim=1)
        x3 = F.relu(self.fcp2_2(x3))
        x4 = torch.cat([x[:, half_width:half_width*2], x3], dim=1)
        x4 = F.relu(self.fcp2_3(x4))
        x5 = torch.cat([x[:, 0:half_width], x4], dim=1)
        x5 = F.relu(self.fcp2_4(x5))
        x6 = torch.cat([x[:, half_width:half_width*2], x5], dim=1)
        x6 = F.relu(self.fcp2_5(x6))
        x7 = torch.cat([x[:, 0:half_width], x6], dim=1)
        x7 = F.relu(self.fcp2_6(x7))
        x8 = torch.cat([x[:, half_width:half_width*2], x7], dim=1)
        x8 = F.relu(self.fcp2_7(x8))
        x9 = torch.cat([x[:, 0:half_width], x8], dim=1)
        x9 = F.relu(self.fcp2_8(x9))
        x10 = torch.cat([x[:, half_width:half_width*2], x9], dim=1)
        x10 = F.relu(self.fcp2_9(x10))
        x = torch.cat([x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x0], dim=1)
        x = F.relu(self.fc2(x))
        x = F.log_softmax(self.fc3(x), dim=1)  # explicit dim avoids the implicit-dim deprecation
return x
device = 'cuda' if torch.cuda.is_available() else 'cpu'
network = Net().to(device)
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
momentum=momentum)
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
network.train()
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
data = data.to(device)
target = target.to(device)
output = network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
# print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
# epoch, batch_idx * len(data), len(train_loader.dataset),
# 100. * batch_idx / len(train_loader), loss.item()))
train_losses.append(loss.item())
train_counter.append(
(batch_idx*64) + ((epoch-1)*len(train_loader.dataset)))
def test(max_accuracy, epoch):
network.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data = data.to(device)
target = target.to(device)
output = network(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).sum()
test_loss /= len(test_loader.dataset)
test_losses.append(test_loss)
    accuracy_local = 100. * correct / len(test_loader.dataset)
    if accuracy_local > max_accuracy:
        max_accuracy = accuracy_local
    print('Epoch: {} Avg. loss: {:.4f}, Accuracy: {}/{} ({:.2f}%), Maximum Accuracy: {:.2f}%'.format(
        epoch, test_loss, correct, len(test_loader.dataset), accuracy_local, max_accuracy))
return max_accuracy
for epoch in range(1, n_epochs + 1):
    train(epoch)
    max_accuracy = test(max_accuracy, epoch)
    if epoch > 100:
        # after epoch 100, restart SGD with a randomly perturbed learning rate;
        # np.asscalar was removed from NumPy, so use a plain float instead
        optimizer = optim.SGD(network.parameters(),
                              lr=learning_rate * 5 * float(np.random.rand() ** 3),
                              momentum=momentum)
#%%
fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter, test_losses, color='red')
plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')
fig
#%%
with torch.no_grad():
output = network(example_data)
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Prediction: {}".format(
output.data.max(1, keepdim=True)[1][i].item()))
plt.xticks([])
plt.yticks([])
fig
file_length: 10,686 | avg_line_length: 33.253205 | max_line_length: 105 | extension_type: py

repo: SpinalNet | file: SpinalNet-master/MNIST/default_pytorch_MNIST.py
# -*- coding: utf-8 -*-
"""
This script contains the default MNIST code for comparison.
The code is collected from:
nextjournal.com/gkoehler/pytorch-mnist
The same code can also be used for KMNIST, QMNIST and FashionMNIST:
change torchvision.datasets.MNIST to torchvision.datasets.FashionMNIST
for FashionMNIST simulations.
@author: Dipu
"""
import torch
import torchvision
n_epochs = 8
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 100
torch.backends.cudnn.enabled = False
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
        return F.log_softmax(x, dim=1)
network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
momentum=momentum)
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
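# train_counter/test_counter track examples seen; they feed the loss plot at
# the end of the script.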
def train(epoch):
network.train()
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
output = network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
train_losses.append(loss.item())
train_counter.append(
(batch_idx*64) + ((epoch-1)*len(train_loader.dataset)))
def test():
network.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
output = network(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).sum()
test_loss /= len(test_loader.dataset)
test_losses.append(test_loss)
print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
test()
for epoch in range(1, n_epochs + 1):
train(epoch)
test()
#%%
fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter, test_losses, color='red')
plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')
fig
file_length: 4,349 | avg_line_length: 28.391892 | max_line_length: 76 | extension_type: py