code stringlengths 17 6.64M |
|---|
def get_blocks(n, num_threads):
    """Return the number of CUDA blocks needed to cover ``n`` elements with
    ``num_threads`` threads per block, capped so the grid size never exceeds
    ``CUDA_MAX_GRID_DIM`` (each thread then processes several elements)."""
    max_per_launch = num_threads * CUDA_MAX_GRID_DIM
    # ceil(n / max_per_launch): elements each thread must handle
    n_per_instance = (n + max_per_launch - 1) // max_per_launch
    covered_per_block = num_threads * n_per_instance
    # ceil(n / covered_per_block): blocks required
    return (n + covered_per_block - 1) // covered_per_block
|
def compile_kernel(kernel, filename, functioname):
    # Compile CUDA C source to PTX and return the named device function.
    # NOTE(review): relies on `Program` (pynvrtc-style compiler wrapper) and a
    # `function` module (cupy.cuda.function-style) imported elsewhere in this
    # file — confirm which bindings are in scope before reusing.
    program = Program(kernel, filename)
    ptx = program.compile()
    m = function.Module()
    # Module.load expects raw bytes of the PTX text.
    m.load(bytes(ptx.encode()))
    f = m.get_function(functioname)
    return f
|
class WaitPrint(threading.Thread):
    """Background thread that waits ``t`` seconds (polling every 0.1 s) and
    then prints ``message`` without a trailing newline.  Calling :meth:`stop`
    before the wait elapses cancels the print."""

    def __init__(self, t, message):
        super().__init__()
        self.t = t
        self.message = message
        self.running = True

    def stop(self):
        """Cancel the pending print."""
        self.running = False

    def run(self):
        # Sleep in 0.1 s slices so stop() is honoured promptly.
        remaining = int(self.t // 0.1)
        while remaining > 0:
            time.sleep(0.1)
            if not self.running:
                return
            remaining -= 1
        print(self.message, end='')
|
def show_running(func):
    """Decorator: announce long-running calls.

    A background ``WaitPrint`` prints ``name(args)... `` if the call takes
    longer than 2 seconds; once that announcement has fired, the elapsed
    time is printed when the call returns."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        rendered = [repr(a) for a in args]
        rendered += ['{}={}'.format(key, repr(val)) for key, val in kwargs.items()]
        notifier = WaitPrint(2, '{}({})... '.format(func.__name__, ', '.join(rendered)))
        notifier.start()
        start = time.perf_counter()
        result = func(*args, **kwargs)
        if notifier.is_alive():
            # Finished before the announcement fired: cancel it silently.
            notifier.stop()
        else:
            print('done in {:.0f} seconds'.format(time.perf_counter() - start))
        return result
    return wrapper
|
def cached_dirpklgz(dirname):
    """Decorator factory: cache a function's results as gzipped pickles
    inside directory ``dirname``.

    An ``index.pkl`` file in ``dirname`` maps argument tuples to numbered
    ``<n>.pkl.gz`` result files.  Results are also memoised in-process via
    ``lru_cache``, so the disk is consulted at most once per argument tuple
    per run.  Arguments must be hashable and picklable.
    """
    def decorator(func):
        """The actual decorator."""
        @lru_cache(maxsize=None)
        @wraps(func)
        def wrapper(*args):
            """The wrapper of the function."""
            # exist_ok=True replaces the former try/except FileExistsError.
            os.makedirs(dirname, exist_ok=True)
            indexfile = os.path.join(dirname, 'index.pkl')
            # Load the argument->filename index (empty on first use).
            try:
                with open(indexfile, 'rb') as file:
                    index = pickle.load(file)
            except FileNotFoundError:
                index = {}
            try:
                filename = index[args]
            except KeyError:
                # First time these args are seen: assign the next file name
                # and persist the updated index immediately.
                index[args] = filename = '{}.pkl.gz'.format(len(index))
                with open(indexfile, 'wb') as file:
                    pickle.dump(index, file)
            filepath = os.path.join(dirname, filename)
            try:
                with gzip.open(filepath, 'rb') as file:
                    print('load {}... '.format(filename), end='')
                    result = pickle.load(file)
            except FileNotFoundError:
                print('compute {}... '.format(filename), end='')
                sys.stdout.flush()
                result = func(*args)
                print('save {}... '.format(filename), end='')
                with gzip.open(filepath, 'wb') as file:
                    pickle.dump(result, file)
            print('done')
            return result
        return wrapper
    return decorator
|
def test_so3_rfft(b_in, b_out, device):
    # Check that the fast SO(3) real FFT (so3_rfft) agrees with the naive
    # linear transform (so3_rft) applied to quadrature-weighted samples.
    x = torch.randn((2 * b_in), (2 * b_in), (2 * b_in), dtype=torch.float, device=device)
    from s2cnn.soft.so3_fft import so3_rfft
    y1 = so3_rfft(x, b_out=b_out)
    from s2cnn import so3_rft, so3_soft_grid
    import lie_learn.spaces.S3 as S3
    # Quadrature weights along the first (beta) axis of the sampling grid.
    weights = torch.tensor(S3.quadrature_weights(b_in), dtype=torch.float, device=device)
    x2 = torch.einsum('bac,b->bac', (x, weights))
    y2 = so3_rft(x2.view((- 1)), b_out, so3_soft_grid(b_in))
    # Relative tolerance: max abs error under 1e-4 of the mean magnitude.
    assert ((y1 - y2).abs().max().item() < (0.0001 * y1.abs().mean().item()))
|
def test_inverse(f, g, b_in, b_out, device, complex):
if complex:
x = torch.randn((2 * b_in), (2 * b_in), (2 * b_in), 2, dtype=torch.float, device=device)
else:
x = torch.randn((2 * b_in), (2 * b_in), (2 * b_in), dtype=torch.float, device=device)
x = g(f(x, b_out=b_out), b_out=b_in)
y = g(f(x, b_out=b_out), b_out=b_in)
assert ((x - y).abs().max().item() < (0.0001 * y.abs().mean().item()))
|
def test_inverse2(f, g, b_in, b_out, device):
x = torch.randn(((b_in * ((4 * (b_in ** 2)) - 1)) // 3), 2, dtype=torch.float, device=device)
x = g(f(x, b_out=b_out), b_out=b_in)
y = g(f(x, b_out=b_out), b_out=b_in)
assert ((x - y).abs().max().item() < (0.0001 * y.abs().mean().item()))
|
def compare_cpu_gpu(f, x):
    """Assert that f produces numerically identical results on CPU and GPU,
    to within 1e-4 of the CPU result's standard deviation."""
    cpu_out = f(x.cpu())
    gpu_out = f(x.cuda()).cpu()
    rel_err = (cpu_out - gpu_out).abs().max().item() / cpu_out.std().item()
    assert rel_err < 0.0001
|
class BatchGenerator():
    """Iterate over a video dataset, returning filenames of frames to load.
    preprocessing.py can be used in combination with BatchGenerator to read
    and preprocess frames.

    Args:
        num_labels: number of labels in the dataset.
        filename: part of the filename before _train.pkl or _test.pkl in the
            annotations folder (e.g. Annotations/D1).
        temporal_window: number of frames sampled in a single video segment.
        flow_data_path: path to the folder containing flow frames.
        rgb_data_path: path to the folder containing rgb frames.
        random_sync: half the batch with corresponding rgb/flow examples,
            half with non-corresponding examples.
        synchronised: flow and rgb frames are temporally synchronised for
            corresponding examples.

    nextBatch / nextBatchEval return rgb and flow filenames of frames to
    load in a batch, labels, and correspondence labels.
    """
    def __init__(self, num_labels, filename, temporal_window=16, flow_data_path='flow_frames_parent/flow', rgb_data_path='rgb_frames_parent/frames', synchronised=False, random_sync=False):
        self.random_sync = random_sync
        self.synchronised = synchronised
        self.temporal_window = temporal_window
        self.flow_data_path = (flow_data_path + '/')
        self.rgb_data_path = (rgb_data_path + '/')
        self.filename = filename
        self.num_labels = num_labels
        # Parse train/test annotation pickles into (segments, one-hot labels).
        (dataset_data, dataset_labels) = self._parse_inputs_df((filename + '_train.pkl'))
        (dataset_data_test, dataset_labels_test) = self._parse_inputs_df((filename + '_test.pkl'))
        dataset_data_total = np.arange(dataset_data.shape[0])
        dataset_test_total = np.arange(dataset_data_test.shape[0])
        dataset_data_train = (dataset_data, dataset_labels)
        dataset_data_test = (dataset_data_test, dataset_labels_test)
        # Keyed by the `test` flag: False -> train split, True -> test split.
        self.dataset_data = {False: dataset_data_train, True: dataset_data_test}
        dataset_data_train_total = dataset_data_total
        dataset_data_test_total = dataset_test_total
        # Indices not yet consumed in the current epoch, per split.
        self.dataset_total = {False: dataset_data_train_total, True: dataset_data_test_total}
    def reset_dataset(self, test=False):
        """Reset the dataset iterator to include all data."""
        self.dataset_total[test] = np.arange(self.dataset_data[test][0].shape[0])
    def _parse_inputs_df(self, filename):
        """Read an annotation pickle; return (segments array, one-hot labels)."""
        df = pd.read_pickle(filename)
        data = []
        for (_, line) in df.iterrows():
            # Segment = [participant/video path, start frame, stop frame].
            image = [((line['participant_id'] + '/') + line['video_id']), line['start_frame'], line['stop_frame']]
            labels = line['verb_class']
            one_hot = np.zeros(self.num_labels)
            one_hot[labels] = 1.0
            data.append((image[0], image[1], image[2], one_hot))
        (segment, start, end, softmaxlabels) = list(zip(*data))
        labels = list(softmaxlabels)
        train = list(zip(segment, start, end))
        train = np.array(train)
        labels = np.array(labels)
        return (train, labels)
    def nextBatch(self, batch_size, test=False):
        """Get the next training batch.

        Loops through the dataset, randomly sampling data without replacement;
        returns (batch_rgb, batch_flow, batch_labels, sychron) where sychron
        holds per-example rgb/flow correspondence labels.
        """
        batch_size = int(batch_size)
        (dataset_data, dataset_labels) = self.dataset_data[test]
        dataset_total = self.dataset_total[test]
        file = self.filename
        # Draw without replacement from the remaining pool; when the pool runs
        # short, top the batch up from the rest of the data and end the epoch.
        if (len(dataset_total) > batch_size):
            sample_idx_t = np.random.choice(range(dataset_total.shape[0]), size=batch_size, replace=False)
            sample_idx = dataset_total[sample_idx_t]
            self.dataset_total[test] = np.delete(dataset_total, sample_idx_t, axis=0)
        else:
            sample_idx = dataset_total
            remaining = (batch_size - sample_idx.shape[0])
            dataset_total_temp = np.arange(dataset_data.shape[0])
            dataset_total_temp = np.delete(dataset_total_temp, sample_idx, axis=0)
            sample_idx_t = np.random.choice(range(dataset_total_temp.shape[0]), size=remaining, replace=False)
            sample_idx = np.concatenate((sample_idx, dataset_total_temp[sample_idx_t]))
            self.dataset_total[test] = np.delete(dataset_total_temp, sample_idx_t, axis=0)
            print('Done Epoch')
        sample = dataset_data[sample_idx]
        if self.synchronised:
            sychron = ([True] * len(sample))
        else:
            sychron = ([False] * len(sample))
        sample = [self.sample_segment(filen, synchronise=to_sync) for (filen, to_sync) in zip(sample, sychron)]
        (sample_rgb, sample_flow) = zip(*sample)
        if self.random_sync:
            # First half keeps matching rgb/flow pairs; second half rotates
            # the flow samples by one so rgb and flow do not correspond.
            half = int((len(sample) / 2))
            fixed_sample_rgb = sample_rgb[:half]
            fixed_sample_flow = sample_flow[:half]
            variate_sample_rgb = sample_rgb[half:]
            variate_sample_flow = sample_flow[half:]
            variate_sample_flow = (variate_sample_flow[1:] + variate_sample_flow[:1])
            sample_flow = (fixed_sample_flow + variate_sample_flow)
            sample_rgb = (fixed_sample_rgb + variate_sample_rgb)
            sychron = (([True] * len(fixed_sample_rgb)) + ([False] * len(variate_sample_rgb)))
        elif self.synchronised:
            sychron = ([True] * len(sample))
        else:
            # NOTE(review): this branch also marks everything True, making it
            # identical to the elif above and overriding the False labels set
            # before sampling — confirm this is intended.
            sychron = ([True] * len(sample))
        sample_labels = dataset_labels[sample_idx]
        batch_labels = sample_labels
        batch_rgb = list(sample_rgb)
        batch_flow = list(sample_flow)
        # Shuffle rgb/flow/labels/correspondence together, keeping alignment.
        combined = list(zip(batch_rgb, batch_flow, batch_labels, sychron))
        shuffle(combined)
        (batch_rgb, batch_flow, batch_labels, sychron) = list(zip(*combined))
        batch_rgb = np.array(batch_rgb)
        batch_flow = np.array(batch_flow)
        return (batch_rgb, batch_flow, batch_labels, sychron)
    def nextBatchEval(self, batch_size, test=True):
        """Get the next evaluation batch (5 equidistant windows per segment).

        Loops through the dataset, randomly sampling without replacement;
        returns (done, batch_rgb, batch_flow, batch_labels) where `done` marks
        the end of the pass over the split.
        """
        (dataset_data, dataset_labels) = self.dataset_data[test]
        dataset_total = self.dataset_total[test]
        dataset_data = dataset_data
        dataset_labels = dataset_labels
        dataset_total = dataset_total
        batch_rgb = []
        batch_flow = []
        batch_labels = np.empty(shape=[0, self.num_labels])
        done = True
        if (len(dataset_total) != 0):
            done = False
            if (len(dataset_total) > batch_size):
                sample_idx = np.random.choice(range(dataset_total.shape[0]), size=batch_size, replace=False)
            else:
                # Fewer remaining examples than batch_size: take them all.
                sample_idx = range(dataset_total.shape[0])
                done = True
            sample = dataset_data[dataset_total[sample_idx]]
            sample = [self.sample_segment_test(filen) for filen in sample]
            (sample_rgb, sample_flow) = zip(*sample)
            sample_labels = dataset_labels[dataset_total[sample_idx]]
            self.dataset_total[test] = np.delete(dataset_total, sample_idx, axis=0)
            batch_labels = np.concatenate((sample_labels, batch_labels))
            batch_rgb = (list(sample_rgb) + batch_rgb)
            batch_flow = (list(sample_flow) + batch_flow)
        if done:
            # Pass finished: refill the pool for the next evaluation pass.
            self.dataset_total[test] = np.arange(dataset_data.shape[0])
        batch_rgb = np.array(batch_rgb)
        batch_flow = np.array(batch_flow)
        return (done, batch_rgb, batch_flow, batch_labels)
    def sample_segment_test(self, s):
        """Sample rgb and flow frame windows from a video segment `s`.

        Samples 5 windows, equidistant along the segment.
        s = ["filename", start_frame, end_frame]
        """
        def _path_to_dataset(flow):
            # Returns (root path, frame-name prefix, zero-pad width).
            if flow:
                left = self.flow_data_path
            else:
                left = self.rgb_data_path
            right = '/frame_'
            numframe = 10
            return (left, right, numframe)
        def flow_filename(frameno, num_stack=1):
            # Interleaved u/v flow frame paths for a stack centred on frameno.
            (left, right, fill_frame) = _path_to_dataset(True)
            left_frame = (frameno - int(((num_stack - 1) / 2)))
            right_frame = (frameno + int((num_stack / 2)))
            filename = []
            for no in range(left_frame, (right_frame + 1)):
                filename.append((((((left + str(s[0])) + '/u') + right) + str(no).zfill(fill_frame)) + '.jpg'))
                filename.append((((((left + str(s[0])) + '/v') + right) + str(no).zfill(fill_frame)) + '.jpg'))
            return filename
        def rgb_filename(frameno):
            (left, right, fill_frame) = _path_to_dataset(False)
            filename = ((((left + str(s[0])) + right) + str(frameno).zfill(fill_frame)) + '.jpg')
            return filename
        def c3d_sampling():
            # Pick 5 equidistant window centres (linspace endpoints dropped)
            # and collect temporal_window frames around each, stride `step`.
            num_sample_frame = self.temporal_window
            half_sample_frame = int((self.temporal_window / 2))
            segment_images = []
            segment_flow = []
            step = 2
            # Shrink the valid centre range so windows stay inside the segment.
            segment_start = (int(s[1]) + (step * half_sample_frame))
            segment_end = ((int(s[2]) + 1) - (step * half_sample_frame))
            if (segment_start >= segment_end):
                # Segment shorter than a window: fall back to the raw bounds.
                segment_start = int(s[1])
                segment_end = int(s[2])
            if (segment_start <= ((half_sample_frame * step) + 1)):
                # Keep frame numbers positive for very early segments.
                segment_start = ((half_sample_frame * step) + 2)
            for center_frame in np.linspace(segment_start, segment_end, 7, dtype=np.int32)[1:(- 1)]:
                seg_f = []
                seg_i = []
                for no in range((center_frame - (step * half_sample_frame)), (center_frame + (step * half_sample_frame)), step):
                    # Flow is stored at half the rgb frame rate.
                    seg_f.append(flow_filename(int((no / 2))))
                    seg_i.append(rgb_filename(no))
                segment_flow.append(seg_f)
                segment_images.append(seg_i)
            return (segment_images, segment_flow)
        return c3d_sampling()
    def sample_segment(self, s, synchronise=False):
        """Sample rgb and flow frame windows from a video segment `s`.

        Samples temporal windows at random positions within the segment.
        s = ["filename", start_frame, end_frame]
        """
        def _path_to_dataset(flow):
            # Returns (root path, frame-name prefix, zero-pad width).
            if flow:
                left = self.flow_data_path
            else:
                left = self.rgb_data_path
            right = '/frame_'
            numframe = 10
            return (left, right, numframe)
        def flow_filename(frameno, num_stack=1):
            # Interleaved u/v flow frame paths for a stack centred on frameno.
            (left, right, fill_frame) = _path_to_dataset(True)
            left_frame = (frameno - int(((num_stack - 1) / 2)))
            right_frame = (frameno + int((num_stack / 2)))
            filename = []
            for no in range(left_frame, (right_frame + 1)):
                filename.append((((((left + str(s[0])) + '/u') + right) + str(no).zfill(fill_frame)) + '.jpg'))
                filename.append((((((left + str(s[0])) + '/v') + right) + str(no).zfill(fill_frame)) + '.jpg'))
            return filename
        def rgb_filename(frameno):
            (left, right, fill_frame) = _path_to_dataset(False)
            filename = ((((left + str(s[0])) + right) + str(frameno).zfill(fill_frame)) + '.jpg')
            return filename
        def c3d_sampling():
            # Pick one random window centre (shared between rgb and flow when
            # synchronise=True, independent otherwise) and collect
            # temporal_window frames around it with stride `step`.
            num_sample_frame = self.temporal_window
            half_sample_frame = int((self.temporal_window / 2))
            segment_images = []
            segment_flow = []
            step = 2
            # Shrink the valid centre range so windows stay inside the segment.
            segment_start = (int(s[1]) + (step * half_sample_frame))
            segment_end = ((int(s[2]) + 1) - (step * half_sample_frame))
            if (segment_start >= segment_end):
                # Segment shorter than a window: fall back to the raw bounds.
                segment_start = int(s[1])
                segment_end = int(s[2])
            if (segment_start <= ((half_sample_frame * step) + 1)):
                # Keep frame numbers positive for very early segments.
                segment_start = ((half_sample_frame * step) + 2)
            if synchronise:
                center_frame_rgb = center_frame_flow = randint(segment_start, segment_end)
            else:
                center_frame_rgb = randint(segment_start, segment_end)
                center_frame_flow = randint(segment_start, segment_end)
            for no in range((center_frame_rgb - (step * half_sample_frame)), (center_frame_rgb + (step * half_sample_frame)), step):
                segment_images.append(rgb_filename(no))
            for no in range((center_frame_flow - (step * half_sample_frame)), (center_frame_flow + (step * half_sample_frame)), step):
                # Flow is stored at half the rgb frame rate.
                segment_flow.append(flow_filename(int((no / 2))))
            return (segment_images, segment_flow)
        return c3d_sampling()
|
def i3d_model(input_images, is_training, num_labels, dropout, flip_classifier_gradient=False, flip_weight=1.0, aux_classifier=False, feat_level='features'):
    """Build an I3D network over `input_images`.

    Returns (logits, aux_classifier_logits, features): the main classifier
    logits (the logit layer is sized 2*num_labels), the auxiliary classifier
    logits when aux_classifier is set (else None), and the endpoint tensor
    selected by `feat_level`.
    """
    model = i3d.InceptionI3d((num_labels + num_labels), spatial_squeeze=True, final_endpoint='Logits', aux_classifier=aux_classifier)
    logits, endpoints = model(input_images, is_training=is_training, dropout_keep_prob=dropout, flip_classifier_gradient=flip_classifier_gradient, flip_weight=flip_weight)
    aux_classifier_logits = endpoints['aux_classifier'] if aux_classifier else None
    return (logits, aux_classifier_logits, endpoints[feat_level])
|
def build_i3d(reuse_variables, input_images, is_training, num_labels, flow, temporal_window, dropout, flip_classifier_gradient, flip_weight=1.0, aux_classifier=False, feat_level='features'):
    """Build i3d_model inside the current variable scope, optionally reusing
    existing variables (for weight sharing across towers/streams)."""
    current_scope = tf.get_variable_scope()
    with tf.variable_scope(current_scope, reuse=reuse_variables):
        return i3d_model(input_images, is_training, num_labels, dropout,
                         flip_classifier_gradient=flip_classifier_gradient,
                         flip_weight=flip_weight,
                         aux_classifier=aux_classifier,
                         feat_level=feat_level)
|
def weight_variable(shape):
    """Create a trainable weight Variable of the given shape, initialised
    from a truncated normal with stddev 0.1."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
|
def bias_variable(shape):
    """Create a trainable bias Variable of the given shape, initialised to
    the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
|
def domain_classifier(feat):
    """Two-layer MLP (100 ReLU units -> 2 logits) over the flattened
    features `feat`, built under the 'Domain_Classifier' variable scope;
    returns the 2-way domain logits."""
    flat_dim = np.prod(feat.get_shape().as_list()[1:])
    flat = tf.reshape(feat, [-1, flat_dim])
    with tf.variable_scope('Domain_Classifier'):
        hidden = tf.layers.dense(flat, 100, kernel_initializer=tf.initializers.truncated_normal(stddev=0.1), name='first')
        hidden = tf.nn.relu(hidden)
        logits = tf.layers.dense(hidden, 2, kernel_initializer=tf.initializers.truncated_normal(stddev=0.1), name='second')
    return logits
|
def predict_synch(feat):
    """Two-layer MLP (100 ReLU units -> 2 logits) over the flattened
    features `feat`; returns (logits, post-ReLU hidden activations)."""
    flat_dim = np.prod(feat.get_shape().as_list()[1:])
    flat = tf.reshape(feat, [-1, flat_dim])
    hidden = tf.layers.dense(flat, 100, kernel_initializer=tf.initializers.truncated_normal(stddev=0.1), name='first')
    hidden = tf.nn.relu(hidden)
    logits = tf.layers.dense(hidden, 2, kernel_initializer=tf.initializers.truncated_normal(stddev=0.1), name='second')
    return (logits, hidden)
|
class FlipGradientBuilder(object):
    # Gradient-reversal layer (domain-adversarial training): the forward
    # pass is the identity, the backward pass multiplies the gradient by -l.
    # Each call registers a uniquely named gradient function so the builder
    # can be applied several times in one graph without name clashes.
    def __init__(self):
        self.num_calls = 0
    def __call__(self, x, l=1.0):
        grad_name = ('FlipGradient%d' % self.num_calls)
        @ops.RegisterGradient(grad_name)
        def _flip_gradients(op, grad):
            # Reverse and scale the incoming gradient.
            return [(tf.negative(grad) * l)]
        g = tf.get_default_graph()
        # Route tf.identity's gradient through the flipped version just
        # registered above.
        with g.gradient_override_map({'Identity': grad_name}):
            y = tf.identity(x)
        self.num_calls += 1
        return y
|
class Unit3D(snt.AbstractModule):
    'Basic unit containing Conv3D + BatchNorm + non-linearity.'
    def __init__(self, output_channels, kernel_shape=(1, 1, 1), stride=(1, 1, 1), activation_fn=tf.nn.relu, use_batch_norm=True, use_bias=False, name='unit_3d'):
        """Initializes Unit3D module.

        Args:
            output_channels: number of output feature channels.
            kernel_shape: 3-D convolution kernel shape (t, h, w).
            stride: 3-D convolution stride (t, h, w).
            activation_fn: non-linearity applied last, or None to skip it.
            use_batch_norm: whether to apply BatchNorm after the convolution.
            use_bias: whether the convolution adds a bias term.
            name: module name.
        """
        super(Unit3D, self).__init__(name=name)
        self._output_channels = output_channels
        self._kernel_shape = kernel_shape
        self._stride = stride
        self._use_batch_norm = use_batch_norm
        self._activation_fn = activation_fn
        self._use_bias = use_bias
    def _build(self, inputs, is_training):
        """Connects the module to inputs.

        Args:
            inputs: Inputs to the Unit3D component.
            is_training: whether to use training mode for snt.BatchNorm
                (boolean).

        Returns:
            Outputs from the module (conv -> optional BN -> optional
            activation).
        """
        net = snt.Conv3D(output_channels=self._output_channels, kernel_shape=self._kernel_shape, stride=self._stride, padding=snt.SAME, use_bias=self._use_bias)(inputs)
        if self._use_batch_norm:
            # test_local_stats=False: use moving averages at eval time.
            bn = snt.BatchNorm()
            net = bn(net, is_training=is_training, test_local_stats=False)
        if (self._activation_fn is not None):
            net = self._activation_fn(net)
        return net
|
class InceptionI3d(snt.AbstractModule):
    """Inception-v1 I3D architecture.

    The model is introduced in:

        Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset
        Joao Carreira, Andrew Zisserman
        https://arxiv.org/pdf/1705.07750v1.pdf.

    See also the Inception architecture, introduced in:

        Going deeper with convolutions
        Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
        Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
        http://arxiv.org/pdf/1409.4842v1.pdf.
    """
    # Ordered endpoint names; `final_endpoint` must be one of these.
    VALID_ENDPOINTS = ('Conv3d_1a_7x7', 'MaxPool3d_2a_3x3', 'Conv3d_2b_1x1', 'Conv3d_2c_3x3', 'MaxPool3d_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool3d_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool3d_5a_2x2', 'Mixed_5b', 'Mixed_5c', 'Logits', 'Predictions')
    def __init__(self, num_classes=400, spatial_squeeze=True, final_endpoint='Logits', name='inception_i3d', aux_classifier=False):
        """Initializes I3D model instance.

        Args:
            num_classes: The number of outputs in the logit layer (default
                400, which matches the Kinetics dataset).
            spatial_squeeze: Whether to squeeze the spatial dimensions for
                the logits before returning (default True).
            final_endpoint: The model contains many possible endpoints.
                `final_endpoint` specifies the last endpoint for the model to
                be built up to. In addition to the output at
                `final_endpoint`, all the outputs at endpoints up to
                `final_endpoint` will also be returned, in a dictionary.
                `final_endpoint` must be one of InceptionI3d.VALID_ENDPOINTS
                (default 'Logits').
            name: A string (optional). The name of this module.
            aux_classifier: whether to add a second, auxiliary logits head.

        Raises:
            ValueError: if `final_endpoint` is not recognized.
        """
        if (final_endpoint not in self.VALID_ENDPOINTS):
            raise ValueError(('Unknown final endpoint %s' % final_endpoint))
        super(InceptionI3d, self).__init__(name=name)
        self._num_classes = num_classes
        self._spatial_squeeze = spatial_squeeze
        self._final_endpoint = final_endpoint
        self._aux_classifier = aux_classifier
    def _build(self, inputs, is_training, dropout_keep_prob=1.0, flip_classifier_gradient=False, flip_weight=1.0):
        """Connects the model to inputs.

        Args:
            inputs: Inputs to the model, which should have dimensions
                `batch_size` x `num_frames` x 224 x 224 x `num_channels`.
            is_training: whether to use training mode for snt.BatchNorm
                (boolean).
            dropout_keep_prob: Probability for the tf.nn.dropout layer
                (float in [0, 1)).
            flip_classifier_gradient: if True, pass the features through a
                gradient-reversal layer before the classifier.
            flip_weight: scale applied to the reversed gradient.

        Returns:
            A tuple consisting of:
                1. Network output at location `self._final_endpoint`.
                2. Dictionary containing all endpoints up to
                   `self._final_endpoint`, indexed by endpoint name.

        Raises:
            ValueError: if `self._final_endpoint` is not recognized.
        """
        if (self._final_endpoint not in self.VALID_ENDPOINTS):
            raise ValueError(('Unknown final endpoint %s' % self._final_endpoint))
        net = inputs
        end_points = {}
        # Stem: conv/pool layers before the Inception ("Mixed") blocks.
        end_point = 'Conv3d_1a_7x7'
        net = Unit3D(output_channels=64, kernel_shape=[7, 7, 7], stride=[2, 2, 2], name=end_point)(net, is_training=is_training)
        end_points[end_point] = net
        if (self._final_endpoint == end_point):
            return (net, end_points)
        end_point = 'MaxPool3d_2a_3x3'
        net = tf.nn.max_pool3d(net, ksize=[1, 1, 3, 3, 1], strides=[1, 1, 2, 2, 1], padding=snt.SAME, name=end_point)
        end_points[end_point] = net
        if (self._final_endpoint == end_point):
            return (net, end_points)
        end_point = 'Conv3d_2b_1x1'
        net = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name=end_point)(net, is_training=is_training)
        end_points[end_point] = net
        if (self._final_endpoint == end_point):
            return (net, end_points)
        end_point = 'Conv3d_2c_3x3'
        net = Unit3D(output_channels=192, kernel_shape=[3, 3, 3], name=end_point)(net, is_training=is_training)
        end_points[end_point] = net
        if (self._final_endpoint == end_point):
            return (net, end_points)
        end_point = 'MaxPool3d_3a_3x3'
        net = tf.nn.max_pool3d(net, ksize=[1, 1, 3, 3, 1], strides=[1, 1, 2, 2, 1], padding=snt.SAME, name=end_point)
        end_points[end_point] = net
        if (self._final_endpoint == end_point):
            return (net, end_points)
        # Inception blocks: four parallel branches (1x1 / 1x1->3x3 /
        # 1x1->3x3 / pool->1x1) concatenated on the channel axis (axis 4).
        end_point = 'Mixed_3b'
        with tf.variable_scope(end_point):
            with tf.variable_scope('Branch_0'):
                branch_0 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
            with tf.variable_scope('Branch_1'):
                branch_1 = Unit3D(output_channels=96, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_1 = Unit3D(output_channels=128, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
            with tf.variable_scope('Branch_2'):
                branch_2 = Unit3D(output_channels=16, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_2 = Unit3D(output_channels=32, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
            with tf.variable_scope('Branch_3'):
                branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
                branch_3 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
            net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
        end_points[end_point] = net
        if (self._final_endpoint == end_point):
            return (net, end_points)
        end_point = 'Mixed_3c'
        with tf.variable_scope(end_point):
            with tf.variable_scope('Branch_0'):
                branch_0 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
            with tf.variable_scope('Branch_1'):
                branch_1 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_1 = Unit3D(output_channels=192, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
            with tf.variable_scope('Branch_2'):
                branch_2 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_2 = Unit3D(output_channels=96, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
            with tf.variable_scope('Branch_3'):
                branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
                branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
            net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
        end_points[end_point] = net
        if (self._final_endpoint == end_point):
            return (net, end_points)
        end_point = 'MaxPool3d_4a_3x3'
        net = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 2, 2, 2, 1], padding=snt.SAME, name=end_point)
        end_points[end_point] = net
        if (self._final_endpoint == end_point):
            return (net, end_points)
        end_point = 'Mixed_4b'
        with tf.variable_scope(end_point):
            with tf.variable_scope('Branch_0'):
                branch_0 = Unit3D(output_channels=192, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
            with tf.variable_scope('Branch_1'):
                branch_1 = Unit3D(output_channels=96, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_1 = Unit3D(output_channels=208, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
            with tf.variable_scope('Branch_2'):
                branch_2 = Unit3D(output_channels=16, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_2 = Unit3D(output_channels=48, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
            with tf.variable_scope('Branch_3'):
                branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
                branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
            net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
        end_points[end_point] = net
        if (self._final_endpoint == end_point):
            return (net, end_points)
        end_point = 'Mixed_4c'
        with tf.variable_scope(end_point):
            with tf.variable_scope('Branch_0'):
                branch_0 = Unit3D(output_channels=160, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
            with tf.variable_scope('Branch_1'):
                branch_1 = Unit3D(output_channels=112, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_1 = Unit3D(output_channels=224, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
            with tf.variable_scope('Branch_2'):
                branch_2 = Unit3D(output_channels=24, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_2 = Unit3D(output_channels=64, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
            with tf.variable_scope('Branch_3'):
                branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
                branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
            net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
        end_points[end_point] = net
        if (self._final_endpoint == end_point):
            return (net, end_points)
        end_point = 'Mixed_4d'
        with tf.variable_scope(end_point):
            with tf.variable_scope('Branch_0'):
                branch_0 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
            with tf.variable_scope('Branch_1'):
                branch_1 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_1 = Unit3D(output_channels=256, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
            with tf.variable_scope('Branch_2'):
                branch_2 = Unit3D(output_channels=24, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_2 = Unit3D(output_channels=64, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
            with tf.variable_scope('Branch_3'):
                branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
                branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
            net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
        end_points[end_point] = net
        if (self._final_endpoint == end_point):
            return (net, end_points)
        end_point = 'Mixed_4e'
        with tf.variable_scope(end_point):
            with tf.variable_scope('Branch_0'):
                branch_0 = Unit3D(output_channels=112, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
            with tf.variable_scope('Branch_1'):
                branch_1 = Unit3D(output_channels=144, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_1 = Unit3D(output_channels=288, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
            with tf.variable_scope('Branch_2'):
                branch_2 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_2 = Unit3D(output_channels=64, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
            with tf.variable_scope('Branch_3'):
                branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
                branch_3 = Unit3D(output_channels=64, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
            net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
        end_points[end_point] = net
        if (self._final_endpoint == end_point):
            return (net, end_points)
        end_point = 'Mixed_4f'
        with tf.variable_scope(end_point):
            with tf.variable_scope('Branch_0'):
                branch_0 = Unit3D(output_channels=256, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
            with tf.variable_scope('Branch_1'):
                branch_1 = Unit3D(output_channels=160, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_1 = Unit3D(output_channels=320, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
            with tf.variable_scope('Branch_2'):
                branch_2 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_2 = Unit3D(output_channels=128, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
            with tf.variable_scope('Branch_3'):
                branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
                branch_3 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
            net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
        end_points[end_point] = net
        if (self._final_endpoint == end_point):
            return (net, end_points)
        end_point = 'MaxPool3d_5a_2x2'
        net = tf.nn.max_pool3d(net, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding=snt.SAME, name=end_point)
        end_points[end_point] = net
        if (self._final_endpoint == end_point):
            return (net, end_points)
        end_point = 'Mixed_5b'
        with tf.variable_scope(end_point):
            with tf.variable_scope('Branch_0'):
                branch_0 = Unit3D(output_channels=256, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
            with tf.variable_scope('Branch_1'):
                branch_1 = Unit3D(output_channels=160, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_1 = Unit3D(output_channels=320, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
            with tf.variable_scope('Branch_2'):
                branch_2 = Unit3D(output_channels=32, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                # NOTE(review): 'Conv3d_0a_3x3' (not '0b') — inconsistent with
                # the other blocks, but kept: checkpoint variable names
                # depend on it.
                branch_2 = Unit3D(output_channels=128, kernel_shape=[3, 3, 3], name='Conv3d_0a_3x3')(branch_2, is_training=is_training)
            with tf.variable_scope('Branch_3'):
                branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
                branch_3 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
            net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
        end_points[end_point] = net
        if (self._final_endpoint == end_point):
            return (net, end_points)
        end_point = 'Mixed_5c'
        with tf.variable_scope(end_point):
            with tf.variable_scope('Branch_0'):
                branch_0 = Unit3D(output_channels=384, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
            with tf.variable_scope('Branch_1'):
                branch_1 = Unit3D(output_channels=192, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_1 = Unit3D(output_channels=384, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_1, is_training=is_training)
            with tf.variable_scope('Branch_2'):
                branch_2 = Unit3D(output_channels=48, kernel_shape=[1, 1, 1], name='Conv3d_0a_1x1')(net, is_training=is_training)
                branch_2 = Unit3D(output_channels=128, kernel_shape=[3, 3, 3], name='Conv3d_0b_3x3')(branch_2, is_training=is_training)
            with tf.variable_scope('Branch_3'):
                branch_3 = tf.nn.max_pool3d(net, ksize=[1, 3, 3, 3, 1], strides=[1, 1, 1, 1, 1], padding=snt.SAME, name='MaxPool3d_0a_3x3')
                branch_3 = Unit3D(output_channels=128, kernel_shape=[1, 1, 1], name='Conv3d_0b_1x1')(branch_3, is_training=is_training)
            net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)
        end_points[end_point] = net
        if (self._final_endpoint == end_point):
            return (net, end_points)
        # Classifier head: average pool -> dropout -> (optional gradient
        # reversal) -> (optional aux head) -> 1x1x1 conv logits, averaged
        # over the temporal axis.
        end_point = 'Logits'
        with tf.variable_scope(end_point):
            net = tf.nn.avg_pool3d(net, ksize=[1, 2, 7, 7, 1], strides=[1, 1, 1, 1, 1], padding=snt.VALID)
            end_points['features'] = net
            net = tf.nn.dropout(net, dropout_keep_prob)
            if flip_classifier_gradient:
                # `flip_gradient` is defined elsewhere in this file's module
                # (gradient-reversal layer scaled by flip_weight).
                net = flip_gradient(net, flip_weight)
            if self._aux_classifier:
                with tf.variable_scope('AuxLogits'):
                    logits_aux = Unit3D(output_channels=self._num_classes, kernel_shape=[1, 1, 1], activation_fn=None, use_batch_norm=False, use_bias=True, name='Conv3d_0c_1x1')(net, is_training=is_training)
                    if self._spatial_squeeze:
                        logits_aux = tf.squeeze(logits_aux, [2, 3], name='SpatialSqueezeAux')
                    end_points['aux_classifier'] = tf.reduce_mean(logits_aux, axis=1)
            logits = Unit3D(output_channels=self._num_classes, kernel_shape=[1, 1, 1], activation_fn=None, use_batch_norm=False, use_bias=True, name='Conv3d_0c_1x1')(net, is_training=is_training)
            if self._spatial_squeeze:
                logits = tf.squeeze(logits, [2, 3], name='SpatialSqueeze')
        averaged_logits = tf.reduce_mean(logits, axis=1)
        end_points[end_point] = averaged_logits
        if (self._final_endpoint == end_point):
            return (averaged_logits, end_points)
        end_point = 'Predictions'
        predictions = tf.nn.softmax(averaged_logits)
        end_points[end_point] = predictions
        return (predictions, end_points)
|
def _mix_rbf_kernel(X, Y, gammas, wts=None):
    """Build mixture-of-RBF kernel matrices for MMD estimation.

    Each entry of *gammas* is treated as a bandwidth (the exponent uses
    1/gamma); *wts* are the mixture weights (default: all ones).
    Returns (K_XX, K_XY, K_YY, sum-of-weights).
    """
    if wts is None:
        wts = [1] * len(gammas)
    XX = tf.matmul(X, X, transpose_b=True)
    XY = tf.matmul(X, Y, transpose_b=True)
    YY = tf.matmul(Y, Y, transpose_b=True)
    X_sqnorms = tf.diag_part(XX)
    Y_sqnorms = tf.diag_part(YY)

    def row(v):
        return tf.expand_dims(v, 0)

    def col(v):
        return tf.expand_dims(v, 1)

    K_XX = K_XY = K_YY = 0
    for bandwidth, weight in zip(gammas, wts):
        scale = 1 / bandwidth
        # ||a - b||^2 = -2 a.b + ||a||^2 + ||b||^2
        K_XX += weight * tf.exp(-scale * (-2 * XX + col(X_sqnorms) + row(X_sqnorms)))
        K_XY += weight * tf.exp(-scale * (-2 * XY + col(X_sqnorms) + row(Y_sqnorms)))
        K_YY += weight * tf.exp(-scale * (-2 * YY + col(Y_sqnorms) + row(Y_sqnorms)))
    return (K_XX, K_XY, K_YY, tf.reduce_sum(wts))
|
def rbf_mmd2(X, Y, gammas=1, biased=True):
    """Single-bandwidth RBF MMD^2; thin wrapper over mix_rbf_mmd2."""
    return mix_rbf_mmd2(X, Y, biased=biased, gammas=[gammas])
|
def mix_rbf_mmd2(X, Y, gammas=(1,), wts=None, biased=True):
    """MMD^2 between samples X and Y under a mixture of RBF kernels."""
    kxx, kxy, kyy, wt_sum = _mix_rbf_kernel(X, Y, gammas, wts)
    # The kernel diagonal is constant (sum of mixture weights), which lets
    # _mmd2 skip computing traces in the unbiased case.
    return _mmd2(kxx, kxy, kyy, const_diagonal=wt_sum, biased=biased)
|
def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
    """Estimate MMD^2 from precomputed kernel matrices.

    const_diagonal: known constant value of the kernel diagonal; when given
    (not False) the unbiased estimator avoids computing traces.
    Returns (mmd2, n) where n is the Y sample count as a float tensor.
    """
    m = tf.cast(tf.shape(K_XX)[0], tf.float32)
    n = tf.cast(tf.shape(K_YY)[0], tf.float32)
    cross = 2 * tf.reduce_sum(K_XY) / (m * n)
    if biased:
        mmd2 = tf.reduce_sum(K_XX) / (m * m) + tf.reduce_sum(K_YY) / (n * n) - cross
    else:
        if const_diagonal is not False:
            trace_X = m * const_diagonal
            trace_Y = n * const_diagonal
        else:
            trace_X = tf.trace(K_XX)
            trace_Y = tf.trace(K_YY)
        # Unbiased estimator: drop the diagonal terms from the within-set sums.
        mmd2 = ((tf.reduce_sum(K_XX) - trace_X) / (m * (m - 1))
                + (tf.reduce_sum(K_YY) - trace_Y) / (n * (n - 1))
                - cross)
    return (mmd2, n)
|
def main():
    """Compute overall and per-class MMD between source and target features.

    Command line: feature .npy files for source and target (argv[1], argv[2])
    and label .npy files for source and target (argv[3], argv[4]).
    """
    seen = tf.placeholder(tf.float32, shape=[None, 1024])
    unseen = tf.placeholder(tf.float32, shape=[None, 1024])
    # Bug fix: the single-bandwidth rbf_mmd2 result was previously computed
    # and then immediately overwritten; only the mixture estimate is used.
    (mmd, n) = mix_rbf_mmd2(seen, unseen, gammas=[10.0, 1.0, 0.1, 0.01, 0.001])
    source_numpy = np.load(sys.argv[1])
    target_numpy = np.load(sys.argv[2])
    source_numpy_labels = np.load(sys.argv[3])
    target_numpy_labels = np.load(sys.argv[4])
    with tf.Session() as sess:
        print('Total', sess.run(mmd, feed_dict={seen: source_numpy, unseen: target_numpy}))
        # Per-class MMD, restricted to samples of each class present in the source.
        for i in np.unique(source_numpy_labels):
            print(i, sess.run(mmd, feed_dict={seen: source_numpy[(source_numpy_labels == i)],
                                              unseen: target_numpy[(target_numpy_labels == i)]}))
|
def _get_variables_to_restore_load(to_ignore, flow):
    """Collect restorable variables, excluding the given scopes plus gradient
    accumulators and the scopes of the modality that is not being loaded.

    flow: True loads the Flow stream (so RGB/Joint are excluded);
          False loads the RGB stream (so Flow/Joint are excluded).

    Bug fix: the original aliased and appended to the caller's *to_ignore*
    list; a copy is used here so the argument is never mutated.  The combined
    list is used both for scope-based exclusion and for the substring filter,
    matching the original (aliased) behaviour.
    """
    ignore = list(to_ignore)
    ignore += ['domain_accumulators', 'accum_accumulators', 'accumulators']
    if flow:
        ignore += ['RGB', 'Joint']
    else:
        ignore += ['Flow', 'Joint']
    variables = slim.get_variables_to_restore(exclude=ignore)
    # Scope exclusion above is prefix-based; additionally drop any variable
    # whose name merely *contains* one of the keywords.
    return [x for x in variables if not any((word in x.name) for word in ignore)]
|
def read_joint(mode=''):
    """Build a tf.train.Saver that restores Joint-modality variables only.

    mode: 'restore' skips optimiser/bookkeeping variables;
          'continue' restores everything except the single-modality streams.
    """
    if (mode == 'restore'):
        to_ignore = ['Adam', 'adam', 'Momentum', 'beta1_power', 'beta2_power',
                     'global_step', 'Domain_Classifier']
    elif (mode == 'continue'):
        to_ignore = []
    else:
        raise Exception('Unknown mode for read_joint')
    to_ignore += ['domain_accumulators', 'accum_accumulators', 'accumulators',
                  'Flow', 'RGB']
    candidates = slim.get_variables_to_restore(exclude=to_ignore)
    keep = [v for v in candidates
            if not any((word in v.name) for word in to_ignore)]
    # Map checkpoint names (drop the ':0' tensor suffix) to variables.
    rgb_variable_map = {v.name.replace(':0', ''): v for v in keep}
    return tf.train.Saver(var_list=rgb_variable_map, reshape=True, max_to_keep=20)
|
def read_i3d_checkpoint(mode='', flow=False, aux_logits=False):
    """Build a tf.train.Saver for a single-modality I3D checkpoint.

    mode: 'pretrain' additionally drops the classification logits,
          'restore' drops optimiser/bookkeeping variables,
          'continue' keeps everything.
    flow: which modality stream to load (see _get_variables_to_restore_load).
    aux_logits: accepted for interface compatibility; not used here.
    """
    if (mode == 'pretrain'):
        to_ignore = ['inception_i3d/Logits', 'Adam', 'adam', 'Momentum',
                     'beta1_power', 'beta2_power', 'global_step',
                     'Domain_Classifier', 'arrow_test']
    elif (mode == 'restore'):
        to_ignore = ['Adam', 'adam', 'Momentum', 'beta1_power', 'beta2_power',
                     'global_step', 'Domain_Classifier']
    elif (mode == 'continue'):
        to_ignore = []
    else:
        raise Exception('Unkown mode for read_i3d_checkpoint')
    keep = _get_variables_to_restore_load(to_ignore, flow)
    # Map checkpoint names (drop the ':0' tensor suffix) to variables.
    rgb_variable_map = {v.name.replace(':0', ''): v for v in keep}
    return tf.train.Saver(var_list=rgb_variable_map, reshape=True, max_to_keep=20)
|
def restore_base(sess, saver, checkpoint_path, model_to_restore, restore_mode='model'):
    """Restore a single-modality model and return the step to resume from.

    saver: dict of tf.train.Saver keyed by 'continue'/'model'/'pretrain'.
    """
    if (restore_mode == 'continue'):
        ckpt = tf.train.get_checkpoint_state(checkpoint_path)
        if not (ckpt and ckpt.model_checkpoint_path):
            raise Exception('Cannot find a model to continue training')
        saver['continue'].restore(sess, ckpt.model_checkpoint_path)
        # Step number is the numeric suffix of the checkpoint file name.
        return ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
    if (restore_mode == 'model'):
        saver['model'].restore(sess, model_to_restore)
        return 0
    if restore_mode in ('pretrain', 'pretrain_from_synch'):
        saver['pretrain'].restore(sess, model_to_restore)
        return 0
    raise Exception('A valid restore Mode must be set --restore_mode==[continue,model,pretrain]')
|
def restore_joint(sess, saver, checkpoint_path, model_to_restore, restore_mode='model'):
    """Restore a joint-modality model and return the step to resume from.

    Unlike restore_base, 'pretrain' restores nothing (the joint head is
    trained from scratch) and 'pretrain_from_synch' loads the full model.
    """
    if (restore_mode == 'continue'):
        ckpt = tf.train.get_checkpoint_state(checkpoint_path)
        if not (ckpt and ckpt.model_checkpoint_path):
            raise Exception('Cannot find a model to continue training')
        saver['continue'].restore(sess, ckpt.model_checkpoint_path)
        # Step number is the numeric suffix of the checkpoint file name.
        return ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
    if restore_mode in ('model', 'pretrain_from_synch'):
        saver['model'].restore(sess, model_to_restore)
        return 0
    if (restore_mode == 'pretrain'):
        return 0
    raise Exception('A valid restore Mode must be set --restore_mode==[continue,model,pretrain]')
|
def init_savers_base(flow=False):
    """Create the (pretrain, model, continue) savers for one I3D stream."""
    pretrain_loader, model_loader, continue_loader = (
        read_i3d_checkpoint(mode=m, flow=flow)
        for m in ('pretrain', 'restore', 'continue'))
    return (pretrain_loader, model_loader, continue_loader)
|
class TrainTestScript():
    """Creates a framework to train/test an MM-SADA model.

    (FLAGS) TensorFlow flags
    (results_dir) Directory of tensorboard files and other testing logs
    (train_dir) Directory of saved model

    Methods:
        train - train MM-SADA
        test - evaluate an MM-SADA saved model
    """

    def __init__(self, FLAGS, results_dir, train_dir):
        self.FLAGS = FLAGS
        self.train_dir = train_dir
        self.datasets = FLAGS.datasets
        self.unseen_dataset = FLAGS.unseen_dataset
        self.num_gpus = FLAGS.num_gpus
        self.num_labels = FLAGS.num_labels
        # Double negation coerces domain_mode to a bool: use target data only
        # when a domain-adaptation mode is set.
        self.target_data = (not (not FLAGS.domain_mode))
        if self.target_data:
            if ((FLAGS.domain_mode == 'None') or (FLAGS.domain_mode == 'Pretrain')):
                self.target_data = False
                print('No adaptation')
        if FLAGS.domain_mode:
            self.domain_mode = FLAGS.domain_mode
        else:
            self.domain_mode = 'None'
        self.lr = FLAGS.lr
        if (not FLAGS.modality):
            raise Exception('Need to Specify modality')
        if ((FLAGS.modality != 'rgb') and (FLAGS.modality != 'flow') and (FLAGS.modality != 'joint')):
            raise Exception('Invalid Modality')
        # Keep a per-modality results directory so runs do not clobber each other.
        self.results_dir = ((results_dir + '_') + FLAGS.modality)
        self.modality = FLAGS.modality
        self.model = Model(num_gpus=self.num_gpus, num_labels=self.num_labels, modality=self.modality, temporal_window=self.FLAGS.temporal_window, batch_norm_update=self.FLAGS.batch_norm_update, domain_mode=self.domain_mode, steps_per_update=FLAGS.steps_before_update, aux_classifier=self.FLAGS.aux_classifier, synchronised=self.FLAGS.synchronised, predict_synch=self.FLAGS.pred_synch, selfsupervised_lambda=self.FLAGS.self_lambda)

    def training_batch_gen(self):
        """Batch generators for the source (seen) and target (unseen) domains."""
        batch_gen = BatchGenerator(self.num_labels, self.datasets, temporal_window=self.FLAGS.temporal_window, rgb_data_path=self.FLAGS.rgb_data_path, flow_data_path=self.FLAGS.flow_data_path, synchronised=self.FLAGS.synchronised, random_sync=self.FLAGS.pred_synch)
        batch_gen_unseen = BatchGenerator(self.num_labels, self.unseen_dataset, temporal_window=self.FLAGS.temporal_window, rgb_data_path=self.FLAGS.rgb_data_path, flow_data_path=self.FLAGS.flow_data_path, synchronised=self.FLAGS.synchronised, random_sync=self.FLAGS.pred_synch)
        return (batch_gen, batch_gen_unseen)

    def testing_batch_gen(self):
        """Batch generators for testing; currently identical to training_batch_gen."""
        batch_gen = BatchGenerator(self.num_labels, self.datasets, temporal_window=self.FLAGS.temporal_window, rgb_data_path=self.FLAGS.rgb_data_path, flow_data_path=self.FLAGS.flow_data_path, synchronised=self.FLAGS.synchronised, random_sync=self.FLAGS.pred_synch)
        batch_gen_unseen = BatchGenerator(self.num_labels, self.unseen_dataset, temporal_window=self.FLAGS.temporal_window, rgb_data_path=self.FLAGS.rgb_data_path, flow_data_path=self.FLAGS.flow_data_path, synchronised=self.FLAGS.synchronised, random_sync=self.FLAGS.pred_synch)
        return (batch_gen, batch_gen_unseen)

    def train(self):
        """Train MM-SADA: run train steps, validating and saving every 50 steps."""
        g1 = tf.Graph()
        with g1.as_default(), tf.device('/cpu:0'):
            self.model.init_savers()
            train_writer = tf.summary.FileWriter((self.results_dir + '/train'))
            seen_writer = tf.summary.FileWriter((self.results_dir + '/seen'))
            unseen_writer = tf.summary.FileWriter((self.results_dir + '/unseen'))
            (batch_gen, batch_gen_unseen) = self.training_batch_gen()
            with tf.Session(graph=g1, config=tf.ConfigProto(allow_soft_placement=True)) as sess:
                print('init variables')
                sess.run(tf.global_variables_initializer())
                start_step = self.model.restore_model_train(sess, self.train_dir, self.FLAGS.restore_model_flow, self.FLAGS.restore_model_rgb, self.FLAGS.restore_model_joint, self.FLAGS.restore_mode)
                for step in range(int(start_step), (self.FLAGS.max_steps + 1)):
                    # GRL schedule: ramps from 0 to lambda_in over training
                    # (DANN-style 2/(1+exp(-10p)) - 1 ramp).
                    p = (float(step) / self.FLAGS.max_steps)
                    lin = (((2 / (1.0 + np.exp(((- 10.0) * p)))) - 1) * self.FLAGS.lambda_in)
                    start_time = time.time()
                    (training_loss, training_accuracy, summary) = train_step(sess, self.model, self.FLAGS, batch_gen, batch_gen_unseen, lin, self.target_data)
                    for s in summary:
                        train_writer.add_summary(s, step)
                    duration = (time.time() - start_time)
                    if ((step % 50) == 0):
                        num_examples_per_step = self.FLAGS.batch_size
                        examples_per_sec = (num_examples_per_step / duration)
                        sec_per_batch = duration
                        format_str = '(Train) %s: step %d, loss %.3f, acc %.3f (%.1f examples/sec; %.3f sec/batch)'
                        print((format_str % (datetime.now(), step, training_loss, training_accuracy, examples_per_sec, sec_per_batch)))
                        # Validate on the source (seen) domain.
                        (valaccuracy, domainaccuracy, average_class) = evaluate(sess, self.model, self.FLAGS, batch_gen, lin)
                        # Report how often the domain classifier is *wrong* on source.
                        domainaccuracy = (1.0 - domainaccuracy)
                        val_summary = tf.Summary()
                        val_summary.value.add(tag='acc/Accuracy', simple_value=valaccuracy)
                        domain_summary = tf.Summary()
                        domain_summary.value.add(tag='acc/Domain', simple_value=domainaccuracy)
                        seen_writer.add_summary(val_summary, step)
                        seen_writer.add_summary(domain_summary, step)
                        format_str = '(Val) %s: domain:%s step:%d accuracy:%f avg_class %f domain_accuracy %f'
                        print((format_str % (datetime.now(), 'Source', step, valaccuracy, average_class, domainaccuracy)))
                        if self.FLAGS.pred_synch:
                            # Self-supervised synchronisation-prediction accuracy (source).
                            synch_accuracy = evaluate_self_supervised(sess, self.model, self.FLAGS, batch_gen, lin, mode='synch')
                            val_summary = tf.Summary()
                            val_summary.value.add(tag='acc/Synch_Accuracy', simple_value=synch_accuracy)
                            seen_writer.add_summary(val_summary, step)
                            format_str = '(Val) %s: domain:%s step:%d synch_accuracy:%f'
                            print((format_str % (datetime.now(), 'Source', step, synch_accuracy)))
                        # Validate on the target (unseen) domain.
                        (valaccuracy, domainaccuracy, average_class) = evaluate(sess, self.model, self.FLAGS, batch_gen_unseen, lin)
                        val_summary = tf.Summary()
                        val_summary.value.add(tag='acc/Accuracy', simple_value=valaccuracy)
                        domain_summary = tf.Summary()
                        domain_summary.value.add(tag='acc/Domain', simple_value=domainaccuracy)
                        unseen_writer.add_summary(val_summary, step)
                        unseen_writer.add_summary(domain_summary, step)
                        format_str = '(Val) %s: domain:%s step:%d accuracy:%f avg_class %f domain_accuracy %f'
                        print((format_str % (datetime.now(), 'Target', step, valaccuracy, average_class, domainaccuracy)))
                        if self.FLAGS.pred_synch:
                            # Self-supervised synchronisation-prediction accuracy (target).
                            synch_accuracy = evaluate_self_supervised(sess, self.model, self.FLAGS, batch_gen_unseen, lin, mode='synch')
                            val_summary = tf.Summary()
                            val_summary.value.add(tag='acc/Synch_Accuracy', simple_value=synch_accuracy)
                            unseen_writer.add_summary(val_summary, step)
                            format_str = '(Val) %s: domain:%s step:%d synch_accuracy:%f'
                            print((format_str % (datetime.now(), 'Target', step, synch_accuracy)))
                    if (((step % 50) == 0) or (step == self.FLAGS.max_steps)):
                        self.model.save_model(sess, self.train_dir, step)

    def test(self):
        """Evaluate MM-SADA model: report source/target accuracy, optionally
        dump features, and append results to <results_dir>/logs/results.list."""

        def _save_results(FLAGS, feature_list, label_list, predict_list, img_path_list, ident, test=True):
            """Save statistics and extracted features to the feature_path folder."""
            if test:
                stringtest = 'test'
            else:
                stringtest = 'train'
            source_domain = os.path.basename(FLAGS.datasets)
            np.save(((((((((FLAGS.feature_path + '/') + stringtest) + '_feat_') + source_domain) + '_') + str(FLAGS.modelnum)) + '_') + str(ident)), feature_list)
            np.save(((((((((FLAGS.feature_path + '/') + stringtest) + '_label') + source_domain) + '_') + str(FLAGS.modelnum)) + '_') + str(ident)), label_list)
            np.save(((((((((FLAGS.feature_path + '/') + stringtest) + '_pred') + source_domain) + '_') + str(FLAGS.modelnum)) + '_') + str(ident)), predict_list)
            np.save(((((((((FLAGS.feature_path + '/') + stringtest) + '_filenames') + source_domain) + '_') + str(FLAGS.modelnum)) + '_') + str(ident)), img_path_list)

        with tf.Graph().as_default(), tf.device('/cpu:0'):
            (batch_gen, batch_gen_unseen) = self.testing_batch_gen()
            self.model.init_savers()
            with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
                self.model.restore_model_test(sess, self.train_dir, self.FLAGS.modelnum)
                # No GRL ramp at test time.
                lin = 0.0
                step = 0
                seen_filenames = ''
                seen_accuracy = ''
                # Source-domain evaluation.
                (valaccuracy, domainaccuracy, valperclass, valfeat, valfile, vallabel, valpredict) = evaluate(sess, self.model, self.FLAGS, batch_gen, lin, test=(not self.FLAGS.eval_train), out_features=self.FLAGS.features, extra_info=True)
                if self.FLAGS.features:
                    _save_results(self.FLAGS, valfeat, vallabel, valpredict, valfile, 'Source', test=(not self.FLAGS.eval_train))
                # Accumulate CSV columns for the results.list log line.
                seen_accuracy = ((seen_accuracy + str(valaccuracy)) + ',')
                seen_filenames = ((seen_filenames + 'Source') + ',')
                format_str = '(Val) %s: domain:%s step:%d accuracy:%f domain_accuracy %f'
                print((format_str % (datetime.now(), 'Source', step, valaccuracy, domainaccuracy)))
                # Target-domain evaluation.
                (valaccuracy, domainaccuracy, valperclass, valfeat, valfile, vallabel, valpredict) = evaluate(sess, self.model, self.FLAGS, batch_gen_unseen, lin, test=(not self.FLAGS.eval_train), out_features=self.FLAGS.features, extra_info=True)
                domainaccuracy = (1.0 - domainaccuracy)
                if self.FLAGS.features:
                    _save_results(self.FLAGS, valfeat, vallabel, valpredict, valfile, 'Target', test=(not self.FLAGS.eval_train))
                format_str = '(Val) %s: domain:%s step:%d accuracy:%f domain_accuracy %f'
                print((format_str % (datetime.now(), 'Target', step, valaccuracy, domainaccuracy)))
                # Append a CSV line to the running results log (header on first write).
                results_log_file = '/logs/results.list'
                if (not os.path.exists((self.results_dir + '/logs'))):
                    os.makedirs((self.results_dir + '/logs'))
                if (not os.path.isfile((self.results_dir + results_log_file))):
                    f = open((self.results_dir + results_log_file), 'w')
                    f.write(((seen_filenames + 'target,step,target_directory') + '\n'))
                    f.close()
                f = open((self.results_dir + results_log_file), 'a')
                f.write(((((((seen_accuracy + str(valaccuracy)) + ',') + str(self.FLAGS.modelnum)) + ',') + self.FLAGS.unseen_dataset) + '\n'))
                f.close()
|
def parse_args(FLAGS):
    """Validate command-line FLAGS.

    Prints one message per missing/invalid option and returns True when any
    error was found (the caller aborts), False when all checks pass.
    """
    error = False
    if (FLAGS.train is None):
        print('Specify whether to train (True) or test (False) --train')
        error = True
    if (FLAGS.results_path is None):
        print('Specify path to save logs and models --results_path')
        error = True
    if (FLAGS.datasets is None):
        print('Specify the Source domain dataset --datasets')
        error = True
    if (FLAGS.unseen_dataset is None):
        print('Specify the Target domain dataset --unseen_dataset')
        error = True
    if (FLAGS.rgb_data_path is None):
        print('Specify the path to rgb frames --rgb_data_path')
        error = True
    if (FLAGS.flow_data_path is None):
        print('Specify the path to flow frames --flow_data_path')
        error = True
    if error:
        return True
    if FLAGS.train:
        if (FLAGS.restore_mode is None):
            print("Specify the restore mode --restore_mode ('pretrain', 'model', 'continue')")
            error = True
        # NOTE(review): FLAGS.flow is read here but not defined in the visible
        # input_parser — presumably defined elsewhere; confirm.
        # Bug fix: these messages previously named a nonexistent
        # '--pretrained_model' flag.
        if ((FLAGS.restore_model_flow is None) and FLAGS.flow):
            print('Specify pretrained flow model to use --restore_model_flow')
            error = True
        if ((FLAGS.restore_model_rgb is None) and (not FLAGS.flow)):
            print('Specify pretrained rgb model to use --restore_model_rgb')
            error = True
    else:
        if (FLAGS.modelnum is None):
            print('Specify model number to restore for testing --modelnum')
            error = True
        if FLAGS.features:
            if (FLAGS.feature_path is None):
                print('Specify path to store features --feature_path')
                error = True
    if error:
        return True
    return False
|
def input_parser():
    """Declare all command-line flags and derive the run directories.

    Returns (FLAGS, train_dir, results_dir); train_dir is created if missing.
    """
    flags = tf.app.flags
    flags.DEFINE_boolean('train', None, 'Weither to train or evaluate (False)')
    flags.DEFINE_string('results_path', None, 'Where to store the log files and saved models')
    flags.DEFINE_float('lr', 0.001, 'Initial Learning Rate')
    flags.DEFINE_float('batch_norm_update', 0.9, 'Update rate of batch norm statistics')
    flags.DEFINE_integer('num_gpus', 8, 'number of gpus to run')
    flags.DEFINE_integer('max_steps', 6000, 'Number of batches to run.')
    flags.DEFINE_integer('steps_before_update', 1, 'number of steps to run before updating weights')
    flags.DEFINE_string('domain_mode', None, 'background only for dataset2')
    flags.DEFINE_float('lambda_in', 1.0, 'grl hyperparameter')
    flags.DEFINE_float('self_lambda', 5.0, 'weigthing of self supervised loss')
    flags.DEFINE_string('datasets', None, 'Comma seperated list of datasets')
    flags.DEFINE_string('unseen_dataset', None, 'Specify file path to unseen dataset folder')
    flags.DEFINE_integer('num_labels', 8, 'Total number of combined labels')
    flags.DEFINE_integer('batch_size', 128, 'Size of a batch')
    flags.DEFINE_boolean('synchronised', None, 'Weither to synchronise flow and rgb')
    flags.DEFINE_string('modality', 'joint', 'rgb, flow or joint (default: joint)')
    flags.DEFINE_integer('temporal_window', 16, 'i3d temporal window')
    flags.DEFINE_boolean('aux_classifier', None, '2 classifiers')
    flags.DEFINE_boolean('pred_synch', None, 'Predict if modalities are synchronised')
    flags.DEFINE_boolean('features', None, 'Weither to produce features of evalutate')
    flags.DEFINE_string('feature_path', None, 'path to store features')
    flags.DEFINE_boolean('eval_train', None, 'Weither to evaludate training example rather than test')
    flags.DEFINE_integer('modelnum', None, 'model number to restore for testing')
    flags.DEFINE_string('restore_model_rgb', None, 'Load these weights excluding Logits')
    flags.DEFINE_string('restore_model_flow', None, 'Load these weights excluding Logits')
    flags.DEFINE_string('restore_model_joint', None, 'Load these weights excluding Logits')
    flags.DEFINE_string('rgb_data_path', None, 'path to rgb data')
    flags.DEFINE_string('flow_data_path', None, 'path to flow data')
    flags.DEFINE_string('restore_mode', None, 'pretrain (for base netwrok without logits), model (restore base model with classification logits) or continue (restore everything)')
    FLAGS = flags.FLAGS
    source_domain = os.path.basename(FLAGS.datasets)
    target_domain = os.path.basename(FLAGS.unseen_dataset)
    # Both directories share the same run-identifying suffix.
    suffix = source_domain + '_' + target_domain + '_' + str(FLAGS.lr) + '_' + str(FLAGS.batch_norm_update)
    train_dir = FLAGS.results_path + '/saved_model_' + suffix
    if not os.path.exists(train_dir):
        os.makedirs(train_dir)
    results_dir = FLAGS.results_path + '/results_' + suffix
    return (FLAGS, train_dir, results_dir)
|
def main():
    """Entry point: parse and validate flags, then train or evaluate."""
    (flags, train_dir, results_dir) = input_parser()
    if parse_args(flags):
        return
    runner = TrainTestScript(flags, results_dir, train_dir)
    if flags.train:
        runner.train()
    else:
        runner.test()
|
def eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id=0):
    """Forward one batch through the baseline MLP and return the loss."""
    one_hot = to_one_hot(data['features']['atom_types'][(batch_idxs, ...)], NUM_ATOM_TYPES)
    labels = data['targets'][(batch_idxs, ...)]
    one_hot = Variable(one_hot)
    labels = Variable(labels)
    if torch.cuda.is_available():
        one_hot = one_hot.cuda(device_id)
        labels = labels.cuda(device_id)
    return criterion(mlp(one_hot), labels)
|
def eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id=0):
    """Forward one batch through the baseline MLP plus the residual s2cnn
    and return the loss on the combined prediction."""
    feats = data['features']
    geometry = feats['geometry'][(batch_idxs, ...)]
    atom_types = feats['atom_types'][(batch_idxs, ...)]
    one_hot = to_one_hot(atom_types, NUM_ATOM_TYPES)
    targets = data['targets'][(batch_idxs, ...)]
    geometry = Variable(geometry)
    atom_types = Variable(atom_types)
    one_hot = Variable(one_hot)
    targets = Variable(targets)
    if torch.cuda.is_available():
        one_hot = one_hot.cuda(device_id)
        geometry = geometry.cuda(device_id)
        atom_types = atom_types.cuda(device_id)
        targets = targets.cuda(device_id)
    # s2cnn output is a residual on top of the baseline MLP prediction.
    outputs = mlp(one_hot) + s2cnn(geometry, atom_types)
    return criterion(outputs, targets)
|
def train_baseline(mlp, data, train_batches, test_batches, num_epochs, learning_rate_mlp, device_id=0):
    """Train the baseline MLP.

    Returns (train_loss, test_loss) — RMSE values from the final epoch.
    """
    optim = OPTIMIZER(mlp.parameters(), lr=learning_rate_mlp)
    criterion = nn.MSELoss()
    if torch.cuda.is_available():
        criterion = criterion.cuda(device_id)
    for epoch in range(num_epochs):
        train_losses = []
        print('training')
        for (iteration, batch_idxs) in enumerate(train_batches):
            mlp.train()
            optim.zero_grad()
            loss = eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id)
            loss.backward()
            optim.step()
            train_losses.append(loss.item())
            print('\riteration {}/{}'.format((iteration + 1), train_batches.num_iterations()), end='')
        print()
        test_losses = []
        print('evaluating')
        for (iteration, batch_idxs) in enumerate(test_batches):
            mlp.eval()
            # Bug fix: evaluation previously omitted device_id and always ran
            # the batch on GPU 0 regardless of the requested device.
            loss = eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id)
            test_losses.append(loss.item())
            print('\riteration {}/{}'.format((iteration + 1), test_batches.num_iterations()), end='')
        print()
        # Losses are MSE; report RMSE.
        train_loss = np.sqrt(np.mean(train_losses))
        test_loss = np.sqrt(np.mean(test_losses))
        print('epoch {}/{} - avg train loss: {}, test loss: {}'.format((epoch + 1), num_epochs, train_loss, test_loss))
    return (train_loss, test_loss)
|
def train_s2cnn(mlp, s2cnn, data, train_batches, test_batches, num_epochs, init_learning_rate_s2cnn, learning_rate_decay_epochs, device_id=0):
    """Train the s2cnn residual model while keeping the baseline MLP frozen.

    Returns (train_loss, test_loss) — RMSE values from the final epoch.
    """
    optim = OPTIMIZER(s2cnn.parameters(), lr=init_learning_rate_s2cnn)
    criterion = nn.MSELoss()
    if torch.cuda.is_available():
        criterion = criterion.cuda(device_id)
    for epoch in range(num_epochs):
        # Step-decay the learning rate (x0.1 every learning_rate_decay_epochs).
        optim = exp_lr_scheduler(optim, epoch, init_lr=init_learning_rate_s2cnn, lr_decay_epoch=learning_rate_decay_epochs)
        train_losses = []
        print('training')
        for (iteration, batch_idxs) in enumerate(train_batches):
            s2cnn.train()
            mlp.eval()
            optim.zero_grad()
            # Bug fix: device_id was previously dropped here, forcing GPU 0.
            loss = eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id)
            loss.backward()
            optim.step()
            train_losses.append(loss.item())
            print('\riteration {}/{} - batch loss: {}'.format((iteration + 1), train_batches.num_iterations(), np.sqrt(train_losses[(- 1)])), end='')
        print()
        test_losses = []
        print('evaluating')
        for (iteration, batch_idxs) in enumerate(test_batches):
            s2cnn.eval()
            mlp.eval()
            # Bug fix: device_id was previously dropped here as well.
            loss = eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id)
            test_losses.append(loss.item())
            print('\riteration {}/{} - batch loss: {}'.format((iteration + 1), test_batches.num_iterations(), np.sqrt(test_losses[(- 1)])), end='')
        print()
        # Losses are MSE; report RMSE.
        train_loss = np.sqrt(np.mean(train_losses))
        test_loss = np.sqrt(np.mean(test_losses))
        print('epoch {}/{} - avg train loss: {}, test loss: {}'.format((epoch + 1), num_epochs, train_loss, test_loss))
    return (train_loss, test_loss)
|
def main():
    """CLI entry point: train the baseline MLP, then the residual s2cnn."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, default='data.joblib')
    parser.add_argument('--test_strat', type=int, default=0)
    parser.add_argument('--device_id', type=int, default=0)
    parser.add_argument('--num_epochs_s2cnn', type=int, default=30)
    parser.add_argument('--num_epochs_mlp', type=int, default=30)
    parser.add_argument('--batch_size_s2cnn', type=int, default=32)
    parser.add_argument('--batch_size_mlp', type=int, default=32)
    # Bug fix: these two were declared type=int with float defaults, so any
    # value given on the command line either crashed int() or truncated to 0.
    parser.add_argument('--init_learning_rate_s2cnn', type=float, default=0.001)
    parser.add_argument('--learning_rate_mlp', type=float, default=0.001)
    parser.add_argument('--learning_rate_decay_epochs', type=int, default=10)
    args = parser.parse_args()
    torch.cuda.set_device(args.device_id)
    print('evaluating on {}'.format(args.test_strat))
    print('loading data...', end='')
    (data, train_idxs, test_idxs) = load_data(args.data_path, args.test_strat, cuda=args.device_id)
    print('done!')
    mlp = BaselineRegressor()
    s2cnn = S2CNNRegressor()
    if torch.cuda.is_available():
        for model in [mlp, s2cnn]:
            model.cuda(args.device_id)
    print('training baseline model')
    print('mlp #params: {}'.format(count_params(mlp)))
    train_baseline(mlp, data, IndexBatcher(train_idxs, args.batch_size_mlp, cuda=args.device_id), IndexBatcher(test_idxs, args.batch_size_mlp, cuda=args.device_id), args.num_epochs_mlp, args.learning_rate_mlp, args.device_id)
    print('training residual s2cnn model')
    print('s2cnn #params: {}'.format(count_params(s2cnn)))
    train_s2cnn(mlp, s2cnn, data, IndexBatcher(train_idxs, args.batch_size_s2cnn, cuda=args.device_id), IndexBatcher(test_idxs, args.batch_size_s2cnn, cuda=args.device_id), args.num_epochs_s2cnn, args.init_learning_rate_s2cnn, args.learning_rate_decay_epochs, args.device_id)
|
class S2Block(nn.Module):
    """Simple S2 convolution block: S2Convolution -> BatchNorm3d -> nonlinearity."""

    def __init__(self, b_in, b_out, f_in, f_out):
        """b_in/b_out: bandwidth of input/output signals.
        f_in/f_out: number of feature channels in input/output signals."""
        super(S2Block, self).__init__()
        # Near-identity sampling grid on the sphere used by the convolution.
        self.grid_s2 = s2_near_identity_grid(n_alpha=(2 * b_in), n_beta=2)
        self.cnn = S2Convolution(nfeature_in=f_in, nfeature_out=f_out, b_in=b_in, b_out=b_out, grid=self.grid_s2)
        # AFFINE is a module-level constant controlling learnable BN parameters.
        self.bn = nn.BatchNorm3d(f_out, affine=AFFINE)

    def forward(self, x):
        x = self.cnn(x)
        x = self.bn(x)
        # `nonlinearity` is a module-level activation defined elsewhere in this file.
        x = nonlinearity(x)
        return x
|
class So3Block(nn.Module):
    """Simple SO3 convolution block: SO3Convolution -> BatchNorm3d -> nonlinearity."""

    def __init__(self, b_in, b_out, f_in, f_out):
        """b_in/b_out: bandwidth of input/output signals.
        f_in/f_out: number of feature channels in input/output signals."""
        super(So3Block, self).__init__()
        # Near-identity sampling grid on SO(3) used by the convolution.
        self.grid_so3 = so3_near_identity_grid(n_alpha=(2 * b_in), n_beta=2, n_gamma=2)
        self.cnn = SO3Convolution(nfeature_in=f_in, nfeature_out=f_out, b_in=b_in, b_out=b_out, grid=self.grid_so3)
        # AFFINE is a module-level constant controlling learnable BN parameters.
        self.bn = nn.BatchNorm3d(f_out, affine=AFFINE)

    def forward(self, x):
        x = self.cnn(x)
        x = self.bn(x)
        # `nonlinearity` is a module-level activation defined elsewhere in this file.
        x = nonlinearity(x)
        return x
|
class DeepSet(nn.Module):
    """Deep set block: per-object encoder MLP, masked sum over objects, decoder MLP."""

    def __init__(self, f, h1, h_latent, h2, n_objs):
        """f: input filters per object.
        h1, h2: hidden units for the encoder/decoder MLPs.
        h_latent: latent dimensions.
        n_objs: number of objects aggregated in latent space."""
        super(DeepSet, self).__init__()
        # These size attributes are stored but not read elsewhere in this class.
        self.f = f
        self.h1 = h1
        # NOTE(review): stored under the name h3 although it holds h2 —
        # looks like a typo; the value is unused, so behaviour is unaffected.
        self.h3 = h2
        self.n_objs = n_objs
        # Encoder: f -> h1 -> h_latent (applied per object).
        self.emb_h = nn.Linear(f, h1)
        self.emb_rep = nn.Linear(h1, h_latent)
        # Decoder: h_latent -> h2 -> scalar (applied after aggregation).
        self.proj_h = nn.Linear(h_latent, h2)
        self.proj = nn.Linear(h2, 1)
        self.bn1 = nn.BatchNorm1d(h1, affine=AFFINE)
        self.bn2 = nn.BatchNorm1d(h_latent, affine=AFFINE)
        self.bn3 = nn.BatchNorm1d(h2, affine=AFFINE)

    def forward(self, x, mask):
        # x: (batch * n_objs, f) flattened object features;
        # mask zeroes out padded objects before the sum.
        x = self.emb_h(x)
        x = self.bn1(x)
        x = nonlinearity(x)
        x = self.emb_rep(x)
        x = self.bn2(x)
        x = nonlinearity(x)
        (n, h_latent) = x.size()
        # Regroup objects per example, then aggregate with a masked sum
        # (permutation invariant over objects).
        x = x.view((n // self.n_objs), self.n_objs, h_latent)
        x = torch.sum((x * mask), dim=1)
        x = self.proj_h(x)
        x = self.bn3(x)
        x = nonlinearity(x)
        x = self.proj(x)
        return x
|
class S2CNNRegressor(nn.Module):
    """Approximate energy using spherical representations.

    One S2 block followed by three SO3 blocks per atom, SO3 integration to a
    rotation-invariant feature, then a DeepSet aggregation over atoms.
    """

    def __init__(self):
        super(S2CNNRegressor, self).__init__()
        # Maximum number of atoms aggregated per molecule.
        n_objs = 23
        self.blocks = [S2Block(b_in=10, f_in=5, b_out=8, f_out=8), So3Block(b_in=8, b_out=6, f_in=8, f_out=16), So3Block(b_in=6, b_out=4, f_in=16, f_out=32), So3Block(b_in=4, b_out=2, f_in=32, f_out=64)]
        # self.blocks is a plain list, which nn.Module does not register;
        # the setattr below registers each block (as block0, block1, ...) so
        # their parameters appear in parameters()/state_dict().
        for (i, block) in enumerate(self.blocks):
            setattr(self, 'block{0}'.format(i), block)
        self.ds = DeepSet(64, 256, 64, 512, n_objs)

    def forward(self, x, atom_types):
        # x: per-atom spherical signals; last two dims are the sampling grid.
        (n_batch, n_atoms, n_features, bandwidth, _) = x.size()
        # Atom type 0 marks padding; mask those atoms out of the aggregation.
        mask = (atom_types > 0).view(n_batch, n_atoms, 1).float()
        # Fold atoms into the batch dimension for the spherical convolutions.
        x = x.view((n_batch * n_atoms), n_features, bandwidth, bandwidth)
        for block in self.blocks:
            x = block(x)
        # Integrate over SO(3) to get a rotation-invariant per-atom feature.
        x = so3_integrate(x)
        y = self.ds(x, mask)
        return y
|
class IndexBatcher():
    """Iterator yielding shuffled mini-batches of dataset indices as LongTensors.

    Each epoch yields exactly num_iterations() full batches; a trailing
    partial batch is dropped and the order is reshuffled for the next epoch.
    """

    def __init__(self, indices, n_batch, cuda=None):
        """indices: array of dataset indices.  n_batch: batch size.
        cuda: device id to move batches to (None keeps them on the CPU)."""
        self.indices = indices.astype(np.int64)
        self.n_batch = n_batch
        self.pos = 0
        self.cuda = cuda
        # Shuffle positions into self.indices rather than the indices themselves.
        self.internal_indices = np.arange(len(indices)).astype(np.int64)
        np.random.shuffle(self.internal_indices)

    def __iter__(self):
        return self

    def reset(self):
        """Rewind and reshuffle for the next epoch."""
        self.pos = 0
        np.random.shuffle(self.internal_indices)

    def __next__(self):
        start = self.pos
        end = np.minimum((self.pos + self.n_batch), len(self.indices))
        self.pos += self.n_batch
        # Stop (and reshuffle) when no full batch remains.  Bug fix: the old
        # check (pos >= len) also dropped the final batch when len(indices)
        # was an exact multiple of n_batch, contradicting num_iterations().
        if (end - start) < self.n_batch:
            self.reset()
            raise StopIteration
        tensor = torch.LongTensor(self.indices[self.internal_indices[start:end]])
        if (self.cuda is not None):
            # Bug fix: Tensor.cuda() is not in-place; its result must be kept.
            tensor = tensor.cuda(self.cuda)
        return tensor

    def num_iterations(self):
        """Number of full batches per epoch."""
        return (len(self.indices) // self.n_batch)

    # Python 2 iterator protocol compatibility.
    next = __next__
|
def to_one_hot(x, n):
    """One-hot encode integer tensor x along a new trailing axis of size n.

    x is expected to be 2-D (scatter is applied on dim 2); the result is a
    float tensor of shape (*x.shape, n).
    """
    target_shape = (*x.size(), n)
    encoded = torch.FloatTensor(*target_shape).zero_()
    encoded.scatter_(2, torch.unsqueeze(x, 2), 1)
    return encoded
|
def load_data(path, test_strat_id=None, cuda=None):
    """Load the molecule dataset and split it into train/test index arrays.

    path: path to the joblib-serialized molecule data.
    test_strat_id: id of the strat used as the test set; None picks one at
        random.  Bug fix: the old truthiness check (`if not test_strat_id`)
        treated strat 0 — the caller's default — as "pick randomly".
    cuda: device id to move tensors to (None keeps them on the CPU).
        Bug fix: .cuda() results were previously discarded (it is not
        in-place), so the data silently stayed on the CPU.
    """
    data = joblib.load(path)
    # Remap raw atom type codes to a dense 0..k-1 range.
    type_remap = (- np.ones((int(data['features']['atom_types'].max()) + 1)))
    unique_types = np.unique(data['features']['atom_types']).astype(int)
    type_remap[unique_types] = np.arange(len(unique_types))
    data['features']['atom_types'] = type_remap[data['features']['atom_types'].astype(int)]
    data['features']['geometry'] = torch.FloatTensor(data['features']['geometry'].astype(np.float32))
    data['features']['atom_types'] = torch.LongTensor(data['features']['atom_types'].astype(np.int64))
    data['targets'] = torch.from_numpy(data['targets'])
    if (cuda is not None):
        data['features']['geometry'] = data['features']['geometry'].cuda(cuda)
        data['features']['atom_types'] = data['features']['atom_types'].cuda(cuda)
        data['targets'] = data['targets'].cuda(cuda)
    train = np.ndarray(0)
    test = np.ndarray(0)
    if test_strat_id is None:
        test_strat_id = np.random.randint(len(data['strats']))
    for i in range(len(data['strats'])):
        if (i != test_strat_id):
            train = np.concatenate((train, data['strats'][i]))
        else:
            test = np.concatenate((test, data['strats'][i]))
    return (data, train, test)
|
def exp_lr_scheduler(optimizer, epoch, init_lr=0.005, lr_decay_epoch=40):
    """Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs."""
    decayed = init_lr * 0.1 ** (epoch // lr_decay_epoch)
    if epoch % lr_decay_epoch == 0:
        print('LR is set to {}'.format(decayed))
    for group in optimizer.param_groups:
        group['lr'] = decayed
    return optimizer
|
def count_params(model):
    """Total number of elements across trainable parameters of *model*."""
    return sum(np.prod(p.size()) for p in model.parameters() if p.requires_grad)
|
class Model(nn.Module):
    """Spherical CNN classifier: S2 convolution, SO3 convolutions, global SO3
    integration, linear head, log-softmax.

    NOTE(review): another class named Model is defined later in this file;
    if both live in the same module the later one shadows this definition.
    """

    def __init__(self, nclasses):
        super().__init__()
        # Channel widths per layer; the last entry is the number of classes.
        self.features = [6, 100, 100, nclasses]
        # Bandwidth (resolution) of the spherical signal at each layer.
        self.bandwidths = [64, 16, 10]
        assert (len(self.bandwidths) == (len(self.features) - 1))
        sequence = []
        # First layer: S2 -> SO3 convolution over an equatorial grid.
        grid = s2_equatorial_grid(max_beta=0, n_alpha=(2 * self.bandwidths[0]), n_beta=1)
        sequence.append(S2Convolution(self.features[0], self.features[1], self.bandwidths[0], self.bandwidths[1], grid))
        # Remaining hidden layers: BN -> ReLU -> SO3 convolution.
        for l in range(1, (len(self.features) - 2)):
            nfeature_in = self.features[l]
            nfeature_out = self.features[(l + 1)]
            b_in = self.bandwidths[l]
            b_out = self.bandwidths[(l + 1)]
            sequence.append(nn.BatchNorm3d(nfeature_in, affine=True))
            sequence.append(nn.ReLU())
            grid = so3_equatorial_grid(max_beta=0, max_gamma=0, n_alpha=(2 * b_in), n_beta=1, n_gamma=1)
            sequence.append(SO3Convolution(nfeature_in, nfeature_out, b_in, b_out, grid))
        sequence.append(nn.BatchNorm3d(self.features[(- 2)], affine=True))
        sequence.append(nn.ReLU())
        self.sequential = nn.Sequential(*sequence)
        output_features = self.features[(- 2)]
        self.out_layer = nn.Linear(output_features, self.features[(- 1)])

    def forward(self, x):
        x = self.sequential(x)
        # Integrate the SO3 signal to obtain a rotation-invariant feature vector.
        x = so3_integrate(x)
        x = self.out_layer(x)
        return F.log_softmax(x, dim=1)
|
class Model(nn.Module):
    """Spherical CNN classifier at input bandwidth 128.

    Same layer scheme as the bandwidth-64 variant, but pools with a global
    spatial max instead of SO(3) integration, and adds BatchNorm1d in the head.
    """
    def __init__(self, nclasses):
        super().__init__()
        # feature counts per layer: 6 input channels, nclasses logits out
        self.features = [6, 50, 70, 350, nclasses]
        # bandwidth of the signal entering each convolution layer
        self.bandwidths = [128, 32, 22, 7]
        assert (len(self.bandwidths) == (len(self.features) - 1))
        sequence = []
        # first layer: S2 -> SO(3), kernel supported on an equatorial ring
        grid = s2_equatorial_grid(max_beta=0, n_alpha=(2 * self.bandwidths[0]), n_beta=1)
        sequence.append(S2Convolution(self.features[0], self.features[1], self.bandwidths[0], self.bandwidths[1], grid))
        # intermediate SO(3) -> SO(3) layers
        for l in range(1, (len(self.features) - 2)):
            nfeature_in = self.features[l]
            nfeature_out = self.features[(l + 1)]
            b_in = self.bandwidths[l]
            b_out = self.bandwidths[(l + 1)]
            sequence.append(nn.BatchNorm3d(nfeature_in, affine=True))
            sequence.append(nn.ReLU())
            grid = so3_equatorial_grid(max_beta=0, max_gamma=0, n_alpha=(2 * b_in), n_beta=1, n_gamma=1)
            sequence.append(SO3Convolution(nfeature_in, nfeature_out, b_in, b_out, grid))
        sequence.append(nn.BatchNorm3d(self.features[(- 2)], affine=True))
        sequence.append(nn.ReLU())
        self.sequential = nn.Sequential(*sequence)
        # classifier head: feature-wise normalization then linear projection
        self.out_layer = nn.Sequential(nn.BatchNorm1d(self.features[(- 2)], affine=False), nn.Linear(self.features[(- 2)], self.features[(- 1)]))
    def forward(self, x):
        # x: [batch, 6, 2b, 2b] spherical signal -- TODO confirm with caller
        x = self.sequential(x)
        # global max over the flattened SO(3) spatial axes -> [batch, feature]
        x = x.view(x.size(0), x.size(1), (- 1)).max((- 1))[0]
        x = self.out_layer(x)
        return F.log_softmax(x, dim=1)
|
class KeepName():
    """Callable wrapper that tags a transform's output with the input it came from.

    Calling the wrapper with a file name returns (file_name, transform(file_name)),
    so downstream code can keep track of which sample produced which output.
    """
    def __init__(self, transform):
        # the wrapped callable
        self.transform = transform

    def __call__(self, file_name):
        result = self.transform(file_name)
        return (file_name, result)
|
def main(log_dir, augmentation, dataset, batch_size, num_workers):
    """Evaluate a trained SHREC17 model and score it with the official evaluator.

    Loads model.py / state.pkl from `log_dir`, runs inference on the perturbed
    split, summing predictions over `augmentation` random-rotation views per
    shape, writes one retrieval file per shape id, then downloads and runs the
    official node.js evaluator and copies its summary into `log_dir`.

    :param log_dir: directory containing model.py and state.pkl from training
    :param augmentation: number of random-rotation views per shape
    :param dataset: SHREC17 split name (e.g. 'test', 'val')
    :param batch_size: number of shapes per inference batch
    :param num_workers: DataLoader worker count
    """
    # also a fail-fast check that nodejs (needed by the evaluator below) exists
    print(check_output(['nodejs', '--version']).decode('utf-8'))
    torch.backends.cudnn.benchmark = True
    # each shape is projected onto the sphere `augmentation` times with random
    # rotations; the views are stacked into one tensor per shape
    transform = torchvision.transforms.Compose([CacheNPY(prefix='b64_', repeat=augmentation, pick_randomly=False, transform=torchvision.transforms.Compose([ToMesh(random_rotations=True, random_translation=0.1), ProjectOnSphere(bandwidth=64)])), (lambda xs: torch.stack([torch.FloatTensor(x) for x in xs]))])
    transform = KeepName(transform)
    test_set = Shrec17('data', dataset, perturbed=True, download=True, transform=transform)
    # import the model definition that was snapshotted next to the checkpoint
    loader = importlib.machinery.SourceFileLoader('model', os.path.join(log_dir, 'model.py'))
    mod = types.ModuleType(loader.name)
    loader.exec_module(mod)
    model = mod.Model(55)
    model.cuda()
    model.load_state_dict(torch.load(os.path.join(log_dir, 'state.pkl')))
    # start from a fresh results directory
    resdir = os.path.join(log_dir, (dataset + '_perturbed'))
    if os.path.isdir(resdir):
        shutil.rmtree(resdir)
    os.mkdir(resdir)
    predictions = []
    ids = []
    loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True, drop_last=False)
    for (batch_idx, data) in enumerate(loader):
        model.eval()
        if (dataset != 'test'):
            # non-test splits also yield a target; keep only the input part
            data = data[0]
        (file_names, data) = data
        # fold the augmentation views into the batch dimension
        (batch_size, rep) = data.size()[:2]
        data = data.view((- 1), *data.size()[2:])
        data = data.cuda()
        with torch.no_grad():
            pred = model(data).data
        # sum the per-view predictions of each shape
        pred = pred.view(batch_size, rep, (- 1))
        pred = pred.sum(1)
        predictions.append(pred.cpu().numpy())
        # shape id = file name without directory and extension
        ids.extend([x.split('/')[(- 1)].split('.')[0] for x in file_names])
        print('[{}/{}] '.format(batch_idx, len(loader)))
    predictions = np.concatenate(predictions)
    predictions_class = np.argmax(predictions, axis=1)
    # one retrieval file per query id: every id predicted in the same class,
    # ranked by that class's score (descending)
    for i in range(len(ids)):
        if ((i % 100) == 0):
            print('{}/{} '.format(i, len(ids)), end='\r')
        idfile = os.path.join(resdir, ids[i])
        retrieved = [(predictions[(j, predictions_class[j])], ids[j]) for j in range(len(ids)) if (predictions_class[j] == predictions_class[i])]
        retrieved = sorted(retrieved, reverse=True)
        retrieved = [i for (_, i) in retrieved]
        with open(idfile, 'w') as f:
            f.write('\n'.join(retrieved))
    # download and unpack the official SHREC17 evaluator
    url = 'https://shapenet.cs.stanford.edu/shrec17/code/evaluator.zip'
    file_path = 'evaluator.zip'
    r = requests.get(url, stream=True)
    with open(file_path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=(16 * (1024 ** 2))):
            if chunk:
                f.write(chunk)
                f.flush()
    zip_ref = zipfile.ZipFile(file_path, 'r')
    zip_ref.extractall('.')
    zip_ref.close()
    # run the node.js evaluator on the written result files and keep its summary
    print(check_output(['nodejs', 'evaluate.js', (os.path.join('..', log_dir) + '/')], cwd='evaluator').decode('utf-8'))
    shutil.copy2(os.path.join('evaluator', (log_dir + '.summary.csv')), os.path.join(log_dir, 'summary.csv'))
|
def main(log_dir, model_path, augmentation, dataset, batch_size, learning_rate, num_workers):
    """Train a spherical CNN on SHREC17.

    Creates `log_dir`, snapshots this script and the model file into it,
    trains for 300 epochs with a step learning-rate schedule, and overwrites
    `log_dir`/state.pkl after every epoch.

    :param log_dir: fresh output directory (must not exist yet)
    :param model_path: path to the model.py defining class Model
    :param augmentation: number of cached random-rotation views per shape
    :param dataset: SHREC17 split name to train on
    :param batch_size: training batch size
    :param learning_rate: base learning rate (scaled by the schedule below)
    :param num_workers: DataLoader worker count
    """
    # capture the call arguments before locals() grows
    arguments = copy.deepcopy(locals())
    os.mkdir(log_dir)
    # snapshot the code next to the results for reproducibility
    shutil.copy2(__file__, os.path.join(log_dir, 'script.py'))
    shutil.copy2(model_path, os.path.join(log_dir, 'model.py'))
    # log to both the console and log_dir/log.txt
    logger = logging.getLogger('train')
    logger.setLevel(logging.DEBUG)
    logger.handlers = []
    ch = logging.StreamHandler()
    logger.addHandler(ch)
    fh = logging.FileHandler(os.path.join(log_dir, 'log.txt'))
    logger.addHandler(fh)
    logger.info('%s', repr(arguments))
    torch.backends.cudnn.benchmark = True
    # import the snapshotted model definition
    loader = importlib.machinery.SourceFileLoader('model', os.path.join(log_dir, 'model.py'))
    mod = types.ModuleType(loader.name)
    loader.exec_module(mod)
    model = mod.Model(55)
    model.cuda()
    logger.info('{} paramerters in total'.format(sum((x.numel() for x in model.parameters()))))
    logger.info('{} paramerters in the last layer'.format(sum((x.numel() for x in model.out_layer.parameters()))))
    # input bandwidth is dictated by the model's first layer
    bw = model.bandwidths[0]
    # cache `augmentation` randomly rotated spherical projections per shape
    transform = CacheNPY(prefix='b{}_'.format(bw), repeat=augmentation, transform=torchvision.transforms.Compose([ToMesh(random_rotations=True, random_translation=0.1), ProjectOnSphere(bandwidth=bw)]))
    def target_transform(x):
        # map a ShapeNet synset id to a class index in [0, 55)
        classes = ['02691156', '02747177', '02773838', '02801938', '02808440', '02818832', '02828884', '02843684', '02871439', '02876657', '02880940', '02924116', '02933112', '02942699', '02946921', '02954340', '02958343', '02992529', '03001627', '03046257', '03085013', '03207941', '03211117', '03261776', '03325088', '03337140', '03467517', '03513137', '03593526', '03624134', '03636649', '03642806', '03691459', '03710193', '03759954', '03761084', '03790512', '03797390', '03928116', '03938244', '03948459', '03991062', '04004475', '04074963', '04090263', '04099429', '04225987', '04256520', '04330267', '04379243', '04401088', '04460130', '04468005', '04530566', '04554684']
        return classes.index(x[0])
    train_set = Shrec17('data', dataset, perturbed=True, download=True, transform=transform, target_transform=target_transform)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=True, drop_last=True)
    # lr=0 is a placeholder: the actual rate is set per-epoch by get_learning_rate
    optimizer = torch.optim.SGD(model.parameters(), lr=0, momentum=0.9)
    def train_step(data, target):
        # one SGD step; returns (loss value, number of correct predictions)
        model.train()
        (data, target) = (data.cuda(), target.cuda())
        prediction = model(data)
        loss = F.nll_loss(prediction, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        correct = prediction.data.max(1)[1].eq(target.data).long().cpu().sum()
        return (loss.item(), correct.item())
    def get_learning_rate(epoch):
        # step schedule: x1 before epoch 100, x0.1 before 200, x0.01 afterwards
        limits = [100, 200]
        lrs = [1, 0.1, 0.01]
        assert (len(lrs) == (len(limits) + 1))
        for (lim, lr) in zip(limits, lrs):
            if (epoch < lim):
                return (lr * learning_rate)
        return (lrs[(- 1)] * learning_rate)
    for epoch in range(300):
        lr = get_learning_rate(epoch)
        logger.info('learning rate = {} and batch size = {}'.format(lr, train_loader.batch_size))
        for p in optimizer.param_groups:
            p['lr'] = lr
        total_loss = 0
        total_correct = 0
        time_before_load = time.perf_counter()
        for (batch_idx, (data, target)) in enumerate(train_loader):
            time_after_load = time.perf_counter()
            time_before_step = time.perf_counter()
            (loss, correct) = train_step(data, target)
            total_loss += loss
            total_correct += correct
            logger.info('[{}:{}/{}] LOSS={:.2} <LOSS>={:.2} ACC={:.2} <ACC>={:.2} time={:.2}+{:.2}'.format(epoch, batch_idx, len(train_loader), loss, (total_loss / (batch_idx + 1)), (correct / len(data)), ((total_correct / len(data)) / (batch_idx + 1)), (time_after_load - time_before_load), (time.perf_counter() - time_before_step)))
            time_before_load = time.perf_counter()
        # overwrite the checkpoint after every epoch
        torch.save(model.state_dict(), os.path.join(log_dir, 'state.pkl'))
|
def s2_near_identity_grid(max_beta=(np.pi / 8), n_alpha=8, n_beta=3):
    """Sample kernel support points as rings around the north pole.

    :param max_beta: angular radius of the outermost ring
    :param n_alpha: number of points per ring
    :param n_beta: number of rings
    :return: tuple of (beta, alpha) tuples; size of the kernel = n_alpha * n_beta
    """
    # rings at beta = k * max_beta / n_beta for k = 1..n_beta (the pole itself
    # is excluded). np.float64 replaces the alias np.float, which was
    # deprecated in NumPy 1.20 and removed in 1.24 (it raised AttributeError).
    beta = ((np.arange(start=1, stop=(n_beta + 1), dtype=np.float64) * max_beta) / n_beta)
    alpha = np.linspace(start=0, stop=(2 * np.pi), num=n_alpha, endpoint=False)
    (B, A) = np.meshgrid(beta, alpha, indexing='ij')
    B = B.flatten()
    A = A.flatten()
    grid = np.stack((B, A), axis=1)
    return tuple((tuple(ba) for ba in grid))
|
def s2_equatorial_grid(max_beta=0, n_alpha=32, n_beta=1):
    """Sample kernel support points as rings around the equator.

    :param max_beta: half-width of the beta band around pi/2
    :param n_alpha: number of points per ring
    :param n_beta: number of rings
    :return: tuple of (beta, alpha) tuples; size of the kernel = n_alpha * n_beta
    """
    # n_beta values of beta centered on the equator, n_alpha evenly spaced alphas
    betas = np.linspace(start=((np.pi / 2) - max_beta), stop=((np.pi / 2) + max_beta), num=n_beta, endpoint=True)
    alphas = np.linspace(start=0, stop=(2 * np.pi), num=n_alpha, endpoint=False)
    # same ordering as meshgrid(indexing='ij'): beta varies slowest
    return tuple((beta, alpha) for beta in betas for alpha in alphas)
|
def s2_soft_grid(b):
    """SOFT sampling grid on the sphere for bandwidth `b`.

    :param b: bandwidth
    :return: tuple of (beta, alpha) tuples with 2b betas strictly inside
        (0, pi) and 2b alphas evenly spaced on [0, 2pi)
    """
    betas = (((np.arange((2 * b)) + 0.5) / (2 * b)) * np.pi)
    alphas = np.linspace(start=0, stop=(2 * np.pi), num=(2 * b), endpoint=False)
    # same ordering as meshgrid(indexing='ij'): beta varies slowest
    return tuple((beta, alpha) for beta in betas for alpha in alphas)
|
def s2_mm(x, y):
    """Degree-wise multiplication of S2 Fourier coefficients with SO(3) kernel coefficients.

    :param x: [l * m, batch, feature_in, complex]
    :param y: [l * m, feature_in, feature_out, complex]
    :return: [l * m * n, batch, feature_out, complex]
    """
    from s2cnn.utils.complex import complex_mm
    # last axis holds (real, imag)
    assert (y.size(3) == 2)
    assert (x.size(3) == 2)
    nbatch = x.size(1)
    nfeature_in = x.size(2)
    nfeature_out = y.size(2)
    assert (y.size(1) == nfeature_in)
    nspec = x.size(0)
    assert (y.size(0) == nspec)
    if x.is_cuda:
        # dedicated CUDA kernel on GPU
        return _cuda_S2_mm.apply(x, y)
    # number of degrees l: nspec = sum_{l<nl} (2l+1) = nl^2
    nl = round((nspec ** 0.5))
    Fz_list = []
    begin = 0
    for l in range(nl):
        L = ((2 * l) + 1)  # multiplicity of degree l
        size = L
        # slice out the 2l+1 rows (index m) belonging to degree l
        Fx = x[begin:(begin + size)]
        Fy = y[begin:(begin + size)]
        Fx = Fx.view((L * nbatch), nfeature_in, 2)
        Fy = Fy.transpose(0, 1)
        Fy = Fy.contiguous()
        Fy = Fy.view(nfeature_in, (L * nfeature_out), 2)
        # complex matmul with conjugated y; produces all (m, n) combinations
        Fz = complex_mm(Fx, Fy, conj_y=True)
        Fz = Fz.view(L, nbatch, L, nfeature_out, 2)
        Fz = Fz.transpose(1, 2)
        Fz = Fz.contiguous()
        # degree-l output block in l*m*n layout
        Fz = Fz.view((L * L), nbatch, nfeature_out, 2)
        Fz_list.append(Fz)
        begin += size
    # concatenate the degree blocks back into one spectral tensor
    z = torch.cat(Fz_list, 0)
    return z
|
class _cuda_S2_mm(torch.autograd.Function):
    """Autograd wrapper around the custom CUDA kernels implementing s2_mm."""
    @staticmethod
    def forward(ctx, x, y):
        # x: [l * m, batch, feature_in, complex]
        # y: [l * m, feature_in, feature_out, complex]
        ctx.save_for_backward(x, y)
        return _cuda_s2_mm(x, y)
    @staticmethod
    def backward(ctx, gradz):
        """Compute gradients w.r.t. x and y with dedicated CUDA kernels."""
        import s2cnn.utils.cuda as cuda_utils
        (x, y) = ctx.saved_tensors
        # number of degrees: x has nl^2 rows in its l*m layout
        nl = round((x.size(0) ** 0.5))
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        # rows of the l*m*n output layout: sum_{l<nl} (2l+1)^2 = nl(4nl^2-1)/3
        nspec = ((((4 * (nl ** 2)) - 1) * nl) // 3)
        device = torch.cuda.current_device()
        # kernel setup functions are memoized (lru_cache) per shape
        gradx_cuda_kernel = _setup_s2mm_gradx_cuda_kernel(nbatch=nbatch, nspec=nspec, nl=nl, nfeature_in=nfeature_in, nfeature_out=nfeature_out, device=device)
        grady_cuda_kernel = _setup_s2mm_grady_cuda_kernel(nbatch=nbatch, nspec=nspec, nl=nl, nfeature_in=nfeature_in, nfeature_out=nfeature_out, device=device)
        stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
        gradx = grady = None
        if ctx.needs_input_grad[0]:
            gradx = gradz.new_empty(((nl ** 2), nbatch, nfeature_in, 2))
            gradx_cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1), grid=(cuda_utils.get_blocks((((nl ** 2) * nbatch) * nfeature_in), 1024), 1, 1), args=[gradz.contiguous().data_ptr(), y.contiguous().data_ptr(), gradx.data_ptr()], stream=stream)
        if ctx.needs_input_grad[1]:
            grady = gradz.new_empty(((nl ** 2), nfeature_in, nfeature_out, 2))
            grady_cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1), grid=(cuda_utils.get_blocks((((nl ** 2) * nfeature_in) * nfeature_out), 1024), 1, 1), args=[gradz.contiguous().data_ptr(), x.contiguous().data_ptr(), grady.data_ptr()], stream=stream)
        return (gradx, grady)
|
def _cuda_s2_mm(x, y):
    """CUDA forward pass of s2_mm.

    :param x: [l * m, batch, feature_in, complex]
    :param y: [l * m, feature_in, feature_out, complex]
    :return: [l * m * n, batch, feature_out, complex]
    """
    import s2cnn.utils.cuda as cuda_utils
    # the kernel is written for float32 only
    assert (x.is_cuda and (x.dtype == torch.float32))
    assert (y.is_cuda and (y.dtype == torch.float32))
    assert (y.size(3) == 2)
    assert (x.size(3) == 2)
    nbatch = x.size(1)
    nfeature_in = x.size(2)
    nfeature_out = y.size(2)
    assert (y.size(1) == nfeature_in)
    assert (y.size(0) == x.size(0))
    # number of degrees l; inputs use the l*m layout of size nl^2
    nl = round((x.size(0) ** 0.5))
    # rows of the l*m*n output layout: sum_{l<nl} (2l+1)^2 = nl(4nl^2-1)/3
    nspec = ((((4 * (nl ** 2)) - 1) * nl) // 3)
    assert (x.size(0) == (nl ** 2))
    assert (y.size(0) == (nl ** 2))
    device = torch.cuda.current_device()
    # compile (or fetch the memoized) kernel specialized to these shapes
    cuda_kernel = _setup_s2mm_cuda_kernel(nbatch=nbatch, nspec=nspec, nfeature_in=nfeature_in, nfeature_out=nfeature_out, device=device)
    stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
    output = x.new_empty((nspec, nbatch, nfeature_out, 2))
    cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1), grid=(cuda_utils.get_blocks(((nspec * nbatch) * nfeature_out), 1024), 1, 1), args=[x.contiguous().data_ptr(), y.contiguous().data_ptr(), output.data_ptr()], stream=stream)
    return output
|
@lru_cache(maxsize=32)
def _setup_s2mm_cuda_kernel(nbatch, nspec, nfeature_in, nfeature_out, device=0):
    """Compile (and memoize) the forward CUDA kernel for s2_mm.

    The kernel computes, per output spectral index s = (l, m, n), the complex
    contraction over feature_in of x[l, m] with conjugated y[l, n].
    """
    kernel = Template('\n#define COMPUTE_LMN(s) int l = powf(3.0/4.0 * s, 1.0/3.0) - 0.5; int L = l * (4 * l * l - 1) / 3; int rest = s - L; if (rest >= (2 * l + 1) * (2 * l + 1)) { ++l; L = l * (4 * l * l - 1) / 3; rest = s - L; } int m = rest / (2 * l + 1) - l; int n = rest % (2 * l + 1) - l;\n\n#define EXTRACT(i1, i2, n2, i3, n3) int i1 = index; int i3 = i1 % (n3); i1 /= n3; int i2 = i1 % (n2); i1 /= n2;\n\n#define CONTRACT1(s1, i2, n2, i3, n3) ( ( (l * l + (l + (s1))) * (n2) + (i2) ) * (n3) + (i3) )\n\n#define CONTRACT2(s1, s2, i2, n2, i3, n3) ( ( (L + (l + (s1)) * (2 * l + 1) + (l + (s2))) * (n2) + (i2) ) * (n3) + (i3) )\n\nextern "C"\n__global__ void main_(const float* in_x, const float* in_y, float* out) {\n    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < ${nspec} * ${nbatch} * ${nfeature_out}; index += blockDim.x * gridDim.x) {\n        EXTRACT(s, i, ${nbatch}, f_out, ${nfeature_out})\n\n        // compute s -> (l,m,n)\n        COMPUTE_LMN(s)\n\n        float out_re = 0.0;\n        float out_im = 0.0;\n\n        for (int f_in = 0; f_in < ${nfeature_in}; ++f_in) {\n            float x_re = in_x[CONTRACT1(m, i,    ${nbatch},      f_in,  ${nfeature_in} ) * 2 + 0];\n            float x_im = in_x[CONTRACT1(m, i,    ${nbatch},      f_in,  ${nfeature_in} ) * 2 + 1];\n            float y_re = in_y[CONTRACT1(n, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 0];\n            float y_im = in_y[CONTRACT1(n, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 1];\n\n            // x times y conjugate\n            out_re += x_re * y_re + x_im * y_im;\n            out_im += x_im * y_re - x_re * y_im;\n        }\n\n        out[index * 2 + 0] = out_re;\n        out[index * 2 + 1] = out_im;\n    }\n}\n').substitute({'nbatch': nbatch, 'nspec': nspec, 'nfeature_in': nfeature_in, 'nfeature_out': nfeature_out})
    import s2cnn.utils.cuda as cuda_utils
    return cuda_utils.compile_kernel(kernel, 's2mm.cu', 'main_')
|
@lru_cache(maxsize=32)
def _setup_s2mm_gradx_cuda_kernel(nbatch, nspec, nl, nfeature_in, nfeature_out, device=0):
    """Compile (and memoize) the CUDA kernel computing grad_x of s2_mm.

    Per spectral index s = (l, m) it contracts grad_z with (non-conjugated) y
    over feature_out and the n index.
    """
    kernel = Template('\n#define COMPUTE_LM(s) int l = sqrtf(s); int L = (4 * l * l - 1) * l / 3; int m = s - l * l - l;\n\n#define EXTRACT(i1, i2, n2, i3, n3) int i1 = index; int i3 = i1 % (n3); i1 /= n3; int i2 = i1 % (n2); i1 /= n2;\n\n#define CONTRACT1(s1, i2, n2, i3, n3) ( ( (l * l + (l + (s1))) * (n2) + (i2) ) * (n3) + (i3) )\n\n#define CONTRACT2(s1, s2, i2, n2, i3, n3) ( ( (L + (l + (s1)) * (2 * l + 1) + (l + (s2))) * (n2) + (i2) ) * (n3) + (i3) )\n\nextern "C"\n__global__ void main_(const float* grad_z, const float* y, float* grad_x) {\n    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (${nl} * ${nl}) * ${nbatch} * ${nfeature_in}; index += blockDim.x * gridDim.x) {\n        EXTRACT(s, i, ${nbatch}, f_in, ${nfeature_in})\n\n        // compute s -> (l,m)\n        COMPUTE_LM(s)\n\n        float out_re = 0.0;\n        float out_im = 0.0;\n\n        for (int f_out = 0; f_out < ${nfeature_out}; ++f_out) {\n            for (int k = -l; k <= l; ++k) {\n                float grad_z_re = grad_z[CONTRACT2(m, k, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 0];\n                float grad_z_im = grad_z[CONTRACT2(m, k, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 1];\n                float y_re = y[CONTRACT1(k, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 0];\n                float y_im = y[CONTRACT1(k, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 1];\n\n                // grad_z times y\n                out_re += grad_z_re * y_re - grad_z_im * y_im;\n                out_im += grad_z_re * y_im + grad_z_im * y_re;\n            }\n        }\n\n        grad_x[index * 2 + 0] = out_re;\n        grad_x[index * 2 + 1] = out_im;\n    }\n}\n').substitute({'nbatch': nbatch, 'nspec': nspec, 'nl': nl, 'nfeature_in': nfeature_in, 'nfeature_out': nfeature_out})
    import s2cnn.utils.cuda as cuda_utils
    return cuda_utils.compile_kernel(kernel, 's2mm_gradx.cu', 'main_')
|
@lru_cache(maxsize=32)
def _setup_s2mm_grady_cuda_kernel(nbatch, nspec, nl, nfeature_in, nfeature_out, device=0):
    """Compile (and memoize) the CUDA kernel computing grad_y of s2_mm.

    Per spectral index s = (l, m) it contracts conjugated grad_z with x
    over the batch and the k index.
    """
    kernel = Template('\n#define COMPUTE_LM(s) int l = powf(s, 0.5); int L = (4 * l * l - 1) * l / 3; int m = s - l * l - l;\n\n#define EXTRACT(i1, i2, n2, i3, n3) int i1 = index; int i3 = i1 % (n3); i1 /= n3; int i2 = i1 % (n2); i1 /= n2;\n\n#define CONTRACT1(s1, i2, n2, i3, n3) ( ( (l * l + (l + (s1))) * (n2) + (i2) ) * (n3) + (i3) )\n\n#define CONTRACT2(s1, s2, i2, n2, i3, n3) ( ( (L + (l + (s1)) * (2 * l + 1) + (l + (s2))) * (n2) + (i2) ) * (n3) + (i3) )\n\nextern "C"\n__global__ void main_(const float* grad_z, const float* x, float* grad_y) {\n    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (${nl} * ${nl}) * ${nfeature_in} * ${nfeature_out}; index += blockDim.x * gridDim.x) {\n        EXTRACT(s, f_in, ${nfeature_in}, f_out, ${nfeature_out})\n\n        // compute s -> (l,m)\n        COMPUTE_LM(s)\n\n        float out_re = 0.0;\n        float out_im = 0.0;\n\n        for (int i = 0; i < ${nbatch}; ++i) {\n            for (int k = -l; k <= l; ++k) {\n                float grad_z_re = grad_z[CONTRACT2(k, m, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 0];\n                float grad_z_im = grad_z[CONTRACT2(k, m, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 1];\n                float x_re = x[CONTRACT1(k, i, ${nbatch}, f_in, ${nfeature_in} ) * 2 + 0];\n                float x_im = x[CONTRACT1(k, i, ${nbatch}, f_in, ${nfeature_in} ) * 2 + 1];\n\n                // conjugate grad_z times x\n                out_re += grad_z_re * x_re + grad_z_im * x_im;\n                out_im += grad_z_re * x_im - grad_z_im * x_re;\n            }\n        }\n\n        grad_y[index * 2 + 0] = out_re;\n        grad_y[index * 2 + 1] = out_im;\n    }\n}\n').substitute({'nbatch': nbatch, 'nspec': nspec, 'nl': nl, 'nfeature_in': nfeature_in, 'nfeature_out': nfeature_out})
    import s2cnn.utils.cuda as cuda_utils
    return cuda_utils.compile_kernel(kernel, 's2mm_grady.cu', 'main_')
|
def test_compare_cuda_cpu():
    """Check that the CUDA and CPU implementations of s2_mm agree (requires a GPU)."""
    # spectral size 1+3+5+7 = sum of (2l+1) for l < 4, i.e. bandwidth 4
    x = torch.rand((((1 + 3) + 5) + 7), 2, 3, 2)  # [l * m, batch, feature_in, complex]
    y = torch.rand((((1 + 3) + 5) + 7), 3, 5, 2)  # [l * m, feature_in, feature_out, complex]
    z1 = s2_mm(x, y)
    z2 = s2_mm(x.cuda(), y.cuda()).cpu()
    # maximum deviation, normalized by the output's standard deviation
    q = ((z1 - z2).abs().max().item() / z1.std().item())
    print(q)
    assert (q < 0.0001)
|
def so3_rft(x, b, grid):
    """
    Real Fourier Transform
    :param x: [..., beta_alpha_gamma]
    :param b: output bandwidth signal
    :param grid: tuple of (beta, alpha, gamma) tuples
    :return: [l * m * n, ..., complex]
    """
    # F: [len(grid), nspec, 2] sampled Wigner-D matrices, cached per (b, grid, device)
    F = _setup_so3_ft(b, grid, device_type=x.device.type, device_index=x.device.index)
    assert (x.size((- 1)) == F.size(0))
    sz = x.size()
    # contract the spatial grid axis of x against F; .clone() presumably keeps
    # autograd from tying the graph to the cached tensor -- TODO confirm
    x = torch.einsum('ia,afc->fic', (x.view((- 1), x.size((- 1))), F.clone()))
    # restore the leading batch dimensions: [nspec, ..., 2]
    x = x.view((- 1), *sz[:(- 1)], 2)
    return x
|
@cached_dirpklgz('cache/setup_so3_ft')
def __setup_so3_ft(b, grid):
    """Precompute the SO(3) Fourier matrix: conjugated Wigner-D samples at each grid point.

    Results are cached on disk (cache/setup_so3_ft) by the decorator.

    :param b: bandwidth; degrees l = 0..b-1 are included
    :param grid: tuple of (beta, alpha, gamma) tuples
    :return: real ndarray [len(grid), sum_l (2l+1)^2, 2] (last axis = re/im)
    """
    from lie_learn.representations.SO3.wigner_d import wigner_D_matrix
    n_spatial = len(grid)
    n_spectral = np.sum([(((2 * l) + 1) ** 2) for l in range(b)])
    F = np.zeros((n_spatial, n_spectral), dtype=complex)
    for (i, (beta, alpha, gamma)) in enumerate(grid):
        # one conjugated Wigner D-matrix per degree l, flattened and concatenated
        Dmats = [wigner_D_matrix(l, alpha, beta, gamma, field='complex', normalization='quantum', order='centered', condon_shortley='cs').conj() for l in range(b)]
        F[i] = np.hstack([Dl.flatten() for Dl in Dmats])
    # reinterpret each complex entry as a (real, imag) pair of floats
    F = F.view('float').reshape(((- 1), n_spectral, 2))
    return F
|
@lru_cache(maxsize=32)
def _setup_so3_ft(b, grid, device_type, device_index):
    """Fetch the (disk-cached) SO(3) Fourier matrix as a float32 tensor on the given device."""
    F = __setup_so3_ft(b, grid)
    F = torch.tensor(F.astype(np.float32), dtype=torch.float32, device=torch.device(device_type, device_index))
    return F
|
def so3_mm(x, y):
    """Degree-wise multiplication of SO(3) Fourier coefficients.

    :param x: [l * m * n, batch, feature_in, complex]
    :param y: [l * m * n, feature_in, feature_out, complex]
    :return: [l * m * n, batch, feature_out, complex]
    """
    from s2cnn.utils.complex import complex_mm
    import math
    # last axis holds (real, imag)
    assert (y.size(3) == 2)
    assert (x.size(3) == 2)
    nbatch = x.size(1)
    nfeature_in = x.size(2)
    nfeature_out = y.size(2)
    assert (y.size(1) == nfeature_in)
    nspec = x.size(0)
    assert (y.size(0) == nspec)
    # number of degrees: nspec = sum_{l<nl} (2l+1)^2 = nl(4nl^2-1)/3
    nl = math.ceil((((3 / 4) * nspec) ** (1 / 3)))
    assert (nspec == ((nl * ((4 * (nl ** 2)) - 1)) // 3))
    if x.is_cuda:
        # dedicated CUDA kernel on GPU
        return _cuda_SO3_mm.apply(x, y)
    Fz_list = []
    begin = 0
    for l in range(nl):
        L = ((2 * l) + 1)  # multiplicity of degree l
        size = (L ** 2)
        # slice out the (2l+1)^2 rows (indices m, n) belonging to degree l
        Fx = x[begin:(begin + size)]
        Fy = y[begin:(begin + size)]
        # arrange x as [(batch, m), (feature_in, p)] for the block matmul
        Fx = Fx.view(L, L, nbatch, nfeature_in, 2)
        Fx = Fx.transpose(0, 1)
        Fx = Fx.transpose(0, 2)
        Fx = Fx.transpose(2, 3)
        Fx = Fx.contiguous()
        Fx = Fx.view((nbatch * L), (nfeature_in * L), 2)
        # arrange y as [(feature_in, p), (n, feature_out)]
        Fy = Fy.view(L, L, nfeature_in, nfeature_out, 2)
        Fy = Fy.transpose(0, 2)
        Fy = Fy.contiguous()
        Fy = Fy.view((nfeature_in * L), (L * nfeature_out), 2)
        # complex matmul with conjugated y: contracts feature_in and the shared p index
        Fz = complex_mm(Fx, Fy, conj_y=True)
        # back to the l*m*n spectral layout for this degree
        Fz = Fz.view(nbatch, (L * L), nfeature_out, 2)
        Fz = Fz.transpose(0, 1)
        Fz_list.append(Fz)
        begin += size
    # concatenate the degree blocks back into one spectral tensor
    z = torch.cat(Fz_list, 0)
    return z
|
class _cuda_SO3_mm(torch.autograd.Function):
    """Autograd wrapper around the tiled CUDA kernel implementing so3_mm."""
    @staticmethod
    def forward(ctx, x, y):
        """
        :param x: [l * m * n, batch, feature_in, complex]
        :param y: [l * m * n, feature_in, feature_out, complex]
        :return: [l * m * n, batch, feature_out, complex]
        """
        # the kernel is written for float32 only
        assert (x.is_cuda and (x.dtype == torch.float32))
        assert (y.is_cuda and (y.dtype == torch.float32))
        assert (y.size(3) == 2)
        assert (x.size(3) == 2)
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        assert (y.size(1) == nfeature_in)
        nspec = x.size(0)
        assert (y.size(0) == nspec)
        # number of degrees: nspec = sum_{l<nl} (2l+1)^2 = nl(4nl^2-1)/3
        nl = round((((3 / 4) * nspec) ** (1 / 3)))
        assert (nspec == ((nl * ((4 * (nl ** 2)) - 1)) // 3))
        ctx.save_for_backward(x, y)
        device = torch.cuda.current_device()
        # forward: out[lmn, i, j] = sum_{k,p} x[lmp, i, k] * conj(y)[lnp, k, j]
        cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nbatch, nj=nfeature_out, nk=nfeature_in, conj_y=True, trans_y_spec=True, device=device)
        output = x.new_empty((nspec, nbatch, nfeature_out, 2))
        cuda_kernel(x, y, output)
        return output
    @staticmethod
    def backward(ctx, gradz):
        """Compute gradients w.r.t. x and y by re-parameterizing the same kernel."""
        (x, y) = ctx.saved_tensors
        nspec = x.size(0)
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        nl = round((((3 / 4) * nspec) ** (1 / 3)))
        assert (nspec == ((nl * ((4 * (nl ** 2)) - 1)) // 3))
        gradx = grady = None
        device = torch.cuda.current_device()
        if ctx.needs_input_grad[0]:
            # grad_x contracts grad_z with y over feature_out
            gradx_cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nbatch, nj=nfeature_in, nk=nfeature_out, trans_y_feature=True, device=device)
            gradx = gradz.new_empty((nspec, nbatch, nfeature_in, 2))
            gradx_cuda_kernel(gradz, y, gradx)
        if ctx.needs_input_grad[1]:
            # grad_y contracts conjugated grad_z with x over the batch
            grady_cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nfeature_out, nj=nfeature_in, nk=nbatch, trans_out_feature=True, conj_x=True, trans_x_spec=True, trans_x_feature=True, device=device)
            grady = gradz.new_empty((nspec, nfeature_in, nfeature_out, 2))
            grady_cuda_kernel(gradz, x, grady)
        return (gradx, grady)
|
@lru_cache(maxsize=32)
def _setup_so3mm_cuda_kernel(nl, ni, nj, nk, conj_x=False, conj_y=False, trans_x_spec=False, trans_x_feature=False, trans_y_spec=False, trans_y_feature=False, trans_out_feature=False, device=0):
    """
    return a function that computes
    out[l*m*n, i, j] = sum_k sum_p x[l*m*p, i, k] y[l*p*n, k, j]
    where out, x, y are complex valued

    if conj_x is set to True, x is conjugated
    if conj_y is set to True, y is conjugated
    if trans_x_spec is set to True m and p are permuted in x[...]
    if trans_y_spec is set to True p and n are permuted in y[...]
    if trans_x_feature is set to True i and k are permuted in x[...]
    if trans_y_feature is set to True k and j are permuted in y[...]
    if trans_out_feature is set to True i and j are permuted in out[...]
    """
    # bake the (compile-time) sizes into the source
    kernel = '\n#define NI {}\n#define NJ {}\n#define NK {}\n'.format(ni, nj, nk)
    # pick the indexing macros matching the requested transpositions
    if ((not trans_x_spec) and (not trans_x_feature)):
        kernel += '#define INDEX_X (((L0 + m * L + p) * NI + i) * NK + k)\n'
    if ((not trans_x_spec) and trans_x_feature):
        kernel += '#define INDEX_X (((L0 + m * L + p) * NK + k) * NI + i)\n'
    if (trans_x_spec and (not trans_x_feature)):
        kernel += '#define INDEX_X (((L0 + p * L + m) * NI + i) * NK + k)\n'
    if (trans_x_spec and trans_x_feature):
        kernel += '#define INDEX_X (((L0 + p * L + m) * NK + k) * NI + i)\n'
    if ((not trans_y_spec) and (not trans_y_feature)):
        kernel += '#define INDEX_Y (((L0 + p * L + n) * NK + k) * NJ + j)\n'
    if ((not trans_y_spec) and trans_y_feature):
        kernel += '#define INDEX_Y (((L0 + p * L + n) * NJ + j) * NK + k)\n'
    if (trans_y_spec and (not trans_y_feature)):
        kernel += '#define INDEX_Y (((L0 + n * L + p) * NK + k) * NJ + j)\n'
    if (trans_y_spec and trans_y_feature):
        kernel += '#define INDEX_Y (((L0 + n * L + p) * NJ + j) * NK + k)\n'
    if (not trans_out_feature):
        kernel += '#define INDEX_OUT (((L0 + m * L + n) * NI + i) * NJ + j)\n'
    if trans_out_feature:
        kernel += '#define INDEX_OUT (((L0 + m * L + n) * NJ + j) * NI + i)\n'
    # optional conjugation is compiled in as a statement macro
    kernel += '\n#define CONJ_X {}\n#define CONJ_Y {}\n'.format(('x_im = -x_im;' if conj_x else ';'), ('y_im = -y_im;' if conj_y else ';'))
    # tiled complex matrix product: one z-block per degree l, 32x32 shared-memory tiles
    kernel += '\n#define CEIL_DIV(x, y) (((x) + (y) - 1) / (y))\n\nextern "C"\n__global__ void main_(const float* in_x, const float* in_y, float* out)\n{\n    // start of thread independant code\n    int l = blockIdx.z;\n    int L = 2 * l + 1;\n    int L0 = (4 * l*l - 1) * l / 3;\n\n    if (blockIdx.y * 32 >= L * NI || blockIdx.x * 32 >= L * NJ) {\n        return;\n    }\n\n    int ntile = CEIL_DIV(L * NK, 32);\n    // end of thread independant code\n\n    int mi = blockIdx.y * 32 + threadIdx.y;\n    int m = mi / NI;\n    int i = mi % NI;\n    int nj = blockIdx.x * 32 + threadIdx.x;\n    int n = nj / NJ;\n    int j = nj % NJ;\n\n    float sum_re = 0.0;\n    float sum_im = 0.0;\n\n    for (int tile = 0; tile < ntile; ++tile) {\n        __shared__ float tileX[2][32][32];\n        __shared__ float tileY[2][32][32];\n\n        int pk = tile * 32 + threadIdx.x;\n        int p = pk / NK;\n        int k = pk % NK;\n        int index = INDEX_X * 2;\n        tileX[0][threadIdx.y][threadIdx.x] = m < L && p < L ? in_x[index + 0] : 0.0;\n        tileX[1][threadIdx.y][threadIdx.x] = m < L && p < L ? in_x[index + 1] : 0.0;\n\n        pk = tile * 32 + threadIdx.y;\n        p = pk / NK;\n        k = pk % NK;\n        index = INDEX_Y * 2;\n        tileY[0][threadIdx.y][threadIdx.x] = p < L && n < L ? in_y[index + 0] : 0.0;\n        tileY[1][threadIdx.y][threadIdx.x] = p < L && n < L ? in_y[index + 1] : 0.0;\n\n        __syncthreads();\n\n        for (int any = 0; any < 32; ++any) {\n            float x_re = tileX[0][threadIdx.y][any];\n            float x_im = tileX[1][threadIdx.y][any];\n            float y_re = tileY[0][any][threadIdx.x];\n            float y_im = tileY[1][any][threadIdx.x];\n\n            CONJ_X\n            CONJ_Y\n\n            sum_re += x_re * y_re - x_im * y_im;\n            sum_im += x_re * y_im + x_im * y_re;\n        }\n\n        __syncthreads();\n    }\n\n    if (m < L && n < L) {\n        int index = INDEX_OUT * 2;\n        out[index + 0] = sum_re;\n        out[index + 1] = sum_im;\n    }\n}\n'
    import s2cnn.utils.cuda as cuda_utils
    kernel = cuda_utils.compile_kernel(kernel, 'so3_mm.cu', 'main_')
    stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
    def fun(x, y, output):
        # launcher: one 32x32 thread block per output tile, one z-slice per degree l
        assert output.is_contiguous()
        kernel(block=(32, 32, 1), grid=(math.ceil(((((2 * nl) - 1) * nj) / 32)), math.ceil(((((2 * nl) - 1) * ni) / 32)), nl), args=[x.contiguous().data_ptr(), y.contiguous().data_ptr(), output.data_ptr()], stream=stream)
    return fun
|
def test_compare_cuda_cpu():
    """Check that the CUDA and CPU implementations of so3_mm agree (requires a GPU)."""
    # spectral size 1+9+25+49 = sum of (2l+1)^2 for l < 4, i.e. bandwidth 4
    x = torch.rand((((1 + 9) + 25) + 49), 2, 3, 2)  # [l * m * n, batch, feature_in, complex]
    y = torch.rand((((1 + 9) + 25) + 49), 3, 5, 2)  # [l * m * n, feature_in, feature_out, complex]
    z1 = so3_mm(x, y)
    z2 = so3_mm(x.cuda(), y.cuda()).cpu()
    # maximum deviation, normalized by the output's standard deviation
    q = ((z1 - z2).abs().max().item() / z1.std().item())
    print(q)
    assert (q < 0.0001)
|
class S2Convolution(Module):
    """Convolution of a spherical (S2) signal, producing a signal on SO(3)."""
    def __init__(self, nfeature_in, nfeature_out, b_in, b_out, grid):
        """
        :param nfeature_in: number of input features
        :param nfeature_out: number of output features
        :param b_in: input bandwidth (precision of the input SOFT grid)
        :param b_out: output bandwidth
        :param grid: points of the sphere defining the kernel, tuple of (alpha, beta)'s
        """
        super(S2Convolution, self).__init__()
        self.nfeature_in = nfeature_in
        self.nfeature_out = nfeature_out
        self.b_in = b_in
        self.b_out = b_out
        self.grid = grid
        # kernel values at the grid points, uniformly initialized in [-1, 1]
        self.kernel = Parameter(torch.empty(nfeature_in, nfeature_out, len(grid)).uniform_((- 1), 1))
        # normalization of the kernel by grid size, fan-in and bandwidths --
        # presumably chosen to keep the activations' scale stable; TODO confirm derivation
        self.scaling = (1.0 / math.sqrt((((len(self.grid) * self.nfeature_in) * (self.b_out ** 4.0)) / (self.b_in ** 2.0))))
        # per-output-feature bias, broadcast over the SO(3) spatial axes
        self.bias = Parameter(torch.zeros(1, nfeature_out, 1, 1, 1))
    def forward(self, x):
        """
        :x: [batch, feature_in, beta, alpha]
        :return: [batch, feature_out, beta, alpha, gamma]
        """
        assert (x.size(1) == self.nfeature_in)
        assert (x.size(2) == (2 * self.b_in))
        assert (x.size(3) == (2 * self.b_in))
        # convolution theorem: FFT the input, multiply spectra, inverse FFT
        x = S2_fft_real.apply(x, self.b_out)
        y = s2_rft((self.kernel * self.scaling), self.b_out, self.grid)
        z = s2_mm(x, y)
        z = SO3_ifft_real.apply(z)
        z = (z + self.bias)
        return z
|
class SO3Convolution(Module):
    """Convolution of a signal on the rotation group SO(3)."""
    def __init__(self, nfeature_in, nfeature_out, b_in, b_out, grid):
        """
        :param nfeature_in: number of input features
        :param nfeature_out: number of output features
        :param b_in: input bandwidth (precision of the input SOFT grid)
        :param b_out: output bandwidth
        :param grid: points of the SO(3) group defining the kernel, tuple of (alpha, beta, gamma)'s
        """
        super(SO3Convolution, self).__init__()
        self.nfeature_in = nfeature_in
        self.nfeature_out = nfeature_out
        self.b_in = b_in
        self.b_out = b_out
        self.grid = grid
        # kernel values at the grid points, uniformly initialized in [-1, 1]
        self.kernel = Parameter(torch.empty(nfeature_in, nfeature_out, len(grid)).uniform_((- 1), 1))
        # per-output-feature bias, broadcast over the SO(3) spatial axes
        self.bias = Parameter(torch.zeros(1, nfeature_out, 1, 1, 1))
        # normalization of the kernel by grid size, fan-in and bandwidths --
        # presumably chosen to keep the activations' scale stable; TODO confirm derivation
        self.scaling = (1.0 / math.sqrt((((len(self.grid) * self.nfeature_in) * (self.b_out ** 3.0)) / (self.b_in ** 3.0))))
    def forward(self, x):
        """
        :x: [batch, feature_in, beta, alpha, gamma]
        :return: [batch, feature_out, beta, alpha, gamma]
        """
        assert (x.size(1) == self.nfeature_in)
        assert (x.size(2) == (2 * self.b_in))
        assert (x.size(3) == (2 * self.b_in))
        assert (x.size(4) == (2 * self.b_in))
        # convolution theorem: FFT the input, multiply spectra, inverse FFT
        x = SO3_fft_real.apply(x, self.b_out)
        y = so3_rft((self.kernel * self.scaling), self.b_out, self.grid)
        assert (x.size(0) == y.size(0))
        assert (x.size(2) == y.size(1))
        z = so3_mm(x, y)
        assert (z.size(0) == x.size(0))
        assert (z.size(1) == x.size(1))
        assert (z.size(2) == y.size(2))
        z = SO3_ifft_real.apply(z)
        z = (z + self.bias)
        return z
|
class SO3Shortcut(Module):
    """Shortcut connection for SO(3) ResNet blocks.

    Acts as the identity when input and output shapes already match; otherwise
    projects with a single-point SO(3) convolution (the analogue of a 1x1 conv).
    """
    def __init__(self, nfeature_in, nfeature_out, b_in, b_out):
        super(SO3Shortcut, self).__init__()
        assert (b_out <= b_in)
        if ((nfeature_in == nfeature_out) and (b_in == b_out)):
            # nothing to adapt: pass the input through untouched
            self.conv = None
        else:
            # change feature count and/or bandwidth with a kernel supported on a single point
            self.conv = SO3Convolution(nfeature_in=nfeature_in, nfeature_out=nfeature_out, b_in=b_in, b_out=b_out, grid=((0, 0, 0),))

    def forward(self, x):
        """
        :x: [batch, feature_in, beta, alpha, gamma]
        :return: [batch, feature_out, beta, alpha, gamma]
        """
        return x if (self.conv is None) else self.conv(x)
|
def so3_integrate(x):
    """
    Integrate a signal on SO(3) using the Haar measure

    :param x: [..., beta, alpha, gamma] (..., 2b, 2b, 2b)
    :return y: [...] (...)
    """
    # the three trailing spatial axes must form a cube of size 2b
    assert (x.size((- 1)) == x.size((- 2)))
    assert (x.size((- 2)) == x.size((- 3)))
    b = (x.size((- 1)) // 2)
    # quadrature weights over beta (cached per bandwidth and device)
    w = _setup_so3_integrate(b, device_type=x.device.type, device_index=x.device.index)
    # sum over gamma then alpha (uniform weights on those axes)
    # NOTE(review): squeeze(-1) after sum looks redundant -- sum already drops
    # the reduced dimension; verify before relying on it
    x = torch.sum(x, dim=(- 1)).squeeze((- 1))
    x = torch.sum(x, dim=(- 1)).squeeze((- 1))
    sz = x.size()
    # weighted sum over beta via a matrix-vector product
    x = x.view((- 1), (2 * b))
    w = w.view((2 * b), 1)
    x = torch.mm(x, w).squeeze((- 1))
    # restore the leading batch dimensions
    x = x.view(*sz[:(- 1)])
    return x
|
@lru_cache(maxsize=32)
@show_running
def _setup_so3_integrate(b, device_type, device_index):
    """S3 quadrature weights for bandwidth `b`, as a float32 tensor on the given device."""
    import lie_learn.spaces.S3 as S3
    return torch.tensor(S3.quadrature_weights(b), dtype=torch.float32, device=torch.device(device_type, device_index))
|
def so3_rotation(x, alpha, beta, gamma):
    """Rotate a signal on SO(3) by the Euler angles (alpha, beta, gamma).

    :param x: [..., beta, alpha, gamma] (..., 2b, 2b, 2b)
    :return: tensor of the same size as x, rotated
    """
    b = (x.size()[(- 1)] // 2)
    x_size = x.size()
    # Wigner D-matrices of the rotation, one per degree l < b (cached)
    Us = _setup_so3_rotation(b, alpha, beta, gamma, device_type=x.device.type, device_index=x.device.index)
    # rotate in the spectral domain: multiply each degree block by its D-matrix
    x = SO3_fft_real.apply(x)
    Fz_list = []
    begin = 0
    for l in range(b):
        L = ((2 * l) + 1)  # multiplicity of degree l
        size = (L ** 2)
        # degree-l block: [m, n * batch * feature, complex]
        Fx = x[begin:(begin + size)]
        Fx = Fx.view(L, (- 1), 2)
        U = Us[l].view(L, L, 2)
        # conjugated D-matrix acting on the m index
        Fz = complex_mm(U, Fx, conj_x=True)
        Fz = Fz.view(size, (- 1), 2)
        Fz_list.append(Fz)
        begin += size
    Fz = torch.cat(Fz_list, 0)
    # back to the spatial domain, restoring the original shape
    z = SO3_ifft_real.apply(Fz)
    z = z.contiguous()
    z = z.view(*x_size)
    return z
|
@cached_dirpklgz('cache/setup_so3_rotation')
def __setup_so3_rotation(b, alpha, beta, gamma):
    '''
    Compute the Wigner-D matrices for degrees 0..b-1 at the given Euler
    angles, as float32 arrays of shape (2l+1, 2l+1, 2) with the last axis
    holding interleaved (real, imag) parts.
    '''
    from lie_learn.representations.SO3.wigner_d import wigner_D_matrix
    Us = []
    for l in range(b):
        L = 2 * l + 1
        U = wigner_D_matrix(l, alpha, beta, gamma, field='complex', normalization='quantum', order='centered', condon_shortley='cs')
        # complex64 viewed as float32 pairs -> (L, L, 2)
        Us.append(U.astype(np.complex64).view(np.float32).reshape(L, L, 2))
    return Us
|
@lru_cache(maxsize=32)
def _setup_so3_rotation(b, alpha, beta, gamma, device_type, device_index):
    '''Load the disk-cached Wigner-D matrices and move them to the target device.'''
    device = torch.device(device_type, device_index)
    return [torch.tensor(U, dtype=torch.float32, device=device)
            for U in __setup_so3_rotation(b, alpha, beta, gamma)]
|
def get_blocks(n, num_threads):
    '''
    Number of CUDA grid blocks for n elements with num_threads per block,
    capped so the grid never exceeds CUDA_MAX_GRID_DIM (each thread then
    handles several elements).
    '''
    capacity = num_threads * CUDA_MAX_GRID_DIM
    # ceil(n / capacity): elements each thread must process
    n_per_instance = (n + capacity - 1) // capacity
    chunk = num_threads * n_per_instance
    # ceil(n / chunk): resulting grid size
    return (n + chunk - 1) // chunk
|
def compile_kernel(kernel, filename, functioname):
    '''Compile CUDA source text to PTX and return the named device function.'''
    ptx = Program(kernel, filename).compile()
    module = function.Module()
    module.load(bytes(ptx.encode()))
    return module.get_function(functioname)
|
class WaitPrint(threading.Thread):
    '''
    Background thread that prints `message` after roughly `t` seconds
    unless stop() is called first.  Polls every 0.1 s so a stop takes
    effect quickly.
    '''
    def __init__(self, t, message):
        '''
        :param t: delay in seconds before the message is printed
        :param message: text printed (without a trailing newline) after the delay
        '''
        super().__init__()
        self.t = t
        self.message = message
        self.running = True

    def stop(self):
        '''Cancel the pending print; the thread exits at its next 0.1 s tick.'''
        self.running = False

    def run(self):
        # round(t / 0.1) instead of int(t // 0.1): float floor division
        # undercounts (e.g. 2 // 0.1 == 19.0), which fired the message a
        # tick early for exact multiples of 0.1.
        for _ in range(round(self.t / 0.1)):
            time.sleep(0.1)
            if not self.running:
                return
        print(self.message, end='')
|
def show_running(func):
    '''
    Decorator: if the wrapped call takes longer than 2 s, announce it
    (name plus repr'd arguments) and afterwards report the elapsed time.
    '''
    @wraps(func)
    def wrapper(*args, **kargs):
        pieces = [repr(a) for a in args]
        pieces += ['{}={}'.format(key, repr(value)) for key, value in kargs.items()]
        notifier = WaitPrint(2, '{}({})... '.format(func.__name__, ', '.join(pieces)))
        notifier.start()
        started = time.perf_counter()
        result = func(*args, **kargs)
        if notifier.is_alive():
            # Finished before the announcement fired: cancel it silently.
            notifier.stop()
        else:
            print('done in {:.0f} seconds'.format(time.perf_counter() - started))
        return result
    return wrapper
|
def cached_dirpklgz(dirname):
    '''
    Cache a function with a directory.

    Each distinct argument tuple gets its result persisted as a gzipped
    pickle under `dirname`; an `index.pkl` file maps args to file names.
    Results are additionally memoized in memory via lru_cache.
    '''
    def decorator(func):
        '''
        The actual decorator.
        '''
        @lru_cache(maxsize=None)
        @wraps(func)
        def wrapper(*args):
            '''
            The wrapper of the function.
            '''
            try:
                os.makedirs(dirname)
            except FileExistsError:
                pass
            indexfile = os.path.join(dirname, 'index.pkl')
            # Load the args -> filename index; start empty on first use.
            try:
                with open(indexfile, 'rb') as file:
                    index = pickle.load(file)
            except FileNotFoundError:
                index = {}
            if args in index:
                filename = index[args]
            else:
                # New entry: allocate the next sequential file name and
                # persist the updated index immediately.
                filename = '{}.pkl.gz'.format(len(index))
                index[args] = filename
                with open(indexfile, 'wb') as file:
                    pickle.dump(index, file)
            filepath = os.path.join(dirname, filename)
            try:
                with gzip.open(filepath, 'rb') as file:
                    print('load {}... '.format(filename), end='')
                    result = pickle.load(file)
            except FileNotFoundError:
                # Cache miss on disk: compute and store the result.
                print('compute {}... '.format(filename), end='')
                sys.stdout.flush()
                result = func(*args)
                print('save {}... '.format(filename), end='')
                with gzip.open(filepath, 'wb') as file:
                    pickle.dump(result, file)
            print('done')
            return result
        return wrapper
    return decorator
|
def test_so3_rfft(b_in, b_out, device):
    '''Check so3_rfft against the dense so3_rft reference on random input.'''
    from s2cnn.soft.so3_fft import so3_rfft
    from s2cnn import so3_rft, so3_soft_grid
    import lie_learn.spaces.S3 as S3
    x = torch.randn(2 * b_in, 2 * b_in, 2 * b_in, dtype=torch.float, device=device)
    y1 = so3_rfft(x, b_out=b_out)
    # Reference path: apply the quadrature weights over beta, then the
    # dense transform on the sample grid.
    weights = torch.tensor(S3.quadrature_weights(b_in), dtype=torch.float, device=device)
    weighted = torch.einsum('bac,b->bac', (x, weights))
    y2 = so3_rft(weighted.view(-1), b_out, so3_soft_grid(b_in))
    assert (y1 - y2).abs().max().item() < 0.0001 * y1.abs().mean().item()
|
def test_inverse(f, g, b_in, b_out, device, complex):
if complex:
x = torch.randn((2 * b_in), (2 * b_in), (2 * b_in), 2, dtype=torch.float, device=device)
else:
x = torch.randn((2 * b_in), (2 * b_in), (2 * b_in), dtype=torch.float, device=device)
x = g(f(x, b_out=b_out), b_out=b_in)
y = g(f(x, b_out=b_out), b_out=b_in)
assert ((x - y).abs().max().item() < (0.0001 * y.abs().mean().item()))
|
def test_inverse2(f, g, b_in, b_out, device):
x = torch.randn(((b_in * ((4 * (b_in ** 2)) - 1)) // 3), 2, dtype=torch.float, device=device)
x = g(f(x, b_out=b_out), b_out=b_in)
y = g(f(x, b_out=b_out), b_out=b_in)
assert ((x - y).abs().max().item() < (0.0001 * y.abs().mean().item()))
|
def compare_cpu_gpu(f, x):
    '''Assert that f produces (numerically) the same result on CPU and CUDA.'''
    cpu_out = f(x.cpu())
    gpu_out = f(x.cuda()).cpu()
    relative_error = (cpu_out - gpu_out).abs().max().item() / cpu_out.std().item()
    assert relative_error < 0.0001
|
def get_filenames_of_path(path: pathlib.Path, ext: str='*'):
    '''Return the files (not directories) in `path` matching the glob `ext`. Uses pathlib.'''
    return [entry for entry in path.glob(ext) if entry.is_file()]
|
def compute_max_depth(shape, max_depth=10, print_out=True):
    '''
    Return [shape, shape/2, shape/4, ...] for as long as `shape` is
    divisible by the next power of two and the quotient exceeds 1,
    stopping after at most `max_depth` levels.  Level 0 stays an int;
    deeper levels are floats (true division).
    '''
    shapes = [shape]
    for level in range(1, max_depth):
        divisor = 2 ** level
        if shape % divisor == 0 and shape / divisor > 1:
            shapes.append(shape / divisor)
            if print_out:
                print(f'Level {level}: {shape / divisor}')
        else:
            if print_out:
                print(f'Max-level: {level - 1}')
            break
    return shapes
|
def compute_possible_shapes(low, high, depth):
    '''
    Map each size in [low, high] to its downsampling chain, keeping only
    sizes whose chain reaches exactly `depth` levels.
    '''
    possible = {}
    for size in range(low, high + 1):
        chain = compute_max_depth(size, max_depth=depth, print_out=False)
        if len(chain) != depth:
            continue
        possible[size] = chain
    return possible
|
def get_filenames_of_path(path: pathlib.Path, ext: str='*'):
    '''Return a list of files in a directory/path. Uses pathlib.'''
    # NOTE(review): identical copies of this helper appear elsewhere in this file.
    return list(filter(lambda entry: entry.is_file(), path.glob(ext)))
|
def get_filenames_of_path(path: pathlib.Path, ext: str='*'):
    '''Return a list of files in a directory/path. Uses pathlib.'''
    # NOTE(review): identical copies of this helper appear elsewhere in this file.
    files = []
    for candidate in path.glob(ext):
        if candidate.is_file():
            files.append(candidate)
    return files
|
def preprocess(img: np.ndarray):
    '''Turn a channels-last image into a normalized float32 batch of one.'''
    img = np.moveaxis(img, -1, 0)  # channels-last -> channels-first
    img = normalize_01(img)  # presumably scales into [0, 1] — see normalize_01
    img = np.expand_dims(img, axis=0)  # add a leading batch dimension
    return img.astype(np.float32)
|
def postprocess(img: torch.tensor):
    '''Reduce per-class scores to a label image and rescale it for display.'''
    labels = torch.argmax(img, dim=1)  # class index per position along dim 1
    labels = np.squeeze(labels.cpu().numpy())  # drop singleton dims (e.g. batch)
    return re_normalize(labels)  # rescale values — see re_normalize
|
def get_filenames_of_path(path: pathlib.Path, ext: str='*'):
    '''Return a list of files in a directory/path. Uses pathlib.'''
    # NOTE(review): identical copies of this helper appear elsewhere in this file.
    matches = path.glob(ext)
    return [match for match in matches if match.is_file()]
|
def get_filenames_of_path(path: pathlib.Path, ext: str='*'):
    '''Return a list of files in a directory/path. Uses pathlib.'''
    # NOTE(review): identical copies of this helper appear elsewhere in this file.
    result = []
    for item in path.glob(ext):
        if not item.is_file():
            continue
        result.append(item)
    return result
|
class DatasetViewerExtra(DatasetViewer):
    '''DatasetViewer variant that updates existing viewer layers in place
    instead of always creating new ones.'''
    def show_sample(self):
        '''Display the sample at self.index, reusing layers when they exist.'''
        sample = self.get_sample_dataset(self.index)
        x = self.transform_x(sample['x'])
        y = self.transform_y(sample['y'])
        x_name = sample['x_name']
        y_name = sample['y_name']
        # Create each layer on first use; afterwards update it in place.
        if self.image_layer in self.viewer.layers:
            self.update_image_layer(self.image_layer, x, x_name)
        else:
            self.image_layer = self.create_image_layer(x, x_name)
        if self.label_layer in self.viewer.layers:
            self.update_label_layer(self.label_layer, y, y_name)
        else:
            self.label_layer = self.create_label_layer(y, y_name)
        self.viewer.reset_view()
|
def get_k_highest_values(scores, k):
    '''Return the indices of the k largest entries of `scores` (order not guaranteed).'''
    arr = np.array(scores)
    return np.argpartition(arr, -k)[-k:]
|
def get_k_lowest_values(scores, k):
    '''Return the indices of the k smallest entries of `scores` (order not guaranteed).'''
    arr = np.array(scores)
    return np.argpartition(arr, k)[:k]
|
def log_k_worst_best_scores(metric_obj, k):
    '''
    Log the k lowest- and k highest-scoring samples of the last epoch as
    two tables, and return them as (df_lowest, df_highest).
    '''
    import itertools
    scores = metric_obj.get_metrics_epoch(last=True, transpose=False).numpy()
    # Flatten the per-batch name lists into one flat array aligned with scores.
    names = np.array(list(itertools.chain.from_iterable(metric_obj.last_names)))
    lowest_idx = get_k_lowest_values(scores, k=k)
    highest_idx = get_k_highest_values(scores, k=k)
    df_lowest = pd.DataFrame({f'{metric_obj}': scores[lowest_idx], 'name': names[lowest_idx]})
    df_highest = pd.DataFrame({f'{metric_obj}': scores[highest_idx], 'name': names[highest_idx]})
    log_table(name=f'{metric_obj}-lowest', table=df_lowest, experiment=neptune_logger.experiment)
    log_table(name=f'{metric_obj}-highest', table=df_highest, experiment=neptune_logger.experiment)
    return (df_lowest, df_highest)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.