code stringlengths 17 6.64M |
|---|
class ELMClassifier(ELMRegressor):
    """ELMClassifier is a classifier based on the Extreme Learning Machine.

    An Extreme Learning Machine (ELM) is a single layer feedforward
    network with a random hidden layer components and ordinary linear
    least squares fitting of the hidden->output weights by default.
    [1][2]

    ELMClassifier is an ELMRegressor subclass that first binarizes the
    data, then uses the superclass to compute the decision function that
    is then unbinarized to yield the prediction.

    The params for the RandomLayer used in the input transform are
    exposed in the ELMClassifier constructor.

    Parameters
    ----------
    `n_hidden` : int, optional (default=20)
        Number of units to generate in the SimpleRandomLayer

    `activation_func` : {callable, string} optional (default='tanh')
        Function used to transform input activation.
        It must be one of 'tanh', 'sine', 'tribas', 'inv_tribase',
        'sigmoid', 'hardlim', 'softlim', 'gaussian', 'multiquadric',
        'inv_multiquadric', 'reclinear' or a callable. If none is given,
        'tanh' will be used. If a callable is given, it will be used to
        compute the hidden unit activations.

    `activation_args` : dictionary, optional (default=None)
        Supplies keyword arguments for a callable activation_func

    `binarizer` : LabelBinarizer instance or None, optional (default=None)
        Used to transform the labels into the +/-1 encoding the regressor
        fits against; None creates a fresh LabelBinarizer(-1, 1)

    `random_state` : int, RandomState instance or None (default=None)
        Control the pseudo random number generator used to generate the
        hidden unit weights at fit time.

    Attributes
    ----------
    `classes_` : numpy array of shape [n_classes]
        Array of class labels

    See Also
    --------
    RandomLayer, RBFRandomLayer, MLPRandomLayer,
    GenELMRegressor, GenELMClassifier, ELMClassifier

    References
    ----------
    .. [1] http://www.extreme-learning-machines.org
    .. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning Machine:
           Theory and Applications", Neurocomputing, vol. 70, pp. 489-501,
           2006.
    """

    def __init__(self, n_hidden=20, alpha=0.5, rbf_width=1.0,
                 activation_func='tanh', activation_args=None,
                 user_components=None, regressor=None,
                 binarizer=None, random_state=None):
        super(ELMClassifier, self).__init__(n_hidden=n_hidden, alpha=alpha,
                                            random_state=random_state,
                                            activation_func=activation_func,
                                            activation_args=activation_args,
                                            user_components=user_components,
                                            rbf_width=rbf_width,
                                            regressor=regressor)
        self.classes_ = None
        # bug fix: a LabelBinarizer instance used directly as the parameter
        # default was shared by every ELMClassifier instance -- fitting one
        # classifier silently refit the binarizer of all the others.  Build a
        # fresh binarizer per instance instead.
        self.binarizer = LabelBinarizer(-1, 1) if binarizer is None else binarizer

    def decision_function(self, X):
        """Return the decision function values related to each class for X.

        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]

        Returns
        -------
        C : array of shape [n_samples, n_classes] or [n_samples,]
            Decision function values related to each class, per sample.
            In the two-class case, the shape is [n_samples,]
        """
        return super(ELMClassifier, self).predict(X)

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like of shape [n_samples, n_outputs]
            Target values (class labels in classification, real numbers in
            regression)

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        self.classes_ = np.unique(y)
        # binarize the labels, then fit the regressor on the binarized targets
        y_bin = self.binarizer.fit_transform(y)
        super(ELMClassifier, self).fit(X, y_bin)
        return self

    def predict(self, X):
        """Predict values using the model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]

        Returns
        -------
        C : numpy array of shape [n_samples, n_outputs]
            Predicted values.
        """
        raw_predictions = self.decision_function(X)
        # map the regression outputs back to class labels
        class_predictions = self.binarizer.inverse_transform(raw_predictions)
        return class_predictions

    def score(self, X, y):
        """Force use of accuracy score since we don't inherit
        from ClassifierMixin"""
        from sklearn.metrics import accuracy_score
        return accuracy_score(y, self.predict(X))
|
def eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id=0):
    """Compute the criterion loss of the baseline MLP on one mini-batch."""
    feats = data['features']
    one_hot = Variable(to_one_hot(feats['atom_types'][batch_idxs, ...], NUM_ATOM_TYPES))
    labels = Variable(data['targets'][batch_idxs, ...])
    if torch.cuda.is_available():
        one_hot = one_hot.cuda(device_id)
        labels = labels.cuda(device_id)
    return criterion(mlp(one_hot), labels)
|
def eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id=0):
    """Compute the criterion loss of the mlp + residual s2cnn on one batch."""
    feats = data['features']
    geometry = feats['geometry'][batch_idxs, ...]
    atom_types = feats['atom_types'][batch_idxs, ...]
    one_hot = to_one_hot(atom_types, NUM_ATOM_TYPES)
    targets = data['targets'][batch_idxs, ...]
    geometry = Variable(geometry)
    atom_types = Variable(atom_types)
    one_hot = Variable(one_hot)
    targets = Variable(targets)
    if torch.cuda.is_available():
        one_hot = one_hot.cuda(device_id)
        geometry = geometry.cuda(device_id)
        atom_types = atom_types.cuda(device_id)
        targets = targets.cuda(device_id)
    # baseline prediction plus the spherical-CNN residual correction
    predictions = mlp(one_hot) + s2cnn(geometry, atom_types)
    return criterion(predictions, targets)
|
def train_baseline(mlp, data, train_batches, test_batches, num_epochs, learning_rate_mlp, device_id=0):
    """Train the baseline MLP and report per-epoch RMSE.

    Returns the (train RMSE, test RMSE) of the final epoch.
    """
    optim = OPTIMIZER(mlp.parameters(), lr=learning_rate_mlp)
    criterion = nn.MSELoss()
    if torch.cuda.is_available():
        criterion = criterion.cuda(device_id)
    for epoch in range(num_epochs):
        train_losses = []
        print('training')
        for (iteration, batch_idxs) in enumerate(train_batches):
            mlp.train()
            optim.zero_grad()
            loss = eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id)
            loss.backward()
            optim.step()
            train_losses.append(loss.item())
            print('\riteration {}/{}'.format((iteration + 1), train_batches.num_iterations()), end='')
        print()
        test_losses = []
        print('evaluating')
        for (iteration, batch_idxs) in enumerate(test_batches):
            mlp.eval()
            # bug fix: device_id was previously dropped here, so evaluation
            # always ran on device 0 regardless of the requested device;
            # no_grad avoids building a graph for the forward-only pass
            with torch.no_grad():
                loss = eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id)
            test_losses.append(loss.item())
            print('\riteration {}/{}'.format((iteration + 1), test_batches.num_iterations()), end='')
        print()
        # the accumulated values are MSEs; sqrt of the mean gives RMSE
        train_loss = np.sqrt(np.mean(train_losses))
        test_loss = np.sqrt(np.mean(test_losses))
        print('epoch {}/{} - avg train loss: {}, test loss: {}'.format((epoch + 1), num_epochs, train_loss, test_loss))
    return (train_loss, test_loss)
|
def train_s2cnn(mlp, s2cnn, data, train_batches, test_batches, num_epochs, init_learning_rate_s2cnn, learning_rate_decay_epochs, device_id=0):
    """Train the s2cnn residual model while keeping the baseline MLP frozen.

    Returns the (train RMSE, test RMSE) of the final epoch.
    """
    optim = OPTIMIZER(s2cnn.parameters(), lr=init_learning_rate_s2cnn)
    criterion = nn.MSELoss()
    if torch.cuda.is_available():
        criterion = criterion.cuda(device_id)
    for epoch in range(num_epochs):
        # step-decay the learning rate every learning_rate_decay_epochs epochs
        optim = exp_lr_scheduler(optim, epoch, init_lr=init_learning_rate_s2cnn, lr_decay_epoch=learning_rate_decay_epochs)
        train_losses = []
        print('training')
        for (iteration, batch_idxs) in enumerate(train_batches):
            s2cnn.train()
            mlp.eval()
            optim.zero_grad()
            # bug fix: device_id was previously dropped here, so the batch
            # was always evaluated on device 0 regardless of the request
            loss = eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id)
            loss.backward()
            optim.step()
            train_losses.append(loss.item())
            print('\riteration {}/{} - batch loss: {}'.format((iteration + 1), train_batches.num_iterations(), np.sqrt(train_losses[(- 1)])), end='')
        print()
        test_losses = []
        print('evaluating')
        for (iteration, batch_idxs) in enumerate(test_batches):
            s2cnn.eval()
            mlp.eval()
            # bug fix: pass device_id through; no_grad for the eval pass
            with torch.no_grad():
                loss = eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id)
            test_losses.append(loss.item())
            print('\riteration {}/{} - batch loss: {}'.format((iteration + 1), test_batches.num_iterations(), np.sqrt(test_losses[(- 1)])), end='')
        print()
        # the accumulated values are MSEs; sqrt of the mean gives RMSE
        train_loss = np.sqrt(np.mean(train_losses))
        test_loss = np.sqrt(np.mean(test_losses))
        print('epoch {}/{} - avg train loss: {}, test loss: {}'.format((epoch + 1), num_epochs, train_loss, test_loss))
    return (train_loss, test_loss)
|
def main():
    """Parse CLI arguments, train the baseline MLP, then the residual s2cnn."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, default='data.joblib')
    parser.add_argument('--test_strat', type=int, default=0)
    parser.add_argument('--device_id', type=int, default=0)
    parser.add_argument('--num_epochs_s2cnn', type=int, default=30)
    parser.add_argument('--num_epochs_mlp', type=int, default=30)
    parser.add_argument('--batch_size_s2cnn', type=int, default=32)
    parser.add_argument('--batch_size_mlp', type=int, default=32)
    # bug fix: both learning-rate options were declared type=int, which
    # truncated any user-supplied rate to 0 (the defaults were unaffected
    # because argparse does not apply `type` to default values)
    parser.add_argument('--init_learning_rate_s2cnn', type=float, default=0.001)
    parser.add_argument('--learning_rate_mlp', type=float, default=0.001)
    parser.add_argument('--learning_rate_decay_epochs', type=int, default=10)
    args = parser.parse_args()
    torch.cuda.set_device(args.device_id)
    print('evaluating on {}'.format(args.test_strat))
    print('loading data...', end='')
    (data, train_idxs, test_idxs) = load_data(args.data_path, args.test_strat, cuda=args.device_id)
    print('done!')
    mlp = BaselineRegressor()
    s2cnn = S2CNNRegressor()
    if torch.cuda.is_available():
        for model in [mlp, s2cnn]:
            model.cuda(args.device_id)
    print('training baseline model')
    print('mlp #params: {}'.format(count_params(mlp)))
    train_baseline(mlp, data, IndexBatcher(train_idxs, args.batch_size_mlp, cuda=args.device_id), IndexBatcher(test_idxs, args.batch_size_mlp, cuda=args.device_id), args.num_epochs_mlp, args.learning_rate_mlp, args.device_id)
    print('training residual s2cnn model')
    print('s2cnn #params: {}'.format(count_params(s2cnn)))
    train_s2cnn(mlp, s2cnn, data, IndexBatcher(train_idxs, args.batch_size_s2cnn, cuda=args.device_id), IndexBatcher(test_idxs, args.batch_size_s2cnn, cuda=args.device_id), args.num_epochs_s2cnn, args.init_learning_rate_s2cnn, args.learning_rate_decay_epochs, args.device_id)
|
class S2Block(nn.Module):
    """Simple S2 convolution block: convolution -> batch-norm -> nonlinearity."""

    def __init__(self, b_in, b_out, f_in, f_out):
        """b_in/b_out: bandwidth of input/output signals
        f_in/f_out: filters in input/output signals"""
        super(S2Block, self).__init__()
        self.grid_s2 = s2_near_identity_grid(n_alpha=2 * b_in, n_beta=2)
        self.cnn = S2Convolution(nfeature_in=f_in, nfeature_out=f_out,
                                 b_in=b_in, b_out=b_out, grid=self.grid_s2)
        self.bn = nn.BatchNorm3d(f_out, affine=AFFINE)

    def forward(self, x):
        return nonlinearity(self.bn(self.cnn(x)))
|
class So3Block(nn.Module):
    """Simple SO3 convolution block: convolution -> batch-norm -> nonlinearity."""

    def __init__(self, b_in, b_out, f_in, f_out):
        """b_in/b_out: bandwidth of input/output signals
        f_in/f_out: filters in input/output signals"""
        super(So3Block, self).__init__()
        self.grid_so3 = so3_near_identity_grid(n_alpha=2 * b_in, n_beta=2, n_gamma=2)
        self.cnn = SO3Convolution(nfeature_in=f_in, nfeature_out=f_out,
                                  b_in=b_in, b_out=b_out, grid=self.grid_so3)
        self.bn = nn.BatchNorm3d(f_out, affine=AFFINE)

    def forward(self, x):
        return nonlinearity(self.bn(self.cnn(x)))
|
class DeepSet(nn.Module):
    """Deep-set block: per-object encoder, masked sum pooling, decoder MLP."""

    def __init__(self, f, h1, h_latent, h2, n_objs):
        """f: input filters
        h1, h2: hidden units for encoder/decoder mlps
        h_latent: latent dimensions
        n_objs: number of objects to aggregate in latent space"""
        super(DeepSet, self).__init__()
        self.f = f
        self.h1 = h1
        # bug fix: this value was stored as `self.h3`, leaving no `h2`
        # attribute and a misleadingly named one in its place
        self.h2 = h2
        self.h_latent = h_latent
        self.n_objs = n_objs
        self.emb_h = nn.Linear(f, h1)
        self.emb_rep = nn.Linear(h1, h_latent)
        self.proj_h = nn.Linear(h_latent, h2)
        self.proj = nn.Linear(h2, 1)
        self.bn1 = nn.BatchNorm1d(h1, affine=AFFINE)
        self.bn2 = nn.BatchNorm1d(h_latent, affine=AFFINE)
        self.bn3 = nn.BatchNorm1d(h2, affine=AFFINE)

    def forward(self, x, mask):
        """Encode each object, pool the masked latent codes, decode a scalar.

        x: [(batch * n_objs), f] per-object features
        mask: [batch, n_objs, 1] multiplicative mask applied before pooling
        """
        x = self.emb_h(x)
        x = self.bn1(x)
        x = nonlinearity(x)
        x = self.emb_rep(x)
        x = self.bn2(x)
        x = nonlinearity(x)
        (n, h_latent) = x.size()
        # regroup the flat object axis into [batch, n_objs, h_latent]
        x = x.view((n // self.n_objs), self.n_objs, h_latent)
        # permutation-invariant aggregation: masked sum over the object axis
        x = torch.sum((x * mask), dim=1)
        x = self.proj_h(x)
        x = self.bn3(x)
        x = nonlinearity(x)
        x = self.proj(x)
        return x
|
class S2CNNRegressor(nn.Module):
    """Approximate energy using spherical representations."""

    def __init__(self):
        super(S2CNNRegressor, self).__init__()
        n_objs = 23
        self.blocks = [
            S2Block(b_in=10, f_in=5, b_out=8, f_out=8),
            So3Block(b_in=8, b_out=6, f_in=8, f_out=16),
            So3Block(b_in=6, b_out=4, f_in=16, f_out=32),
            So3Block(b_in=4, b_out=2, f_in=32, f_out=64),
        ]
        # a plain Python list is invisible to nn.Module, so every block is
        # also registered under its own attribute to expose its parameters
        # to .parameters() and the optimizer
        for idx, block in enumerate(self.blocks):
            setattr(self, 'block{0}'.format(idx), block)
        self.ds = DeepSet(64, 256, 64, 512, n_objs)

    def forward(self, x, atom_types):
        """x: [batch, atoms, features, 2b, 2b] spherical signal per atom."""
        (n_batch, n_atoms, n_features, bandwidth, _) = x.size()
        # assumes type 0 marks padded atom slots -- mask them out of the
        # deep-set aggregation (TODO confirm against the data pipeline)
        mask = (atom_types > 0).view(n_batch, n_atoms, 1).float()
        signals = x.view(n_batch * n_atoms, n_features, bandwidth, bandwidth)
        for block in self.blocks:
            signals = block(signals)
        pooled = so3_integrate(signals)
        return self.ds(pooled, mask)
|
class IndexBatcher():
    """Iterate over mini-batches of dataset indices in shuffled order.

    indices: array of dataset indices to draw from
    n_batch: mini-batch size
    cuda: optional device id; when given, batches are moved to that GPU
    """

    def __init__(self, indices, n_batch, cuda=None):
        self.indices = indices.astype(np.int64)
        self.n_batch = n_batch
        self.pos = 0
        self.cuda = cuda
        # shuffle positions into self.indices rather than the data itself
        self.internal_indices = np.arange(len(indices)).astype(np.int64)
        np.random.shuffle(self.internal_indices)

    def __iter__(self):
        return self

    def reset(self):
        """Rewind to the start and reshuffle for the next pass."""
        self.pos = 0
        np.random.shuffle(self.internal_indices)

    def __next__(self):
        # bug fix: the end-of-pass check previously ran *after* advancing
        # the cursor, which discarded the final batch of every pass (even a
        # full one when len(indices) was divisible by n_batch)
        if self.pos >= len(self.indices):
            self.reset()
            raise StopIteration
        start = self.pos
        end = min(start + self.n_batch, len(self.indices))
        self.pos = end
        batch = torch.LongTensor(self.indices[self.internal_indices[start:end]])
        if self.cuda is not None:
            # bug fix: Tensor.cuda returns a copy, so the result must be
            # kept (the previous call was a silent no-op)
            batch = batch.cuda(self.cuda)
        return batch

    def num_iterations(self):
        # ceiling division: the final (possibly partial) batch is yielded
        return (len(self.indices) + self.n_batch - 1) // self.n_batch
    next = __next__
|
def to_one_hot(x, n):
    """Expand an integer tensor of shape [a, b] into a one-hot float tensor
    of shape [a, b, n]."""
    target_shape = (*x.size(), n)
    encoded = torch.FloatTensor(*target_shape).zero_()
    encoded.scatter_(2, x.unsqueeze(2), 1)
    return encoded
|
def load_data(path, test_strat_id=None, cuda=None):
    """
    Loads the data

    path: path to the molecule .gz
    test_strat_id: id of the strat used as test set; None picks one at random
    cuda: optional device id to move the tensors to
    """
    data = joblib.load(path)
    # remap the raw atom type codes onto a dense range [0, n_unique_types)
    type_remap = (- np.ones((int(data['features']['atom_types'].max()) + 1)))
    unique_types = np.unique(data['features']['atom_types']).astype(int)
    type_remap[unique_types] = np.arange(len(unique_types))
    data['features']['atom_types'] = type_remap[data['features']['atom_types'].astype(int)]
    data['features']['geometry'] = torch.FloatTensor(data['features']['geometry'].astype(np.float32))
    data['features']['atom_types'] = torch.LongTensor(data['features']['atom_types'].astype(np.int64))
    data['targets'] = torch.from_numpy(data['targets'])
    if (cuda is not None):
        # bug fix: Tensor.cuda returns a copy, so the results must be
        # assigned back (the previous calls were silent no-ops that left
        # everything on the CPU)
        data['features']['geometry'] = data['features']['geometry'].cuda(cuda)
        data['features']['atom_types'] = data['features']['atom_types'].cuda(cuda)
        data['targets'] = data['targets'].cuda(cuda)
    train = np.ndarray(0)
    test = np.ndarray(0)
    # bug fix: `not test_strat_id` also matched the legitimate strat id 0
    # and replaced it with a random one; only None means "pick at random"
    if (test_strat_id is None):
        test_strat_id = np.random.randint(len(data['strats']))
    for i in range(len(data['strats'])):
        if (i != test_strat_id):
            train = np.concatenate((train, data['strats'][i]))
        else:
            test = np.concatenate((test, data['strats'][i]))
    return (data, train, test)
|
def exp_lr_scheduler(optimizer, epoch, init_lr=0.005, lr_decay_epoch=40):
    """Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs."""
    decay = 0.1 ** (epoch // lr_decay_epoch)
    lr = init_lr * decay
    # announce the new rate once per decay step
    if epoch % lr_decay_epoch == 0:
        print('LR is set to {}'.format(lr))
    for group in optimizer.param_groups:
        group['lr'] = lr
    return optimizer
|
def count_params(model):
    """Total number of elements across all trainable parameters of *model*."""
    return sum([np.prod(param.size()) for param in model.parameters() if param.requires_grad])
|
class Model(nn.Module):
    """S2CNN classifier: S2 lifting conv, SO3 convs, SO(3) integration,
    linear head, log-softmax output."""

    def __init__(self, nclasses):
        super().__init__()
        self.features = [6, 100, 100, nclasses]
        self.bandwidths = [64, 16, 10]
        assert len(self.bandwidths) == len(self.features) - 1
        # lifting layer: signal on S2 -> signal on SO3
        grid = s2_equatorial_grid(max_beta=0, n_alpha=2 * self.bandwidths[0], n_beta=1)
        layers = [S2Convolution(self.features[0], self.features[1], self.bandwidths[0], self.bandwidths[1], grid)]
        # intermediate SO3 -> SO3 stages (one per remaining bandwidth step)
        for f_in, f_out, b_in, b_out in zip(self.features[1:-2], self.features[2:-1],
                                            self.bandwidths[1:-1], self.bandwidths[2:]):
            layers.append(nn.BatchNorm3d(f_in, affine=True))
            layers.append(nn.ReLU())
            grid = so3_equatorial_grid(max_beta=0, max_gamma=0, n_alpha=2 * b_in, n_beta=1, n_gamma=1)
            layers.append(SO3Convolution(f_in, f_out, b_in, b_out, grid))
        layers.append(nn.BatchNorm3d(self.features[-2], affine=True))
        layers.append(nn.ReLU())
        self.sequential = nn.Sequential(*layers)
        self.out_layer = nn.Linear(self.features[-2], self.features[-1])

    def forward(self, x):
        """x: batch of spherical signals; returns per-class log-probabilities."""
        x = self.sequential(x)
        # rotation-invariant pooling: integrate the signal over SO(3)
        x = so3_integrate(x)
        return F.log_softmax(self.out_layer(x), dim=1)
|
class Model(nn.Module):
    """S2CNN classifier: S2 lifting conv, SO3 convs, spatial max pooling,
    batch-normalized linear head, log-softmax output."""

    def __init__(self, nclasses):
        super().__init__()
        self.features = [6, 50, 70, 350, nclasses]
        self.bandwidths = [128, 32, 22, 7]
        assert len(self.bandwidths) == len(self.features) - 1
        # lifting layer: signal on S2 -> signal on SO3
        grid = s2_equatorial_grid(max_beta=0, n_alpha=2 * self.bandwidths[0], n_beta=1)
        layers = [S2Convolution(self.features[0], self.features[1], self.bandwidths[0], self.bandwidths[1], grid)]
        # intermediate SO3 -> SO3 stages (one per remaining bandwidth step)
        for f_in, f_out, b_in, b_out in zip(self.features[1:-2], self.features[2:-1],
                                            self.bandwidths[1:-1], self.bandwidths[2:]):
            layers.append(nn.BatchNorm3d(f_in, affine=True))
            layers.append(nn.ReLU())
            grid = so3_equatorial_grid(max_beta=0, max_gamma=0, n_alpha=2 * b_in, n_beta=1, n_gamma=1)
            layers.append(SO3Convolution(f_in, f_out, b_in, b_out, grid))
        layers.append(nn.BatchNorm3d(self.features[-2], affine=True))
        layers.append(nn.ReLU())
        self.sequential = nn.Sequential(*layers)
        self.out_layer = nn.Sequential(nn.BatchNorm1d(self.features[-2], affine=False),
                                       nn.Linear(self.features[-2], self.features[-1]))

    def forward(self, x):
        """x: batch of spherical signals; returns per-class log-probabilities."""
        x = self.sequential(x)
        # rotation-invariant pooling: max over all SO(3) sample points
        x = x.view(x.size(0), x.size(1), -1).max(-1)[0]
        return F.log_softmax(self.out_layer(x), dim=1)
|
class KeepName():
    """Wrap a transform so its output is paired with the input file name."""

    def __init__(self, transform):
        self.transform = transform

    def __call__(self, file_name):
        transformed = self.transform(file_name)
        return (file_name, transformed)
|
def main(log_dir, augmentation, dataset, batch_size, num_workers):
    """Run a trained SHREC17 model over the perturbed dataset, write one
    retrieval file per shape into `<log_dir>/<dataset>_perturbed`, then score
    the results with the official node.js evaluator.

    log_dir: directory holding the model.py and state.pkl saved by training
    augmentation: number of augmented projections per shape (predictions are
        summed over them)
    dataset: dataset split name (e.g. 'test')
    batch_size: mini-batch size for inference
    num_workers: DataLoader worker processes
    """
    # fails early (and prints the version) if node.js is missing; it is
    # required by the evaluator invoked at the end
    print(check_output(['nodejs', '--version']).decode('utf-8'))
    torch.backends.cudnn.benchmark = True
    # project each mesh onto the sphere `augmentation` times and stack the
    # projections; CacheNPY memoizes the expensive projection on disk
    transform = torchvision.transforms.Compose([CacheNPY(prefix='b64_', repeat=augmentation, pick_randomly=False, transform=torchvision.transforms.Compose([ToMesh(random_rotations=True, random_translation=0.1), ProjectOnSphere(bandwidth=64)])), (lambda xs: torch.stack([torch.FloatTensor(x) for x in xs]))])
    # pair every transformed sample with its source file name (needed for ids)
    transform = KeepName(transform)
    test_set = Shrec17('data', dataset, perturbed=True, download=True, transform=transform)
    # import the Model class from the copy of model.py archived at train time
    loader = importlib.machinery.SourceFileLoader('model', os.path.join(log_dir, 'model.py'))
    mod = types.ModuleType(loader.name)
    loader.exec_module(mod)
    model = mod.Model(55)
    model.cuda()
    model.load_state_dict(torch.load(os.path.join(log_dir, 'state.pkl')))
    # start from an empty results directory
    resdir = os.path.join(log_dir, (dataset + '_perturbed'))
    if os.path.isdir(resdir):
        shutil.rmtree(resdir)
    os.mkdir(resdir)
    predictions = []
    ids = []
    loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True, drop_last=False)
    for (batch_idx, data) in enumerate(loader):
        model.eval()
        if (dataset != 'test'):
            # non-test splits also carry a target; keep only the sample part
            data = data[0]
        (file_names, data) = data
        # data: [batch, repetitions, ...] -- fold the augmentations into the
        # batch axis for a single forward pass
        (batch_size, rep) = data.size()[:2]
        data = data.view((- 1), *data.size()[2:])
        data = data.cuda()
        with torch.no_grad():
            pred = model(data).data
        # sum the predictions over the augmented repetitions of each shape
        pred = pred.view(batch_size, rep, (- 1))
        pred = pred.sum(1)
        predictions.append(pred.cpu().numpy())
        # shape id = file name without directory and extension
        ids.extend([x.split('/')[(- 1)].split('.')[0] for x in file_names])
        print('[{}/{}] '.format(batch_idx, len(loader)))
    predictions = np.concatenate(predictions)
    predictions_class = np.argmax(predictions, axis=1)
    # write one retrieval file per shape: every shape predicted to share its
    # class, ordered by descending prediction score
    for i in range(len(ids)):
        if ((i % 100) == 0):
            print('{}/{} '.format(i, len(ids)), end='\r')
        idfile = os.path.join(resdir, ids[i])
        retrieved = [(predictions[(j, predictions_class[j])], ids[j]) for j in range(len(ids)) if (predictions_class[j] == predictions_class[i])]
        retrieved = sorted(retrieved, reverse=True)
        retrieved = [i for (_, i) in retrieved]
        with open(idfile, 'w') as f:
            f.write('\n'.join(retrieved))
    # download and unpack the official SHREC17 evaluator
    url = 'https://shapenet.cs.stanford.edu/shrec17/code/evaluator.zip'
    file_path = 'evaluator.zip'
    r = requests.get(url, stream=True)
    with open(file_path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=(16 * (1024 ** 2))):
            if chunk:
                f.write(chunk)
                f.flush()
    zip_ref = zipfile.ZipFile(file_path, 'r')
    zip_ref.extractall('.')
    zip_ref.close()
    # run the evaluator on the result directory and keep the summary csv
    print(check_output(['nodejs', 'evaluate.js', (os.path.join('..', log_dir) + '/')], cwd='evaluator').decode('utf-8'))
    shutil.copy2(os.path.join('evaluator', (log_dir + '.summary.csv')), os.path.join(log_dir, 'summary.csv'))
|
def main(log_dir, model_path, augmentation, dataset, batch_size, learning_rate, num_workers):
    """Train a SHREC17 classifier for 300 epochs with a stepped SGD schedule,
    logging to `<log_dir>/log.txt` and checkpointing to `<log_dir>/state.pkl`.

    log_dir: fresh output directory (must not already exist)
    model_path: path to the model.py file defining `Model`
    augmentation: number of cached augmented projections per shape
    dataset: dataset split name used for training
    batch_size / learning_rate / num_workers: usual training knobs
    """
    # snapshot the call arguments before locals() gains more names
    arguments = copy.deepcopy(locals())
    # intentionally fails if log_dir exists: a run never overwrites another
    os.mkdir(log_dir)
    # archive this script and the model definition alongside the results
    shutil.copy2(__file__, os.path.join(log_dir, 'script.py'))
    shutil.copy2(model_path, os.path.join(log_dir, 'model.py'))
    # log to both the console and <log_dir>/log.txt
    logger = logging.getLogger('train')
    logger.setLevel(logging.DEBUG)
    logger.handlers = []
    ch = logging.StreamHandler()
    logger.addHandler(ch)
    fh = logging.FileHandler(os.path.join(log_dir, 'log.txt'))
    logger.addHandler(fh)
    logger.info('%s', repr(arguments))
    torch.backends.cudnn.benchmark = True
    # import the archived copy of model.py so the run stays reproducible
    # even if the original file changes later
    loader = importlib.machinery.SourceFileLoader('model', os.path.join(log_dir, 'model.py'))
    mod = types.ModuleType(loader.name)
    loader.exec_module(mod)
    model = mod.Model(55)
    model.cuda()
    logger.info('{} paramerters in total'.format(sum((x.numel() for x in model.parameters()))))
    logger.info('{} paramerters in the last layer'.format(sum((x.numel() for x in model.out_layer.parameters()))))
    bw = model.bandwidths[0]
    # cache the spherical projections on disk, keyed by the input bandwidth
    transform = CacheNPY(prefix='b{}_'.format(bw), repeat=augmentation, transform=torchvision.transforms.Compose([ToMesh(random_rotations=True, random_translation=0.1), ProjectOnSphere(bandwidth=bw)]))
    def target_transform(x):
        # map a ShapeNet synset id onto a dense class index in [0, 55)
        classes = ['02691156', '02747177', '02773838', '02801938', '02808440', '02818832', '02828884', '02843684', '02871439', '02876657', '02880940', '02924116', '02933112', '02942699', '02946921', '02954340', '02958343', '02992529', '03001627', '03046257', '03085013', '03207941', '03211117', '03261776', '03325088', '03337140', '03467517', '03513137', '03593526', '03624134', '03636649', '03642806', '03691459', '03710193', '03759954', '03761084', '03790512', '03797390', '03928116', '03938244', '03948459', '03991062', '04004475', '04074963', '04090263', '04099429', '04225987', '04256520', '04330267', '04379243', '04401088', '04460130', '04468005', '04530566', '04554684']
        return classes.index(x[0])
    train_set = Shrec17('data', dataset, perturbed=True, download=True, transform=transform, target_transform=target_transform)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=True, drop_last=True)
    # lr starts at 0 here; it is set per-epoch by get_learning_rate below
    optimizer = torch.optim.SGD(model.parameters(), lr=0, momentum=0.9)
    def train_step(data, target):
        # one optimization step; returns (loss value, #correct in the batch)
        model.train()
        (data, target) = (data.cuda(), target.cuda())
        prediction = model(data)
        loss = F.nll_loss(prediction, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        correct = prediction.data.max(1)[1].eq(target.data).long().cpu().sum()
        return (loss.item(), correct.item())
    def get_learning_rate(epoch):
        # step schedule: base lr until epoch 100, then x0.1, then x0.01
        limits = [100, 200]
        lrs = [1, 0.1, 0.01]
        assert (len(lrs) == (len(limits) + 1))
        for (lim, lr) in zip(limits, lrs):
            if (epoch < lim):
                return (lr * learning_rate)
        return (lrs[(- 1)] * learning_rate)
    for epoch in range(300):
        lr = get_learning_rate(epoch)
        logger.info('learning rate = {} and batch size = {}'.format(lr, train_loader.batch_size))
        for p in optimizer.param_groups:
            p['lr'] = lr
        total_loss = 0
        total_correct = 0
        time_before_load = time.perf_counter()
        for (batch_idx, (data, target)) in enumerate(train_loader):
            time_after_load = time.perf_counter()
            time_before_step = time.perf_counter()
            (loss, correct) = train_step(data, target)
            total_loss += loss
            total_correct += correct
            logger.info('[{}:{}/{}] LOSS={:.2} <LOSS>={:.2} ACC={:.2} <ACC>={:.2} time={:.2}+{:.2}'.format(epoch, batch_idx, len(train_loader), loss, (total_loss / (batch_idx + 1)), (correct / len(data)), ((total_correct / len(data)) / (batch_idx + 1)), (time_after_load - time_before_load), (time.perf_counter() - time_before_step)))
            time_before_load = time.perf_counter()
        # checkpoint after every epoch (overwrites the previous state)
        torch.save(model.state_dict(), os.path.join(log_dir, 'state.pkl'))
|
def s2_near_identity_grid(max_beta=(np.pi / 8), n_alpha=8, n_beta=3):
    """
    :return: rings around the north pole
    size of the kernel = n_alpha * n_beta
    """
    # bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement (same dtype: float64)
    beta = (np.arange(start=1, stop=(n_beta + 1), dtype=float) * max_beta) / n_beta
    alpha = np.linspace(start=0, stop=(2 * np.pi), num=n_alpha, endpoint=False)
    # all (beta, alpha) combinations, beta varying slowest
    (B, A) = np.meshgrid(beta, alpha, indexing='ij')
    B = B.flatten()
    A = A.flatten()
    grid = np.stack((B, A), axis=1)
    return tuple((tuple(ba) for ba in grid))
|
def s2_equatorial_grid(max_beta=0, n_alpha=32, n_beta=1):
    """
    :return: rings around the equator
    size of the kernel = n_alpha * n_beta
    """
    half_pi = np.pi / 2
    beta = np.linspace(start=half_pi - max_beta, stop=half_pi + max_beta, num=n_beta, endpoint=True)
    alpha = np.linspace(start=0, stop=2 * np.pi, num=n_alpha, endpoint=False)
    # all (beta, alpha) combinations, beta varying slowest
    return tuple((b, a) for b in beta for a in alpha)
|
def s2_soft_grid(b):
    """Sampling grid of the SOFT convention for bandwidth b: 2b x 2b points
    as (beta, alpha) pairs, beta varying slowest."""
    n = 2 * b
    beta = ((np.arange(n) + 0.5) / n) * np.pi
    alpha = np.linspace(start=0, stop=2 * np.pi, num=n, endpoint=False)
    return tuple((bb, aa) for bb in beta for aa in alpha)
|
def s2_mm(x, y):
    """
    Spectral multiplication of an S2 signal x with an S2 kernel y, producing
    an SO3 spectrum (CPU path; dispatches to a CUDA kernel on the GPU).

    :param x: [l * m, batch, feature_in, complex]
    :param y: [l * m, feature_in, feature_out, complex]
    :return: [l * m * n, batch, feature_out, complex]
    """
    from s2cnn.utils.complex import complex_mm
    # last axis holds (real, imaginary) pairs
    assert (y.size(3) == 2)
    assert (x.size(3) == 2)
    nbatch = x.size(1)
    nfeature_in = x.size(2)
    nfeature_out = y.size(2)
    assert (y.size(1) == nfeature_in)
    nspec = x.size(0)
    assert (y.size(0) == nspec)
    if x.is_cuda:
        return _cuda_S2_mm.apply(x, y)
    # inputs are indexed by (l, m): nl degrees, nspec == nl ** 2 rows
    nl = round((nspec ** 0.5))
    Fz_list = []
    begin = 0
    for l in range(nl):
        # rows [begin, begin + 2l + 1) hold the order-m coefficients of degree l
        L = ((2 * l) + 1)
        size = L
        Fx = x[begin:(begin + size)]
        Fy = y[begin:(begin + size)]
        # fold the m axis into the batch/feature axes so one complex matrix
        # multiply computes all (m, n) combinations of this degree at once
        Fx = Fx.view((L * nbatch), nfeature_in, 2)
        Fy = Fy.transpose(0, 1)
        Fy = Fy.contiguous()
        Fy = Fy.view(nfeature_in, (L * nfeature_out), 2)
        # complex matmul against the conjugate of y
        Fz = complex_mm(Fx, Fy, conj_y=True)
        # reorder to [m, n, batch, feature_out] flattened as [(L * L), ...]
        Fz = Fz.view(L, nbatch, L, nfeature_out, 2)
        Fz = Fz.transpose(1, 2)
        Fz = Fz.contiguous()
        Fz = Fz.view((L * L), nbatch, nfeature_out, 2)
        Fz_list.append(Fz)
        begin += size
    # concatenate the per-degree blocks: output is indexed by (l, m, n)
    z = torch.cat(Fz_list, 0)
    return z
|
class _cuda_S2_mm(torch.autograd.Function):
    """Autograd wrapper dispatching s2_mm and its gradients to custom CUDA
    kernels."""

    @staticmethod
    def forward(ctx, x, y):
        # x: [l * m, batch, feature_in, complex]
        # y: [l * m, feature_in, feature_out, complex]
        ctx.save_for_backward(x, y)
        return _cuda_s2_mm(x, y)

    @staticmethod
    def backward(ctx, gradz):
        import s2cnn.utils.cuda as cuda_utils
        (x, y) = ctx.saved_tensors
        # recover the problem sizes from the saved inputs (x is [nl**2, ...])
        nl = round((x.size(0) ** 0.5))
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        # size of the (l, m, n) output spectrum: sum over l of (2l+1)**2
        nspec = ((((4 * (nl ** 2)) - 1) * nl) // 3)
        device = torch.cuda.current_device()
        # kernels are compiled per problem size and memoized via lru_cache
        gradx_cuda_kernel = _setup_s2mm_gradx_cuda_kernel(nbatch=nbatch, nspec=nspec, nl=nl, nfeature_in=nfeature_in, nfeature_out=nfeature_out, device=device)
        grady_cuda_kernel = _setup_s2mm_grady_cuda_kernel(nbatch=nbatch, nspec=nspec, nl=nl, nfeature_in=nfeature_in, nfeature_out=nfeature_out, device=device)
        stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
        gradx = grady = None
        # compute each input gradient only if autograd actually needs it
        if ctx.needs_input_grad[0]:
            gradx = gradz.new_empty(((nl ** 2), nbatch, nfeature_in, 2))
            gradx_cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1), grid=(cuda_utils.get_blocks((((nl ** 2) * nbatch) * nfeature_in), 1024), 1, 1), args=[gradz.contiguous().data_ptr(), y.contiguous().data_ptr(), gradx.data_ptr()], stream=stream)
        if ctx.needs_input_grad[1]:
            grady = gradz.new_empty(((nl ** 2), nfeature_in, nfeature_out, 2))
            grady_cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1), grid=(cuda_utils.get_blocks((((nl ** 2) * nfeature_in) * nfeature_out), 1024), 1, 1), args=[gradz.contiguous().data_ptr(), x.contiguous().data_ptr(), grady.data_ptr()], stream=stream)
        return (gradx, grady)
|
def _cuda_s2_mm(x, y):
    """
    CUDA implementation of the s2_mm forward pass.

    :param x: [l * m, batch, feature_in, complex]
    :param y: [l * m, feature_in, feature_out, complex]
    :return: [l * m * n, batch, feature_out, complex]
    """
    import s2cnn.utils.cuda as cuda_utils
    # the kernel is written for single-precision tensors resident on the GPU
    assert (x.is_cuda and (x.dtype == torch.float32))
    assert (y.is_cuda and (y.dtype == torch.float32))
    # last axis holds (real, imaginary) pairs
    assert (y.size(3) == 2)
    assert (x.size(3) == 2)
    nbatch = x.size(1)
    nfeature_in = x.size(2)
    nfeature_out = y.size(2)
    assert (y.size(1) == nfeature_in)
    assert (y.size(0) == x.size(0))
    # inputs are indexed by (l, m): nl degrees, nl ** 2 rows
    nl = round((x.size(0) ** 0.5))
    # output is indexed by (l, m, n): sum over l of (2l+1)**2
    nspec = ((((4 * (nl ** 2)) - 1) * nl) // 3)
    assert (x.size(0) == (nl ** 2))
    assert (y.size(0) == (nl ** 2))
    device = torch.cuda.current_device()
    # kernel is compiled per problem size and memoized via lru_cache
    cuda_kernel = _setup_s2mm_cuda_kernel(nbatch=nbatch, nspec=nspec, nfeature_in=nfeature_in, nfeature_out=nfeature_out, device=device)
    stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
    output = x.new_empty((nspec, nbatch, nfeature_out, 2))
    cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1), grid=(cuda_utils.get_blocks(((nspec * nbatch) * nfeature_out), 1024), 1, 1), args=[x.contiguous().data_ptr(), y.contiguous().data_ptr(), output.data_ptr()], stream=stream)
    return output
|
@lru_cache(maxsize=32)
def _setup_s2mm_cuda_kernel(nbatch, nspec, nfeature_in, nfeature_out, device=0):
    """Compile (and memoize) the CUDA kernel for the s2_mm forward pass,
    specialized to the given problem sizes via template substitution."""
    kernel = Template('\n#define COMPUTE_LMN(s) int l = powf(3.0/4.0 * s, 1.0/3.0) - 0.5; int L = l * (4 * l * l - 1) / 3; int rest = s - L; if (rest >= (2 * l + 1) * (2 * l + 1)) { ++l; L = l * (4 * l * l - 1) / 3; rest = s - L; } int m = rest / (2 * l + 1) - l; int n = rest % (2 * l + 1) - l;\n\n#define EXTRACT(i1, i2, n2, i3, n3) int i1 = index; int i3 = i1 % (n3); i1 /= n3; int i2 = i1 % (n2); i1 /= n2;\n\n#define CONTRACT1(s1, i2, n2, i3, n3) ( ( (l * l + (l + (s1))) * (n2) + (i2) ) * (n3) + (i3) )\n\n#define CONTRACT2(s1, s2, i2, n2, i3, n3) ( ( (L + (l + (s1)) * (2 * l + 1) + (l + (s2))) * (n2) + (i2) ) * (n3) + (i3) )\n\nextern "C"\n__global__ void main_(const float* in_x, const float* in_y, float* out) {\n    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < ${nspec} * ${nbatch} * ${nfeature_out}; index += blockDim.x * gridDim.x) {\n        EXTRACT(s, i, ${nbatch}, f_out, ${nfeature_out})\n\n        // compute s -> (l,m,n)\n        COMPUTE_LMN(s)\n\n        float out_re = 0.0;\n        float out_im = 0.0;\n\n        for (int f_in = 0; f_in < ${nfeature_in}; ++f_in) {\n            float x_re = in_x[CONTRACT1(m, i, ${nbatch}, f_in, ${nfeature_in} ) * 2 + 0];\n            float x_im = in_x[CONTRACT1(m, i, ${nbatch}, f_in, ${nfeature_in} ) * 2 + 1];\n            float y_re = in_y[CONTRACT1(n, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 0];\n            float y_im = in_y[CONTRACT1(n, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 1];\n\n            // x times y conjugate\n            out_re += x_re * y_re + x_im * y_im;\n            out_im += x_im * y_re - x_re * y_im;\n        }\n\n        out[index * 2 + 0] = out_re;\n        out[index * 2 + 1] = out_im;\n    }\n}\n').substitute({'nbatch': nbatch, 'nspec': nspec, 'nfeature_in': nfeature_in, 'nfeature_out': nfeature_out})
    import s2cnn.utils.cuda as cuda_utils
    return cuda_utils.compile_kernel(kernel, 's2mm.cu', 'main_')
|
@lru_cache(maxsize=32)
def _setup_s2mm_gradx_cuda_kernel(nbatch, nspec, nl, nfeature_in, nfeature_out, device=0):
    """Compile (and memoize) the CUDA kernel computing the gradient of s2_mm
    with respect to its first input x, specialized to the problem sizes."""
    kernel = Template('\n#define COMPUTE_LM(s) int l = sqrtf(s); int L = (4 * l * l - 1) * l / 3; int m = s - l * l - l;\n\n#define EXTRACT(i1, i2, n2, i3, n3) int i1 = index; int i3 = i1 % (n3); i1 /= n3; int i2 = i1 % (n2); i1 /= n2;\n\n#define CONTRACT1(s1, i2, n2, i3, n3) ( ( (l * l + (l + (s1))) * (n2) + (i2) ) * (n3) + (i3) )\n\n#define CONTRACT2(s1, s2, i2, n2, i3, n3) ( ( (L + (l + (s1)) * (2 * l + 1) + (l + (s2))) * (n2) + (i2) ) * (n3) + (i3) )\n\nextern "C"\n__global__ void main_(const float* grad_z, const float* y, float* grad_x) {\n    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (${nl} * ${nl}) * ${nbatch} * ${nfeature_in}; index += blockDim.x * gridDim.x) {\n        EXTRACT(s, i, ${nbatch}, f_in, ${nfeature_in})\n\n        // compute s -> (l,m)\n        COMPUTE_LM(s)\n\n        float out_re = 0.0;\n        float out_im = 0.0;\n\n        for (int f_out = 0; f_out < ${nfeature_out}; ++f_out) {\n            for (int k = -l; k <= l; ++k) {\n                float grad_z_re = grad_z[CONTRACT2(m, k, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 0];\n                float grad_z_im = grad_z[CONTRACT2(m, k, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 1];\n                float y_re = y[CONTRACT1(k, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 0];\n                float y_im = y[CONTRACT1(k, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 1];\n\n                // grad_z times y\n                out_re += grad_z_re * y_re - grad_z_im * y_im;\n                out_im += grad_z_re * y_im + grad_z_im * y_re;\n            }\n        }\n\n        grad_x[index * 2 + 0] = out_re;\n        grad_x[index * 2 + 1] = out_im;\n    }\n}\n').substitute({'nbatch': nbatch, 'nspec': nspec, 'nl': nl, 'nfeature_in': nfeature_in, 'nfeature_out': nfeature_out})
    import s2cnn.utils.cuda as cuda_utils
    return cuda_utils.compile_kernel(kernel, 's2mm_gradx.cu', 'main_')
|
@lru_cache(maxsize=32)
def _setup_s2mm_grady_cuda_kernel(nbatch, nspec, nl, nfeature_in, nfeature_out, device=0):
    # Compile the CUDA kernel computing the gradient of s2_mm with respect to
    # its second argument y. For each spectral index s = (l, m) and feature
    # pair (f_in, f_out), the kernel accumulates conj(grad_z) * x over the
    # batch and over k=-l..l (see the inlined CUDA source).
    # NOTE(review): `nspec` and `device` do not appear in the template body;
    # they only widen the lru_cache key — confirm against callers.
    kernel = Template('\n#define COMPUTE_LM(s) int l = powf(s, 0.5); int L = (4 * l * l - 1) * l / 3; int m = s - l * l - l;\n\n#define EXTRACT(i1, i2, n2, i3, n3) int i1 = index; int i3 = i1 % (n3); i1 /= n3; int i2 = i1 % (n2); i1 /= n2;\n\n#define CONTRACT1(s1, i2, n2, i3, n3) ( ( (l * l + (l + (s1))) * (n2) + (i2) ) * (n3) + (i3) )\n\n#define CONTRACT2(s1, s2, i2, n2, i3, n3) ( ( (L + (l + (s1)) * (2 * l + 1) + (l + (s2))) * (n2) + (i2) ) * (n3) + (i3) )\n\nextern "C"\n__global__ void main_(const float* grad_z, const float* x, float* grad_y) {\n for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (${nl} * ${nl}) * ${nfeature_in} * ${nfeature_out}; index += blockDim.x * gridDim.x) {\n EXTRACT(s, f_in, ${nfeature_in}, f_out, ${nfeature_out})\n\n // compute s -> (l,m)\n COMPUTE_LM(s)\n\n float out_re = 0.0;\n float out_im = 0.0;\n\n for (int i = 0; i < ${nbatch}; ++i) {\n for (int k = -l; k <= l; ++k) {\n float grad_z_re = grad_z[CONTRACT2(k, m, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 0];\n float grad_z_im = grad_z[CONTRACT2(k, m, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 1];\n float x_re = x[CONTRACT1(k, i, ${nbatch}, f_in, ${nfeature_in} ) * 2 + 0];\n float x_im = x[CONTRACT1(k, i, ${nbatch}, f_in, ${nfeature_in} ) * 2 + 1];\n\n // conjugate grad_z times x\n out_re += grad_z_re * x_re + grad_z_im * x_im;\n out_im += grad_z_re * x_im - grad_z_im * x_re;\n }\n }\n\n grad_y[index * 2 + 0] = out_re;\n grad_y[index * 2 + 1] = out_im;\n }\n}\n').substitute({'nbatch': nbatch, 'nspec': nspec, 'nl': nl, 'nfeature_in': nfeature_in, 'nfeature_out': nfeature_out})
    # imported lazily so the CUDA toolchain is only required when actually used
    import s2cnn.utils.cuda as cuda_utils
    return cuda_utils.compile_kernel(kernel, 's2mm_grady.cu', 'main_')
|
def test_compare_cuda_cpu():
    """Check that s2_mm gives the same result on CPU and GPU."""
    nspec = sum(2 * l + 1 for l in range(4))  # = 16, spectral size for bandwidth 4
    x = torch.rand(nspec, 2, 3, 2)
    y = torch.rand(nspec, 3, 5, 2)
    z_cpu = s2_mm(x, y)
    z_gpu = s2_mm(x.cuda(), y.cuda()).cpu()
    rel_err = (z_cpu - z_gpu).abs().max().item() / z_cpu.std().item()
    print(rel_err)
    assert rel_err < 0.0001
|
def so3_rft(x, b, grid):
    """
    Real Fourier Transform on SO(3).

    :param x: [..., beta_alpha_gamma]
    :param b: output bandwidth of the signal
    :param grid: tuple of (beta, alpha, gamma) tuples
    :return: [l * m * n, ..., complex]
    """
    F = _setup_so3_ft(b, grid, device_type=x.device.type, device_index=x.device.index)
    assert x.size(-1) == F.size(0)
    shape = x.size()
    flat = x.view(-1, shape[-1])
    out = torch.einsum('ia,afc->fic', (flat, F.clone()))
    return out.view(-1, *shape[:-1], 2)
|
@cached_dirpklgz('cache/setup_so3_ft')
def __setup_so3_ft(b, grid):
    """Precompute the Wigner-D based Fourier matrix for the given SO(3) grid,
    returned as a float array of shape [n_spatial, n_spectral, 2] (re, im)."""
    from lie_learn.representations.SO3.wigner_d import wigner_D_matrix

    n_spatial = len(grid)
    n_spectral = np.sum([(2 * l + 1) ** 2 for l in range(b)])
    F = np.zeros((n_spatial, n_spectral), dtype=complex)
    for i, (beta, alpha, gamma) in enumerate(grid):
        row = []
        for l in range(b):
            D = wigner_D_matrix(l, alpha, beta, gamma, field='complex', normalization='quantum', order='centered', condon_shortley='cs')
            row.append(D.conj().flatten())
        F[i] = np.concatenate(row)
    # reinterpret the complex entries as (re, im) float pairs
    return F.view('float').reshape(-1, n_spectral, 2)
|
@lru_cache(maxsize=32)
def _setup_so3_ft(b, grid, device_type, device_index):
    """Cached float32 torch tensor of the SO(3) Fourier matrix on a device."""
    mat = __setup_so3_ft(b, grid).astype(np.float32)
    target = torch.device(device_type, device_index)
    return torch.tensor(mat, dtype=torch.float32, device=target)
|
def so3_mm(x, y):
    """
    Complex matrix multiplication in the SO(3) spectral domain, block by
    block over the degrees l (one (2l+1)x(2l+1) block per degree).

    :param x: [l * m * n, batch, feature_in, complex]
    :param y: [l * m * n, feature_in, feature_out, complex]
    :return: [l * m * n, batch, feature_out, complex]
    """
    from s2cnn.utils.complex import complex_mm

    assert y.size(3) == 2
    assert x.size(3) == 2
    nbatch = x.size(1)
    nfeature_in = x.size(2)
    nfeature_out = y.size(2)
    assert y.size(1) == nfeature_in
    nspec = x.size(0)
    assert y.size(0) == nspec
    # nspec = sum_{l<nl} (2l+1)^2 = nl (4 nl^2 - 1) / 3.  Invert with round()
    # instead of math.ceil() so floating point noise in the cube root cannot
    # shift the result; this also matches _cuda_SO3_mm.forward/backward.
    nl = round((3 / 4 * nspec) ** (1 / 3))
    assert nspec == nl * (4 * nl ** 2 - 1) // 3

    if x.is_cuda:
        return _cuda_SO3_mm.apply(x, y)

    Fz_list = []
    begin = 0
    for l in range(nl):
        L = 2 * l + 1
        size = L ** 2

        Fx = x[begin:begin + size]
        Fy = y[begin:begin + size]

        # reshape so the contraction over (p, feature_in) becomes a single
        # complex matrix product per degree l
        Fx = Fx.view(L, L, nbatch, nfeature_in, 2)
        Fx = Fx.transpose(0, 1)
        Fx = Fx.transpose(0, 2)
        Fx = Fx.transpose(2, 3)
        Fx = Fx.contiguous()
        Fx = Fx.view(nbatch * L, nfeature_in * L, 2)

        Fy = Fy.view(L, L, nfeature_in, nfeature_out, 2)
        Fy = Fy.transpose(0, 2)
        Fy = Fy.contiguous()
        Fy = Fy.view(nfeature_in * L, L * nfeature_out, 2)

        Fz = complex_mm(Fx, Fy, conj_y=True)
        Fz = Fz.view(nbatch, L * L, nfeature_out, 2)
        Fz = Fz.transpose(0, 1)

        Fz_list.append(Fz)
        begin += size

    return torch.cat(Fz_list, 0)
|
class _cuda_SO3_mm(torch.autograd.Function):
    # Autograd wrapper for the block-diagonal complex matrix multiplication in
    # the SO(3) spectral domain, backed by the hand-written CUDA kernel built
    # by _setup_so3mm_cuda_kernel. Forward and both gradients reuse the same
    # generic kernel, configured through its conj_*/trans_* flags.

    @staticmethod
    def forward(ctx, x, y):
        """
        :param x: [l * m * n, batch, feature_in, complex]
        :param y: [l * m * n, feature_in, feature_out, complex]
        :return: [l * m * n, batch, feature_out, complex]
        """
        assert (x.is_cuda and (x.dtype == torch.float32))
        assert (y.is_cuda and (y.dtype == torch.float32))
        assert (y.size(3) == 2)  # complex stored as trailing (re, im) pair
        assert (x.size(3) == 2)
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        assert (y.size(1) == nfeature_in)
        nspec = x.size(0)
        assert (y.size(0) == nspec)
        # nspec = nl * (4 nl^2 - 1) / 3  =>  nl = round((3/4 nspec)^(1/3))
        nl = round((((3 / 4) * nspec) ** (1 / 3)))
        assert (nspec == ((nl * ((4 * (nl ** 2)) - 1)) // 3))
        ctx.save_for_backward(x, y)
        device = torch.cuda.current_device()
        # forward product uses conjugated y with its spectral indices swapped
        cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nbatch, nj=nfeature_out, nk=nfeature_in, conj_y=True, trans_y_spec=True, device=device)
        output = x.new_empty((nspec, nbatch, nfeature_out, 2))
        cuda_kernel(x, y, output)
        return output

    @staticmethod
    def backward(ctx, gradz):
        # Gradients w.r.t. x and y are computed with the same generic kernel,
        # selecting conjugation/transposition through the keyword flags.
        (x, y) = ctx.saved_tensors
        nspec = x.size(0)
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        nl = round((((3 / 4) * nspec) ** (1 / 3)))
        assert (nspec == ((nl * ((4 * (nl ** 2)) - 1)) // 3))
        gradx = grady = None
        device = torch.cuda.current_device()
        if ctx.needs_input_grad[0]:
            gradx_cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nbatch, nj=nfeature_in, nk=nfeature_out, trans_y_feature=True, device=device)
            gradx = gradz.new_empty((nspec, nbatch, nfeature_in, 2))
            gradx_cuda_kernel(gradz, y, gradx)
        if ctx.needs_input_grad[1]:
            grady_cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nfeature_out, nj=nfeature_in, nk=nbatch, trans_out_feature=True, conj_x=True, trans_x_spec=True, trans_x_feature=True, device=device)
            grady = gradz.new_empty((nspec, nfeature_in, nfeature_out, 2))
            grady_cuda_kernel(gradz, x, grady)
        return (gradx, grady)
|
@lru_cache(maxsize=32)
def _setup_so3mm_cuda_kernel(nl, ni, nj, nk, conj_x=False, conj_y=False, trans_x_spec=False, trans_x_feature=False, trans_y_spec=False, trans_y_feature=False, trans_out_feature=False, device=0):
    '''
    return a function that computes
    out[l*m*n, i, j] = sum_k sum_p x[l*m*p, i, k] y[l*p*n, k, j]
    where out, x, y are complex valued

    if conj_x is set to True, x is conjugated
    if conj_y is set to True, y is conjugated
    if trans_x_spec is set to True m and p are permuted in x[...]
    if trans_y_spec is set to True p and n are permuted in y[...]
    if trans_x_feature is set to True i and k are permuted in x[...]
    if trans_y_feature is set to True k and j are permuted in y[...]
    if trans_out_feature is set to True i and j are permuted in out[...]

    NOTE(review): `device` is unused in the body; it only widens the
    lru_cache key -- presumably one compiled kernel per device. Confirm.
    '''
    kernel = '\n#define NI {}\n#define NJ {}\n#define NK {}\n'.format(ni, nj, nk)
    # Bake the operand memory layout into the CUDA source as macros; exactly
    # one INDEX_X / INDEX_Y / INDEX_OUT definition is emitted per operand.
    if ((not trans_x_spec) and (not trans_x_feature)):
        kernel += '#define INDEX_X (((L0 + m * L + p) * NI + i) * NK + k)\n'
    if ((not trans_x_spec) and trans_x_feature):
        kernel += '#define INDEX_X (((L0 + m * L + p) * NK + k) * NI + i)\n'
    if (trans_x_spec and (not trans_x_feature)):
        kernel += '#define INDEX_X (((L0 + p * L + m) * NI + i) * NK + k)\n'
    if (trans_x_spec and trans_x_feature):
        kernel += '#define INDEX_X (((L0 + p * L + m) * NK + k) * NI + i)\n'
    if ((not trans_y_spec) and (not trans_y_feature)):
        kernel += '#define INDEX_Y (((L0 + p * L + n) * NK + k) * NJ + j)\n'
    if ((not trans_y_spec) and trans_y_feature):
        kernel += '#define INDEX_Y (((L0 + p * L + n) * NJ + j) * NK + k)\n'
    if (trans_y_spec and (not trans_y_feature)):
        kernel += '#define INDEX_Y (((L0 + n * L + p) * NK + k) * NJ + j)\n'
    if (trans_y_spec and trans_y_feature):
        kernel += '#define INDEX_Y (((L0 + n * L + p) * NJ + j) * NK + k)\n'
    if (not trans_out_feature):
        kernel += '#define INDEX_OUT (((L0 + m * L + n) * NI + i) * NJ + j)\n'
    if trans_out_feature:
        kernel += '#define INDEX_OUT (((L0 + m * L + n) * NJ + j) * NI + i)\n'
    # optional conjugation is spliced in as a statement macro
    kernel += '\n#define CONJ_X {}\n#define CONJ_Y {}\n'.format(('x_im = -x_im;' if conj_x else ';'), ('y_im = -y_im;' if conj_y else ';'))
    # tiled shared-memory complex matrix product; one grid z-slice per degree l
    kernel += '\n#define CEIL_DIV(x, y) (((x) + (y) - 1) / (y))\n\nextern "C"\n__global__ void main_(const float* in_x, const float* in_y, float* out)\n{\n // start of thread independant code\n int l = blockIdx.z;\n int L = 2 * l + 1;\n int L0 = (4 * l*l - 1) * l / 3;\n\n if (blockIdx.y * 32 >= L * NI || blockIdx.x * 32 >= L * NJ) {\n return;\n }\n\n int ntile = CEIL_DIV(L * NK, 32);\n // end of thread independant code\n\n int mi = blockIdx.y * 32 + threadIdx.y;\n int m = mi / NI;\n int i = mi % NI;\n int nj = blockIdx.x * 32 + threadIdx.x;\n int n = nj / NJ;\n int j = nj % NJ;\n\n float sum_re = 0.0;\n float sum_im = 0.0;\n\n for (int tile = 0; tile < ntile; ++tile) {\n __shared__ float tileX[2][32][32];\n __shared__ float tileY[2][32][32];\n\n int pk = tile * 32 + threadIdx.x;\n int p = pk / NK;\n int k = pk % NK;\n int index = INDEX_X * 2;\n tileX[0][threadIdx.y][threadIdx.x] = m < L && p < L ? in_x[index + 0] : 0.0;\n tileX[1][threadIdx.y][threadIdx.x] = m < L && p < L ? in_x[index + 1] : 0.0;\n\n pk = tile * 32 + threadIdx.y;\n p = pk / NK;\n k = pk % NK;\n index = INDEX_Y * 2;\n tileY[0][threadIdx.y][threadIdx.x] = p < L && n < L ? in_y[index + 0] : 0.0;\n tileY[1][threadIdx.y][threadIdx.x] = p < L && n < L ? in_y[index + 1] : 0.0;\n\n __syncthreads();\n\n for (int any = 0; any < 32; ++any) {\n float x_re = tileX[0][threadIdx.y][any];\n float x_im = tileX[1][threadIdx.y][any];\n float y_re = tileY[0][any][threadIdx.x];\n float y_im = tileY[1][any][threadIdx.x];\n\n CONJ_X\n CONJ_Y\n\n sum_re += x_re * y_re - x_im * y_im;\n sum_im += x_re * y_im + x_im * y_re;\n }\n\n __syncthreads();\n }\n\n if (m < L && n < L) {\n int index = INDEX_OUT * 2;\n out[index + 0] = sum_re;\n out[index + 1] = sum_im;\n }\n}\n'
    # imported lazily so the CUDA toolchain is only required when actually used
    import s2cnn.utils.cuda as cuda_utils
    kernel = cuda_utils.compile_kernel(kernel, 'so3_mm.cu', 'main_')
    stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
    def fun(x, y, output):
        # launcher: 32x32 thread blocks tiling the output, nl grid z-slices
        assert output.is_contiguous()
        kernel(block=(32, 32, 1), grid=(math.ceil(((((2 * nl) - 1) * nj) / 32)), math.ceil(((((2 * nl) - 1) * ni) / 32)), nl), args=[x.contiguous().data_ptr(), y.contiguous().data_ptr(), output.data_ptr()], stream=stream)
    return fun
|
def test_compare_cuda_cpu():
    """Check that so3_mm gives the same result on CPU and GPU."""
    nspec = sum((2 * l + 1) ** 2 for l in range(4))  # = 84, spectral size for bandwidth 4
    x = torch.rand(nspec, 2, 3, 2)
    y = torch.rand(nspec, 3, 5, 2)
    z_cpu = so3_mm(x, y)
    z_gpu = so3_mm(x.cuda(), y.cuda()).cpu()
    rel_err = (z_cpu - z_gpu).abs().max().item() / z_cpu.std().item()
    print(rel_err)
    assert rel_err < 0.0001
|
class S2Convolution(Module):
    """Convolution of a spherical (S^2) signal with a learned kernel,
    producing a signal on SO(3)."""

    def __init__(self, nfeature_in, nfeature_out, b_in, b_out, grid):
        """
        :param nfeature_in: number of input features
        :param nfeature_out: number of output features
        :param b_in: input bandwidth (precision of the input SOFT grid)
        :param b_out: output bandwidth
        :param grid: points of the sphere defining the kernel, tuple of (alpha, beta)'s
        """
        super(S2Convolution, self).__init__()
        self.nfeature_in = nfeature_in
        self.nfeature_out = nfeature_out
        self.b_in = b_in
        self.b_out = b_out
        self.grid = grid
        self.kernel = Parameter(torch.empty(nfeature_in, nfeature_out, len(grid)).uniform_(-1, 1))
        # normalization of the uniformly-initialized kernel
        norm = len(grid) * nfeature_in * b_out ** 4.0 / b_in ** 2.0
        self.scaling = 1.0 / math.sqrt(norm)
        self.bias = Parameter(torch.zeros(1, nfeature_out, 1, 1, 1))

    def forward(self, x):
        """
        :param x: [batch, feature_in, beta, alpha]
        :return: [batch, feature_out, beta, alpha, gamma]
        """
        assert x.size(1) == self.nfeature_in
        assert x.size(2) == 2 * self.b_in
        assert x.size(3) == 2 * self.b_in

        spec_x = S2_fft_real.apply(x, self.b_out)
        spec_k = s2_rft(self.kernel * self.scaling, self.b_out, self.grid)
        out = SO3_ifft_real.apply(s2_mm(spec_x, spec_k))
        return out + self.bias
|
class SO3Convolution(Module):
    """Convolution of an SO(3) signal with a learned kernel on SO(3)."""

    def __init__(self, nfeature_in, nfeature_out, b_in, b_out, grid):
        """
        :param nfeature_in: number of input features
        :param nfeature_out: number of output features
        :param b_in: input bandwidth (precision of the input SOFT grid)
        :param b_out: output bandwidth
        :param grid: points of the SO(3) group defining the kernel, tuple of (alpha, beta, gamma)'s
        """
        super(SO3Convolution, self).__init__()
        self.nfeature_in = nfeature_in
        self.nfeature_out = nfeature_out
        self.b_in = b_in
        self.b_out = b_out
        self.grid = grid
        self.kernel = Parameter(torch.empty(nfeature_in, nfeature_out, len(grid)).uniform_(-1, 1))
        self.bias = Parameter(torch.zeros(1, nfeature_out, 1, 1, 1))
        # normalization of the uniformly-initialized kernel
        norm = len(grid) * nfeature_in * b_out ** 3.0 / b_in ** 3.0
        self.scaling = 1.0 / math.sqrt(norm)

    def forward(self, x):
        """
        :param x: [batch, feature_in, beta, alpha, gamma]
        :return: [batch, feature_out, beta, alpha, gamma]
        """
        assert x.size(1) == self.nfeature_in
        assert x.size(2) == 2 * self.b_in
        assert x.size(3) == 2 * self.b_in
        assert x.size(4) == 2 * self.b_in

        spec_x = SO3_fft_real.apply(x, self.b_out)
        spec_k = so3_rft(self.kernel * self.scaling, self.b_out, self.grid)
        assert spec_x.size(0) == spec_k.size(0)
        assert spec_x.size(2) == spec_k.size(1)

        z = so3_mm(spec_x, spec_k)
        assert z.size(0) == spec_x.size(0)
        assert z.size(1) == spec_x.size(1)
        assert z.size(2) == spec_k.size(2)

        return SO3_ifft_real.apply(z) + self.bias
|
class SO3Shortcut(Module):
    """Shortcut/skip connection for SO(3) signals, useful for ResNet blocks.
    Uses a 1-point SO3Convolution when features or bandwidth change, and the
    identity otherwise."""

    def __init__(self, nfeature_in, nfeature_out, b_in, b_out):
        super(SO3Shortcut, self).__init__()
        assert b_out <= b_in
        if nfeature_in == nfeature_out and b_in == b_out:
            self.conv = None  # identity shortcut
        else:
            self.conv = SO3Convolution(nfeature_in=nfeature_in, nfeature_out=nfeature_out, b_in=b_in, b_out=b_out, grid=((0, 0, 0),))

    def forward(self, x):
        """
        :param x: [batch, feature_in, beta, alpha, gamma]
        :return: [batch, feature_out, beta, alpha, gamma]
        """
        return x if self.conv is None else self.conv(x)
|
def so3_integrate(x):
    """
    Integrate a signal on SO(3) using the Haar measure.

    :param x: [..., beta, alpha, gamma] (..., 2b, 2b, 2b)
    :return: [...] (...)
    """
    assert x.size(-1) == x.size(-2)
    assert x.size(-2) == x.size(-3)
    b = x.size(-1) // 2

    w = _setup_so3_integrate(b, device_type=x.device.type, device_index=x.device.index)  # [beta]

    # Sum over gamma and alpha in one call; the beta axis is contracted with
    # the quadrature weights below. (The original summed one dim at a time and
    # followed each sum with a redundant squeeze(-1): torch.sum already drops
    # the reduced dimension and the remaining trailing dims have size 2b >= 2.)
    x = torch.sum(x, dim=(-2, -1))

    sz = x.size()
    x = x.view(-1, 2 * b)
    w = w.view(2 * b, 1)
    x = torch.mm(x, w).squeeze(-1)
    return x.view(*sz[:-1])
|
@lru_cache(maxsize=32)
@show_running
def _setup_so3_integrate(b, device_type, device_index):
    """S^3 quadrature weights for bandwidth b as a float32 tensor on the device."""
    import lie_learn.spaces.S3 as S3
    weights = S3.quadrature_weights(b)
    target = torch.device(device_type, device_index)
    return torch.tensor(weights, dtype=torch.float32, device=target)
|
def so3_rotation(x, alpha, beta, gamma):
    """
    Rotate a signal on SO(3) by the Euler angles (alpha, beta, gamma),
    applying the Wigner-D matrices block by block in the spectral domain.

    :param x: [..., beta, alpha, gamma] (..., 2b, 2b, 2b)
    """
    b = x.size()[-1] // 2
    x_size = x.size()

    Us = _setup_so3_rotation(b, alpha, beta, gamma, device_type=x.device.type, device_index=x.device.index)

    x = SO3_fft_real.apply(x)  # [l * m * n, ..., complex]

    Fz_list = []
    begin = 0
    for l in range(b):
        L = 2 * l + 1
        size = L ** 2
        Fx = x[begin:begin + size].view(L, -1, 2)
        U = Us[l].view(L, L, 2)
        Fz = complex_mm(U, Fx, conj_x=True).view(size, -1, 2)
        Fz_list.append(Fz)
        begin += size
    Fz = torch.cat(Fz_list, 0)

    z = SO3_ifft_real.apply(Fz)
    return z.contiguous().view(*x_size)
|
@cached_dirpklgz('cache/setup_so3_rotation')
def __setup_so3_rotation(b, alpha, beta, gamma):
    """Wigner-D matrices for each l < b, as float32 arrays of shape
    (2l+1, 2l+1, 2) with a trailing (re, im) axis."""
    from lie_learn.representations.SO3.wigner_d import wigner_D_matrix

    Us = []
    for l in range(b):
        U = wigner_D_matrix(l, alpha, beta, gamma, field='complex', normalization='quantum', order='centered', condon_shortley='cs')
        Us.append(U.astype(np.complex64).view(np.float32).reshape(2 * l + 1, 2 * l + 1, 2))
    return Us
|
@lru_cache(maxsize=32)
def _setup_so3_rotation(b, alpha, beta, gamma, device_type, device_index):
    """Cached torch tensors of the Wigner-D matrices on the given device."""
    target = torch.device(device_type, device_index)
    return [torch.tensor(U, dtype=torch.float32, device=target) for U in __setup_so3_rotation(b, alpha, beta, gamma)]
|
def get_blocks(n, num_threads):
    """Number of CUDA blocks to launch for n elements, keeping the grid
    within CUDA_MAX_GRID_DIM by letting each thread process several items."""
    def ceil_div(a, b):
        return (a + b - 1) // b
    # elements each thread must handle so the grid fits the hardware limit
    per_thread = ceil_div(n, num_threads * CUDA_MAX_GRID_DIM)
    return ceil_div(n, num_threads * per_thread)
|
def compile_kernel(kernel, filename, functioname):
    """Compile CUDA source to PTX, load it, and return the named device function."""
    ptx = Program(kernel, filename).compile()
    module = function.Module()
    module.load(bytes(ptx.encode()))
    return module.get_function(functioname)
|
class WaitPrint(threading.Thread):
    """Background thread that prints `message` after roughly `t` seconds,
    unless stop() is called first. Cancellation is polled every 0.1 s."""

    def __init__(self, t, message):
        super().__init__()
        self.t = t
        self.message = message
        self.running = True

    def stop(self):
        # cooperative cancellation flag, checked by run() between sleeps
        self.running = False

    def run(self):
        n_steps = int(self.t // 0.1)
        for _ in range(n_steps):
            time.sleep(0.1)
            if not self.running:
                return
        print(self.message, end='')
|
def show_running(func):
    """Decorator: if the wrapped call takes longer than 2 seconds, print a
    '<name>(<args>)... ' notice and afterwards the elapsed time."""
    @wraps(func)
    def wrapper(*args, **kargs):
        rendered = [repr(a) for a in args]
        rendered += ['{}={}'.format(key, repr(value)) for (key, value) in kargs.items()]
        notifier = WaitPrint(2, '{}({})... '.format(func.__name__, ', '.join(rendered)))
        notifier.start()
        start = time.perf_counter()
        result = func(*args, **kargs)
        if notifier.is_alive():
            # finished before the notice fired: cancel it silently
            notifier.stop()
        else:
            print('done in {:.0f} seconds'.format(time.perf_counter() - start))
        return result
    return wrapper
|
def cached_dirpklgz(dirname):
    """
    Decorator factory: cache a function's results as gzipped pickles in `dirname`.

    An index.pkl file inside the directory maps argument tuples to file names;
    results are additionally memoized in memory through lru_cache.
    """
    def decorator(func):
        """
        The actual decorator
        """
        @lru_cache(maxsize=None)
        @wraps(func)
        def wrapper(*args):
            """
            The wrapper of the function
            """
            os.makedirs(dirname, exist_ok=True)
            indexfile = os.path.join(dirname, 'index.pkl')

            # load (or initialize) the args -> filename index
            try:
                with open(indexfile, 'rb') as fh:
                    index = pickle.load(fh)
            except FileNotFoundError:
                index = {}

            if args in index:
                filename = index[args]
            else:
                filename = '{}.pkl.gz'.format(len(index))
                index[args] = filename
                with open(indexfile, 'wb') as fh:
                    pickle.dump(index, fh)

            filepath = os.path.join(dirname, filename)
            try:
                with gzip.open(filepath, 'rb') as fh:
                    print('load {}... '.format(filename), end='')
                    result = pickle.load(fh)
            except FileNotFoundError:
                print('compute {}... '.format(filename), end='')
                sys.stdout.flush()
                result = func(*args)
                print('save {}... '.format(filename), end='')
                with gzip.open(filepath, 'wb') as fh:
                    pickle.dump(result, fh)
            print('done')
            return result
        return wrapper
    return decorator
|
def test_so3_rfft(b_in, b_out, device):
    """Compare so3_rfft against the explicit quadrature-weighted so3_rft."""
    x = torch.randn(2 * b_in, 2 * b_in, 2 * b_in, dtype=torch.float, device=device)

    from s2cnn.soft.so3_fft import so3_rfft
    y1 = so3_rfft(x, b_out=b_out)

    from s2cnn import so3_rft, so3_soft_grid
    import lie_learn.spaces.S3 as S3
    # weight each beta slice by the S^3 quadrature weights before the RFT
    w = torch.tensor(S3.quadrature_weights(b_in), dtype=torch.float, device=device)
    xw = torch.einsum('bac,b->bac', (x, w))
    y2 = so3_rft(xw.view(-1), b_out, so3_soft_grid(b_in))

    assert (y1 - y2).abs().max().item() < 0.0001 * y1.abs().mean().item()
|
def test_inverse(f, g, b_in, b_out, device, complex):
if complex:
x = torch.randn((2 * b_in), (2 * b_in), (2 * b_in), 2, dtype=torch.float, device=device)
else:
x = torch.randn((2 * b_in), (2 * b_in), (2 * b_in), dtype=torch.float, device=device)
x = g(f(x, b_out=b_out), b_out=b_in)
y = g(f(x, b_out=b_out), b_out=b_in)
assert ((x - y).abs().max().item() < (0.0001 * y.abs().mean().item()))
|
def test_inverse2(f, g, b_in, b_out, device):
x = torch.randn(((b_in * ((4 * (b_in ** 2)) - 1)) // 3), 2, dtype=torch.float, device=device)
x = g(f(x, b_out=b_out), b_out=b_in)
y = g(f(x, b_out=b_out), b_out=b_in)
assert ((x - y).abs().max().item() < (0.0001 * y.abs().mean().item()))
|
def compare_cpu_gpu(f, x):
    """Assert that f gives the same result on CPU and GPU (relative to the std)."""
    z_cpu = f(x.cpu())
    z_gpu = f(x.cuda()).cpu()
    rel_err = (z_cpu - z_gpu).abs().max().item() / z_cpu.std().item()
    assert rel_err < 0.0001
|
def eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id=0):
    """Compute the loss of the baseline mlp on one mini-batch of indices."""
    atom_types = to_one_hot(data['features']['atom_types'][batch_idxs, ...], NUM_ATOM_TYPES)
    targets = data['targets'][batch_idxs, ...]

    atom_types = Variable(atom_types)
    targets = Variable(targets)
    if torch.cuda.is_available():
        atom_types = atom_types.cuda(device_id)
        targets = targets.cuda(device_id)

    outputs = mlp(atom_types)
    return criterion(outputs, targets)
|
def eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id=0):
    """Compute the loss of the residual model (mlp output + s2cnn output) on one batch."""
    features = data['features']
    geometry = Variable(features['geometry'][batch_idxs, ...])
    raw_types = features['atom_types'][batch_idxs, ...]
    atom_types_one_hot = Variable(to_one_hot(raw_types, NUM_ATOM_TYPES))
    atom_types = Variable(raw_types)
    targets = Variable(data['targets'][batch_idxs, ...])

    if torch.cuda.is_available():
        atom_types_one_hot = atom_types_one_hot.cuda(device_id)
        geometry = geometry.cuda(device_id)
        atom_types = atom_types.cuda(device_id)
        targets = targets.cuda(device_id)

    # the s2cnn predicts a residual on top of the baseline mlp prediction
    outputs = mlp(atom_types_one_hot)
    outputs += s2cnn(geometry, atom_types)
    return criterion(outputs, targets)
|
def train_baseline(mlp, data, train_batches, test_batches, num_epochs, learning_rate_mlp, device_id=0):
    """
    Train the baseline mlp model.

    :param mlp: the baseline model
    :param data: dataset dict with 'features' and 'targets'
    :param train_batches: batcher yielding training index batches
    :param test_batches: batcher yielding test index batches
    :param num_epochs: number of training epochs
    :param learning_rate_mlp: optimizer learning rate
    :param device_id: CUDA device to use when available
    :return: (train_loss, test_loss) RMSE of the last epoch
    """
    optim = OPTIMIZER(mlp.parameters(), lr=learning_rate_mlp)
    criterion = nn.MSELoss()
    if torch.cuda.is_available():
        criterion = criterion.cuda(device_id)
    for epoch in range(num_epochs):
        train_losses = []
        print('training')
        for (iteration, batch_idxs) in enumerate(train_batches):
            mlp.train()
            optim.zero_grad()
            loss = eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id)
            loss.backward()
            optim.step()
            train_losses.append(loss.item())
            print('\riteration {}/{}'.format(iteration + 1, train_batches.num_iterations()), end='')
        print()
        test_losses = []
        print('evaluating')
        for (iteration, batch_idxs) in enumerate(test_batches):
            mlp.eval()
            # bug fix: device_id was previously dropped here, so evaluation
            # always targeted device 0 regardless of the requested device
            loss = eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id)
            test_losses.append(loss.item())
            print('\riteration {}/{}'.format(iteration + 1, test_batches.num_iterations()), end='')
        print()
        # sqrt of mean squared-error losses -> RMSE
        train_loss = np.sqrt(np.mean(train_losses))
        test_loss = np.sqrt(np.mean(test_losses))
        print('epoch {}/{} - avg train loss: {}, test loss: {}'.format(epoch + 1, num_epochs, train_loss, test_loss))
    return (train_loss, test_loss)
|
def train_s2cnn(mlp, s2cnn, data, train_batches, test_batches, num_epochs, init_learning_rate_s2cnn, learning_rate_decay_epochs, device_id=0):
    """
    Train the s2cnn residual model while keeping the baseline mlp frozen.

    :param mlp: frozen baseline model
    :param s2cnn: residual model being trained
    :param data: dataset dict with 'features' and 'targets'
    :param train_batches: batcher yielding training index batches
    :param test_batches: batcher yielding test index batches
    :param num_epochs: number of training epochs
    :param init_learning_rate_s2cnn: initial learning rate (decayed over time)
    :param learning_rate_decay_epochs: epochs between 10x learning rate decays
    :param device_id: CUDA device to use when available
    :return: (train_loss, test_loss) RMSE of the last epoch
    """
    optim = OPTIMIZER(s2cnn.parameters(), lr=init_learning_rate_s2cnn)
    criterion = nn.MSELoss()
    if torch.cuda.is_available():
        criterion = criterion.cuda(device_id)
    for epoch in range(num_epochs):
        optim = exp_lr_scheduler(optim, epoch, init_lr=init_learning_rate_s2cnn, lr_decay_epoch=learning_rate_decay_epochs)
        train_losses = []
        print('training')
        for (iteration, batch_idxs) in enumerate(train_batches):
            s2cnn.train()
            mlp.eval()
            optim.zero_grad()
            # bug fix: device_id was previously dropped here, so training
            # always targeted device 0 regardless of the requested device
            loss = eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id)
            loss.backward()
            optim.step()
            train_losses.append(loss.item())
            print('\riteration {}/{} - batch loss: {}'.format(iteration + 1, train_batches.num_iterations(), np.sqrt(train_losses[-1])), end='')
        print()
        test_losses = []
        print('evaluating')
        for (iteration, batch_idxs) in enumerate(test_batches):
            s2cnn.eval()
            mlp.eval()
            # bug fix: same missing device_id as in the training loop above
            loss = eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id)
            test_losses.append(loss.item())
            print('\riteration {}/{} - batch loss: {}'.format(iteration + 1, test_batches.num_iterations(), np.sqrt(test_losses[-1])), end='')
        print()
        # sqrt of mean squared-error losses -> RMSE
        train_loss = np.sqrt(np.mean(train_losses))
        test_loss = np.sqrt(np.mean(test_losses))
        print('epoch {}/{} - avg train loss: {}, test loss: {}'.format(epoch + 1, num_epochs, train_loss, test_loss))
    return (train_loss, test_loss)
|
def main():
    """Parse command line arguments, then train the baseline mlp and the
    residual s2cnn model on the molecule dataset."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, default='data.joblib')
    parser.add_argument('--test_strat', type=int, default=0)
    parser.add_argument('--device_id', type=int, default=0)
    parser.add_argument('--num_epochs_s2cnn', type=int, default=30)
    parser.add_argument('--num_epochs_mlp', type=int, default=30)
    parser.add_argument('--batch_size_s2cnn', type=int, default=32)
    parser.add_argument('--batch_size_mlp', type=int, default=32)
    # bug fix: these two were declared type=int although they are learning
    # rates, so any value passed on the command line was rejected or truncated
    parser.add_argument('--init_learning_rate_s2cnn', type=float, default=0.001)
    parser.add_argument('--learning_rate_mlp', type=float, default=0.001)
    parser.add_argument('--learning_rate_decay_epochs', type=int, default=10)
    args = parser.parse_args()

    torch.cuda.set_device(args.device_id)

    print('evaluating on {}'.format(args.test_strat))
    print('loading data...', end='')
    (data, train_idxs, test_idxs) = load_data(args.data_path, args.test_strat, cuda=args.device_id)
    print('done!')

    mlp = BaselineRegressor()
    s2cnn = S2CNNRegressor()
    if torch.cuda.is_available():
        for model in [mlp, s2cnn]:
            model.cuda(args.device_id)

    print('training baseline model')
    print('mlp #params: {}'.format(count_params(mlp)))
    train_baseline(mlp, data, IndexBatcher(train_idxs, args.batch_size_mlp, cuda=args.device_id), IndexBatcher(test_idxs, args.batch_size_mlp, cuda=args.device_id), args.num_epochs_mlp, args.learning_rate_mlp, args.device_id)

    print('training residual s2cnn model')
    print('s2cnn #params: {}'.format(count_params(s2cnn)))
    train_s2cnn(mlp, s2cnn, data, IndexBatcher(train_idxs, args.batch_size_s2cnn, cuda=args.device_id), IndexBatcher(test_idxs, args.batch_size_s2cnn, cuda=args.device_id), args.num_epochs_s2cnn, args.init_learning_rate_s2cnn, args.learning_rate_decay_epochs, args.device_id)
|
class S2Block(nn.Module):
    """S^2 convolution followed by batch norm and a nonlinearity."""

    def __init__(self, b_in, b_out, f_in, f_out):
        """
        :param b_in: bandwidth of the input signal
        :param b_out: bandwidth of the output signal
        :param f_in: number of input filters
        :param f_out: number of output filters
        """
        super(S2Block, self).__init__()
        self.grid_s2 = s2_near_identity_grid(n_alpha=2 * b_in, n_beta=2)
        self.cnn = S2Convolution(nfeature_in=f_in, nfeature_out=f_out, b_in=b_in, b_out=b_out, grid=self.grid_s2)
        self.bn = nn.BatchNorm3d(f_out, affine=AFFINE)

    def forward(self, x):
        return nonlinearity(self.bn(self.cnn(x)))
|
class So3Block(nn.Module):
    """SO(3) convolution followed by batch norm and a nonlinearity."""

    def __init__(self, b_in, b_out, f_in, f_out):
        """
        :param b_in: bandwidth of the input signal
        :param b_out: bandwidth of the output signal
        :param f_in: number of input filters
        :param f_out: number of output filters
        """
        super(So3Block, self).__init__()
        self.grid_so3 = so3_near_identity_grid(n_alpha=2 * b_in, n_beta=2, n_gamma=2)
        self.cnn = SO3Convolution(nfeature_in=f_in, nfeature_out=f_out, b_in=b_in, b_out=b_out, grid=self.grid_so3)
        self.bn = nn.BatchNorm3d(f_out, affine=AFFINE)

    def forward(self, x):
        return nonlinearity(self.bn(self.cnn(x)))
|
class DeepSet(nn.Module):
    """Deep set block: encode each object, sum the masked latent codes over
    the objects of each example, then decode the sum to a scalar."""

    def __init__(self, f, h1, h_latent, h2, n_objs):
        """
        :param f: input filters per object
        :param h1: hidden units of the encoder mlp
        :param h_latent: latent dimension summed over objects
        :param h2: hidden units of the decoder mlp
        :param n_objs: number of objects to aggregate in latent space
        """
        super(DeepSet, self).__init__()
        self.f = f
        self.h1 = h1
        # bug fix: h2 used to be stored under the misleading name `h3`,
        # and h_latent was not stored at all
        self.h2 = h2
        self.h_latent = h_latent
        self.n_objs = n_objs
        self.emb_h = nn.Linear(f, h1)
        self.emb_rep = nn.Linear(h1, h_latent)
        self.proj_h = nn.Linear(h_latent, h2)
        self.proj = nn.Linear(h2, 1)
        self.bn1 = nn.BatchNorm1d(h1, affine=AFFINE)
        self.bn2 = nn.BatchNorm1d(h_latent, affine=AFFINE)
        self.bn3 = nn.BatchNorm1d(h2, affine=AFFINE)

    def forward(self, x, mask):
        """
        :param x: [n_batch * n_objs, f] per-object features
        :param mask: [n_batch, n_objs, 1] multiplicative mask applied before the sum
        :return: [n_batch, 1]
        """
        # per-object encoder
        x = self.emb_h(x)
        x = self.bn1(x)
        x = nonlinearity(x)
        x = self.emb_rep(x)
        x = self.bn2(x)
        x = nonlinearity(x)
        # permutation-invariant aggregation over the objects of each example
        (n, h_latent) = x.size()
        x = x.view(n // self.n_objs, self.n_objs, h_latent)
        x = torch.sum(x * mask, dim=1)
        # decoder down to a single scalar per example
        x = self.proj_h(x)
        x = self.bn3(x)
        x = nonlinearity(x)
        x = self.proj(x)
        return x
|
class S2CNNRegressor(nn.Module):
    """Approximate molecular energies from per-atom spherical representations."""

    def __init__(self):
        super(S2CNNRegressor, self).__init__()
        n_objs = 23
        self.blocks = [
            S2Block(b_in=10, f_in=5, b_out=8, f_out=8),
            So3Block(b_in=8, b_out=6, f_in=8, f_out=16),
            So3Block(b_in=6, b_out=4, f_in=16, f_out=32),
            So3Block(b_in=4, b_out=2, f_in=32, f_out=64),
        ]
        # register each block as an attribute so its parameters are tracked
        # (keeps the state-dict keys block0, block1, ...)
        for i, block in enumerate(self.blocks):
            setattr(self, 'block{0}'.format(i), block)
        self.ds = DeepSet(64, 256, 64, 512, n_objs)

    def forward(self, x, atom_types):
        n_batch, n_atoms, n_features, bandwidth, _ = x.size()
        # atoms with type 0 are excluded from the aggregation (presumably padding)
        mask = (atom_types > 0).view(n_batch, n_atoms, 1).float()
        x = x.view(n_batch * n_atoms, n_features, bandwidth, bandwidth)
        for block in self.blocks:
            x = block(x)
        x = so3_integrate(x)
        return self.ds(x, mask)
|
class IndexBatcher():
    """
    Iterator over shuffled mini-batches of dataset indices. After an epoch is
    exhausted it reshuffles automatically, so the same instance can be
    iterated once per epoch.

    :param indices: array of dataset indices to sample from
    :param n_batch: mini-batch size
    :param cuda: device id to move batches to, or None to stay on CPU
    """

    def __init__(self, indices, n_batch, cuda=None):
        self.indices = indices.astype(np.int64)
        self.n_batch = n_batch
        self.pos = 0
        self.cuda = cuda
        self.internal_indices = np.arange(len(indices)).astype(np.int64)
        np.random.shuffle(self.internal_indices)

    def __iter__(self):
        return self

    def reset(self):
        """Rewind and reshuffle for the next epoch."""
        self.pos = 0
        np.random.shuffle(self.internal_indices)

    def __next__(self):
        start = self.pos
        if start >= len(self.indices):
            # epoch finished: reshuffle and stop.
            # bug fix: the original checked the *advanced* position, which
            # raised StopIteration one step early and silently dropped the
            # last batch of every epoch.
            self.reset()
            raise StopIteration
        end = np.minimum(start + self.n_batch, len(self.indices))
        self.pos += self.n_batch
        tensor = torch.LongTensor(self.indices[self.internal_indices[start:end]])
        if self.cuda is not None:
            # bug fix: .cuda() returns a new tensor; the original discarded
            # the result and returned the CPU tensor
            tensor = tensor.cuda(self.cuda)
        return tensor

    def num_iterations(self):
        # ceil-division so a partial final batch is counted (it is yielded)
        return (len(self.indices) + self.n_batch - 1) // self.n_batch

    next = __next__
|
def to_one_hot(x, n):
    """One-hot encode an integer tensor x of shape [a, b] into a float
    tensor of shape [a, b, n]."""
    expanded = torch.unsqueeze(x, 2)
    one_hot = torch.zeros(*x.size(), n)
    one_hot.scatter_(2, expanded, 1)
    return one_hot
|
def load_data(path, test_strat_id=None, cuda=None):
    """
    Load the molecule dataset and split it into train/test strats.

    :param path: path to the molecule .gz/joblib file
    :param test_strat_id: id of the strat used as the test set; None picks one at random
    :param cuda: device id to move the tensors to, or None to stay on CPU
    :return: (data, train_indices, test_indices)
    """
    data = joblib.load(path)

    # remap the raw atom type codes to a dense range [0, n_types)
    type_remap = -np.ones(int(data['features']['atom_types'].max()) + 1)
    unique_types = np.unique(data['features']['atom_types']).astype(int)
    type_remap[unique_types] = np.arange(len(unique_types))
    data['features']['atom_types'] = type_remap[data['features']['atom_types'].astype(int)]

    data['features']['geometry'] = torch.FloatTensor(data['features']['geometry'].astype(np.float32))
    data['features']['atom_types'] = torch.LongTensor(data['features']['atom_types'].astype(np.int64))
    data['targets'] = torch.from_numpy(data['targets'])

    if cuda is not None:
        # bug fix: .cuda() returns a copy; the original discarded the result,
        # so the tensors silently stayed on the CPU
        data['features']['geometry'] = data['features']['geometry'].cuda(cuda)
        data['features']['atom_types'] = data['features']['atom_types'].cuda(cuda)
        data['targets'] = data['targets'].cuda(cuda)

    # bug fix: use `is None` instead of a falsy check so strat 0 can be
    # selected explicitly (0 used to be replaced by a random strat)
    if test_strat_id is None:
        test_strat_id = np.random.randint(len(data['strats']))

    train = np.empty(0)
    test = np.empty(0)
    for i in range(len(data['strats'])):
        if i == test_strat_id:
            test = np.concatenate((test, data['strats'][i]))
        else:
            train = np.concatenate((train, data['strats'][i]))
    return (data, train, test)
|
def exp_lr_scheduler(optimizer, epoch, init_lr=0.005, lr_decay_epoch=40):
    """Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs."""
    lr = init_lr * 0.1 ** (epoch // lr_decay_epoch)
    if epoch % lr_decay_epoch == 0:
        print('LR is set to {}'.format(lr))
    for group in optimizer.param_groups:
        group['lr'] = lr
    return optimizer
|
def count_params(model):
    """Total number of elements in the trainable parameters of model."""
    return sum(np.prod(p.size()) for p in model.parameters() if p.requires_grad)
|
class Model(nn.Module):
    """Spherical CNN classifier: one S^2 convolution, then SO(3) convolutions,
    integration over SO(3), and a final linear classifier."""

    def __init__(self, nclasses):
        super().__init__()
        self.features = [6, 100, 100, nclasses]
        self.bandwidths = [64, 16, 10]
        assert len(self.bandwidths) == len(self.features) - 1

        layers = []
        # first layer: signal on S^2 -> signal on SO(3)
        s2_grid = s2_equatorial_grid(max_beta=0, n_alpha=2 * self.bandwidths[0], n_beta=1)
        layers.append(S2Convolution(self.features[0], self.features[1], self.bandwidths[0], self.bandwidths[1], s2_grid))
        # remaining layers: SO(3) -> SO(3), each preceded by bn + relu
        for l in range(1, len(self.features) - 2):
            f_in, f_out = self.features[l], self.features[l + 1]
            b_in, b_out = self.bandwidths[l], self.bandwidths[l + 1]
            layers.append(nn.BatchNorm3d(f_in, affine=True))
            layers.append(nn.ReLU())
            so3_grid = so3_equatorial_grid(max_beta=0, max_gamma=0, n_alpha=2 * b_in, n_beta=1, n_gamma=1)
            layers.append(SO3Convolution(f_in, f_out, b_in, b_out, so3_grid))
        layers.append(nn.BatchNorm3d(self.features[-2], affine=True))
        layers.append(nn.ReLU())
        self.sequential = nn.Sequential(*layers)

        self.out_layer = nn.Linear(self.features[-2], self.features[-1])

    def forward(self, x):
        x = self.sequential(x)
        x = so3_integrate(x)
        x = self.out_layer(x)
        return F.log_softmax(x, dim=1)
|
class Model(nn.Module):
    """S2CNN classifier variant: S2 + SO(3) convolutions followed by a
    global spatial max-pool and a BatchNorm1d + Linear output head."""

    def __init__(self, nclasses):
        super().__init__()
        self.features = [6, 50, 70, 350, nclasses]
        self.bandwidths = [128, 32, 22, 7]
        assert len(self.bandwidths) == len(self.features) - 1

        # first layer lifts the spherical signal to SO(3)
        s2_grid = s2_equatorial_grid(max_beta=0, n_alpha=2 * self.bandwidths[0], n_beta=1)
        layers = [S2Convolution(self.features[0], self.features[1],
                                self.bandwidths[0], self.bandwidths[1], s2_grid)]
        # intermediate BN -> ReLU -> SO(3) convolution stages
        for f_in, f_out, b_in, b_out in zip(self.features[1:-2], self.features[2:-1],
                                            self.bandwidths[1:-1], self.bandwidths[2:]):
            layers.append(nn.BatchNorm3d(f_in, affine=True))
            layers.append(nn.ReLU())
            so3_grid = so3_equatorial_grid(max_beta=0, max_gamma=0, n_alpha=2 * b_in, n_beta=1, n_gamma=1)
            layers.append(SO3Convolution(f_in, f_out, b_in, b_out, so3_grid))
        layers.append(nn.BatchNorm3d(self.features[-2], affine=True))
        layers.append(nn.ReLU())
        self.sequential = nn.Sequential(*layers)
        self.out_layer = nn.Sequential(nn.BatchNorm1d(self.features[-2], affine=False),
                                       nn.Linear(self.features[-2], self.features[-1]))

    def forward(self, x):
        x = self.sequential(x)
        # global max pool over all (beta, alpha, gamma) positions
        x = x.view(x.size(0), x.size(1), -1).max(-1)[0]
        return F.log_softmax(self.out_layer(x), dim=1)
|
class KeepName():
    """Wrap a transform so that calling it also returns the input file name."""

    def __init__(self, transform):
        self.transform = transform

    def __call__(self, file_name):
        transformed = self.transform(file_name)
        return (file_name, transformed)
|
def main(log_dir, augmentation, dataset, batch_size, num_workers):
    """Evaluate a trained SHREC17 model and run the official evaluator.

    Loads the model definition and weights saved in `log_dir`, sums the
    predictions over `augmentation` random projections of each shape, writes
    one retrieval list per shape id, then downloads and runs the SHREC17
    nodejs evaluator on the results and copies its summary into `log_dir`.
    """
    # fail fast if nodejs is missing -- the evaluator at the end needs it
    print(check_output(['nodejs', '--version']).decode('utf-8'))
    torch.backends.cudnn.benchmark = True
    # mesh -> `augmentation` random rotated/translated spherical projections
    # at bandwidth 64, cached on disk, stacked into one tensor per shape
    transform = torchvision.transforms.Compose([CacheNPY(prefix='b64_', repeat=augmentation, pick_randomly=False, transform=torchvision.transforms.Compose([ToMesh(random_rotations=True, random_translation=0.1), ProjectOnSphere(bandwidth=64)])), (lambda xs: torch.stack([torch.FloatTensor(x) for x in xs]))])
    transform = KeepName(transform)  # also keep the file name for the id
    test_set = Shrec17('data', dataset, perturbed=True, download=True, transform=transform)
    # import the Model class from the copy of model.py saved at training time
    loader = importlib.machinery.SourceFileLoader('model', os.path.join(log_dir, 'model.py'))
    mod = types.ModuleType(loader.name)
    loader.exec_module(mod)
    model = mod.Model(55)  # 55 ShapeNet classes -- must match training
    model.cuda()
    model.load_state_dict(torch.load(os.path.join(log_dir, 'state.pkl')))
    # start from an empty result directory
    resdir = os.path.join(log_dir, (dataset + '_perturbed'))
    if os.path.isdir(resdir):
        shutil.rmtree(resdir)
    os.mkdir(resdir)
    predictions = []
    ids = []
    loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True, drop_last=False)
    for (batch_idx, data) in enumerate(loader):
        model.eval()
        if (dataset != 'test'):
            data = data[0]  # drop the target returned by non-test splits
        (file_names, data) = data
        (batch_size, rep) = data.size()[:2]  # rep = augmentations per shape
        data = data.view((- 1), *data.size()[2:])  # fold augmentations into batch dim
        data = data.cuda()
        with torch.no_grad():
            pred = model(data).data
        pred = pred.view(batch_size, rep, (- 1))
        pred = pred.sum(1)  # aggregate the augmented views
        predictions.append(pred.cpu().numpy())
        ids.extend([x.split('/')[(- 1)].split('.')[0] for x in file_names])  # path -> shape id
        print('[{}/{}] '.format(batch_idx, len(loader)))
    predictions = np.concatenate(predictions)
    predictions_class = np.argmax(predictions, axis=1)
    # one retrieval file per query shape: all shapes with the same predicted
    # class, ranked by their own classification score
    for i in range(len(ids)):
        if ((i % 100) == 0):
            print('{}/{} '.format(i, len(ids)), end='\r')
        idfile = os.path.join(resdir, ids[i])
        retrieved = [(predictions[(j, predictions_class[j])], ids[j]) for j in range(len(ids)) if (predictions_class[j] == predictions_class[i])]
        retrieved = sorted(retrieved, reverse=True)
        retrieved = [i for (_, i) in retrieved]
        with open(idfile, 'w') as f:
            f.write('\n'.join(retrieved))
    # download and unpack the official SHREC17 evaluator
    url = 'https://shapenet.cs.stanford.edu/shrec17/code/evaluator.zip'
    file_path = 'evaluator.zip'
    r = requests.get(url, stream=True)
    with open(file_path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=(16 * (1024 ** 2))):
            if chunk:
                f.write(chunk)
                f.flush()
    zip_ref = zipfile.ZipFile(file_path, 'r')
    zip_ref.extractall('.')
    zip_ref.close()
    # run the evaluator on the result directory and keep its summary
    print(check_output(['nodejs', 'evaluate.js', (os.path.join('..', log_dir) + '/')], cwd='evaluator').decode('utf-8'))
    shutil.copy2(os.path.join('evaluator', (log_dir + '.summary.csv')), os.path.join(log_dir, 'summary.csv'))
|
def main(log_dir, model_path, augmentation, dataset, batch_size, learning_rate, num_workers):
    """Train an S2CNN classifier on SHREC17.

    Creates `log_dir`, snapshots this script and the model definition into
    it, trains for 300 epochs with a step learning-rate schedule, and saves
    the final weights to `log_dir`/state.pkl.
    """
    # capture the call arguments before any other locals are introduced
    arguments = copy.deepcopy(locals())
    os.mkdir(log_dir)
    shutil.copy2(__file__, os.path.join(log_dir, 'script.py'))
    shutil.copy2(model_path, os.path.join(log_dir, 'model.py'))
    # log to both the console and log_dir/log.txt
    logger = logging.getLogger('train')
    logger.setLevel(logging.DEBUG)
    logger.handlers = []
    ch = logging.StreamHandler()
    logger.addHandler(ch)
    fh = logging.FileHandler(os.path.join(log_dir, 'log.txt'))
    logger.addHandler(fh)
    logger.info('%s', repr(arguments))
    torch.backends.cudnn.benchmark = True
    # import the Model class from the snapshot so the run stays reproducible
    loader = importlib.machinery.SourceFileLoader('model', os.path.join(log_dir, 'model.py'))
    mod = types.ModuleType(loader.name)
    loader.exec_module(mod)
    model = mod.Model(55)  # 55 ShapeNet classes (see `classes` below)
    model.cuda()
    logger.info('{} paramerters in total'.format(sum((x.numel() for x in model.parameters()))))
    logger.info('{} paramerters in the last layer'.format(sum((x.numel() for x in model.out_layer.parameters()))))
    bw = model.bandwidths[0]  # input bandwidth the model expects
    # mesh -> random rotation/translation -> spherical projection, cached on disk
    transform = CacheNPY(prefix='b{}_'.format(bw), repeat=augmentation, transform=torchvision.transforms.Compose([ToMesh(random_rotations=True, random_translation=0.1), ProjectOnSphere(bandwidth=bw)]))
    def target_transform(x):
        # map a ShapeNet synset id to a class index in [0, 55)
        classes = ['02691156', '02747177', '02773838', '02801938', '02808440', '02818832', '02828884', '02843684', '02871439', '02876657', '02880940', '02924116', '02933112', '02942699', '02946921', '02954340', '02958343', '02992529', '03001627', '03046257', '03085013', '03207941', '03211117', '03261776', '03325088', '03337140', '03467517', '03513137', '03593526', '03624134', '03636649', '03642806', '03691459', '03710193', '03759954', '03761084', '03790512', '03797390', '03928116', '03938244', '03948459', '03991062', '04004475', '04074963', '04090263', '04099429', '04225987', '04256520', '04330267', '04379243', '04401088', '04460130', '04468005', '04530566', '04554684']
        return classes.index(x[0])
    train_set = Shrec17('data', dataset, perturbed=True, download=True, transform=transform, target_transform=target_transform)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=True, drop_last=True)
    # lr starts at 0; it is overwritten every epoch by get_learning_rate below
    optimizer = torch.optim.SGD(model.parameters(), lr=0, momentum=0.9)
    def train_step(data, target):
        # one SGD step; returns (loss value, number of correct predictions)
        model.train()
        (data, target) = (data.cuda(), target.cuda())
        prediction = model(data)
        loss = F.nll_loss(prediction, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        correct = prediction.data.max(1)[1].eq(target.data).long().cpu().sum()
        return (loss.item(), correct.item())
    def get_learning_rate(epoch):
        # step schedule: x1 before epoch 100, x0.1 until 200, x0.01 after
        limits = [100, 200]
        lrs = [1, 0.1, 0.01]
        assert (len(lrs) == (len(limits) + 1))
        for (lim, lr) in zip(limits, lrs):
            if (epoch < lim):
                return (lr * learning_rate)
        return (lrs[(- 1)] * learning_rate)
    for epoch in range(300):
        lr = get_learning_rate(epoch)
        logger.info('learning rate = {} and batch size = {}'.format(lr, train_loader.batch_size))
        for p in optimizer.param_groups:
            p['lr'] = lr
        total_loss = 0
        total_correct = 0
        time_before_load = time.perf_counter()
        for (batch_idx, (data, target)) in enumerate(train_loader):
            time_after_load = time.perf_counter()
            time_before_step = time.perf_counter()
            (loss, correct) = train_step(data, target)
            total_loss += loss
            total_correct += correct
            # log per-batch and running averages plus data-loading/step timings
            logger.info('[{}:{}/{}] LOSS={:.2} <LOSS>={:.2} ACC={:.2} <ACC>={:.2} time={:.2}+{:.2}'.format(epoch, batch_idx, len(train_loader), loss, (total_loss / (batch_idx + 1)), (correct / len(data)), ((total_correct / len(data)) / (batch_idx + 1)), (time_after_load - time_before_load), (time.perf_counter() - time_before_step)))
            time_before_load = time.perf_counter()
        # overwrite the checkpoint after every epoch
        torch.save(model.state_dict(), os.path.join(log_dir, 'state.pkl'))
|
def s2_near_identity_grid(max_beta=(np.pi / 8), n_alpha=8, n_beta=3):
    """Sample grid points on S2 in rings around the north pole.

    :param max_beta: maximal colatitude of the rings
    :param n_alpha: number of longitudes per ring
    :param n_beta: number of rings
    :return: tuple of (beta, alpha) pairs; kernel size = n_alpha * n_beta
    """
    # np.float was removed in NumPy 1.24 -- use the explicit float64 dtype
    beta = np.arange(start=1, stop=n_beta + 1, dtype=np.float64) * max_beta / n_beta
    alpha = np.linspace(start=0, stop=2 * np.pi, num=n_alpha, endpoint=False)
    B, A = np.meshgrid(beta, alpha, indexing='ij')
    grid = np.stack((B.flatten(), A.flatten()), axis=1)
    return tuple(tuple(ba) for ba in grid)
|
def s2_equatorial_grid(max_beta=0, n_alpha=32, n_beta=1):
    """Sample grid points on S2 in rings around the equator.

    :return: tuple of (beta, alpha) pairs; kernel size = n_alpha * n_beta
    """
    betas = np.linspace(start=(np.pi / 2) - max_beta, stop=(np.pi / 2) + max_beta, num=n_beta, endpoint=True)
    alphas = np.linspace(start=0, stop=2 * np.pi, num=n_alpha, endpoint=False)
    # beta-major ordering, matching meshgrid(indexing='ij') + flatten
    return tuple((b, a) for b in betas for a in alphas)
|
def s2_soft_grid(b):
    """SOFT sampling grid on S2 for bandwidth `b`: 2b x 2b (beta, alpha) pairs."""
    betas = (np.arange(2 * b) + 0.5) / (2 * b) * np.pi
    alphas = np.linspace(start=0, stop=2 * np.pi, num=2 * b, endpoint=False)
    # beta-major ordering, matching meshgrid(indexing='ij') + flatten
    return tuple((beta, alpha) for beta in betas for alpha in alphas)
|
def s2_mm(x, y):
    """Complex matrix multiplication in the S2 spectral domain.

    :param x: [l * m, batch, feature_in, complex]
    :param y: [l * m, feature_in, feature_out, complex]
    :return: [l * m * n, batch, feature_out, complex]
    """
    from s2cnn.utils.complex import complex_mm
    assert (y.size(3) == 2)
    assert (x.size(3) == 2)
    nbatch = x.size(1)
    nfeature_in = x.size(2)
    nfeature_out = y.size(2)
    assert (y.size(1) == nfeature_in)
    nspec = x.size(0)
    assert (y.size(0) == nspec)
    if x.is_cuda:
        return _cuda_S2_mm.apply(x, y)
    # CPU fallback: block multiply, one block per degree l (nspec = nl^2)
    nl = round((nspec ** 0.5))
    Fz_list = []
    begin = 0
    for l in range(nl):
        L = ((2 * l) + 1)  # multiplicity of degree l
        size = L
        Fx = x[begin:(begin + size)]
        Fy = y[begin:(begin + size)]
        Fx = Fx.view((L * nbatch), nfeature_in, 2)
        Fy = Fy.transpose(0, 1)
        Fy = Fy.contiguous()
        Fy = Fy.view(nfeature_in, (L * nfeature_out), 2)
        # [L * nbatch, L * nfeature_out, 2]; y is conjugated
        Fz = complex_mm(Fx, Fy, conj_y=True)
        Fz = Fz.view(L, nbatch, L, nfeature_out, 2)
        Fz = Fz.transpose(1, 2)
        Fz = Fz.contiguous()
        Fz = Fz.view((L * L), nbatch, nfeature_out, 2)
        Fz_list.append(Fz)
        begin += size
    z = torch.cat(Fz_list, 0)
    return z
|
class _cuda_S2_mm(torch.autograd.Function):
    # Autograd wrapper around the custom CUDA S2 spectral matrix multiply.
    @staticmethod
    def forward(ctx, x, y):
        # x: [l * m, batch, feature_in, 2]; y: [l * m, feature_in, feature_out, 2]
        ctx.save_for_backward(x, y)
        return _cuda_s2_mm(x, y)
    @staticmethod
    def backward(ctx, gradz):
        # Computes gradients w.r.t. x and/or y with dedicated CUDA kernels.
        import s2cnn.utils.cuda as cuda_utils
        (x, y) = ctx.saved_tensors
        # x has nl^2 spectral rows (one per (l, m))
        nl = round((x.size(0) ** 0.5))
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        # gradz spectral size: sum_{l<nl} (2l+1)^2 = nl(4nl^2-1)/3
        nspec = ((((4 * (nl ** 2)) - 1) * nl) // 3)
        device = torch.cuda.current_device()
        gradx_cuda_kernel = _setup_s2mm_gradx_cuda_kernel(nbatch=nbatch, nspec=nspec, nl=nl, nfeature_in=nfeature_in, nfeature_out=nfeature_out, device=device)
        grady_cuda_kernel = _setup_s2mm_grady_cuda_kernel(nbatch=nbatch, nspec=nspec, nl=nl, nfeature_in=nfeature_in, nfeature_out=nfeature_out, device=device)
        stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
        gradx = grady = None
        if ctx.needs_input_grad[0]:
            gradx = gradz.new_empty(((nl ** 2), nbatch, nfeature_in, 2))
            gradx_cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1), grid=(cuda_utils.get_blocks((((nl ** 2) * nbatch) * nfeature_in), 1024), 1, 1), args=[gradz.contiguous().data_ptr(), y.contiguous().data_ptr(), gradx.data_ptr()], stream=stream)
        if ctx.needs_input_grad[1]:
            grady = gradz.new_empty(((nl ** 2), nfeature_in, nfeature_out, 2))
            grady_cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1), grid=(cuda_utils.get_blocks((((nl ** 2) * nfeature_in) * nfeature_out), 1024), 1, 1), args=[gradz.contiguous().data_ptr(), x.contiguous().data_ptr(), grady.data_ptr()], stream=stream)
        return (gradx, grady)
|
def _cuda_s2_mm(x, y):
    """CUDA forward pass of the S2 spectral matrix multiply.

    :param x: [l * m, batch, feature_in, complex]
    :param y: [l * m, feature_in, feature_out, complex]
    :return: [l * m * n, batch, feature_out, complex]
    """
    import s2cnn.utils.cuda as cuda_utils
    assert (x.is_cuda and (x.dtype == torch.float32))
    assert (y.is_cuda and (y.dtype == torch.float32))
    assert (y.size(3) == 2)
    assert (x.size(3) == 2)
    nbatch = x.size(1)
    nfeature_in = x.size(2)
    nfeature_out = y.size(2)
    assert (y.size(1) == nfeature_in)
    assert (y.size(0) == x.size(0))
    # inputs have nl^2 spectral rows; output has nl(4nl^2-1)/3 (one per (l, m, n))
    nl = round((x.size(0) ** 0.5))
    nspec = ((((4 * (nl ** 2)) - 1) * nl) // 3)
    assert (x.size(0) == (nl ** 2))
    assert (y.size(0) == (nl ** 2))
    device = torch.cuda.current_device()
    cuda_kernel = _setup_s2mm_cuda_kernel(nbatch=nbatch, nspec=nspec, nfeature_in=nfeature_in, nfeature_out=nfeature_out, device=device)
    stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
    output = x.new_empty((nspec, nbatch, nfeature_out, 2))
    # one thread per output element (s, i, f_out)
    cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1), grid=(cuda_utils.get_blocks(((nspec * nbatch) * nfeature_out), 1024), 1, 1), args=[x.contiguous().data_ptr(), y.contiguous().data_ptr(), output.data_ptr()], stream=stream)
    return output
|
@lru_cache(maxsize=32)
def _setup_s2mm_cuda_kernel(nbatch, nspec, nfeature_in, nfeature_out, device=0):
    # Compile (and cache) the forward CUDA kernel of the S2 spectral multiply:
    # out[(l,m,n), i, f_out] = sum_{f_in} x[(l,m), i, f_in] * conj(y[(l,n), f_in, f_out]).
    kernel = Template('\n#define COMPUTE_LMN(s) int l = powf(3.0/4.0 * s, 1.0/3.0) - 0.5; int L = l * (4 * l * l - 1) / 3; int rest = s - L; if (rest >= (2 * l + 1) * (2 * l + 1)) { ++l; L = l * (4 * l * l - 1) / 3; rest = s - L; } int m = rest / (2 * l + 1) - l; int n = rest % (2 * l + 1) - l;\n\n#define EXTRACT(i1, i2, n2, i3, n3) int i1 = index; int i3 = i1 % (n3); i1 /= n3; int i2 = i1 % (n2); i1 /= n2;\n\n#define CONTRACT1(s1, i2, n2, i3, n3) ( ( (l * l + (l + (s1))) * (n2) + (i2) ) * (n3) + (i3) )\n\n#define CONTRACT2(s1, s2, i2, n2, i3, n3) ( ( (L + (l + (s1)) * (2 * l + 1) + (l + (s2))) * (n2) + (i2) ) * (n3) + (i3) )\n\nextern "C"\n__global__ void main_(const float* in_x, const float* in_y, float* out) {\n    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < ${nspec} * ${nbatch} * ${nfeature_out}; index += blockDim.x * gridDim.x) {\n        EXTRACT(s, i, ${nbatch}, f_out, ${nfeature_out})\n\n        // compute s -> (l,m,n)\n        COMPUTE_LMN(s)\n\n        float out_re = 0.0;\n        float out_im = 0.0;\n\n        for (int f_in = 0; f_in < ${nfeature_in}; ++f_in) {\n            float x_re = in_x[CONTRACT1(m, i,    ${nbatch},      f_in,  ${nfeature_in} ) * 2 + 0];\n            float x_im = in_x[CONTRACT1(m, i,    ${nbatch},      f_in,  ${nfeature_in} ) * 2 + 1];\n            float y_re = in_y[CONTRACT1(n, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 0];\n            float y_im = in_y[CONTRACT1(n, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 1];\n\n            // x times y conjugate\n            out_re += x_re * y_re + x_im * y_im;\n            out_im += x_im * y_re - x_re * y_im;\n        }\n\n        out[index * 2 + 0] = out_re;\n        out[index * 2 + 1] = out_im;\n    }\n}\n').substitute({'nbatch': nbatch, 'nspec': nspec, 'nfeature_in': nfeature_in, 'nfeature_out': nfeature_out})
    import s2cnn.utils.cuda as cuda_utils
    return cuda_utils.compile_kernel(kernel, 's2mm.cu', 'main_')
|
@lru_cache(maxsize=32)
def _setup_s2mm_gradx_cuda_kernel(nbatch, nspec, nl, nfeature_in, nfeature_out, device=0):
    # Compile (and cache) the CUDA kernel for the gradient of s2_mm w.r.t. x:
    # grad_x[(l,m), i, f_in] = sum_{f_out} sum_k grad_z[(l,m,k), i, f_out] * y[(l,k), f_in, f_out].
    kernel = Template('\n#define COMPUTE_LM(s) int l = sqrtf(s); int L = (4 * l * l - 1) * l / 3; int m = s - l * l - l;\n\n#define EXTRACT(i1, i2, n2, i3, n3) int i1 = index; int i3 = i1 % (n3);  i1 /= n3; int i2 = i1 % (n2); i1 /= n2;\n\n#define CONTRACT1(s1, i2, n2, i3, n3) ( ( (l * l + (l + (s1))) * (n2) + (i2) ) * (n3) + (i3) )\n\n#define CONTRACT2(s1, s2, i2, n2, i3, n3) ( ( (L + (l + (s1)) * (2 * l + 1) + (l + (s2))) * (n2) + (i2) ) * (n3) + (i3) )\n\nextern "C"\n__global__ void main_(const float* grad_z, const float* y, float* grad_x) {\n    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (${nl} * ${nl}) * ${nbatch} * ${nfeature_in}; index += blockDim.x * gridDim.x) {\n        EXTRACT(s, i, ${nbatch}, f_in, ${nfeature_in})\n\n        // compute s -> (l,m)\n        COMPUTE_LM(s)\n\n        float out_re = 0.0;\n        float out_im = 0.0;\n\n        for (int f_out = 0; f_out < ${nfeature_out}; ++f_out) {\n            for (int k = -l; k <= l; ++k) {\n                float grad_z_re = grad_z[CONTRACT2(m, k, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 0];\n                float grad_z_im = grad_z[CONTRACT2(m, k, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 1];\n                float y_re = y[CONTRACT1(k, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 0];\n                float y_im = y[CONTRACT1(k, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 1];\n\n                // grad_z times y\n                out_re += grad_z_re * y_re - grad_z_im * y_im;\n                out_im += grad_z_re * y_im + grad_z_im * y_re;\n            }\n        }\n\n        grad_x[index * 2 + 0] = out_re;\n        grad_x[index * 2 + 1] = out_im;\n    }\n}\n').substitute({'nbatch': nbatch, 'nspec': nspec, 'nl': nl, 'nfeature_in': nfeature_in, 'nfeature_out': nfeature_out})
    import s2cnn.utils.cuda as cuda_utils
    return cuda_utils.compile_kernel(kernel, 's2mm_gradx.cu', 'main_')
|
@lru_cache(maxsize=32)
def _setup_s2mm_grady_cuda_kernel(nbatch, nspec, nl, nfeature_in, nfeature_out, device=0):
    # Compile (and cache) the CUDA kernel for the gradient of s2_mm w.r.t. y:
    # grad_y[(l,m), f_in, f_out] = sum_i sum_k conj(grad_z[(l,k,m), i, f_out]) * x[(l,k), i, f_in].
    kernel = Template('\n#define COMPUTE_LM(s) int l = powf(s, 0.5); int L = (4 * l * l - 1) * l / 3; int m = s - l * l - l;\n\n#define EXTRACT(i1, i2, n2, i3, n3) int i1 = index; int i3 = i1 % (n3);  i1 /= n3; int i2 = i1 % (n2); i1 /= n2;\n\n#define CONTRACT1(s1, i2, n2, i3, n3) ( ( (l * l + (l + (s1))) * (n2) + (i2) ) * (n3) + (i3) )\n\n#define CONTRACT2(s1, s2, i2, n2, i3, n3) ( ( (L + (l + (s1)) * (2 * l + 1) + (l + (s2))) * (n2) + (i2) ) * (n3) + (i3) )\n\nextern "C"\n__global__ void main_(const float* grad_z, const float* x, float* grad_y) {\n    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (${nl} * ${nl}) * ${nfeature_in} * ${nfeature_out}; index += blockDim.x * gridDim.x) {\n        EXTRACT(s, f_in, ${nfeature_in}, f_out, ${nfeature_out})\n\n        // compute s -> (l,m)\n        COMPUTE_LM(s)\n\n        float out_re = 0.0;\n        float out_im = 0.0;\n\n        for (int i = 0; i < ${nbatch}; ++i) {\n            for (int k = -l; k <= l; ++k) {\n                float grad_z_re = grad_z[CONTRACT2(k, m, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 0];\n                float grad_z_im = grad_z[CONTRACT2(k, m, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 1];\n                float x_re = x[CONTRACT1(k, i, ${nbatch}, f_in, ${nfeature_in} ) * 2 + 0];\n                float x_im = x[CONTRACT1(k, i, ${nbatch}, f_in, ${nfeature_in} ) * 2 + 1];\n\n                // conjugate grad_z times x\n                out_re += grad_z_re * x_re + grad_z_im * x_im;\n                out_im += grad_z_re * x_im - grad_z_im * x_re;\n            }\n        }\n\n        grad_y[index * 2 + 0] = out_re;\n        grad_y[index * 2 + 1] = out_im;\n    }\n}\n').substitute({'nbatch': nbatch, 'nspec': nspec, 'nl': nl, 'nfeature_in': nfeature_in, 'nfeature_out': nfeature_out})
    import s2cnn.utils.cuda as cuda_utils
    return cuda_utils.compile_kernel(kernel, 's2mm_grady.cu', 'main_')
|
def test_compare_cuda_cpu():
    """Check that the CPU and CUDA implementations of s2_mm agree."""
    # 1+3+5+7 = S2 spectral size for 4 degrees; last dim is (re, im)
    x = torch.rand(1 + 3 + 5 + 7, 2, 3, 2)
    y = torch.rand(1 + 3 + 5 + 7, 3, 5, 2)
    z_cpu = s2_mm(x, y)
    z_gpu = s2_mm(x.cuda(), y.cuda()).cpu()
    relative_error = (z_cpu - z_gpu).abs().max().item() / z_cpu.std().item()
    print(relative_error)
    assert relative_error < 0.0001
|
def so3_rft(x, b, grid):
    """Real Fourier Transform on SO(3).

    :param x: [..., beta_alpha_gamma]
    :param b: output bandwidth signal
    :param grid: tuple of (beta, alpha, gamma) tuples
    :return: [l * m * n, ..., complex]
    """
    # F: [n_spatial, n_spectral, 2] transform matrix (cached per device)
    F = _setup_so3_ft(b, grid, device_type=x.device.type, device_index=x.device.index)
    assert (x.size((- 1)) == F.size(0))
    sz = x.size()
    # contract the spatial grid dimension against the FT matrix
    x = torch.einsum('ia,afc->fic', (x.view((- 1), x.size((- 1))), F.clone()))
    x = x.view((- 1), *sz[:(- 1)], 2)
    return x
|
@cached_dirpklgz('cache/setup_so3_ft')
def __setup_so3_ft(b, grid):
    # Build (and cache on disk) the SO(3) Fourier matrix: row i holds the
    # conjugated Wigner D matrix entries of all degrees l < b at grid[i].
    from lie_learn.representations.SO3.wigner_d import wigner_D_matrix
    n_spatial = len(grid)
    n_spectral = np.sum([(((2 * l) + 1) ** 2) for l in range(b)])
    F = np.zeros((n_spatial, n_spectral), dtype=complex)
    for (i, (beta, alpha, gamma)) in enumerate(grid):
        Dmats = [wigner_D_matrix(l, alpha, beta, gamma, field='complex', normalization='quantum', order='centered', condon_shortley='cs').conj() for l in range(b)]
        F[i] = np.hstack([Dl.flatten() for Dl in Dmats])
    # reinterpret complex entries as a trailing (re, im) float pair
    F = F.view('float').reshape(((- 1), n_spectral, 2))
    return F
|
@lru_cache(maxsize=32)
def _setup_so3_ft(b, grid, device_type, device_index):
    """Cached float32 torch tensor of the SO(3) Fourier transform matrix."""
    device = torch.device(device_type, device_index)
    matrix = __setup_so3_ft(b, grid)
    return torch.tensor(matrix.astype(np.float32), dtype=torch.float32, device=device)
|
def so3_mm(x, y):
    """Complex matrix multiplication in the SO(3) spectral domain.

    :param x: [l * m * n, batch, feature_in, complex]
    :param y: [l * m * n, feature_in, feature_out, complex]
    :return: [l * m * n, batch, feature_out, complex]
    """
    from s2cnn.utils.complex import complex_mm
    import math
    assert (y.size(3) == 2)
    assert (x.size(3) == 2)
    nbatch = x.size(1)
    nfeature_in = x.size(2)
    nfeature_out = y.size(2)
    assert (y.size(1) == nfeature_in)
    nspec = x.size(0)
    assert (y.size(0) == nspec)
    # invert nspec = sum_{l<nl} (2l+1)^2 = nl(4nl^2-1)/3 to recover nl
    nl = math.ceil((((3 / 4) * nspec) ** (1 / 3)))
    assert (nspec == ((nl * ((4 * (nl ** 2)) - 1)) // 3))
    if x.is_cuda:
        return _cuda_SO3_mm.apply(x, y)
    # CPU fallback: one complex matrix product per degree l
    Fz_list = []
    begin = 0
    for l in range(nl):
        L = ((2 * l) + 1)
        size = (L ** 2)  # the (m, n) block of degree l
        Fx = x[begin:(begin + size)]
        Fy = y[begin:(begin + size)]
        # rearrange to [nbatch*L, nfeature_in*L] so the sum over (p, f_in)
        # becomes a single complex matrix product
        Fx = Fx.view(L, L, nbatch, nfeature_in, 2)
        Fx = Fx.transpose(0, 1)
        Fx = Fx.transpose(0, 2)
        Fx = Fx.transpose(2, 3)
        Fx = Fx.contiguous()
        Fx = Fx.view((nbatch * L), (nfeature_in * L), 2)
        Fy = Fy.view(L, L, nfeature_in, nfeature_out, 2)
        Fy = Fy.transpose(0, 2)
        Fy = Fy.contiguous()
        Fy = Fy.view((nfeature_in * L), (L * nfeature_out), 2)
        Fz = complex_mm(Fx, Fy, conj_y=True)  # y is conjugated
        Fz = Fz.view(nbatch, (L * L), nfeature_out, 2)
        Fz = Fz.transpose(0, 1)
        Fz_list.append(Fz)
        begin += size
    z = torch.cat(Fz_list, 0)
    return z
|
class _cuda_SO3_mm(torch.autograd.Function):
    # Autograd wrapper around the tiled CUDA SO(3) spectral matrix multiply.
    @staticmethod
    def forward(ctx, x, y):
        """
        :param x: [l * m * n, batch, feature_in, complex]
        :param y: [l * m * n, feature_in, feature_out, complex]
        :return: [l * m * n, batch, feature_out, complex]
        """
        assert (x.is_cuda and (x.dtype == torch.float32))
        assert (y.is_cuda and (y.dtype == torch.float32))
        assert (y.size(3) == 2)
        assert (x.size(3) == 2)
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        assert (y.size(1) == nfeature_in)
        nspec = x.size(0)
        assert (y.size(0) == nspec)
        # invert nspec = nl(4nl^2-1)/3 to recover the number of degrees
        nl = round((((3 / 4) * nspec) ** (1 / 3)))
        assert (nspec == ((nl * ((4 * (nl ** 2)) - 1)) // 3))
        ctx.save_for_backward(x, y)
        device = torch.cuda.current_device()
        cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nbatch, nj=nfeature_out, nk=nfeature_in, conj_y=True, trans_y_spec=True, device=device)
        output = x.new_empty((nspec, nbatch, nfeature_out, 2))
        cuda_kernel(x, y, output)  # fills output in place
        return output
    @staticmethod
    def backward(ctx, gradz):
        # The gradients are themselves spectral multiplies, expressed via the
        # same kernel with transposed/conjugated index layouts.
        (x, y) = ctx.saved_tensors
        nspec = x.size(0)
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        nl = round((((3 / 4) * nspec) ** (1 / 3)))
        assert (nspec == ((nl * ((4 * (nl ** 2)) - 1)) // 3))
        gradx = grady = None
        device = torch.cuda.current_device()
        if ctx.needs_input_grad[0]:
            gradx_cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nbatch, nj=nfeature_in, nk=nfeature_out, trans_y_feature=True, device=device)
            gradx = gradz.new_empty((nspec, nbatch, nfeature_in, 2))
            gradx_cuda_kernel(gradz, y, gradx)
        if ctx.needs_input_grad[1]:
            grady_cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nfeature_out, nj=nfeature_in, nk=nbatch, trans_out_feature=True, conj_x=True, trans_x_spec=True, trans_x_feature=True, device=device)
            grady = gradz.new_empty((nspec, nfeature_in, nfeature_out, 2))
            grady_cuda_kernel(gradz, x, grady)
        return (gradx, grady)
|
@lru_cache(maxsize=32)
def _setup_so3mm_cuda_kernel(nl, ni, nj, nk, conj_x=False, conj_y=False, trans_x_spec=False, trans_x_feature=False, trans_y_spec=False, trans_y_feature=False, trans_out_feature=False, device=0):
    """Compile (and cache) a configurable tiled SO(3) spectral multiply kernel.

    Returns a function that computes
        out[l*m*n, i, j] = sum_k sum_p x[l*m*p, i, k] y[l*p*n, k, j]
    where out, x, y are complex valued.

    if conj_x is set to True, x is conjugated
    if conj_y is set to True, y is conjugated
    if trans_x_spec is set to True m and p are permuted in x[...]
    if trans_y_spec is set to True p and n are permuted in y[...]
    if trans_x_feature is set to True i and k are permuted in x[...]
    if trans_y_feature is set to True k and j are permuted in y[...]
    if trans_out_feature is set to True i and j are permuted in out[...]
    """
    kernel = '\n#define NI {}\n#define NJ {}\n#define NK {}\n'.format(ni, nj, nk)
    # select the indexing macro for x according to the requested permutations
    if ((not trans_x_spec) and (not trans_x_feature)):
        kernel += '#define INDEX_X (((L0 + m * L + p) * NI + i) * NK + k)\n'
    if ((not trans_x_spec) and trans_x_feature):
        kernel += '#define INDEX_X (((L0 + m * L + p) * NK + k) * NI + i)\n'
    if (trans_x_spec and (not trans_x_feature)):
        kernel += '#define INDEX_X (((L0 + p * L + m) * NI + i) * NK + k)\n'
    if (trans_x_spec and trans_x_feature):
        kernel += '#define INDEX_X (((L0 + p * L + m) * NK + k) * NI + i)\n'
    # select the indexing macro for y
    if ((not trans_y_spec) and (not trans_y_feature)):
        kernel += '#define INDEX_Y (((L0 + p * L + n) * NK + k) * NJ + j)\n'
    if ((not trans_y_spec) and trans_y_feature):
        kernel += '#define INDEX_Y (((L0 + p * L + n) * NJ + j) * NK + k)\n'
    if (trans_y_spec and (not trans_y_feature)):
        kernel += '#define INDEX_Y (((L0 + n * L + p) * NK + k) * NJ + j)\n'
    if (trans_y_spec and trans_y_feature):
        kernel += '#define INDEX_Y (((L0 + n * L + p) * NJ + j) * NK + k)\n'
    # select the indexing macro for the output
    if (not trans_out_feature):
        kernel += '#define INDEX_OUT (((L0 + m * L + n) * NI + i) * NJ + j)\n'
    if trans_out_feature:
        kernel += '#define INDEX_OUT (((L0 + m * L + n) * NJ + j) * NI + i)\n'
    kernel += '\n#define CONJ_X {}\n#define CONJ_Y {}\n'.format(('x_im = -x_im;' if conj_x else ';'), ('y_im = -y_im;' if conj_y else ';'))
    # 32x32 shared-memory tiles; one CUDA block per (m-i tile, n-j tile, degree l)
    kernel += '\n#define CEIL_DIV(x, y) (((x) + (y) - 1) / (y))\n\nextern "C"\n__global__ void main_(const float* in_x, const float* in_y, float* out)\n{\n    // start of thread independant code\n    int l = blockIdx.z;\n    int L = 2 * l + 1;\n    int L0 = (4 * l*l - 1) * l / 3;\n\n    if (blockIdx.y * 32 >= L * NI || blockIdx.x * 32 >= L * NJ) {\n        return;\n    }\n\n    int ntile = CEIL_DIV(L * NK, 32);\n    // end of thread independant code\n\n    int mi = blockIdx.y * 32 + threadIdx.y;\n    int m = mi / NI;\n    int i = mi % NI;\n    int nj = blockIdx.x * 32 + threadIdx.x;\n    int n = nj / NJ;\n    int j = nj % NJ;\n\n    float sum_re = 0.0;\n    float sum_im = 0.0;\n\n    for (int tile = 0; tile < ntile; ++tile) {\n        __shared__ float tileX[2][32][32];\n        __shared__ float tileY[2][32][32];\n\n        int pk = tile * 32 + threadIdx.x;\n        int p = pk / NK;\n        int k = pk % NK;\n        int index = INDEX_X * 2;\n        tileX[0][threadIdx.y][threadIdx.x] = m < L && p < L ? in_x[index + 0] : 0.0;\n        tileX[1][threadIdx.y][threadIdx.x] = m < L && p < L ? in_x[index + 1] : 0.0;\n\n        pk = tile * 32 + threadIdx.y;\n        p = pk / NK;\n        k = pk % NK;\n        index = INDEX_Y * 2;\n        tileY[0][threadIdx.y][threadIdx.x] = p < L && n < L ? in_y[index + 0] : 0.0;\n        tileY[1][threadIdx.y][threadIdx.x] = p < L && n < L ? in_y[index + 1] : 0.0;\n\n        __syncthreads();\n\n        for (int any = 0; any < 32; ++any) {\n            float x_re = tileX[0][threadIdx.y][any];\n            float x_im = tileX[1][threadIdx.y][any];\n            float y_re = tileY[0][any][threadIdx.x];\n            float y_im = tileY[1][any][threadIdx.x];\n\n            CONJ_X\n            CONJ_Y\n\n            sum_re += x_re * y_re - x_im * y_im;\n            sum_im += x_re * y_im + x_im * y_re;\n        }\n\n        __syncthreads();\n    }\n\n    if (m < L && n < L) {\n        int index = INDEX_OUT * 2;\n        out[index + 0] = sum_re;\n        out[index + 1] = sum_im;\n    }\n}\n'
    import s2cnn.utils.cuda as cuda_utils
    kernel = cuda_utils.compile_kernel(kernel, 'so3_mm.cu', 'main_')
    stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
    def fun(x, y, output):
        # launch with one z-slice per degree l; (2nl-1) is the largest L
        assert output.is_contiguous()
        kernel(block=(32, 32, 1), grid=(math.ceil(((((2 * nl) - 1) * nj) / 32)), math.ceil(((((2 * nl) - 1) * ni) / 32)), nl), args=[x.contiguous().data_ptr(), y.contiguous().data_ptr(), output.data_ptr()], stream=stream)
    return fun
|
def test_compare_cuda_cpu():
    """Check that the CPU and CUDA implementations of so3_mm agree."""
    # 1+9+25+49 = SO(3) spectral size for 4 degrees; last dim is (re, im)
    x = torch.rand(1 + 9 + 25 + 49, 2, 3, 2)
    y = torch.rand(1 + 9 + 25 + 49, 3, 5, 2)
    z_cpu = so3_mm(x, y)
    z_gpu = so3_mm(x.cuda(), y.cuda()).cpu()
    relative_error = (z_cpu - z_gpu).abs().max().item() / z_cpu.std().item()
    print(relative_error)
    assert relative_error < 0.0001
|
class S2Convolution(Module):
    """Spherical (S2) convolution layer: input on the sphere at bandwidth
    b_in, output a signal on SO(3) at bandwidth b_out."""

    def __init__(self, nfeature_in, nfeature_out, b_in, b_out, grid):
        """
        :param nfeature_in: number of input features
        :param nfeature_out: number of output features
        :param b_in: input bandwidth (precision of the input SOFT grid)
        :param b_out: output bandwidth
        :param grid: points of the sphere defining the kernel, tuple of (alpha, beta)'s
        """
        super(S2Convolution, self).__init__()
        self.nfeature_in = nfeature_in
        self.nfeature_out = nfeature_out
        self.b_in = b_in
        self.b_out = b_out
        self.grid = grid
        # uniform [-1, 1] init; rescaled by self.scaling at forward time
        self.kernel = Parameter(torch.empty(nfeature_in, nfeature_out, len(grid)).uniform_(-1, 1))
        self.scaling = 1.0 / math.sqrt(len(self.grid) * self.nfeature_in * (self.b_out ** 4.0) / (self.b_in ** 2.0))
        self.bias = Parameter(torch.zeros(1, nfeature_out, 1, 1, 1))

    def forward(self, x):
        """
        :x: [batch, feature_in, beta, alpha]
        :return: [batch, feature_out, beta, alpha, gamma]
        """
        assert x.size(1) == self.nfeature_in
        assert x.size(2) == 2 * self.b_in
        assert x.size(3) == 2 * self.b_in

        spectrum = S2_fft_real.apply(x, self.b_out)
        kernel_spectrum = s2_rft(self.kernel * self.scaling, self.b_out, self.grid)
        output = SO3_ifft_real.apply(s2_mm(spectrum, kernel_spectrum))
        return output + self.bias
|
class SO3Convolution(Module):
    """SO(3) group convolution layer: input and output are both signals on
    SO(3), sampled on SOFT grids of bandwidth b_in and b_out."""

    def __init__(self, nfeature_in, nfeature_out, b_in, b_out, grid):
        """
        :param nfeature_in: number of input features
        :param nfeature_out: number of output features
        :param b_in: input bandwidth (precision of the input SOFT grid)
        :param b_out: output bandwidth
        :param grid: points of the SO(3) group defining the kernel, tuple of (alpha, beta, gamma)'s
        """
        super(SO3Convolution, self).__init__()
        self.nfeature_in = nfeature_in
        self.nfeature_out = nfeature_out
        self.b_in = b_in
        self.b_out = b_out
        self.grid = grid
        # uniform [-1, 1] init; rescaled by self.scaling at forward time
        self.kernel = Parameter(torch.empty(nfeature_in, nfeature_out, len(grid)).uniform_(-1, 1))
        self.bias = Parameter(torch.zeros(1, nfeature_out, 1, 1, 1))
        self.scaling = 1.0 / math.sqrt(len(self.grid) * self.nfeature_in * (self.b_out ** 3.0) / (self.b_in ** 3.0))

    def forward(self, x):
        """
        :x: [batch, feature_in, beta, alpha, gamma]
        :return: [batch, feature_out, beta, alpha, gamma]
        """
        assert x.size(1) == self.nfeature_in
        assert x.size(2) == 2 * self.b_in
        assert x.size(3) == 2 * self.b_in
        assert x.size(4) == 2 * self.b_in

        spectrum = SO3_fft_real.apply(x, self.b_out)
        kernel_spectrum = so3_rft(self.kernel * self.scaling, self.b_out, self.grid)
        assert spectrum.size(0) == kernel_spectrum.size(0)
        assert spectrum.size(2) == kernel_spectrum.size(1)
        product = so3_mm(spectrum, kernel_spectrum)
        assert product.size(0) == spectrum.size(0)
        assert product.size(1) == spectrum.size(1)
        assert product.size(2) == kernel_spectrum.size(2)
        return SO3_ifft_real.apply(product) + self.bias
|
class SO3Shortcut(Module):
    """ResNet-style shortcut on SO(3): identity when the feature count and
    bandwidth already match, otherwise a single-point SO3Convolution."""

    def __init__(self, nfeature_in, nfeature_out, b_in, b_out):
        super(SO3Shortcut, self).__init__()
        assert b_out <= b_in
        if nfeature_in == nfeature_out and b_in == b_out:
            self.conv = None
        else:
            self.conv = SO3Convolution(nfeature_in=nfeature_in, nfeature_out=nfeature_out, b_in=b_in, b_out=b_out, grid=((0, 0, 0),))

    def forward(self, x):
        """
        :x: [batch, feature_in, beta, alpha, gamma]
        :return: [batch, feature_out, beta, alpha, gamma]
        """
        return x if self.conv is None else self.conv(x)
|
def so3_integrate(x):
    """Integrate a signal on SO(3) using the Haar measure.

    :param x: [..., beta, alpha, gamma] (..., 2b, 2b, 2b)
    :return y: [...] (...)
    """
    assert (x.size((- 1)) == x.size((- 2)))
    assert (x.size((- 2)) == x.size((- 3)))
    b = (x.size((- 1)) // 2)
    # quadrature weights over beta (cached per device)
    w = _setup_so3_integrate(b, device_type=x.device.type, device_index=x.device.index)
    # sum over gamma, then alpha (uniform weights in those angles).
    # NOTE(review): torch.sum already drops the summed dim; the extra
    # squeeze(-1) only has an effect if the remaining last dim happens to
    # have size 1 -- confirm this is intended.
    x = torch.sum(x, dim=(- 1)).squeeze((- 1))
    x = torch.sum(x, dim=(- 1)).squeeze((- 1))
    sz = x.size()
    x = x.view((- 1), (2 * b))
    w = w.view((2 * b), 1)
    # quadrature-weighted sum over beta
    x = torch.mm(x, w).squeeze((- 1))
    x = x.view(*sz[:(- 1)])
    return x
|
@lru_cache(maxsize=32)
@show_running
def _setup_so3_integrate(b, device_type, device_index):
    # S3 quadrature weights over beta for bandwidth b, as a float32 tensor
    # on the requested device (memoized per (b, device)).
    import lie_learn.spaces.S3 as S3
    return torch.tensor(S3.quadrature_weights(b), dtype=torch.float32, device=torch.device(device_type, device_index))
|
def so3_rotation(x, alpha, beta, gamma):
    """Rotate a signal on SO(3) by the Euler angles (alpha, beta, gamma).

    :param x: [..., beta, alpha, gamma] (..., 2b, 2b, 2b)
    """
    b = (x.size()[(- 1)] // 2)
    x_size = x.size()
    # Wigner D matrices of the rotation, one per degree l < b
    Us = _setup_so3_rotation(b, alpha, beta, gamma, device_type=x.device.type, device_index=x.device.index)
    # a rotation acts block-diagonally in the spectral domain
    x = SO3_fft_real.apply(x)
    Fz_list = []
    begin = 0
    for l in range(b):
        L = ((2 * l) + 1)
        size = (L ** 2)  # spectral block of degree l
        Fx = x[begin:(begin + size)]
        Fx = Fx.view(L, (- 1), 2)
        U = Us[l].view(L, L, 2)
        Fz = complex_mm(U, Fx, conj_x=True)  # apply the conjugated D matrix
        Fz = Fz.view(size, (- 1), 2)
        Fz_list.append(Fz)
        begin += size
    Fz = torch.cat(Fz_list, 0)
    z = SO3_ifft_real.apply(Fz)
    z = z.contiguous()
    z = z.view(*x_size)
    return z
|
@cached_dirpklgz('cache/setup_so3_rotation')
def __setup_so3_rotation(b, alpha, beta, gamma):
    # Precompute (and cache on disk) the complex Wigner D matrices of the
    # rotation, reinterpreted as float32 arrays with a trailing (re, im) axis.
    from lie_learn.representations.SO3.wigner_d import wigner_D_matrix
    Us = [wigner_D_matrix(l, alpha, beta, gamma, field='complex', normalization='quantum', order='centered', condon_shortley='cs') for l in range(b)]
    Us = [Us[l].astype(np.complex64).view(np.float32).reshape((((2 * l) + 1), ((2 * l) + 1), 2)) for l in range(b)]
    return Us
|
@lru_cache(maxsize=32)
def _setup_so3_rotation(b, alpha, beta, gamma, device_type, device_index):
    """Cached per-device float32 torch tensors of the rotation's Wigner D matrices."""
    device = torch.device(device_type, device_index)
    return [torch.tensor(U, dtype=torch.float32, device=device)
            for U in __setup_so3_rotation(b, alpha, beta, gamma)]
|
def get_blocks(n, num_threads):
    """Number of CUDA blocks needed to cover `n` elements with `num_threads`
    threads per block, capped so the grid never exceeds CUDA_MAX_GRID_DIM
    (each thread then handles several elements)."""
    def ceil_div(a, b):
        return (a + b - 1) // b
    per_thread = ceil_div(n, num_threads * CUDA_MAX_GRID_DIM)
    return ceil_div(n, num_threads * per_thread)
|
def compile_kernel(kernel, filename, functioname):
    """Compile CUDA source `kernel` with NVRTC and return the named function."""
    ptx = Program(kernel, filename).compile()
    module = function.Module()
    module.load(bytes(ptx.encode()))
    return module.get_function(functioname)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.