code stringlengths 17 6.64M |
|---|
class MuSigmaEncoder(nn.Module):
    """Turn an aggregated representation r into the parameters (mu, sigma)
    of the Normal distribution from which the latent variable z is sampled.

    Parameters
    ----------
    r_dim : int
        Dimension of the representation r.
    z_dim : int
        Dimension of the latent variable z.
    """

    def __init__(self, r_dim, z_dim):
        super(MuSigmaEncoder, self).__init__()
        self.r_dim = r_dim
        self.z_dim = z_dim
        self.r_to_hidden = nn.Linear(r_dim, r_dim)
        self.hidden_to_mu = nn.Linear(r_dim, z_dim)
        self.hidden_to_sigma = nn.Linear(r_dim, z_dim)

    def forward(self, r):
        """r : torch.Tensor of shape (batch_size, r_dim)."""
        h = torch.relu(self.r_to_hidden(r))
        mu = self.hidden_to_mu(h)
        # Squash into (0.1, 1.0) so sigma is always strictly positive.
        sigma = 0.1 + 0.9 * torch.sigmoid(self.hidden_to_sigma(h))
        return mu, sigma
|
class Decoder(nn.Module):
    """Map target inputs x_target together with a latent sample z (which
    encodes the context points) to the parameters of the predictive
    distribution over y_target.

    Parameters
    ----------
    x_dim : int
        Dimension of x values.
    z_dim : int
        Dimension of latent variable z.
    h_dim : int
        Dimension of hidden layers.
    y_dim : int
        Dimension of y values.
    """

    def __init__(self, x_dim, z_dim, h_dim, y_dim):
        super(Decoder, self).__init__()
        self.x_dim = x_dim
        self.z_dim = z_dim
        self.h_dim = h_dim
        self.y_dim = y_dim
        self.xz_to_hidden = nn.Sequential(
            nn.Linear(x_dim + z_dim, h_dim), nn.ReLU(inplace=True),
            nn.Linear(h_dim, h_dim), nn.ReLU(inplace=True),
            nn.Linear(h_dim, h_dim), nn.ReLU(inplace=True))
        self.hidden_to_mu = nn.Linear(h_dim, y_dim)
        self.hidden_to_sigma = nn.Linear(h_dim, y_dim)

    def forward(self, x, z):
        """x : (batch_size, num_points, x_dim); z : (batch_size, z_dim).

        Returns mu and sigma for the output distribution, each of shape
        (batch_size, num_points, y_dim).
        """
        batch_size, num_points, _ = x.size()
        # Broadcast z over every target point, then flatten for the MLP.
        z_rep = z.unsqueeze(1).repeat(1, num_points, 1)
        pairs = torch.cat((x.view(-1, self.x_dim), z_rep.view(-1, self.z_dim)), dim=1)
        hidden = self.xz_to_hidden(pairs)
        mu = self.hidden_to_mu(hidden).view(batch_size, num_points, self.y_dim)
        pre_sigma = self.hidden_to_sigma(hidden).view(batch_size, num_points, self.y_dim)
        # Softplus shifted so sigma stays bounded away from zero.
        sigma = 0.1 + 0.9 * F.softplus(pre_sigma)
        return mu, sigma
|
class NeuralProcess(nn.Module):
    """Neural Process for functions of arbitrary dimensions.

    Parameters
    ----------
    x_dim : int
        Dimension of x values.
    y_dim : int
        Dimension of y values.
    r_dim : int
        Dimension of output representation r.
    z_dim : int
        Dimension of latent variable z.
    h_dim : int
        Dimension of hidden layers in encoder and decoder.
    """

    def __init__(self, x_dim, y_dim, r_dim, z_dim, h_dim):
        super(NeuralProcess, self).__init__()
        self.x_dim = x_dim
        self.y_dim = y_dim
        self.r_dim = r_dim
        self.z_dim = z_dim
        self.h_dim = h_dim
        self.xy_to_r = Encoder(x_dim, y_dim, h_dim, r_dim)
        self.r_to_mu_sigma = MuSigmaEncoder(r_dim, z_dim)
        self.xz_to_y = Decoder(x_dim, z_dim, h_dim, y_dim)

    def aggregate(self, r_i):
        """Average per-point representations (batch, num_points, r_dim)
        into one representation per batch element."""
        return r_i.mean(dim=1)

    def xy_to_mu_sigma(self, x, y):
        """Map (x, y) sets to the (mu, sigma) of the latent distribution.

        x : (batch_size, num_points, x_dim); y : (batch_size, num_points, y_dim).
        """
        batch_size, num_points, _ = x.size()
        # Encode every (x_i, y_i) pair independently...
        x_flat = x.view(batch_size * num_points, self.x_dim)
        y_flat = y.contiguous().view(batch_size * num_points, self.y_dim)
        r_i = self.xy_to_r(x_flat, y_flat).view(batch_size, num_points, self.r_dim)
        # ...then pool and map to latent parameters.
        return self.r_to_mu_sigma(self.aggregate(r_i))

    def forward(self, x_context, y_context, x_target, y_target=None):
        """Given context pairs and target inputs, return a distribution over
        y_target (plus the latent distributions during training).

        Follows "Empirical Evaluation of Neural Process Objectives": context
        points are a subset of target points.
        """
        if self.training:
            # Encode both the target (superset) and context sets.
            q_target = Normal(*self.xy_to_mu_sigma(x_target, y_target))
            q_context = Normal(*self.xy_to_mu_sigma(x_context, y_context))
            # Sample z from the richer target posterior during training.
            z_sample = q_target.rsample()
            p_y_pred = Normal(*self.xz_to_y(x_target, z_sample))
            return p_y_pred, q_target, q_context
        # At test time only the context is available.
        q_context = Normal(*self.xy_to_mu_sigma(x_context, y_context))
        z_sample = q_context.rsample()
        return Normal(*self.xz_to_y(x_target, z_sample))
|
class NeuralProcessImg(nn.Module):
    """Neural Process wrapper that treats an image as a function from 2-D
    pixel coordinates (x) to per-channel intensities (y).

    Parameters
    ----------
    img_size : tuple of ints
        E.g. (1, 28, 28) or (3, 32, 32).
    r_dim, z_dim, h_dim : int
        Dimensions forwarded to the underlying NeuralProcess.
    """

    def __init__(self, img_size, r_dim, z_dim, h_dim):
        super(NeuralProcessImg, self).__init__()
        self.img_size = img_size
        self.num_channels, self.height, self.width = img_size
        self.r_dim = r_dim
        self.z_dim = z_dim
        self.h_dim = h_dim
        # x is a 2-D pixel coordinate; y is the per-channel intensity.
        self.neural_process = NeuralProcess(x_dim=2,
                                            y_dim=self.num_channels,
                                            r_dim=r_dim,
                                            z_dim=z_dim,
                                            h_dim=h_dim)

    def forward(self, img, context_mask, target_mask):
        """img : (batch, channels, height, width); masks : (batch, height,
        width) binary masks selecting context / target pixels. Returns a
        distribution over intensities at the target pixels."""
        x_context, y_context = img_mask_to_np_input(img, context_mask)
        x_target, y_target = img_mask_to_np_input(img, target_mask)
        return self.neural_process(x_context, y_context, x_target, y_target)
|
class NeuralProcessTrainer():
    """Handles training of Neural Processes for functions and images.

    Parameters
    ----------
    device : torch.device
    neural_process : NeuralProcess or NeuralProcessImg instance
    optimizer : one of torch.optim optimizers
    num_context_range : tuple of ints
        Number of context points is sampled uniformly from this range.
    num_extra_target_range : tuple of ints
        Number of extra target points (context is always a subset of the
        targets) is sampled uniformly from this range.
    print_freq : int
        How often (in steps) to print loss information.
    """

    def __init__(self, device, neural_process, optimizer, num_context_range,
                 num_extra_target_range, print_freq=100):
        self.device = device
        self.neural_process = neural_process
        self.optimizer = optimizer
        self.num_context_range = num_context_range
        self.num_extra_target_range = num_extra_target_range
        self.print_freq = print_freq
        # Image models consume pixel masks instead of (x, y) splits.
        self.is_img = isinstance(self.neural_process, NeuralProcessImg)
        self.steps = 0
        self.epoch_loss_history = []

    def train(self, data_loader, epochs):
        """Train the wrapped Neural Process for `epochs` passes over
        `data_loader`."""
        for epoch in range(epochs):
            running_loss = 0.0
            for batch in data_loader:
                self.optimizer.zero_grad()
                num_context = randint(*self.num_context_range)
                num_extra_target = randint(*self.num_extra_target_range)
                if self.is_img:
                    img, _ = batch
                    context_mask, target_mask = batch_context_target_mask(
                        self.neural_process.img_size, num_context,
                        num_extra_target, img.size(0))
                    img = img.to(self.device)
                    context_mask = context_mask.to(self.device)
                    target_mask = target_mask.to(self.device)
                    p_y_pred, q_target, q_context = self.neural_process(
                        img, context_mask, target_mask)
                    _, y_target = img_mask_to_np_input(img, target_mask)
                else:
                    x, y = batch
                    x_context, y_context, x_target, y_target = \
                        context_target_split(x, y, num_context, num_extra_target)
                    p_y_pred, q_target, q_context = self.neural_process(
                        x_context, y_context, x_target, y_target)
                loss = self._loss(p_y_pred, y_target, q_target, q_context)
                loss.backward()
                self.optimizer.step()
                running_loss += loss.item()
                self.steps += 1
                if (self.steps % self.print_freq) == 0:
                    print('iteration {}, loss {:.3f}'.format(self.steps, loss.item()))
            avg_loss = running_loss / len(data_loader)
            print('Epoch: {}, Avg_loss: {}'.format(epoch, avg_loss))
            self.epoch_loss_history.append(avg_loss)

    def _loss(self, p_y_pred, y_target, q_target, q_context):
        """Negative log-likelihood of the targets plus KL(q_target || q_context).

        Both terms are averaged over the batch and summed over the
        remaining dimensions.
        """
        nll = -p_y_pred.log_prob(y_target).mean(dim=0).sum()
        kl = kl_divergence(q_target, q_context).mean(dim=0).sum()
        return nll + kl
|
def _is_pil_image(img):
    """Return True if `img` is a PIL Image instance."""
    return isinstance(img, Image.Image)
|
def _is_numpy_image(img):
return (isinstance(img, np.ndarray) and (img.ndim in {2, 3}))
|
class RandomHorizontalFlip(object):
    """Flip image and depth map left-right together with probability 0.5."""

    def __call__(self, sample):
        image, depth = sample['image'], sample['depth']
        for pic in (image, depth):
            if not _is_pil_image(pic):
                raise TypeError('img should be PIL Image. Got {}'.format(type(pic)))
        if random.random() < 0.5:
            # Apply the same flip to both so they stay aligned.
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
            depth = depth.transpose(Image.FLIP_LEFT_RIGHT)
        return {'image': image, 'depth': depth}
|
class RandomChannelSwap(object):
    """With the given probability, permute the RGB channels of the image.
    The depth map is left untouched."""

    def __init__(self, probability):
        from itertools import permutations
        self.probability = probability
        # All 6 orderings of the 3 color channels (identity included).
        self.indices = list(permutations(range(3), 3))

    def __call__(self, sample):
        image, depth = sample['image'], sample['depth']
        for pic in (image, depth):
            if not _is_pil_image(pic):
                raise TypeError('img should be PIL Image. Got {}'.format(type(pic)))
        if random.random() < self.probability:
            arr = np.asarray(image)
            # randint (not random.choice) kept so RNG consumption matches.
            perm = self.indices[random.randint(0, len(self.indices) - 1)]
            image = Image.fromarray(arr[..., list(perm)])
        return {'image': image, 'depth': depth}
|
def loadZipToMem(zip_file):
    """Load the NYU dataset zip entirely into memory.

    Returns (raw file bytes keyed by archive name, deterministically
    shuffled list of train csv rows).
    """
    print('Loading dataset zip file...', end='')
    from zipfile import ZipFile
    archive = ZipFile(zip_file)
    data = {name: archive.read(name) for name in archive.namelist()}
    csv_text = data['data/nyu2_train.csv'].decode('utf-8')
    nyu2_train = [row.split(',') for row in csv_text.split('\n') if len(row) > 0]
    from sklearn.utils import shuffle
    # Fixed seed so the ordering is reproducible across runs.
    nyu2_train = shuffle(nyu2_train, random_state=0)
    print('Loaded ({0}).'.format(len(nyu2_train)))
    return data, nyu2_train
|
class depthDatasetMemory(Dataset):
    """Dataset of (image, depth) PIL pairs decoded from in-memory zip bytes.

    `nyu2_train` rows are (image_name, depth_name) pairs indexing into
    `data`; `transform`, if given, is applied to each sample dict.
    """

    def __init__(self, data, nyu2_train, transform=None):
        self.data = data
        self.nyu_dataset = nyu2_train
        self.transform = transform

    def __getitem__(self, idx):
        row = self.nyu_dataset[idx]
        sample = {'image': Image.open(BytesIO(self.data[row[0]])),
                  'depth': Image.open(BytesIO(self.data[row[1]]))}
        if self.transform:
            sample = self.transform(sample)
        return sample

    def __len__(self):
        return len(self.nyu_dataset)
|
class ToTensor(object):
    """Convert a {'image', 'depth'} sample of PIL Images to torch tensors.

    Depth is resized to 320x240 and rescaled: in test mode raw values are
    divided by 1000, otherwise multiplied by 1000 and clamped to [10, 1000]
    (presumably converting to/from millimetre-like units -- TODO confirm
    against the dataset's depth encoding).
    """
    def __init__(self, is_test=False):
        # is_test selects the depth-scaling branch in __call__.
        self.is_test = is_test
    def __call__(self, sample):
        (image, depth) = (sample['image'], sample['depth'])
        image = self.to_tensor(image)
        depth = depth.resize((320, 240))
        if self.is_test:
            depth = (self.to_tensor(depth).float() / 1000)
        else:
            depth = (self.to_tensor(depth).float() * 1000)
            depth = torch.clamp(depth, 10, 1000)
        return {'image': image, 'depth': depth}
    def to_tensor(self, pic):
        """Convert a PIL Image or HWC ndarray to a CHW tensor.

        8-bit inputs are scaled to [0, 1]; integer modes 'I' / 'I;16' are
        returned as int tensors without scaling.
        """
        if (not (_is_pil_image(pic) or _is_numpy_image(pic))):
            raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
        if isinstance(pic, np.ndarray):
            # HWC -> CHW, then scale 8-bit values to [0, 1].
            img = torch.from_numpy(pic.transpose((2, 0, 1)))
            return img.float().div(255)
        # PIL path: pick a tensor dtype matching the image mode.
        if (pic.mode == 'I'):
            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
        elif (pic.mode == 'I;16'):
            img = torch.from_numpy(np.array(pic, np.int16, copy=False))
        else:
            img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
        # Number of channels is implied by the PIL mode string.
        if (pic.mode == 'YCbCr'):
            nchannel = 3
        elif (pic.mode == 'I;16'):
            nchannel = 1
        else:
            nchannel = len(pic.mode)
        # PIL size is (W, H); reshape to (H, W, C) then permute to (C, H, W).
        img = img.view(pic.size[1], pic.size[0], nchannel)
        img = img.transpose(0, 1).transpose(0, 2).contiguous()
        if isinstance(img, torch.ByteTensor):
            return img.float().div(255)
        else:
            return img
|
def getNoTransform(is_test=False):
    """Transform pipeline that only converts samples to tensors."""
    pipeline = [ToTensor(is_test=is_test)]
    return transforms.Compose(pipeline)
|
def getDefaultTrainTransform():
    """Training pipeline: random flip, random channel swap, then to-tensor."""
    augmentations = [RandomHorizontalFlip(), RandomChannelSwap(0.5), ToTensor()]
    return transforms.Compose(augmentations)
|
def getTrainingTestingData(batch_size):
    """Build (train, test) DataLoaders over the in-memory NYU zip.

    NOTE(review): both loaders are built from the same nyu2_train split;
    the "testing" loader only skips augmentation -- confirm intended.
    """
    data, nyu2_train = loadZipToMem('nyu_data.zip')
    train_set = depthDatasetMemory(data, nyu2_train, transform=getDefaultTrainTransform())
    test_set = depthDatasetMemory(data, nyu2_train, transform=getNoTransform())
    return (DataLoader(train_set, batch_size, shuffle=True),
            DataLoader(test_set, batch_size, shuffle=False))
|
def gaussian(window_size, sigma):
    """Return a normalized 1-D Gaussian kernel of length `window_size`
    centered at window_size // 2."""
    center = window_size // 2
    weights = torch.Tensor(
        [exp(-((x - center) ** 2) / float(2 * sigma ** 2)) for x in range(window_size)])
    # Normalize so the kernel sums to 1.
    return weights / weights.sum()
|
def create_window(window_size, channel=1):
    """Build a (channel, 1, size, size) 2-D Gaussian window for the
    depthwise convolutions used by ssim()."""
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    # Outer product of the 1-D kernel with itself gives the 2-D window.
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
    return kernel_2d.expand(channel, 1, window_size, window_size).contiguous()
|
def ssim(img1, img2, val_range, window_size=11, window=None, size_average=True, full=False):
    """Compute SSIM between two image batches of shape (N, C, H, W).

    val_range is the dynamic range L of the pixel values. If `window` is
    None a Gaussian window is created; `size_average` averages over all
    elements, otherwise the mean is per image. With `full=True` the
    contrast-structure term is returned as well.
    """
    L = val_range
    padd = 0
    _, channel, height, width = img1.size()
    if window is None:
        # Shrink the window when the images are smaller than window_size.
        real_size = min(window_size, height, width)
        window = create_window(real_size, channel=channel).to(img1.device)
    # Local means via depthwise convolution.
    mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
    mu2 = F.conv2d(img2, window, padding=padd, groups=channel)
    mu1_sq = mu1 * mu1
    mu2_sq = mu2 * mu2
    mu1_mu2 = mu1 * mu2
    # Local variances and covariance.
    sigma1_sq = F.conv2d(img1 * img1, window, padding=padd, groups=channel) - mu1_sq
    sigma2_sq = F.conv2d(img2 * img2, window, padding=padd, groups=channel) - mu2_sq
    sigma12 = F.conv2d(img1 * img2, window, padding=padd, groups=channel) - mu1_mu2
    # Stability constants from the SSIM paper, scaled by the value range.
    C1 = (0.01 * L) ** 2
    C2 = (0.03 * L) ** 2
    v1 = 2.0 * sigma12 + C2
    v2 = sigma1_sq + sigma2_sq + C2
    cs = torch.mean(v1 / v2)
    ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
    ret = ssim_map.mean() if size_average else ssim_map.mean(1).mean(1).mean(1)
    if full:
        return ret, cs
    return ret
|
class UpSample(nn.Sequential):
    """Decoder block: bilinearly upsample x to the skip feature's spatial
    size, concatenate along channels, then apply two 3x3 conv + LeakyReLU
    layers. `skip_input` is the concatenated channel count."""

    def __init__(self, skip_input, output_features):
        super(UpSample, self).__init__()
        self.convA = nn.Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1)
        self.leakyreluA = nn.LeakyReLU(0.2)
        self.convB = nn.Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1)
        self.leakyreluB = nn.LeakyReLU(0.2)

    def forward(self, x, concat_with):
        # Match the skip connection's spatial size before concatenating.
        up_x = F.interpolate(x, size=[concat_with.size(2), concat_with.size(3)],
                             mode='bilinear', align_corners=True)
        out = torch.cat([up_x, concat_with], dim=1)
        out = self.leakyreluA(self.convA(out))
        return self.leakyreluB(self.convB(out))
|
class Decoder(nn.Module):
    """DenseDepth decoder: a 1x1 bottleneck conv followed by four UpSample
    blocks wired to DenseNet-161 skip connections, then a 3x3 conv down to
    a single depth channel."""

    def __init__(self, num_features=2208, decoder_width=0.5):
        super(Decoder, self).__init__()
        features = int(num_features * decoder_width)
        self.conv2 = nn.Conv2d(num_features, features, kernel_size=1, stride=1, padding=1)
        # Skip channel counts (384/192/96/96) are fixed to DenseNet-161.
        self.up1 = UpSample(skip_input=features // 1 + 384, output_features=features // 2)
        self.up2 = UpSample(skip_input=features // 2 + 192, output_features=features // 4)
        self.up3 = UpSample(skip_input=features // 4 + 96, output_features=features // 8)
        self.up4 = UpSample(skip_input=features // 8 + 96, output_features=features // 16)
        self.conv3 = nn.Conv2d(features // 16, 1, kernel_size=3, stride=1, padding=1)

    def forward(self, features):
        # Indices select intermediate feature maps captured by Encoder.
        x_block0, x_block1, x_block2, x_block3, x_block4 = (
            features[3], features[4], features[6], features[8], features[11])
        out = self.conv2(x_block4)
        out = self.up1(out, x_block3)
        out = self.up2(out, x_block2)
        out = self.up3(out, x_block1)
        out = self.up4(out, x_block0)
        return self.conv3(out)
|
class Encoder(nn.Module):
    """DenseNet-161 feature extractor that returns the input plus every
    intermediate feature map, so the decoder can pick skip connections."""

    def __init__(self):
        super(Encoder, self).__init__()
        import torchvision.models as models
        self.original_model = models.densenet161(pretrained=True)

    def forward(self, x):
        feature_maps = [x]
        # Run each DenseNet feature layer in order, keeping every output.
        for layer in self.original_model.features.children():
            feature_maps.append(layer(feature_maps[-1]))
        return feature_maps
|
class Model(nn.Module):
    """End-to-end depth estimator: DenseNet-161 encoder + upsampling decoder."""

    def __init__(self):
        super(Model, self).__init__()
        self.encoder = Encoder()
        self.decoder = Decoder()

    def forward(self, x):
        features = self.encoder(x)
        return self.decoder(features)
|
def main():
    """Train the DenseDepth model on NYU data (requires CUDA).

    Loss is a weighted sum of clamped SSIM dissimilarity and L1 on
    DepthNorm'd depth; progress is logged to TensorBoard.
    """
    parser = argparse.ArgumentParser(description='High Quality Monocular Depth Estimation via Transfer Learning')
    parser.add_argument('--epochs', default=20, type=int, help='number of total epochs to run')
    parser.add_argument('--lr', '--learning-rate', default=0.0001, type=float, help='initial learning rate')
    parser.add_argument('--bs', default=4, type=int, help='batch size')
    args = parser.parse_args()
    model = Model().cuda()
    print('Model created.')
    optimizer = torch.optim.Adam(model.parameters(), args.lr)
    batch_size = args.bs
    prefix = ('densenet_' + str(batch_size))
    (train_loader, test_loader) = getTrainingTestingData(batch_size=batch_size)
    writer = SummaryWriter(comment='{}-lr{}-e{}-bs{}'.format(prefix, args.lr, args.epochs, args.bs), flush_secs=30)
    l1_criterion = nn.L1Loss()
    for epoch in range(args.epochs):
        batch_time = AverageMeter()
        losses = AverageMeter()
        N = len(train_loader)
        model.train()
        end = time.time()
        for (i, sample_batched) in enumerate(train_loader):
            optimizer.zero_grad()
            # Variable is a no-op wrapper on modern PyTorch; kept as-is.
            image = torch.autograd.Variable(sample_batched['image'].cuda())
            depth = torch.autograd.Variable(sample_batched['depth'].cuda(non_blocking=True))
            depth_n = DepthNorm(depth)
            output = model(image)
            # SSIM term dominates (weight 1.0); L1 contributes 0.1.
            l_depth = l1_criterion(output, depth_n)
            l_ssim = torch.clamp(((1 - ssim(output, depth_n, val_range=(1000.0 / 10.0))) * 0.5), 0, 1)
            loss = ((1.0 * l_ssim) + (0.1 * l_depth))
            losses.update(loss.data.item(), image.size(0))
            loss.backward()
            optimizer.step()
            batch_time.update((time.time() - end))
            end = time.time()
            eta = str(datetime.timedelta(seconds=int((batch_time.val * (N - i)))))
            niter = ((epoch * N) + i)
            if ((i % 5) == 0):
                print('Epoch: [{0}][{1}/{2}]\tTime {batch_time.val:.3f} ({batch_time.sum:.3f})\tETA {eta}\tLoss {loss.val:.4f} ({loss.avg:.4f})'.format(epoch, i, N, batch_time=batch_time, loss=losses, eta=eta))
                writer.add_scalar('Train/Loss', losses.val, niter)
            if ((i % 300) == 0):
                # NOTE(review): LogProgress calls model.eval(); if it does not
                # restore train mode, batches after this run with BatchNorm/
                # Dropout in eval mode until the next epoch -- confirm.
                LogProgress(model, writer, test_loader, niter)
        LogProgress(model, writer, test_loader, niter)
        writer.add_scalar('Train/Loss.avg', losses.avg, epoch)
|
def LogProgress(model, writer, test_loader, epoch):
    """Log one test batch (input image, ground-truth depth, prediction and
    absolute error) to TensorBoard as image grids.

    Fixes over the original:
    - the model's train/eval mode is restored on exit, so calling this
      mid-epoch no longer silently leaves BatchNorm/Dropout in eval mode
      for the remaining training batches;
    - inference runs under torch.no_grad() so no autograd graph is built
      for logging-only work.
    """
    was_training = model.training
    model.eval()
    sample_batched = next(iter(test_loader))
    image = sample_batched['image'].cuda()
    depth = sample_batched['depth'].cuda(non_blocking=True)
    # Input and ground truth only change per run, so log them once.
    if epoch == 0:
        writer.add_image('Train.1.Image', vutils.make_grid(image.data, nrow=6, normalize=True), epoch)
        writer.add_image('Train.2.Depth', colorize(vutils.make_grid(depth.data, nrow=6, normalize=False)), epoch)
    with torch.no_grad():
        output = DepthNorm(model(image))
    writer.add_image('Train.3.Ours', colorize(vutils.make_grid(output.data, nrow=6, normalize=False)), epoch)
    writer.add_image('Train.3.Diff', colorize(vutils.make_grid(torch.abs((output - depth)).data, nrow=6, normalize=False)), epoch)
    del image
    del depth
    del output
    # Restore whatever mode the caller had the model in.
    if was_training:
        model.train()
|
class DataLoader():
    """tf.data pipeline over the NYU train csv (RGB jpegs + depth maps).

    Fix over the original: the csv file handle in read_nyu_data is closed
    via a context manager instead of being leaked.
    """

    def __init__(self, csv_file='data/nyu2_train.csv', DEBUG=False):
        self.shape_rgb = (480, 640, 3)
        self.shape_depth = (240, 320, 1)
        self.read_nyu_data(csv_file, DEBUG=DEBUG)

    def nyu_resize(self, img, resolution=480, padding=6):
        """Resize img to (resolution, resolution*4/3) preserving value range."""
        from skimage.transform import resize
        return resize(img, (resolution, int((resolution * 4) / 3)),
                      preserve_range=True, mode='reflect', anti_aliasing=True)

    def read_nyu_data(self, csv_file, DEBUG=False):
        """Read (rgb_path, depth_path) rows and shuffle deterministically.

        With DEBUG=True only the first 10 rows are kept.
        """
        with open(csv_file, 'r') as f:
            csv_text = f.read()
        nyu2_train = [row.split(',') for row in csv_text.split('\n') if len(row) > 0]
        nyu2_train = shuffle(nyu2_train, random_state=0)
        if DEBUG:
            nyu2_train = nyu2_train[:10]
        self.filenames = [i[0] for i in nyu2_train]
        self.labels = [i[1] for i in nyu2_train]
        self.length = len(self.filenames)

    def _parse_function(self, filename, label):
        """Decode one (rgb, depth) pair; depth is resized, normalized and
        inverted via 1000 / clip(depth*1000, 10, 1000)."""
        image_decoded = tf.image.decode_jpeg(tf.io.read_file(filename))
        depth_resized = tf.image.resize(tf.image.decode_jpeg(tf.io.read_file(label)),
                                        [self.shape_depth[0], self.shape_depth[1]])
        rgb = tf.image.convert_image_dtype(image_decoded, dtype=tf.float32)
        depth = tf.image.convert_image_dtype((depth_resized / 255.0), dtype=tf.float32)
        depth = (1000 / tf.clip_by_value((depth * 1000), 10, 1000))
        return (rgb, depth)

    def get_batched_dataset(self, batch_size):
        """Shuffled, repeated, batched tf.data.Dataset of decoded pairs."""
        self.dataset = tf.data.Dataset.from_tensor_slices((self.filenames, self.labels))
        self.dataset = self.dataset.shuffle(buffer_size=len(self.filenames), reshuffle_each_iteration=True)
        self.dataset = self.dataset.repeat()
        self.dataset = self.dataset.map(map_func=self._parse_function,
                                        num_parallel_calls=tf.data.experimental.AUTOTUNE)
        self.dataset = self.dataset.batch(batch_size=batch_size)
        return self.dataset
|
def depth_loss_function(y_true, y_pred, theta=0.1, maxDepthVal=(1000.0 / 10.0)):
    """DenseDepth loss: SSIM dissimilarity + image-gradient edge term +
    theta-weighted L1 depth term."""
    # Point-wise depth error.
    l_depth = K.mean(K.abs(y_pred - y_true), axis=-1)
    # Match image gradients so depth edges line up.
    dy_true, dx_true = tf.image.image_gradients(y_true)
    dy_pred, dx_pred = tf.image.image_gradients(y_pred)
    l_edges = K.mean(K.abs(dy_pred - dy_true) + K.abs(dx_pred - dx_true), axis=-1)
    # Structural similarity term, clipped into [0, 1].
    l_ssim = K.clip((1 - tf.image.ssim(y_true, y_pred, maxDepthVal)) * 0.5, 0, 1)
    w1, w2, w3 = 1.0, 1.0, theta
    return (w1 * l_ssim) + (w2 * K.mean(l_edges)) + (w3 * K.mean(l_depth))
|
class UpscaleBlock(Model):
    """Keras twin of the PyTorch UpSample block: bilinear 2x upsampling,
    concat with skip features, then two 3x3 conv + LeakyReLU layers."""

    def __init__(self, filters, name):
        super(UpscaleBlock, self).__init__()
        self.up = UpSampling2D(size=(2, 2), interpolation='bilinear', name=(name + '_upsampling2d'))
        self.concat = Concatenate(name=(name + '_concat'))
        self.convA = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=(name + '_convA'))
        self.reluA = LeakyReLU(alpha=0.2)
        self.convB = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=(name + '_convB'))
        self.reluB = LeakyReLU(alpha=0.2)

    def call(self, x):
        # x[0]: features to upsample, x[1]: skip connection.
        merged = self.concat([self.up(x[0]), x[1]])
        out = self.reluA(self.convA(merged))
        return self.reluB(self.convB(out))
|
class Encoder(Model):
    """DenseNet-169 feature extractor exposing the skip-connection layers
    needed by the decoder."""

    def __init__(self):
        super(Encoder, self).__init__()
        self.base_model = DenseNet169(input_shape=(None, None, 3), include_top=False, weights='imagenet')
        print('Base model loaded {}'.format(DenseNet169.__name__))
        # Deepest features first, then the named pooling/skip layers.
        skip_names = ['pool1', 'pool2_pool', 'pool3_pool', 'conv1/relu']
        outputs = [self.base_model.outputs[-1]]
        outputs += [self.base_model.get_layer(n).output for n in skip_names]
        self.encoder = Model(inputs=self.base_model.inputs, outputs=outputs)

    def call(self, x):
        return self.encoder(x)
|
class Decoder(Model):
    """Keras decoder: 1x1 bottleneck conv, four UpscaleBlocks wired to
    DenseNet-169 skip layers, then a 3x3 conv to one depth channel."""

    def __init__(self, decode_filters):
        super(Decoder, self).__init__()
        self.conv2 = Conv2D(filters=decode_filters, kernel_size=1, padding='same', name='conv2')
        self.up1 = UpscaleBlock(filters=(decode_filters // 2), name='up1')
        self.up2 = UpscaleBlock(filters=(decode_filters // 4), name='up2')
        self.up3 = UpscaleBlock(filters=(decode_filters // 8), name='up3')
        self.up4 = UpscaleBlock(filters=(decode_filters // 16), name='up4')
        self.conv3 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv3')

    def call(self, features):
        # features: [deepest map, pool1, pool2, pool3, conv1] from Encoder.
        x, pool1, pool2, pool3, conv1 = (features[0], features[1],
                                         features[2], features[3], features[4])
        out = self.conv2(x)
        out = self.up1([out, pool3])
        out = self.up2([out, pool2])
        out = self.up3([out, pool1])
        out = self.up4([out, conv1])
        return self.conv3(out)
|
class DepthEstimate(Model):
    """Full Keras depth model: DenseNet encoder + upscaling decoder."""

    def __init__(self):
        super(DepthEstimate, self).__init__()
        self.encoder = Encoder()
        # Decoder width is half the encoder's final feature depth.
        self.decoder = Decoder(
            decode_filters=int(self.encoder.layers[-1].output[0].shape[-1] // 2))
        print('\nModel created.')

    def call(self, x):
        features = self.encoder(x)
        return self.decoder(features)
|
def keras_to_tensorflow(keras_model, output_dir, model_name, out_prefix='output_', log_tensorboard=True):
    """Freeze a Keras model into a TensorFlow GraphDef file under output_dir.

    Fix over the original: directory creation uses os.makedirs with
    exist_ok=True (handles nested paths and avoids the racy
    `exists() == False` check followed by os.mkdir).

    Note: out_prefix and log_tensorboard are accepted for backward
    compatibility but are unused by the current implementation.
    """
    os.makedirs(output_dir, exist_ok=True)
    # Wrap the model in a concrete function so variables can be frozen.
    full_model = tf.function(lambda x: keras_model(x))
    full_model = full_model.get_concrete_function(
        tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
    frozen_func = convert_variables_to_constants_v2(full_model)
    frozen_func.graph.as_graph_def()
    tf.io.write_graph(frozen_func.graph, output_dir, name=model_name, as_text=False)
|
def tensorflow_reshape_input_shape(path, model_name, reshape, reshape_tensor_name, reshape_dtype):
    """Rewrite a frozen graph so its input placeholder has a new shape,
    saving the result back to the same file.

    Fix over the original: redundant f.close()/sess.close() calls inside
    the `with` blocks were removed -- the context managers already close
    both resources.
    """
    tf.compat.v1.reset_default_graph()
    with tf.compat.v1.Session() as sess:
        filePath = os.path.join(path, model_name)
        with gfile.FastGFile(filePath, 'rb') as f:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())
        sess.graph.as_default()
        # A fresh placeholder replaces the original input via input_map.
        inputReshape = tf.compat.v1.placeholder(shape=reshape, name=reshape_tensor_name, dtype=reshape_dtype)
        inputIndex = (reshape_tensor_name + ':0')
        tf.import_graph_def(graph_def, input_map={inputIndex: inputReshape})
        tf.io.write_graph(sess.graph, path, name=model_name, as_text=False)
|
def tensorflow_inference(path, model_name, input_tensor_name, output_tensor_name, inputs):
    """Run a frozen TF graph on `inputs`; return (outputs, seconds).

    Fix over the original: redundant f.close()/sess.close() calls inside
    the `with` blocks were removed -- the context managers already close
    both resources.
    """
    tf.compat.v1.reset_default_graph()
    with tf.compat.v1.Session() as sess:
        filePath = os.path.join(path, model_name)
        with gfile.FastGFile(filePath, 'rb') as f:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())
        sess.graph.as_default()
        tf.import_graph_def(graph_def)
        tensor_input = sess.graph.get_tensor_by_name(input_tensor_name)
        tensor_output = sess.graph.get_tensor_by_name(output_tensor_name)
        print('\nStart tensorflow2.2 inference')
        t0 = time.time()
        outputs = sess.run(tensor_output, {tensor_input: inputs})
        t1 = time.time()
        inferenceTime = round((t1 - t0), 3)
        print('\nTensorflow2.2 inference time:{0} seconds.'.format(inferenceTime))
    return (outputs, inferenceTime)
|
def convert_to_tensorflow_lite(path, tensorflowLite_model_name, model_name, input_tensor_name, output_tensor_name, quantize=None):
    """Convert a frozen TF graph to a TFLite flatbuffer file.

    Fixes over the original:
    - `quantize` used a mutable default argument ([]); it now defaults to
      None and is normalized inside (observable behavior unchanged);
    - the output file is written via a context manager instead of a leaked
      `open(...).write(...)` handle;
    - redundant sess.close() inside the `with` block removed.

    NOTE(review): the frozen graph is read from `tensorflowLite_model_name`
    and the .tflite bytes are written to `model_name` -- the two parameter
    names look swapped; confirm against callers before renaming.
    """
    if quantize is None:
        quantize = []
    tf.compat.v1.reset_default_graph()
    with tf.compat.v1.Session() as sess:
        filePath = os.path.join(path, tensorflowLite_model_name)
        with gfile.FastGFile(filePath, 'rb') as f:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())
        sess.graph.as_default()
        tf.import_graph_def(graph_def)
        tensor_input = sess.graph.get_tensor_by_name(input_tensor_name)
        tensor_output = sess.graph.get_tensor_by_name(output_tensor_name)
        print('\nStart converting to tensorflow lite')
        t0 = time.time()
        converter = tf.compat.v1.lite.TFLiteConverter.from_session(sess, [tensor_input], [tensor_output])
        converter.optimizations = quantize
        tfliteModel = converter.convert()
        t1 = time.time()
        print('\nTensorflow lite converting time:{0} seconds.'.format(round((t1 - t0), 3)))
        t0 = time.time()
        with open(model_name, 'wb') as out_file:
            out_file.write(tfliteModel)
        t1 = time.time()
        print('\nTensorflow lite saving model time:{0} seconds.'.format(round((t1 - t0), 3)))
|
def tensorflow_lite_inference(path, model_name, inputs, inputs_astype):
    """Run a TFLite model on `inputs` (cast to `inputs_astype`);
    return (outputs, seconds)."""
    interpreter = tf.compat.v1.lite.Interpreter(os.path.join(path, model_name))
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    # TFLite requires the exact input dtype the model was converted with.
    interpreter.set_tensor(input_details[0]['index'], inputs.astype(inputs_astype))
    print('\nStart tensorflowLite inference')
    t0 = time.time()
    interpreter.invoke()
    t1 = time.time()
    inferenceTime = round((t1 - t0), 3)
    print('\nTensorflowLite inference time:{0} seconds.'.format(inferenceTime))
    outputs = interpreter.get_tensor(output_details[0]['index'])
    return (outputs, inferenceTime)
|
def reverse_depth_value(depthMap):
    """Invert depth values so near/far are swapped while keeping the
    original [min, max] range."""
    lo = np.min(depthMap)
    hi = np.max(depthMap)
    return (hi - depthMap) + lo
|
def summary_tensorflow_grpah(path, model_name):
    """Write a frozen graph's structure to ./log for TensorBoard viewing.

    (The typo "grpah" in the function name is kept for backward
    compatibility with existing callers.)

    Fix over the original: redundant f.close()/sess.close() calls inside
    the `with` blocks were removed -- the context managers already close
    both resources.
    """
    tf.compat.v1.reset_default_graph()
    with tf.compat.v1.Session() as sess:
        filePath = os.path.join(path, model_name)
        with gfile.FastGFile(filePath, 'rb') as f:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())
        sess.graph.as_default()
        tf.import_graph_def(graph_def)
        tf.compat.v1.summary.FileWriter('log', graph=sess.graph)
|
def extract_zip(input_zip):
    """Read every member of a zip archive (path or file-like object) into
    memory, returning {member_name: bytes}."""
    archive = ZipFile(input_zip)
    return {member: archive.read(member) for member in archive.namelist()}
|
def nyu_resize(img, resolution=480, padding=6):
    """Resize img to (resolution, resolution*4/3) preserving value range.

    Note: `padding` is accepted for interface compatibility but unused.
    """
    from skimage.transform import resize
    target_shape = (resolution, int((resolution * 4) / 3))
    return resize(img, target_shape, preserve_range=True, mode='reflect', anti_aliasing=True)
|
def get_nyu_data(batch_size, nyu_data_zipfile='nyu_data.zip', debug=False):
    """Load NYU train/test csv rows from the dataset zip.

    Fix over the original: the dead `if False:` debug branch is promoted to
    an actual `debug` keyword (default False, so existing callers are
    unaffected) that truncates both splits to 10 samples.

    Returns (raw zip bytes by name, train rows, test rows, rgb batch shape,
    depth batch shape).
    """
    data = extract_zip(nyu_data_zipfile)
    nyu2_train = [row.split(',') for row in data['data/nyu2_train.csv'].decode('utf-8').split('\n') if len(row) > 0]
    nyu2_test = [row.split(',') for row in data['data/nyu2_test.csv'].decode('utf-8').split('\n') if len(row) > 0]
    shape_rgb = (batch_size, 480, 640, 3)
    shape_depth = (batch_size, 240, 320, 1)
    if debug:
        nyu2_train = nyu2_train[:10]
        nyu2_test = nyu2_test[:10]
    return (data, nyu2_train, nyu2_test, shape_rgb, shape_depth)
|
def get_nyu_train_test_data(batch_size):
    """Build (train, test) Keras sequences over the NYU zip data."""
    data, nyu2_train, nyu2_test, shape_rgb, shape_depth = get_nyu_data(batch_size)
    train_generator = NYU_BasicAugmentRGBSequence(data, nyu2_train, batch_size=batch_size,
                                                  shape_rgb=shape_rgb, shape_depth=shape_depth)
    test_generator = NYU_BasicRGBSequence(data, nyu2_test, batch_size=batch_size,
                                          shape_rgb=shape_rgb, shape_depth=shape_depth)
    return (train_generator, test_generator)
|
class NYU_BasicAugmentRGBSequence(Sequence):
    """Keras Sequence yielding augmented (rgb, DepthNorm'd depth) batches
    decoded from in-memory zip bytes."""

    def __init__(self, data, dataset, batch_size, shape_rgb, shape_depth,
                 is_flip=False, is_addnoise=False, is_erase=False):
        self.data = data
        self.dataset = dataset
        self.policy = BasicPolicy(color_change_ratio=0.5, mirror_ratio=0.5,
                                  flip_ratio=(0.0 if (not is_flip) else 0.2),
                                  add_noise_peak=(0 if (not is_addnoise) else 20),
                                  erase_ratio=((-1.0) if (not is_erase) else 0.5))
        self.batch_size = batch_size
        self.shape_rgb = shape_rgb
        self.shape_depth = shape_depth
        self.maxDepth = 1000.0
        from sklearn.utils import shuffle
        # Deterministic shuffle so epochs are reproducible.
        self.dataset = shuffle(self.dataset, random_state=0)
        self.N = len(self.dataset)

    def __len__(self):
        return int(np.ceil(self.N / float(self.batch_size)))

    def __getitem__(self, idx, is_apply_policy=True):
        batch_x = np.zeros(self.shape_rgb)
        batch_y = np.zeros(self.shape_depth)
        for i in range(batch_x.shape[0]):
            # Clamp so the final partial batch repeats the last sample.
            index = min((idx * self.batch_size) + i, self.N - 1)
            rgb_name, depth_name = self.dataset[index][0], self.dataset[index][1]
            x = np.clip(np.asarray(Image.open(BytesIO(self.data[rgb_name]))).reshape(480, 640, 3) / 255, 0, 1)
            y = np.clip((np.asarray(Image.open(BytesIO(self.data[depth_name]))).reshape(480, 640, 1) / 255) * self.maxDepth, 0, self.maxDepth)
            y = DepthNorm(y, maxDepth=self.maxDepth)
            batch_x[i] = nyu_resize(x, 480)
            batch_y[i] = nyu_resize(y, 240)
            if is_apply_policy:
                batch_x[i], batch_y[i] = self.policy(batch_x[i], batch_y[i])
        return (batch_x, batch_y)
|
class NYU_BasicRGBSequence(Sequence):
    """Keras Sequence yielding un-augmented (RGB, normalized-depth) NYU eval batches."""
    def __init__(self, data, dataset, batch_size, shape_rgb, shape_depth):
        # data: dict mapping archive member names to raw bytes;
        # dataset: list of (rgb_path, depth_path) CSV rows.
        self.data = data
        self.dataset = dataset
        self.batch_size = batch_size
        self.N = len(self.dataset)
        self.shape_rgb = shape_rgb
        self.shape_depth = shape_depth
        self.maxDepth = 1000.0  # working depth range fed to DepthNorm
    def __len__(self):
        # Batches per epoch, rounding up so every sample is covered.
        return int(np.ceil((self.N / float(self.batch_size))))
    def __getitem__(self, idx):
        (batch_x, batch_y) = (np.zeros(self.shape_rgb), np.zeros(self.shape_depth))
        for i in range(self.batch_size):
            # Clamp so the final, partial batch repeats the last sample.
            index = min(((idx * self.batch_size) + i), (self.N - 1))
            sample = self.dataset[index]
            x = np.clip((np.asarray(Image.open(BytesIO(self.data[sample[0]]))).reshape(480, 640, 3) / 255), 0, 1)
            # NOTE(review): depth decoding differs from the train loader
            # (float32 / 10.0 here vs uint8/255 * maxDepth there) — presumably
            # the eval depth images are higher bit-depth; confirm against data.
            y = (np.asarray(Image.open(BytesIO(self.data[sample[1]])), dtype=np.float32).reshape(480, 640, 1).copy().astype(float) / 10.0)
            y = DepthNorm(y, maxDepth=self.maxDepth)
            batch_x[i] = nyu_resize(x, 480)
            batch_y[i] = nyu_resize(y, 240)  # depth target at half resolution
        return (batch_x, batch_y)
|
def get_unreal_data(batch_size, unreal_data_file='unreal_data.h5', debug_subset=False):
    """Open the Unreal HDF5 dataset and split its keys into train/test.

    Parameters
    ----------
    batch_size : int
        Used only to build the batch shape tuples.
    unreal_data_file : str
        Path to the HDF5 file; the returned handle stays open for the
        generators to read from.
    debug_subset : bool
        When True keep only 10 keys per split for quick smoke tests.
        Replaces the original dead ``if False:`` toggle; the default
        preserves the original behavior.

    Returns
    -------
    tuple
        (data, unreal_train, unreal_test, shape_rgb, shape_depth).
    """
    shape_rgb = (batch_size, 480, 640, 3)
    shape_depth = (batch_size, 240, 320, 1)
    import h5py
    data = h5py.File(unreal_data_file, 'r')
    from sklearn.utils import shuffle
    # Deterministic shuffle, then hold out the last 100 keys for testing.
    keys = shuffle(list(data['x'].keys()), random_state=0)
    unreal_train = keys[:len(keys) - 100]
    unreal_test = keys[len(keys) - 100:]
    if debug_subset:
        unreal_train = unreal_train[:10]
        unreal_test = unreal_test[:10]
    return (data, unreal_train, unreal_test, shape_rgb, shape_depth)
|
def get_unreal_train_test_data(batch_size):
    """Build train/test generators over the Unreal HDF5 dataset.

    The test generator skips the augmentation policy (is_skip_policy=True).
    """
    data, train_keys, test_keys, rgb_shape, depth_shape = get_unreal_data(batch_size)
    train_gen = Unreal_BasicAugmentRGBSequence(data, train_keys, batch_size=batch_size, shape_rgb=rgb_shape, shape_depth=depth_shape)
    test_gen = Unreal_BasicAugmentRGBSequence(data, test_keys, batch_size=batch_size, shape_rgb=rgb_shape, shape_depth=depth_shape, is_skip_policy=True)
    return (train_gen, test_gen)
|
class Unreal_BasicAugmentRGBSequence(Sequence):
    """Keras Sequence over the Unreal HDF5 dataset with optional augmentation."""
    def __init__(self, data, dataset, batch_size, shape_rgb, shape_depth, is_flip=False, is_addnoise=False, is_erase=False, is_skip_policy=False):
        # data: open h5py file; dataset: list of sample keys under 'x'/'y'.
        self.data = data
        self.dataset = dataset
        # Augmentation policy (same defaults as the NYU loader); extras opt-in.
        self.policy = BasicPolicy(color_change_ratio=0.5, mirror_ratio=0.5, flip_ratio=(0.0 if (not is_flip) else 0.2), add_noise_peak=(0 if (not is_addnoise) else 20), erase_ratio=((- 1.0) if (not is_erase) else 0.5))
        self.batch_size = batch_size
        self.shape_rgb = shape_rgb
        self.shape_depth = shape_depth
        self.maxDepth = 1000.0  # working depth range fed to DepthNorm
        self.N = len(self.dataset)
        self.is_skip_policy = is_skip_policy  # True for the test split
    def __len__(self):
        # Batches per epoch, rounding up so every sample is covered.
        return int(np.ceil((self.N / float(self.batch_size))))
    def __getitem__(self, idx, is_apply_policy=True):
        (batch_x, batch_y) = (np.zeros(self.shape_rgb), np.zeros(self.shape_depth))
        if self.is_skip_policy:
            is_apply_policy = False
        for i in range(batch_x.shape[0]):
            # Clamp so the final, partial batch repeats the last sample.
            index = min(((idx * self.batch_size) + i), (self.N - 1))
            sample = self.dataset[index]
            # RGB frames are stored compressed in the HDF5 file; decode with OpenCV.
            rgb_sample = cv2.imdecode(np.asarray(self.data['x/{}'.format(sample)]), 1)
            depth_sample = self.data['y/{}'.format(sample)]
            depth_sample = resize(depth_sample, (self.shape_depth[1], self.shape_depth[2]), preserve_range=True, mode='reflect', anti_aliasing=True)
            x = np.clip((rgb_sample / 255), 0, 1)
            # Clip raw depth to [10, maxDepth] before inverting with DepthNorm.
            y = np.clip(depth_sample, 10, self.maxDepth)
            y = DepthNorm(y, maxDepth=self.maxDepth)
            batch_x[i] = x
            batch_y[i] = y
            if is_apply_policy:
                (batch_x[i], batch_y[i]) = self.policy(batch_x[i], batch_y[i])
        return (batch_x, batch_y)
|
def normalize_tuple(value, n, name):
    """Transforms a single int or iterable of ints into an int tuple.
    # Arguments
        value: The value to validate and convert. Could be an int, or any
            iterable of ints.
        n: The size of the tuple to be returned.
        name: The name of the argument being validated, e.g. `strides` or
            `kernel_size`. This is only used to format error messages.
    # Returns
        A tuple of n integers.
    # Raises
        ValueError: If something else than an int/long or iterable thereof was
            passed.
    """
    if isinstance(value, int):
        # A scalar is broadcast to all n positions.
        return (value,) * n
    try:
        value_tuple = tuple(value)
    except TypeError:
        raise ValueError('The `{}` argument must be a tuple of {} integers. Received: {}'.format(name, n, value))
    if len(value_tuple) != n:
        raise ValueError('The `{}` argument must be a tuple of {} integers. Received: {}'.format(name, n, value))
    for single_value in value_tuple:
        try:
            int(single_value)
        except (ValueError, TypeError):
            # BUG FIX: int(None) and similar raise TypeError, which previously
            # escaped as a confusing low-level error instead of the documented
            # ValueError.
            raise ValueError('The `{}` argument must be a tuple of {} integers. Received: {} including element {} of type {}'.format(name, n, value, single_value, type(single_value)))
    return value_tuple
|
def normalize_data_format(value):
    """Validate and canonicalize a Keras data-format string.

    value : `'channels_first'`, `'channels_last'` (case-insensitive) or None,
        in which case the backend's global image_data_format() is used.
    Returns the lowercase canonical string; raises ValueError otherwise.
    """
    if value is None:
        value = K.image_data_format()
    fmt = value.lower()
    if fmt in ('channels_first', 'channels_last'):
        return fmt
    raise ValueError(('The `data_format` argument must be one of "channels_first", "channels_last". Received: ' + str(value)))
|
class BilinearUpSampling2D(Layer):
    """Keras layer: bilinear spatial upsampling by integer factors via tf.image.resize."""
    def __init__(self, size=(2, 2), data_format=None, **kwargs):
        # size: integer upscale factors (height, width).
        super(BilinearUpSampling2D, self).__init__(**kwargs)
        self.data_format = normalize_data_format(data_format)
        self.size = normalize_tuple(size, 2, 'size')
        self.input_spec = InputSpec(ndim=4)  # rank-4 input (NHWC or NCHW)
    def compute_output_shape(self, input_shape):
        # Unknown (None) spatial dimensions stay unknown.
        if (self.data_format == 'channels_first'):
            height = ((self.size[0] * input_shape[2]) if (input_shape[2] is not None) else None)
            width = ((self.size[1] * input_shape[3]) if (input_shape[3] is not None) else None)
            return (input_shape[0], input_shape[1], height, width)
        elif (self.data_format == 'channels_last'):
            height = ((self.size[0] * input_shape[1]) if (input_shape[1] is not None) else None)
            width = ((self.size[1] * input_shape[2]) if (input_shape[2] is not None) else None)
            return (input_shape[0], height, width, input_shape[3])
    def call(self, inputs):
        # Dynamic shape: the entries here are scalar tensors, so the
        # `is not None` guards (mirroring compute_output_shape) always pass.
        input_shape = K.shape(inputs)
        if (self.data_format == 'channels_first'):
            height = ((self.size[0] * input_shape[2]) if (input_shape[2] is not None) else None)
            width = ((self.size[1] * input_shape[3]) if (input_shape[3] is not None) else None)
        elif (self.data_format == 'channels_last'):
            height = ((self.size[0] * input_shape[1]) if (input_shape[1] is not None) else None)
            width = ((self.size[1] * input_shape[2]) if (input_shape[2] is not None) else None)
        # TF1-compat resize so align_corners=True can be requested.
        return tf.compat.v1.image.resize(images=inputs, size=[height, width], align_corners=True)
    def get_config(self):
        # Serialize constructor args so the layer survives save/load_model.
        config = {'size': self.size, 'data_format': self.data_format}
        base_config = super(BilinearUpSampling2D, self).get_config()
        return dict((list(base_config.items()) + list(config.items())))
|
def depth_loss_function(y_true, y_pred, theta=0.1, maxDepthVal=(1000.0 / 10.0)):
    """Composite depth loss: SSIM + gradient (edge) L1 + weighted point-wise L1.

    y_true, y_pred : depth maps, shape (batch, H, W, 1).
    theta : weight of the point-wise L1 term.
    maxDepthVal : dynamic range passed to tf.image.ssim.
    """
    # Point-wise L1 depth error.
    l_depth = K.mean(K.abs((y_pred - y_true)), axis=(- 1))
    # L1 error on image gradients — penalizes blurry edges.
    (dy_true, dx_true) = tf.image.image_gradients(y_true)
    (dy_pred, dx_pred) = tf.image.image_gradients(y_pred)
    l_edges = K.mean((K.abs((dy_pred - dy_true)) + K.abs((dx_pred - dx_true))), axis=(- 1))
    # Structural dissimilarity mapped into [0, 1].
    l_ssim = K.clip(((1 - tf.image.ssim(y_true, y_pred, maxDepthVal)) * 0.5), 0, 1)
    # Term weights: SSIM and edges at 1.0, point-wise depth at theta.
    w1 = 1.0
    w2 = 1.0
    w3 = theta
    return (((w1 * l_ssim) + (w2 * K.mean(l_edges))) + (w3 * K.mean(l_depth)))
|
def create_model(existing='', is_twohundred=False, is_halffeatures=True):
    """Build (or reload) the DenseNet encoder / upprojection decoder depth model.

    existing : path to a saved .h5 model to reload; empty string builds anew.
    is_twohundred : use DenseNet201 instead of DenseNet169 as the encoder.
    is_halffeatures : halve the decoder's starting filter count.
    """
    if (len(existing) == 0):
        print('Loading base model (DenseNet)..')
        # Encoder: DenseNet without its classifier head; any input resolution.
        if is_twohundred:
            base_model = applications.DenseNet201(input_shape=(None, None, 3), include_top=False)
        else:
            base_model = applications.DenseNet169(input_shape=(None, None, 3), include_top=False)
        print('Base model loaded.')
        base_model_output_shape = base_model.layers[(- 1)].output.shape
        # Fine-tune the entire encoder.
        for layer in base_model.layers:
            layer.trainable = True
        if is_halffeatures:
            decode_filters = int((int(base_model_output_shape[(- 1)]) / 2))
        else:
            decode_filters = int(base_model_output_shape[(- 1)])
        def upproject(tensor, filters, name, concat_with):
            # One decoder stage: 2x bilinear upsample, concat the encoder skip
            # connection, then two 3x3 convs with LeakyReLU.
            up_i = BilinearUpSampling2D((2, 2), name=(name + '_upsampling2d'))(tensor)
            up_i = Concatenate(name=(name + '_concat'))([up_i, base_model.get_layer(concat_with).output])
            up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=(name + '_convA'))(up_i)
            up_i = LeakyReLU(alpha=0.2)(up_i)
            up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=(name + '_convB'))(up_i)
            up_i = LeakyReLU(alpha=0.2)(up_i)
            return up_i
        # Decoder: 1x1 bottleneck conv, then four upprojection stages that
        # halve the filter count while consuming encoder skips.
        decoder = Conv2D(filters=decode_filters, kernel_size=1, padding='same', input_shape=base_model_output_shape, name='conv2')(base_model.output)
        decoder = upproject(decoder, int((decode_filters / 2)), 'up1', concat_with='pool3_pool')
        decoder = upproject(decoder, int((decode_filters / 4)), 'up2', concat_with='pool2_pool')
        decoder = upproject(decoder, int((decode_filters / 8)), 'up3', concat_with='pool1')
        decoder = upproject(decoder, int((decode_filters / 16)), 'up4', concat_with='conv1/relu')
        if False:
            # Dead code: optional fifth stage up to the full input resolution.
            decoder = upproject(decoder, int((decode_filters / 32)), 'up5', concat_with='input_1')
        # Single-channel depth prediction head.
        conv3 = Conv2D(filters=1, kernel_size=3, strides=1, padding='same', name='conv3')(decoder)
        model = Model(inputs=base_model.input, outputs=conv3)
    else:
        if (not existing.endswith('.h5')):
            sys.exit('Please provide a correct model file when using [existing] argument.')
        # Custom layer/loss must be registered for deserialization.
        # NOTE(review): a different `load_model(args)` is defined later in this
        # file and would shadow the Keras load_model intended here — confirm
        # these scripts are not imported as one module.
        custom_objects = {'BilinearUpSampling2D': BilinearUpSampling2D, 'depth_loss_function': depth_loss_function}
        model = load_model(existing, custom_objects=custom_objects)
        print('\nExisting model loaded.\n')
    print('Model created.')
    return model
|
def load_model(args):
    """Build a CLIP-based model selected by args.model and load its checkpoint.

    NOTE(review): this definition shadows any previously imported `load_model`
    (e.g. the Keras one used by create_model above) — this file appears to be
    several scripts concatenated; confirm before importing both.

    Relies on module-level globals: classes, prompt, device, save_dir.
    Returns the model in eval mode.
    """
    if (args.model == 'clip_vis'):
        model = CLIP_Visual(classes=classes, device=device, inet=(args.dataset == 'imagenet')).to(device)
    elif (args.model == 'clip_zero'):
        model = CLIP_Zero_Shot(classes=classes, prompt=prompt, device=device).to(device)
    else:
        raise ValueError(f'model = {args.model}, is not supported at the moment')
    if (args.model != 'clip_zero'):
        # Trained variants restore weights from the experiment checkpoint dir.
        model.load_state_dict(torch.load(os.path.join(save_dir, args.dataset, args.exp_name, f'epoch_{args.epoch}.pth')))
    else:
        # Zero-shot has no weights to load; just ensure the output dir exists.
        os.makedirs(os.path.join(save_dir, args.dataset, args.exp_name), exist_ok=True)
    model.eval()
    return model
|
def predict(image):
    """Run the trained (clippr) and zero-shot CLIP models on one RGB image.

    image : HxWx3 array convertible to uint8.
    Returns (clippr_pred, clip_pred): the regressor's rounded scalar output
    and the zero-shot model's argmax class index.

    Uses module-level globals: model, zero_shot_model, preprocess, device.
    """
    global model, zero_shot_model, preprocess, device
    image = Image.fromarray(image.astype('uint8'), 'RGB')
    input_tensor = preprocess(image)
    input_batch = input_tensor.unsqueeze(0)  # add a batch dimension
    input_batch = input_batch.to(device)
    model = model.to(device)
    zero_shot_model = zero_shot_model.to(device)
    with torch.no_grad():
        # The clippr head outputs a scalar regression value; round to int.
        clippr_pred = int(np.round(model(input_batch)[0].item()))
        clip_pred = zero_shot_model(input_batch).argmax(dim=1, keepdim=True)[0].item()
    return (clippr_pred, clip_pred)
|
def sample_assumed_distribution(dist_parameters, num_samples):
    """Draw num_samples values from the assumed prior described by dist_parameters.

    dist_parameters : dict with key 'dist_type':
        - 'gaussian': keys 'mean', 'std', 'min', 'max'; samples a Normal and
          clips into [min, max].
        - 'custom' (or the historical misspelling 'costum', kept for backward
          compatibility): key 'example', an array resampled with replacement.
    num_samples : number of values to draw.

    Returns a torch.Tensor of num_samples values.
    Raises ValueError for an unknown dist_type.
    """
    dist_type = dist_parameters['dist_type']
    if dist_type == 'gaussian':
        distribution = torch.distributions.Normal(loc=dist_parameters['mean'], scale=dist_parameters['std'])
        sample = distribution.sample([num_samples])
        # Truncate to the supported range of the target quantity.
        sample = torch.clip(sample, min=dist_parameters['min'], max=dist_parameters['max'])
        return sample
    elif dist_type in ('costum', 'custom'):
        # Empirical distribution: bootstrap-resample the provided examples.
        sample = np.random.choice(dist_parameters['example'], size=num_samples, replace=True)
        return torch.tensor(sample)
    else:
        raise ValueError(f'No such supported assumed distribution type as {dist_type}')
|
class DictX(dict):
    """Dictionary with attribute-style access (d.key == d['key']).

    Adapted from
    https://dev.to/0xbf/use-dot-syntax-to-access-dictionary-key-python-tips-10ec
    """
    def __getattr__(self, key):
        # Only invoked for names not found through normal attribute lookup.
        try:
            return self[key]
        except KeyError as missing:
            raise AttributeError(missing)
    def __setattr__(self, key, value):
        # All attribute writes land in the underlying dict.
        self[key] = value
    def __delattr__(self, key):
        try:
            del self[key]
        except KeyError as missing:
            raise AttributeError(missing)
    def __repr__(self):
        return '<DictX {}>'.format(dict.__repr__(self))
|
def save_experiment_hyper_params(args, exp_dir, verbose=True):
    """Write the experiment's arguments to <exp_dir>/args.txt, one 'key: value' per line.

    args : mapping iterated by key (e.g. a dict of hyper-parameters).
    exp_dir : existing experiment directory.
    verbose : if True, echo the written file back to stdout.
    """
    with open(join(exp_dir, f'args.txt'), 'w+') as f:
        f.write('\n\n\n')
        f.write('Experiment Args:\n\n')
        for k in args:
            f.write(f'''    {k}: {args[k]}
''')
        f.write('\n\n\n')
    if verbose:
        # Re-read and print the file we just wrote.
        with open(join(exp_dir, f'args.txt'), 'r') as f:
            for line in f:
                print(line)
    return
|
def eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id=0):
    """ evaluate a batch for the baseline mlp

    mlp : baseline model mapping one-hot atom types to targets.
    data : dict with data['features']['atom_types'] and data['targets'].
    batch_idxs : LongTensor of sample indices for this batch.
    criterion : loss module (e.g. nn.MSELoss).
    device_id : CUDA device the tensors are moved to when available.

    Returns the batch loss tensor.
    """
    atom_types = to_one_hot(data['features']['atom_types'][(batch_idxs, ...)], NUM_ATOM_TYPES)
    targets = data['targets'][(batch_idxs, ...)]
    # Variable is the legacy pre-0.4 autograd wrapper kept from the original code.
    atom_types = Variable(atom_types)
    targets = Variable(targets)
    if torch.cuda.is_available():
        atom_types = atom_types.cuda(device_id)
        targets = targets.cuda(device_id)
    outputs = mlp(atom_types)
    loss = criterion(outputs, targets)
    return loss
|
def eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id=0):
    """ evaluate a batch for the s2cnn

    The s2cnn is residual on top of the (frozen) baseline mlp: the final
    prediction is mlp(one-hot types) + s2cnn(geometry, types).

    mlp, s2cnn : the two models.
    data : dict with ['features']['geometry'], ['features']['atom_types'] and ['targets'].
    batch_idxs : LongTensor of sample indices for this batch.
    criterion : loss module (e.g. nn.MSELoss).
    device_id : CUDA device the tensors are moved to when available.

    Returns the batch loss tensor.
    """
    geometry = data['features']['geometry'][(batch_idxs, ...)]
    atom_types = data['features']['atom_types'][(batch_idxs, ...)]
    atom_types_one_hot = to_one_hot(atom_types, NUM_ATOM_TYPES)
    targets = data['targets'][(batch_idxs, ...)]
    # Variable is the legacy pre-0.4 autograd wrapper kept from the original code.
    geometry = Variable(geometry)
    atom_types = Variable(atom_types)
    atom_types_one_hot = Variable(atom_types_one_hot)
    targets = Variable(targets)
    if torch.cuda.is_available():
        atom_types_one_hot = atom_types_one_hot.cuda(device_id)
        geometry = geometry.cuda(device_id)
        atom_types = atom_types.cuda(device_id)
        targets = targets.cuda(device_id)
    # Residual combination: baseline prediction plus spherical-CNN correction.
    outputs = mlp(atom_types_one_hot)
    outputs += s2cnn(geometry, atom_types)
    loss = criterion(outputs, targets)
    return loss
|
def train_baseline(mlp, data, train_batches, test_batches, num_epochs, learning_rate_mlp, device_id=0):
    """ train the baseline model

    mlp : baseline model, trained in place.
    data : dataset dict consumed by eval_batch_mlp.
    train_batches, test_batches : iterables yielding index batches per epoch.
    num_epochs : number of passes over train_batches.
    learning_rate_mlp : optimizer learning rate.
    device_id : CUDA device used for the criterion and batches.

    Returns (train_loss, test_loss): RMSE of the final epoch.
    """
    optim = OPTIMIZER(mlp.parameters(), lr=learning_rate_mlp)
    criterion = nn.MSELoss()
    if torch.cuda.is_available():
        criterion = criterion.cuda(device_id)
    for epoch in range(num_epochs):
        train_losses = []
        print('training')
        for (iteration, batch_idxs) in enumerate(train_batches):
            mlp.train()
            optim.zero_grad()
            loss = eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id)
            loss.backward()
            optim.step()
            train_losses.append(loss.item())
            print('\riteration {}/{}'.format((iteration + 1), train_batches.num_iterations()), end='')
        print()
        test_losses = []
        print('evaluating')
        for (iteration, batch_idxs) in enumerate(test_batches):
            mlp.eval()
            # BUG FIX: device_id was omitted here, so evaluation silently ran
            # against GPU 0 regardless of the requested device.  no_grad saves
            # memory without changing the computed loss values.
            with torch.no_grad():
                loss = eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id)
            test_losses.append(loss.item())
            print('\riteration {}/{}'.format((iteration + 1), test_batches.num_iterations()), end='')
        print()
        # Report RMSE (the per-batch losses are MSE).
        train_loss = np.sqrt(np.mean(train_losses))
        test_loss = np.sqrt(np.mean(test_losses))
        print('epoch {}/{} - avg train loss: {}, test loss: {}'.format((epoch + 1), num_epochs, train_loss, test_loss))
    return (train_loss, test_loss)
|
def train_s2cnn(mlp, s2cnn, data, train_batches, test_batches, num_epochs, init_learning_rate_s2cnn, learning_rate_decay_epochs, device_id=0):
    """ train the s2cnn keeping the baseline frozen

    mlp : trained baseline, kept in eval mode (its parameters are not in the optimizer).
    s2cnn : residual spherical-CNN model, trained in place.
    data : dataset dict consumed by eval_batch_s2cnn.
    train_batches, test_batches : iterables yielding index batches per epoch.
    num_epochs : number of passes over train_batches.
    init_learning_rate_s2cnn : starting learning rate, step-decayed by
        exp_lr_scheduler every learning_rate_decay_epochs epochs.
    device_id : CUDA device used for the criterion and batches.

    Returns (train_loss, test_loss): RMSE of the final epoch.
    """
    optim = OPTIMIZER(s2cnn.parameters(), lr=init_learning_rate_s2cnn)
    criterion = nn.MSELoss()
    if torch.cuda.is_available():
        criterion = criterion.cuda(device_id)
    for epoch in range(num_epochs):
        # Step-decay the learning rate by 10x every learning_rate_decay_epochs.
        optim = exp_lr_scheduler(optim, epoch, init_lr=init_learning_rate_s2cnn, lr_decay_epoch=learning_rate_decay_epochs)
        train_losses = []
        print('training')
        for (iteration, batch_idxs) in enumerate(train_batches):
            s2cnn.train()
            mlp.eval()
            optim.zero_grad()
            # BUG FIX: device_id was omitted from both eval_batch_s2cnn calls,
            # silently using GPU 0 regardless of the requested device.
            loss = eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id)
            loss.backward()
            optim.step()
            train_losses.append(loss.item())
            print('\riteration {}/{} - batch loss: {}'.format((iteration + 1), train_batches.num_iterations(), np.sqrt(train_losses[(- 1)])), end='')
        print()
        test_losses = []
        print('evaluating')
        for (iteration, batch_idxs) in enumerate(test_batches):
            s2cnn.eval()
            mlp.eval()
            # no_grad saves memory without changing the computed loss values.
            with torch.no_grad():
                loss = eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id)
            test_losses.append(loss.item())
            print('\riteration {}/{} - batch loss: {}'.format((iteration + 1), test_batches.num_iterations(), np.sqrt(test_losses[(- 1)])), end='')
        print()
        # Report RMSE (the per-batch losses are MSE).
        train_loss = np.sqrt(np.mean(train_losses))
        test_loss = np.sqrt(np.mean(test_losses))
        print('epoch {}/{} - avg train loss: {}, test loss: {}'.format((epoch + 1), num_epochs, train_loss, test_loss))
    return (train_loss, test_loss)
|
def main():
    """Parse CLI args, load the molecule data and train baseline + residual s2cnn."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, default='data.joblib')
    parser.add_argument('--test_strat', type=int, default=0)
    parser.add_argument('--device_id', type=int, default=0)
    parser.add_argument('--num_epochs_s2cnn', type=int, default=30)
    parser.add_argument('--num_epochs_mlp', type=int, default=30)
    parser.add_argument('--batch_size_s2cnn', type=int, default=32)
    parser.add_argument('--batch_size_mlp', type=int, default=32)
    # BUG FIX: the learning-rate options were declared type=int, which crashes
    # on any fractional CLI value (int('0.001') raises ValueError) and would
    # truncate whole-number values.
    parser.add_argument('--init_learning_rate_s2cnn', type=float, default=0.001)
    parser.add_argument('--learning_rate_mlp', type=float, default=0.001)
    parser.add_argument('--learning_rate_decay_epochs', type=int, default=10)
    args = parser.parse_args()
    torch.cuda.set_device(args.device_id)
    print('evaluating on {}'.format(args.test_strat))
    print('loading data...', end='')
    (data, train_idxs, test_idxs) = load_data(args.data_path, args.test_strat, cuda=args.device_id)
    print('done!')
    mlp = BaselineRegressor()
    s2cnn = S2CNNRegressor()
    if torch.cuda.is_available():
        for model in [mlp, s2cnn]:
            model.cuda(args.device_id)
    # Stage 1: fit the atom-type-only baseline.
    print('training baseline model')
    print('mlp #params: {}'.format(count_params(mlp)))
    train_baseline(mlp, data, IndexBatcher(train_idxs, args.batch_size_mlp, cuda=args.device_id), IndexBatcher(test_idxs, args.batch_size_mlp, cuda=args.device_id), args.num_epochs_mlp, args.learning_rate_mlp, args.device_id)
    # Stage 2: fit the spherical CNN on the residual, baseline frozen.
    print('training residual s2cnn model')
    print('s2cnn #params: {}'.format(count_params(s2cnn)))
    train_s2cnn(mlp, s2cnn, data, IndexBatcher(train_idxs, args.batch_size_s2cnn, cuda=args.device_id), IndexBatcher(test_idxs, args.batch_size_s2cnn, cuda=args.device_id), args.num_epochs_s2cnn, args.init_learning_rate_s2cnn, args.learning_rate_decay_epochs, args.device_id)
|
class S2Block(nn.Module):
    """S2 convolution -> BatchNorm3d -> nonlinearity."""
    def __init__(self, b_in, b_out, f_in, f_out):
        """b_in/b_out: bandwidth of input/output signals
        f_in/f_out: filters in input/output signals"""
        super(S2Block, self).__init__()
        # Near-identity sampling grid for the S2 kernel.
        self.grid_s2 = s2_near_identity_grid(n_alpha=(2 * b_in), n_beta=2)
        self.cnn = S2Convolution(nfeature_in=f_in, nfeature_out=f_out, b_in=b_in, b_out=b_out, grid=self.grid_s2)
        # AFFINE is a module-level flag controlling learnable BN parameters.
        self.bn = nn.BatchNorm3d(f_out, affine=AFFINE)
    def forward(self, x):
        # conv -> batchnorm -> module-level `nonlinearity`
        x = self.cnn(x)
        x = self.bn(x)
        x = nonlinearity(x)
        return x
|
class So3Block(nn.Module):
    """SO(3) convolution -> BatchNorm3d -> nonlinearity."""
    def __init__(self, b_in, b_out, f_in, f_out):
        """b_in/b_out: bandwidth of input/output signals
        f_in/f_out: filters in input/output signals"""
        super(So3Block, self).__init__()
        # Near-identity sampling grid for the SO(3) kernel.
        self.grid_so3 = so3_near_identity_grid(n_alpha=(2 * b_in), n_beta=2, n_gamma=2)
        self.cnn = SO3Convolution(nfeature_in=f_in, nfeature_out=f_out, b_in=b_in, b_out=b_out, grid=self.grid_so3)
        # AFFINE is a module-level flag controlling learnable BN parameters.
        self.bn = nn.BatchNorm3d(f_out, affine=AFFINE)
    def forward(self, x):
        # conv -> batchnorm -> module-level `nonlinearity`
        x = self.cnn(x)
        x = self.bn(x)
        x = nonlinearity(x)
        return x
|
class DeepSet(nn.Module):
    """ deep set block """
    def __init__(self, f, h1, h_latent, h2, n_objs):
        """ f: input filters
        h1, h2: hidden units for encoder/decoder mlps
        h_latent: dimensions
        n_objs: of objects to aggregate in latent space """
        super(DeepSet, self).__init__()
        self.f = f
        self.h1 = h1
        # NOTE(review): `self.h3 = h2` looks like a typo for `self.h2`; the
        # attribute is never read in this class, so the name is left unchanged
        # in case external code depends on it.
        self.h3 = h2
        self.n_objs = n_objs
        # Per-object encoder: f -> h1 -> h_latent.
        self.emb_h = nn.Linear(f, h1)
        self.emb_rep = nn.Linear(h1, h_latent)
        # Post-aggregation decoder: h_latent -> h2 -> 1 scalar output.
        self.proj_h = nn.Linear(h_latent, h2)
        self.proj = nn.Linear(h2, 1)
        self.bn1 = nn.BatchNorm1d(h1, affine=AFFINE)
        self.bn2 = nn.BatchNorm1d(h_latent, affine=AFFINE)
        self.bn3 = nn.BatchNorm1d(h2, affine=AFFINE)
    def forward(self, x, mask):
        # Encode each object independently (BN + module-level nonlinearity).
        x = self.emb_h(x)
        x = self.bn1(x)
        x = nonlinearity(x)
        x = self.emb_rep(x)
        x = self.bn2(x)
        x = nonlinearity(x)
        (n, h_latent) = x.size()
        # Regroup flat per-object rows into (set, n_objs, latent) and pool by
        # masked summation — a permutation-invariant aggregation.
        x = x.view((n // self.n_objs), self.n_objs, h_latent)
        x = torch.sum((x * mask), dim=1)
        # Decode the pooled representation to a single regression value.
        x = self.proj_h(x)
        x = self.bn3(x)
        x = nonlinearity(x)
        x = self.proj(x)
        return x
|
class S2CNNRegressor(nn.Module):
    """ approximate energy using spherical representations """
    def __init__(self):
        super(S2CNNRegressor, self).__init__()
        n_objs = 23  # number of atom slots per molecule fed to the DeepSet
        # One S2 conv followed by three SO3 convs: bandwidth shrinks 10->2
        # while channels grow 5->64.
        self.blocks = [S2Block(b_in=10, f_in=5, b_out=8, f_out=8), So3Block(b_in=8, b_out=6, f_in=8, f_out=16), So3Block(b_in=6, b_out=4, f_in=16, f_out=32), So3Block(b_in=4, b_out=2, f_in=32, f_out=64)]
        # setattr registers each block as a submodule, so its parameters are
        # tracked even though self.blocks itself is a plain Python list.
        for (i, block) in enumerate(self.blocks):
            setattr(self, 'block{0}'.format(i), block)
        self.ds = DeepSet(64, 256, 64, 512, n_objs)
    def forward(self, x, atom_types):
        # x: 5-D per-atom spherical signals (batch, atoms, features, H, W).
        (n_batch, n_atoms, n_features, bandwidth, _) = x.size()
        # Mask out padding atoms (type 0) before the set aggregation.
        mask = (atom_types > 0).view(n_batch, n_atoms, 1).float()
        # Fold atoms into the batch dimension for the convolution stack.
        x = x.view((n_batch * n_atoms), n_features, bandwidth, bandwidth)
        for block in self.blocks:
            x = block(x)
        x = so3_integrate(x)  # collapse the SO(3) signal to per-atom features
        y = self.ds(x, mask)
        return y
|
class IndexBatcher():
    """Epoch-wise mini-batch iterator over dataset indices.

    Yields LongTensors of indices in shuffled order; at the end of each epoch
    it reshuffles and raises StopIteration.  A trailing partial batch is
    dropped, so each epoch yields exactly num_iterations() batches.
    """
    def __init__(self, indices, n_batch, cuda=None):
        # indices: 1-D array of dataset indices; cuda: device id or None (CPU).
        self.indices = indices.astype(np.int64)
        self.n_batch = n_batch
        self.pos = 0
        self.cuda = cuda
        self.internal_indices = np.arange(len(indices)).astype(np.int64)
        np.random.shuffle(self.internal_indices)
    def __iter__(self):
        return self
    def reset(self):
        # Start a new epoch with a fresh shuffle.
        self.pos = 0
        np.random.shuffle(self.internal_indices)
    def __next__(self):
        start = self.pos
        end = np.minimum((self.pos + self.n_batch), len(self.indices))
        self.pos += self.n_batch
        # BUG FIX: the original tested `>=`, which dropped the final FULL batch
        # whenever len(indices) is divisible by n_batch, yielding one batch
        # fewer than num_iterations() reports.  `>` keeps every full batch and
        # still drops a trailing partial one.
        if (self.pos > len(self.indices)):
            self.reset()
            raise StopIteration
        tensor = torch.LongTensor(self.indices[self.internal_indices[start:end]])
        if (self.cuda is not None):
            # BUG FIX: Tensor.cuda() is not in-place; the original discarded
            # the returned GPU copy and yielded a CPU tensor.
            tensor = tensor.cuda(self.cuda)
        return tensor
    def num_iterations(self):
        # Full batches per epoch (partial remainder excluded).
        return (len(self.indices) // self.n_batch)
    next = __next__  # Python-2-style alias kept for compatibility
|
def to_one_hot(x, n):
    """One-hot encode a 2-D LongTensor of class indices along a new last axis.

    x : LongTensor of shape (a, b) with values in [0, n).
    Returns a float tensor of shape (a, b, n).
    """
    encoded = torch.zeros(*x.size(), n)
    encoded.scatter_(2, x.unsqueeze(2), 1)
    return encoded
|
def load_data(path, test_strat_id=None, cuda=None):
    """
    Loads the data

    path: path to the molecule .gz
    test_strat_id: id of strat being used as test set (None picks one at random)
    cuda: CUDA device id, or None to keep tensors on the CPU

    Returns (data, train_idxs, test_idxs).
    """
    data = joblib.load(path)
    # Remap the sparse atom-type codes onto a dense 0..K-1 range.
    type_remap = (- np.ones((int(data['features']['atom_types'].max()) + 1)))
    unique_types = np.unique(data['features']['atom_types']).astype(int)
    type_remap[unique_types] = np.arange(len(unique_types))
    data['features']['atom_types'] = type_remap[data['features']['atom_types'].astype(int)]
    data['features']['geometry'] = torch.FloatTensor(data['features']['geometry'].astype(np.float32))
    data['features']['atom_types'] = torch.LongTensor(data['features']['atom_types'].astype(np.int64))
    data['targets'] = torch.from_numpy(data['targets'])
    if (cuda is not None):
        # BUG FIX: Tensor.cuda() returns a copy; the originals discarded the
        # returned GPU tensors, leaving everything on the CPU.
        data['features']['geometry'] = data['features']['geometry'].cuda(cuda)
        data['features']['atom_types'] = data['features']['atom_types'].cuda(cuda)
        data['targets'] = data['targets'].cuda(cuda)
    train = np.ndarray(0)
    test = np.ndarray(0)
    # BUG FIX: `not test_strat_id` treated strat 0 as "unset" and silently
    # replaced it with a random strat; only None triggers random selection now.
    if (test_strat_id is None):
        test_strat_id = np.random.randint(len(data['strats']))
    for i in range(len(data['strats'])):
        if (i != test_strat_id):
            train = np.concatenate((train, data['strats'][i]))
        else:
            test = np.concatenate((test, data['strats'][i]))
    return (data, train, test)
|
def exp_lr_scheduler(optimizer, epoch, init_lr=0.005, lr_decay_epoch=40):
    'Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs.'
    decayed_lr = init_lr * (0.1 ** (epoch // lr_decay_epoch))
    if epoch % lr_decay_epoch == 0:
        # Announce the new rate only at decay boundaries.
        print('LR is set to {}'.format(decayed_lr))
    for group in optimizer.param_groups:
        group['lr'] = decayed_lr
    return optimizer
|
def count_params(model):
    """Total element count of the model's trainable parameters."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += np.prod(param.size())
    return total
|
class Model(nn.Module):
    """Spherical CNN classifier: S2 conv -> SO(3) convs -> so3_integrate -> linear.

    NOTE(review): a second class named `Model` is defined later in this file
    and shadows this one at import time — the file looks like several scripts
    concatenated; confirm before importing.
    """
    def __init__(self, nclasses):
        super().__init__()
        self.features = [6, 100, 100, nclasses]  # channel widths per stage
        self.bandwidths = [64, 16, 10]  # spherical bandwidth per stage
        assert (len(self.bandwidths) == (len(self.features) - 1))
        sequence = []
        # First layer lifts the S2 input signal onto SO(3).
        grid = s2_equatorial_grid(max_beta=0, n_alpha=(2 * self.bandwidths[0]), n_beta=1)
        sequence.append(S2Convolution(self.features[0], self.features[1], self.bandwidths[0], self.bandwidths[1], grid))
        # Middle stages: BN -> ReLU -> SO(3) convolution.
        for l in range(1, (len(self.features) - 2)):
            nfeature_in = self.features[l]
            nfeature_out = self.features[(l + 1)]
            b_in = self.bandwidths[l]
            b_out = self.bandwidths[(l + 1)]
            sequence.append(nn.BatchNorm3d(nfeature_in, affine=True))
            sequence.append(nn.ReLU())
            grid = so3_equatorial_grid(max_beta=0, max_gamma=0, n_alpha=(2 * b_in), n_beta=1, n_gamma=1)
            sequence.append(SO3Convolution(nfeature_in, nfeature_out, b_in, b_out, grid))
        sequence.append(nn.BatchNorm3d(self.features[(- 2)], affine=True))
        sequence.append(nn.ReLU())
        self.sequential = nn.Sequential(*sequence)
        output_features = self.features[(- 2)]
        self.out_layer = nn.Linear(output_features, self.features[(- 1)])
    def forward(self, x):
        x = self.sequential(x)
        # Integrate over SO(3) for a rotation-invariant feature vector.
        x = so3_integrate(x)
        x = self.out_layer(x)
        return F.log_softmax(x, dim=1)
|
class Model(nn.Module):
    """Larger spherical CNN classifier using spatial max-pooling instead of so3_integrate.

    NOTE(review): shadows the earlier `Model` class in this file at import time.
    """
    def __init__(self, nclasses):
        super().__init__()
        self.features = [6, 50, 70, 350, nclasses]  # channel widths per stage
        self.bandwidths = [128, 32, 22, 7]  # spherical bandwidth per stage
        assert (len(self.bandwidths) == (len(self.features) - 1))
        sequence = []
        # First layer lifts the S2 input signal onto SO(3).
        grid = s2_equatorial_grid(max_beta=0, n_alpha=(2 * self.bandwidths[0]), n_beta=1)
        sequence.append(S2Convolution(self.features[0], self.features[1], self.bandwidths[0], self.bandwidths[1], grid))
        # Middle stages: BN -> ReLU -> SO(3) convolution.
        for l in range(1, (len(self.features) - 2)):
            nfeature_in = self.features[l]
            nfeature_out = self.features[(l + 1)]
            b_in = self.bandwidths[l]
            b_out = self.bandwidths[(l + 1)]
            sequence.append(nn.BatchNorm3d(nfeature_in, affine=True))
            sequence.append(nn.ReLU())
            grid = so3_equatorial_grid(max_beta=0, max_gamma=0, n_alpha=(2 * b_in), n_beta=1, n_gamma=1)
            sequence.append(SO3Convolution(nfeature_in, nfeature_out, b_in, b_out, grid))
        sequence.append(nn.BatchNorm3d(self.features[(- 2)], affine=True))
        sequence.append(nn.ReLU())
        self.sequential = nn.Sequential(*sequence)
        # Head: non-affine BN then linear classifier.
        self.out_layer = nn.Sequential(nn.BatchNorm1d(self.features[(- 2)], affine=False), nn.Linear(self.features[(- 2)], self.features[(- 1)]))
    def forward(self, x):
        x = self.sequential(x)
        # Global max-pool over the flattened spatial dimensions.
        x = x.view(x.size(0), x.size(1), (- 1)).max((- 1))[0]
        x = self.out_layer(x)
        return F.log_softmax(x, dim=1)
|
class KeepName():
    """Wrap a transform so its result is paired with the input file name."""
    def __init__(self, transform):
        self.transform = transform
    def __call__(self, file_name):
        transformed = self.transform(file_name)
        return (file_name, transformed)
|
def main(log_dir, augmentation, dataset, batch_size, num_workers):
    """Evaluate a trained SHREC17 model on the perturbed split and run the
    official evaluator.

    log_dir : experiment directory containing model.py and state.pkl.
    augmentation : number of random-rotation copies per mesh (test-time augmentation).
    dataset : split name (e.g. 'val' or 'test').
    """
    # Fail early if nodejs (needed by the official evaluator) is missing.
    print(check_output(['nodejs', '--version']).decode('utf-8'))
    torch.backends.cudnn.benchmark = True
    # Each mesh is projected onto the sphere `augmentation` times; the copies
    # are stacked and their logits summed at prediction time.
    transform = torchvision.transforms.Compose([CacheNPY(prefix='b64_', repeat=augmentation, pick_randomly=False, transform=torchvision.transforms.Compose([ToMesh(random_rotations=True, random_translation=0.1), ProjectOnSphere(bandwidth=64)])), (lambda xs: torch.stack([torch.FloatTensor(x) for x in xs]))])
    transform = KeepName(transform)
    test_set = Shrec17('data', dataset, perturbed=True, download=True, transform=transform)
    # Load the Model class that was saved next to the checkpoint.
    loader = importlib.machinery.SourceFileLoader('model', os.path.join(log_dir, 'model.py'))
    mod = types.ModuleType(loader.name)
    loader.exec_module(mod)
    model = mod.Model(55)
    model.cuda()
    model.load_state_dict(torch.load(os.path.join(log_dir, 'state.pkl')))
    resdir = os.path.join(log_dir, (dataset + '_perturbed'))
    if os.path.isdir(resdir):
        shutil.rmtree(resdir)
    os.mkdir(resdir)
    predictions = []
    ids = []
    # `loader` is reused: from here on it names the DataLoader.
    loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True, drop_last=False)
    for (batch_idx, data) in enumerate(loader):
        model.eval()
        if (dataset != 'test'):
            data = data[0]
        (file_names, data) = data
        (batch_size, rep) = data.size()[:2]
        # Flatten the augmentation copies into the batch dimension.
        data = data.view((- 1), *data.size()[2:])
        data = data.cuda()
        with torch.no_grad():
            pred = model(data).data
        # Sum logits across the augmentation copies of each mesh.
        pred = pred.view(batch_size, rep, (- 1))
        pred = pred.sum(1)
        predictions.append(pred.cpu().numpy())
        ids.extend([x.split('/')[(- 1)].split('.')[0] for x in file_names])
        print('[{}/{}] '.format(batch_idx, len(loader)))
    predictions = np.concatenate(predictions)
    predictions_class = np.argmax(predictions, axis=1)
    # Write one retrieval list per query: all ids predicted in the same class,
    # ranked by their own predicted-class score.
    for i in range(len(ids)):
        if ((i % 100) == 0):
            print('{}/{} '.format(i, len(ids)), end='\r')
        idfile = os.path.join(resdir, ids[i])
        retrieved = [(predictions[(j, predictions_class[j])], ids[j]) for j in range(len(ids)) if (predictions_class[j] == predictions_class[i])]
        retrieved = sorted(retrieved, reverse=True)
        retrieved = [i for (_, i) in retrieved]
        with open(idfile, 'w') as f:
            f.write('\n'.join(retrieved))
    # Download and run the official SHREC17 evaluator.
    # NOTE(review): this fetches and executes remote code (node.js evaluator);
    # acceptable for a research script, but verify the source before reuse.
    url = 'https://shapenet.cs.stanford.edu/shrec17/code/evaluator.zip'
    file_path = 'evaluator.zip'
    r = requests.get(url, stream=True)
    with open(file_path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=(16 * (1024 ** 2))):
            if chunk:
                f.write(chunk)
                f.flush()
    zip_ref = zipfile.ZipFile(file_path, 'r')
    zip_ref.extractall('.')
    zip_ref.close()
    print(check_output(['nodejs', 'evaluate.js', (os.path.join('..', log_dir) + '/')], cwd='evaluator').decode('utf-8'))
    shutil.copy2(os.path.join('evaluator', (log_dir + '.summary.csv')), os.path.join(log_dir, 'summary.csv'))
|
def main(log_dir, model_path, augmentation, dataset, batch_size, learning_rate, num_workers):
arguments = copy.deepcopy(locals())
os.mkdir(log_dir)
shutil.copy2(__file__, os.path.join(log_dir, 'script.py'))
shutil.copy2(model_path, os.path.join(log_dir, 'model.py'))
logger = logging.getLogger('train')
logger.setLevel(logging.DEBUG)
logger.handlers = []
ch = logging.StreamHandler()
logger.addHandler(ch)
fh = logging.FileHandler(os.path.join(log_dir, 'log.txt'))
logger.addHandler(fh)
logger.info('%s', repr(arguments))
torch.backends.cudnn.benchmark = True
loader = importlib.machinery.SourceFileLoader('model', os.path.join(log_dir, 'model.py'))
mod = types.ModuleType(loader.name)
loader.exec_module(mod)
model = mod.Model(55)
model.cuda()
logger.info('{} paramerters in total'.format(sum((x.numel() for x in model.parameters()))))
logger.info('{} paramerters in the last layer'.format(sum((x.numel() for x in model.out_layer.parameters()))))
bw = model.bandwidths[0]
transform = CacheNPY(prefix='b{}_'.format(bw), repeat=augmentation, transform=torchvision.transforms.Compose([ToMesh(random_rotations=True, random_translation=0.1), ProjectOnSphere(bandwidth=bw)]))
def target_transform(x):
classes = ['02691156', '02747177', '02773838', '02801938', '02808440', '02818832', '02828884', '02843684', '02871439', '02876657', '02880940', '02924116', '02933112', '02942699', '02946921', '02954340', '02958343', '02992529', '03001627', '03046257', '03085013', '03207941', '03211117', '03261776', '03325088', '03337140', '03467517', '03513137', '03593526', '03624134', '03636649', '03642806', '03691459', '03710193', '03759954', '03761084', '03790512', '03797390', '03928116', '03938244', '03948459', '03991062', '04004475', '04074963', '04090263', '04099429', '04225987', '04256520', '04330267', '04379243', '04401088', '04460130', '04468005', '04530566', '04554684']
return classes.index(x[0])
train_set = Shrec17('data', dataset, perturbed=True, download=True, transform=transform, target_transform=target_transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=True, drop_last=True)
optimizer = torch.optim.SGD(model.parameters(), lr=0, momentum=0.9)
def train_step(data, target):
model.train()
(data, target) = (data.cuda(), target.cuda())
prediction = model(data)
loss = F.nll_loss(prediction, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
correct = prediction.data.max(1)[1].eq(target.data).long().cpu().sum()
return (loss.item(), correct.item())
def get_learning_rate(epoch):
    """Piecewise-constant schedule: ``learning_rate`` scaled by 1, 0.1 or 0.01.

    The factor drops at epochs 100 and 200; ``learning_rate`` comes from the
    enclosing scope.
    """
    boundaries = [100, 200]
    factors = [1, 0.1, 0.01]
    assert len(factors) == len(boundaries) + 1
    for boundary, factor in zip(boundaries, factors):
        if epoch < boundary:
            return factor * learning_rate
    return factors[-1] * learning_rate
for epoch in range(300):
    # Apply the piecewise-constant schedule by rewriting the param groups.
    lr = get_learning_rate(epoch)
    logger.info('learning rate = {} and batch size = {}'.format(lr, train_loader.batch_size))
    for p in optimizer.param_groups:
        p['lr'] = lr
    total_loss = 0
    total_correct = 0
    time_before_load = time.perf_counter()
    for (batch_idx, (data, target)) in enumerate(train_loader):
        time_after_load = time.perf_counter()
        time_before_step = time.perf_counter()
        (loss, correct) = train_step(data, target)
        total_loss += loss
        total_correct += correct
        # time = data-loading seconds + optimisation-step seconds.
        # <ACC> divides by len(data) (batch size) before averaging over
        # batches; this is only correct because drop_last=True makes every
        # batch the same size.
        logger.info('[{}:{}/{}] LOSS={:.2} <LOSS>={:.2} ACC={:.2} <ACC>={:.2} time={:.2}+{:.2}'.format(epoch, batch_idx, len(train_loader), loss, (total_loss / (batch_idx + 1)), (correct / len(data)), ((total_correct / len(data)) / (batch_idx + 1)), (time_after_load - time_before_load), (time.perf_counter() - time_before_step)))
        time_before_load = time.perf_counter()
    # Overwrite the single checkpoint after every epoch.
    torch.save(model.state_dict(), os.path.join(log_dir, 'state.pkl'))
|
def s2_near_identity_grid(max_beta=(np.pi / 8), n_alpha=8, n_beta=3):
    """Sample kernel support points as rings around the north pole of S^2.

    :param max_beta: colatitude of the outermost ring
    :param n_alpha: number of points per ring (longitudes)
    :param n_beta: number of rings
    :return: tuple of (beta, alpha) pairs; kernel size = n_alpha * n_beta
    """
    # FIX: the deprecated alias `np.float` was removed in NumPy 1.24; use the
    # explicit dtype (same semantics: 64-bit float).
    beta = (np.arange(start=1, stop=(n_beta + 1), dtype=np.float64) * max_beta) / n_beta
    alpha = np.linspace(start=0, stop=(2 * np.pi), num=n_alpha, endpoint=False)
    # Cartesian product with beta varying slowest (matches 'ij' meshgrid order).
    (B, A) = np.meshgrid(beta, alpha, indexing='ij')
    grid = np.stack((B.flatten(), A.flatten()), axis=1)
    return tuple((tuple(ba) for ba in grid))
|
def s2_equatorial_grid(max_beta=0, n_alpha=32, n_beta=1):
    """Sample kernel support points as rings around the equator of S^2.

    :param max_beta: half-width of the colatitude band around pi/2
    :param n_alpha: number of longitudes per ring
    :param n_beta: number of rings inside the band
    :return: tuple of (beta, alpha) pairs; kernel size = n_alpha * n_beta
    """
    beta = np.linspace(start=((np.pi / 2) - max_beta), stop=((np.pi / 2) + max_beta), num=n_beta, endpoint=True)
    alpha = np.linspace(start=0, stop=(2 * np.pi), num=n_alpha, endpoint=False)
    # Cartesian product, beta varying slowest.
    return tuple((b, a) for b in beta for a in alpha)
|
def s2_soft_grid(b):
    """Full SOFT sampling grid of S^2 at bandwidth ``b``.

    Betas are the 2b half-integer colatitudes of the SOFT grid; alphas are
    2b equispaced longitudes.

    :return: tuple of (beta, alpha) pairs, length (2b)^2
    """
    beta = ((np.arange(2 * b) + 0.5) / (2 * b)) * np.pi
    alpha = np.linspace(start=0, stop=(2 * np.pi), num=(2 * b), endpoint=False)
    # Cartesian product, beta varying slowest.
    return tuple((bb, aa) for bb in beta for aa in alpha)
|
def s2_mm(x, y):
    """Blockwise spectral product of an S^2 signal with an S^2 kernel.

    :param x: [l * m, batch, feature_in, complex] signal spectrum
    :param y: [l * m, feature_in, feature_out, complex] kernel spectrum
    :return: [l * m * n, batch, feature_out, complex] SO(3) output spectrum
    """
    from s2cnn.utils.complex import complex_mm
    assert (y.size(3) == 2)
    assert (x.size(3) == 2)
    nbatch = x.size(1)
    nfeature_in = x.size(2)
    nfeature_out = y.size(2)
    assert (y.size(1) == nfeature_in)
    nspec = x.size(0)
    assert (y.size(0) == nspec)
    if x.is_cuda:
        return _cuda_S2_mm.apply(x, y)
    # CPU fallback.  The S^2 spectrum has nl^2 coefficients (2l+1 per degree l).
    nl = round((nspec ** 0.5))
    Fz_list = []
    begin = 0
    for l in range(nl):
        L = ((2 * l) + 1)
        size = L
        # degree-l blocks of x and y
        Fx = x[begin:(begin + size)]
        Fy = y[begin:(begin + size)]
        # reshape so a single complex matmul handles all m of this degree
        Fx = Fx.view((L * nbatch), nfeature_in, 2)
        Fy = Fy.transpose(0, 1)
        Fy = Fy.contiguous()
        Fy = Fy.view(nfeature_in, (L * nfeature_out), 2)
        # x times conj(y) — same convention as the CUDA kernel
        Fz = complex_mm(Fx, Fy, conj_y=True)
        # expand to the (2l+1)^2 block of the SO(3) spectrum
        Fz = Fz.view(L, nbatch, L, nfeature_out, 2)
        Fz = Fz.transpose(1, 2)
        Fz = Fz.contiguous()
        Fz = Fz.view((L * L), nbatch, nfeature_out, 2)
        Fz_list.append(Fz)
        begin += size
    z = torch.cat(Fz_list, 0)
    return z
|
class _cuda_S2_mm(torch.autograd.Function):
    # Autograd wrapper around the hand-written CUDA kernels for s2_mm.
    @staticmethod
    def forward(ctx, x, y):
        """x: [l*m, batch, f_in, 2], y: [l*m, f_in, f_out, 2]; CUDA float32."""
        ctx.save_for_backward(x, y)
        return _cuda_s2_mm(x, y)
    @staticmethod
    def backward(ctx, gradz):
        """Gradients w.r.t. x and y via dedicated CUDA kernels."""
        import s2cnn.utils.cuda as cuda_utils
        (x, y) = ctx.saved_tensors
        # bandwidth: the S^2 spectrum has nl^2 coefficients
        nl = round((x.size(0) ** 0.5))
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        # SO(3) spectrum size of gradz: sum_l (2l+1)^2 = nl(4nl^2-1)/3
        nspec = ((((4 * (nl ** 2)) - 1) * nl) // 3)
        device = torch.cuda.current_device()
        gradx_cuda_kernel = _setup_s2mm_gradx_cuda_kernel(nbatch=nbatch, nspec=nspec, nl=nl, nfeature_in=nfeature_in, nfeature_out=nfeature_out, device=device)
        grady_cuda_kernel = _setup_s2mm_grady_cuda_kernel(nbatch=nbatch, nspec=nspec, nl=nl, nfeature_in=nfeature_in, nfeature_out=nfeature_out, device=device)
        stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
        gradx = grady = None
        # Only launch the kernels whose gradients are actually requested.
        if ctx.needs_input_grad[0]:
            gradx = gradz.new_empty(((nl ** 2), nbatch, nfeature_in, 2))
            gradx_cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1), grid=(cuda_utils.get_blocks((((nl ** 2) * nbatch) * nfeature_in), 1024), 1, 1), args=[gradz.contiguous().data_ptr(), y.contiguous().data_ptr(), gradx.data_ptr()], stream=stream)
        if ctx.needs_input_grad[1]:
            grady = gradz.new_empty(((nl ** 2), nfeature_in, nfeature_out, 2))
            grady_cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1), grid=(cuda_utils.get_blocks((((nl ** 2) * nfeature_in) * nfeature_out), 1024), 1, 1), args=[gradz.contiguous().data_ptr(), x.contiguous().data_ptr(), grady.data_ptr()], stream=stream)
        return (gradx, grady)
|
def _cuda_s2_mm(x, y):
    """CUDA forward pass of s2_mm.

    :param x: [l * m, batch, feature_in, complex]
    :param y: [l * m, feature_in, feature_out, complex]
    :return: [l * m * n, batch, feature_out, complex]
    """
    import s2cnn.utils.cuda as cuda_utils
    assert (x.is_cuda and (x.dtype == torch.float32))
    assert (y.is_cuda and (y.dtype == torch.float32))
    assert (y.size(3) == 2)
    assert (x.size(3) == 2)
    nbatch = x.size(1)
    nfeature_in = x.size(2)
    nfeature_out = y.size(2)
    assert (y.size(1) == nfeature_in)
    assert (y.size(0) == x.size(0))
    # bandwidth: the S^2 input spectrum has nl^2 coefficients
    nl = round((x.size(0) ** 0.5))
    # SO(3) output spectrum size: sum_l (2l+1)^2 = nl(4nl^2-1)/3
    nspec = ((((4 * (nl ** 2)) - 1) * nl) // 3)
    assert (x.size(0) == (nl ** 2))
    assert (y.size(0) == (nl ** 2))
    device = torch.cuda.current_device()
    cuda_kernel = _setup_s2mm_cuda_kernel(nbatch=nbatch, nspec=nspec, nfeature_in=nfeature_in, nfeature_out=nfeature_out, device=device)
    stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
    output = x.new_empty((nspec, nbatch, nfeature_out, 2))
    # one thread per (spectral index, batch, feature_out) output element
    cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1), grid=(cuda_utils.get_blocks(((nspec * nbatch) * nfeature_out), 1024), 1, 1), args=[x.contiguous().data_ptr(), y.contiguous().data_ptr(), output.data_ptr()], stream=stream)
    return output
|
@lru_cache(maxsize=32)
def _setup_s2mm_cuda_kernel(nbatch, nspec, nfeature_in, nfeature_out, device=0):
    # Compile (and memoize) the forward CUDA kernel of s2_mm.  One thread per
    # output element (s, batch, f_out); COMPUTE_LMN decodes the flat SO(3)
    # spectral index s into (l, m, n), and the inner loop contracts over input
    # features computing x * conj(y).
    # NOTE(review): `device` is unused in the body and only serves as an
    # lru_cache key — presumably one compiled kernel per device; confirm.
    kernel = Template('\n#define COMPUTE_LMN(s) int l = powf(3.0/4.0 * s, 1.0/3.0) - 0.5; int L = l * (4 * l * l - 1) / 3; int rest = s - L; if (rest >= (2 * l + 1) * (2 * l + 1)) { ++l; L = l * (4 * l * l - 1) / 3; rest = s - L; } int m = rest / (2 * l + 1) - l; int n = rest % (2 * l + 1) - l;\n\n#define EXTRACT(i1, i2, n2, i3, n3) int i1 = index; int i3 = i1 % (n3); i1 /= n3; int i2 = i1 % (n2); i1 /= n2;\n\n#define CONTRACT1(s1, i2, n2, i3, n3) ( ( (l * l + (l + (s1))) * (n2) + (i2) ) * (n3) + (i3) )\n\n#define CONTRACT2(s1, s2, i2, n2, i3, n3) ( ( (L + (l + (s1)) * (2 * l + 1) + (l + (s2))) * (n2) + (i2) ) * (n3) + (i3) )\n\nextern "C"\n__global__ void main_(const float* in_x, const float* in_y, float* out) {\n    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < ${nspec} * ${nbatch} * ${nfeature_out}; index += blockDim.x * gridDim.x) {\n        EXTRACT(s, i, ${nbatch}, f_out, ${nfeature_out})\n\n        // compute s -> (l,m,n)\n        COMPUTE_LMN(s)\n\n        float out_re = 0.0;\n        float out_im = 0.0;\n\n        for (int f_in = 0; f_in < ${nfeature_in}; ++f_in) {\n            float x_re = in_x[CONTRACT1(m, i, ${nbatch}, f_in, ${nfeature_in} ) * 2 + 0];\n            float x_im = in_x[CONTRACT1(m, i, ${nbatch}, f_in, ${nfeature_in} ) * 2 + 1];\n            float y_re = in_y[CONTRACT1(n, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 0];\n            float y_im = in_y[CONTRACT1(n, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 1];\n\n            // x times y conjugate\n            out_re += x_re * y_re + x_im * y_im;\n            out_im += x_im * y_re - x_re * y_im;\n        }\n\n        out[index * 2 + 0] = out_re;\n        out[index * 2 + 1] = out_im;\n    }\n}\n').substitute({'nbatch': nbatch, 'nspec': nspec, 'nfeature_in': nfeature_in, 'nfeature_out': nfeature_out})
    import s2cnn.utils.cuda as cuda_utils
    return cuda_utils.compile_kernel(kernel, 's2mm.cu', 'main_')
|
@lru_cache(maxsize=32)
def _setup_s2mm_gradx_cuda_kernel(nbatch, nspec, nl, nfeature_in, nfeature_out, device=0):
    # Compile (and memoize) the backward-w.r.t.-x CUDA kernel of s2_mm.
    # One thread per gradient element (s, batch, f_in) with s an S^2 index
    # decoded by COMPUTE_LM; accumulates grad_z * y over (f_out, n).
    # NOTE(review): `device` only differentiates lru_cache entries.
    kernel = Template('\n#define COMPUTE_LM(s) int l = sqrtf(s); int L = (4 * l * l - 1) * l / 3; int m = s - l * l - l;\n\n#define EXTRACT(i1, i2, n2, i3, n3) int i1 = index; int i3 = i1 % (n3); i1 /= n3; int i2 = i1 % (n2); i1 /= n2;\n\n#define CONTRACT1(s1, i2, n2, i3, n3) ( ( (l * l + (l + (s1))) * (n2) + (i2) ) * (n3) + (i3) )\n\n#define CONTRACT2(s1, s2, i2, n2, i3, n3) ( ( (L + (l + (s1)) * (2 * l + 1) + (l + (s2))) * (n2) + (i2) ) * (n3) + (i3) )\n\nextern "C"\n__global__ void main_(const float* grad_z, const float* y, float* grad_x) {\n    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (${nl} * ${nl}) * ${nbatch} * ${nfeature_in}; index += blockDim.x * gridDim.x) {\n        EXTRACT(s, i, ${nbatch}, f_in, ${nfeature_in})\n\n        // compute s -> (l,m)\n        COMPUTE_LM(s)\n\n        float out_re = 0.0;\n        float out_im = 0.0;\n\n        for (int f_out = 0; f_out < ${nfeature_out}; ++f_out) {\n            for (int k = -l; k <= l; ++k) {\n                float grad_z_re = grad_z[CONTRACT2(m, k, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 0];\n                float grad_z_im = grad_z[CONTRACT2(m, k, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 1];\n                float y_re = y[CONTRACT1(k, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 0];\n                float y_im = y[CONTRACT1(k, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 1];\n\n                // grad_z times y\n                out_re += grad_z_re * y_re - grad_z_im * y_im;\n                out_im += grad_z_re * y_im + grad_z_im * y_re;\n            }\n        }\n\n        grad_x[index * 2 + 0] = out_re;\n        grad_x[index * 2 + 1] = out_im;\n    }\n}\n').substitute({'nbatch': nbatch, 'nspec': nspec, 'nl': nl, 'nfeature_in': nfeature_in, 'nfeature_out': nfeature_out})
    import s2cnn.utils.cuda as cuda_utils
    return cuda_utils.compile_kernel(kernel, 's2mm_gradx.cu', 'main_')
|
@lru_cache(maxsize=32)
def _setup_s2mm_grady_cuda_kernel(nbatch, nspec, nl, nfeature_in, nfeature_out, device=0):
    # Compile (and memoize) the backward-w.r.t.-y CUDA kernel of s2_mm.
    # One thread per gradient element (s, f_in, f_out); accumulates
    # conj(grad_z) * x over (batch, k).
    # NOTE(review): `device` only differentiates lru_cache entries.
    kernel = Template('\n#define COMPUTE_LM(s) int l = powf(s, 0.5); int L = (4 * l * l - 1) * l / 3; int m = s - l * l - l;\n\n#define EXTRACT(i1, i2, n2, i3, n3) int i1 = index; int i3 = i1 % (n3); i1 /= n3; int i2 = i1 % (n2); i1 /= n2;\n\n#define CONTRACT1(s1, i2, n2, i3, n3) ( ( (l * l + (l + (s1))) * (n2) + (i2) ) * (n3) + (i3) )\n\n#define CONTRACT2(s1, s2, i2, n2, i3, n3) ( ( (L + (l + (s1)) * (2 * l + 1) + (l + (s2))) * (n2) + (i2) ) * (n3) + (i3) )\n\nextern "C"\n__global__ void main_(const float* grad_z, const float* x, float* grad_y) {\n    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (${nl} * ${nl}) * ${nfeature_in} * ${nfeature_out}; index += blockDim.x * gridDim.x) {\n        EXTRACT(s, f_in, ${nfeature_in}, f_out, ${nfeature_out})\n\n        // compute s -> (l,m)\n        COMPUTE_LM(s)\n\n        float out_re = 0.0;\n        float out_im = 0.0;\n\n        for (int i = 0; i < ${nbatch}; ++i) {\n            for (int k = -l; k <= l; ++k) {\n                float grad_z_re = grad_z[CONTRACT2(k, m, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 0];\n                float grad_z_im = grad_z[CONTRACT2(k, m, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 1];\n                float x_re = x[CONTRACT1(k, i, ${nbatch}, f_in, ${nfeature_in} ) * 2 + 0];\n                float x_im = x[CONTRACT1(k, i, ${nbatch}, f_in, ${nfeature_in} ) * 2 + 1];\n\n                // conjugate grad_z times x\n                out_re += grad_z_re * x_re + grad_z_im * x_im;\n                out_im += grad_z_re * x_im - grad_z_im * x_re;\n            }\n        }\n\n        grad_y[index * 2 + 0] = out_re;\n        grad_y[index * 2 + 1] = out_im;\n    }\n}\n').substitute({'nbatch': nbatch, 'nspec': nspec, 'nl': nl, 'nfeature_in': nfeature_in, 'nfeature_out': nfeature_out})
    import s2cnn.utils.cuda as cuda_utils
    return cuda_utils.compile_kernel(kernel, 's2mm_grady.cu', 'main_')
|
def test_compare_cuda_cpu():
    # Compare the CPU path of s2_mm with the CUDA kernels (requires a GPU).
    # 1+3+5+7 = 16 = nl^2 spectral rows for bandwidth nl = 4.
    # NOTE(review): a second function with this exact name is defined further
    # down in this module and shadows this one at import time, so only the
    # later one is collected by pytest — consider renaming.
    x = torch.rand((((1 + 3) + 5) + 7), 2, 3, 2)
    y = torch.rand((((1 + 3) + 5) + 7), 3, 5, 2)
    z1 = s2_mm(x, y)
    z2 = s2_mm(x.cuda(), y.cuda()).cpu()
    # relative max deviation, normalised by the output scale
    q = ((z1 - z2).abs().max().item() / z1.std().item())
    print(q)
    assert (q < 0.0001)
|
def so3_rft(x, b, grid):
    """Real Fourier transform of a kernel sampled on SO(3) grid points.

    :param x: [..., beta_alpha_gamma] real values at the grid points
    :param b: output bandwidth of the spectrum
    :param grid: tuple of (beta, alpha, gamma) tuples
    :return: [l * m * n, ..., complex]
    """
    # F: (n_grid_points, n_spectral, 2) transform matrix, cached per device
    F = _setup_so3_ft(b, grid, device_type=x.device.type, device_index=x.device.index)
    assert (x.size((- 1)) == F.size(0))
    sz = x.size()
    # flatten leading dims, contract over the spatial axis, spectral axis first
    # NOTE(review): the .clone() looks like a defensive copy of the cached
    # tensor for einsum — confirm it is still required.
    x = torch.einsum('ia,afc->fic', (x.view((- 1), x.size((- 1))), F.clone()))
    # restore the leading dims: [n_spectral, ..., 2]
    x = x.view((- 1), *sz[:(- 1)], 2)
    return x
|
@cached_dirpklgz('cache/setup_so3_ft')
def __setup_so3_ft(b, grid):
    # Build the Fourier-transform matrix for so3_rft: one row per grid point,
    # containing the conjugated Wigner D matrix entries for all degrees l < b.
    # Cached on disk because wigner_D_matrix is expensive to evaluate.
    from lie_learn.representations.SO3.wigner_d import wigner_D_matrix
    n_spatial = len(grid)
    # total number of spectral coefficients: sum_l (2l+1)^2
    n_spectral = np.sum([(((2 * l) + 1) ** 2) for l in range(b)])
    F = np.zeros((n_spatial, n_spectral), dtype=complex)
    for (i, (beta, alpha, gamma)) in enumerate(grid):
        Dmats = [wigner_D_matrix(l, alpha, beta, gamma, field='complex', normalization='quantum', order='centered', condon_shortley='cs').conj() for l in range(b)]
        F[i] = np.hstack([Dl.flatten() for Dl in Dmats])
    # reinterpret complex128 as float64 pairs: trailing axis = (real, imag)
    F = F.view('float').reshape(((- 1), n_spectral, 2))
    return F
|
@lru_cache(maxsize=32)
def _setup_so3_ft(b, grid, device_type, device_index):
    """Device-placed float32 version of __setup_so3_ft, memoized per device."""
    device = torch.device(device_type, device_index)
    transform = __setup_so3_ft(b, grid)
    return torch.tensor(transform.astype(np.float32), dtype=torch.float32, device=device)
|
def so3_mm(x, y):
    """Block-diagonal complex product of two SO(3) spectra.

    :param x: [l * m * n, batch, feature_in, complex]
    :param y: [l * m * n, feature_in, feature_out, complex]
    :return: [l * m * n, batch, feature_out, complex]
    """
    from s2cnn.utils.complex import complex_mm
    import math
    assert (y.size(3) == 2)
    assert (x.size(3) == 2)
    nbatch = x.size(1)
    nfeature_in = x.size(2)
    nfeature_out = y.size(2)
    assert (y.size(1) == nfeature_in)
    nspec = x.size(0)
    assert (y.size(0) == nspec)
    # recover the bandwidth from nspec = sum_{l<nl} (2l+1)^2 = nl(4nl^2-1)/3
    nl = math.ceil((((3 / 4) * nspec) ** (1 / 3)))
    assert (nspec == ((nl * ((4 * (nl ** 2)) - 1)) // 3))
    if x.is_cuda:
        return _cuda_SO3_mm.apply(x, y)
    # CPU fallback: per degree l, contract the (2l+1)^2 blocks of x and y with
    # a single complex matmul (contraction over the shared spectral index and
    # the input features).
    Fz_list = []
    begin = 0
    for l in range(nl):
        L = ((2 * l) + 1)
        size = (L ** 2)
        Fx = x[begin:(begin + size)]
        Fy = y[begin:(begin + size)]
        # reshape the degree-l block of x into matmul layout
        Fx = Fx.view(L, L, nbatch, nfeature_in, 2)
        Fx = Fx.transpose(0, 1)
        Fx = Fx.transpose(0, 2)
        Fx = Fx.transpose(2, 3)
        Fx = Fx.contiguous()
        Fx = Fx.view((nbatch * L), (nfeature_in * L), 2)
        # reshape the degree-l block of y to match the contraction axes
        Fy = Fy.view(L, L, nfeature_in, nfeature_out, 2)
        Fy = Fy.transpose(0, 2)
        Fy = Fy.contiguous()
        Fy = Fy.view((nfeature_in * L), (L * nfeature_out), 2)
        # x times conj(y) — same convention as the CUDA kernel
        Fz = complex_mm(Fx, Fy, conj_y=True)
        # back to [(2l+1)^2, batch, feature_out, 2]
        Fz = Fz.view(nbatch, (L * L), nfeature_out, 2)
        Fz = Fz.transpose(0, 1)
        Fz_list.append(Fz)
        begin += size
    z = torch.cat(Fz_list, 0)
    return z
|
class _cuda_SO3_mm(torch.autograd.Function):
    # Autograd wrapper around the tiled CUDA kernel for so3_mm; the same
    # kernel template serves forward and both backward passes via the
    # conj_*/trans_* flags of _setup_so3mm_cuda_kernel.
    @staticmethod
    def forward(ctx, x, y):
        """
        :param x: [l * m * n, batch, feature_in, complex]
        :param y: [l * m * n, feature_in, feature_out, complex]
        :return: [l * m * n, batch, feature_out, complex]
        """
        assert (x.is_cuda and (x.dtype == torch.float32))
        assert (y.is_cuda and (y.dtype == torch.float32))
        assert (y.size(3) == 2)
        assert (x.size(3) == 2)
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        assert (y.size(1) == nfeature_in)
        nspec = x.size(0)
        assert (y.size(0) == nspec)
        # recover the bandwidth from nspec = nl(4nl^2-1)/3
        nl = round((((3 / 4) * nspec) ** (1 / 3)))
        assert (nspec == ((nl * ((4 * (nl ** 2)) - 1)) // 3))
        ctx.save_for_backward(x, y)
        device = torch.cuda.current_device()
        # forward: out = x * conj(y), with y's spectral indices transposed
        cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nbatch, nj=nfeature_out, nk=nfeature_in, conj_y=True, trans_y_spec=True, device=device)
        output = x.new_empty((nspec, nbatch, nfeature_out, 2))
        cuda_kernel(x, y, output)
        return output
    @staticmethod
    def backward(ctx, gradz):
        """Gradients w.r.t. x and y, reusing the same kernel template."""
        (x, y) = ctx.saved_tensors
        nspec = x.size(0)
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        nl = round((((3 / 4) * nspec) ** (1 / 3)))
        assert (nspec == ((nl * ((4 * (nl ** 2)) - 1)) // 3))
        gradx = grady = None
        device = torch.cuda.current_device()
        # Only launch the kernels whose gradients are actually requested.
        if ctx.needs_input_grad[0]:
            gradx_cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nbatch, nj=nfeature_in, nk=nfeature_out, trans_y_feature=True, device=device)
            gradx = gradz.new_empty((nspec, nbatch, nfeature_in, 2))
            gradx_cuda_kernel(gradz, y, gradx)
        if ctx.needs_input_grad[1]:
            grady_cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nfeature_out, nj=nfeature_in, nk=nbatch, trans_out_feature=True, conj_x=True, trans_x_spec=True, trans_x_feature=True, device=device)
            grady = gradz.new_empty((nspec, nfeature_in, nfeature_out, 2))
            grady_cuda_kernel(gradz, x, grady)
        return (gradx, grady)
|
@lru_cache(maxsize=32)
def _setup_so3mm_cuda_kernel(nl, ni, nj, nk, conj_x=False, conj_y=False, trans_x_spec=False, trans_x_feature=False, trans_y_spec=False, trans_y_feature=False, trans_out_feature=False, device=0):
    """Compile (and memoize) a tiled CUDA kernel computing

        out[l*m*n, i, j] = sum_k sum_p x[l*m*p, i, k] y[l*p*n, k, j]

    where out, x, y are complex valued.

    if conj_x is set to True, x is conjugated
    if conj_y is set to True, y is conjugated
    if trans_x_spec is set to True m and p are permuted in x[...]
    if trans_y_spec is set to True p and n are permuted in y[...]
    if trans_x_feature is set to True i and k are permuted in x[...]
    if trans_y_feature is set to True k and j are permuted in y[...]
    if trans_out_feature is set to True i and j are permuted in out[...]
    """
    # Choose the index macros according to the requested transpositions so the
    # single kernel body can serve the forward pass and both backward passes.
    kernel = '\n#define NI {}\n#define NJ {}\n#define NK {}\n'.format(ni, nj, nk)
    if ((not trans_x_spec) and (not trans_x_feature)):
        kernel += '#define INDEX_X (((L0 + m * L + p) * NI + i) * NK + k)\n'
    if ((not trans_x_spec) and trans_x_feature):
        kernel += '#define INDEX_X (((L0 + m * L + p) * NK + k) * NI + i)\n'
    if (trans_x_spec and (not trans_x_feature)):
        kernel += '#define INDEX_X (((L0 + p * L + m) * NI + i) * NK + k)\n'
    if (trans_x_spec and trans_x_feature):
        kernel += '#define INDEX_X (((L0 + p * L + m) * NK + k) * NI + i)\n'
    if ((not trans_y_spec) and (not trans_y_feature)):
        kernel += '#define INDEX_Y (((L0 + p * L + n) * NK + k) * NJ + j)\n'
    if ((not trans_y_spec) and trans_y_feature):
        kernel += '#define INDEX_Y (((L0 + p * L + n) * NJ + j) * NK + k)\n'
    if (trans_y_spec and (not trans_y_feature)):
        kernel += '#define INDEX_Y (((L0 + n * L + p) * NK + k) * NJ + j)\n'
    if (trans_y_spec and trans_y_feature):
        kernel += '#define INDEX_Y (((L0 + n * L + p) * NJ + j) * NK + k)\n'
    if (not trans_out_feature):
        kernel += '#define INDEX_OUT (((L0 + m * L + n) * NI + i) * NJ + j)\n'
    if trans_out_feature:
        kernel += '#define INDEX_OUT (((L0 + m * L + n) * NJ + j) * NI + i)\n'
    kernel += '\n#define CONJ_X {}\n#define CONJ_Y {}\n'.format(('x_im = -x_im;' if conj_x else ';'), ('y_im = -y_im;' if conj_y else ';'))
    kernel += '\n#define CEIL_DIV(x, y) (((x) + (y) - 1) / (y))\n\nextern "C"\n__global__ void main_(const float* in_x, const float* in_y, float* out)\n{\n    // start of thread independant code\n    int l = blockIdx.z;\n    int L = 2 * l + 1;\n    int L0 = (4 * l*l - 1) * l / 3;\n\n    if (blockIdx.y * 32 >= L * NI || blockIdx.x * 32 >= L * NJ) {\n        return;\n    }\n\n    int ntile = CEIL_DIV(L * NK, 32);\n    // end of thread independant code\n\n    int mi = blockIdx.y * 32 + threadIdx.y;\n    int m = mi / NI;\n    int i = mi % NI;\n    int nj = blockIdx.x * 32 + threadIdx.x;\n    int n = nj / NJ;\n    int j = nj % NJ;\n\n    float sum_re = 0.0;\n    float sum_im = 0.0;\n\n    for (int tile = 0; tile < ntile; ++tile) {\n        __shared__ float tileX[2][32][32];\n        __shared__ float tileY[2][32][32];\n\n        int pk = tile * 32 + threadIdx.x;\n        int p = pk / NK;\n        int k = pk % NK;\n        int index = INDEX_X * 2;\n        tileX[0][threadIdx.y][threadIdx.x] = m < L && p < L ? in_x[index + 0] : 0.0;\n        tileX[1][threadIdx.y][threadIdx.x] = m < L && p < L ? in_x[index + 1] : 0.0;\n\n        pk = tile * 32 + threadIdx.y;\n        p = pk / NK;\n        k = pk % NK;\n        index = INDEX_Y * 2;\n        tileY[0][threadIdx.y][threadIdx.x] = p < L && n < L ? in_y[index + 0] : 0.0;\n        tileY[1][threadIdx.y][threadIdx.x] = p < L && n < L ? in_y[index + 1] : 0.0;\n\n        __syncthreads();\n\n        for (int any = 0; any < 32; ++any) {\n            float x_re = tileX[0][threadIdx.y][any];\n            float x_im = tileX[1][threadIdx.y][any];\n            float y_re = tileY[0][any][threadIdx.x];\n            float y_im = tileY[1][any][threadIdx.x];\n\n            CONJ_X\n            CONJ_Y\n\n            sum_re += x_re * y_re - x_im * y_im;\n            sum_im += x_re * y_im + x_im * y_re;\n        }\n\n        __syncthreads();\n    }\n\n    if (m < L && n < L) {\n        int index = INDEX_OUT * 2;\n        out[index + 0] = sum_re;\n        out[index + 1] = sum_im;\n    }\n}\n'
    import s2cnn.utils.cuda as cuda_utils
    kernel = cuda_utils.compile_kernel(kernel, 'so3_mm.cu', 'main_')
    # NOTE(review): the stream is captured at compile time and reused for
    # every later launch through this memoized closure — confirm intended.
    stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
    def fun(x, y, output):
        assert output.is_contiguous()
        # 32x32 thread tiles over (m*i, n*j); one z-block per degree l
        kernel(block=(32, 32, 1), grid=(math.ceil(((((2 * nl) - 1) * nj) / 32)), math.ceil(((((2 * nl) - 1) * ni) / 32)), nl), args=[x.contiguous().data_ptr(), y.contiguous().data_ptr(), output.data_ptr()], stream=stream)
    return fun
|
def test_compare_cuda_cpu():
    # Compare the CPU path of so3_mm with the CUDA kernel (requires a GPU).
    # 1+9+25+49 = sum of (2l+1)^2 for l < 4, i.e. bandwidth nl = 4.
    # NOTE(review): this function shares its name with the s2_mm test defined
    # earlier in this module and shadows it — consider renaming both.
    x = torch.rand((((1 + 9) + 25) + 49), 2, 3, 2)
    y = torch.rand((((1 + 9) + 25) + 49), 3, 5, 2)
    z1 = so3_mm(x, y)
    z2 = so3_mm(x.cuda(), y.cuda()).cpu()
    # relative max deviation, normalised by the output scale
    q = ((z1 - z2).abs().max().item() / z1.std().item())
    print(q)
    assert (q < 0.0001)
|
class S2Convolution(Module):
    """Convolution of a spherical signal with a learned spherical kernel.

    The input lives on S^2 (beta, alpha); the output of the spectral
    correlation lives on SO(3) (beta, alpha, gamma).

    :param nfeature_in: number of input features
    :param nfeature_out: number of output features
    :param b_in: input bandwidth (precision of the input SOFT grid)
    :param b_out: output bandwidth
    :param grid: kernel support points on the sphere, as produced by the
        s2_*_grid helpers in this module
    """

    def __init__(self, nfeature_in, nfeature_out, b_in, b_out, grid):
        super(S2Convolution, self).__init__()
        self.nfeature_in = nfeature_in
        self.nfeature_out = nfeature_out
        self.b_in = b_in
        self.b_out = b_out
        self.grid = grid
        # one real weight per (input feature, output feature, grid point)
        self.kernel = Parameter(torch.empty(nfeature_in, nfeature_out, len(grid)).uniform_(-1, 1))
        # normalisation keeping the output scale roughly independent of the
        # kernel size, the fan-in and the bandwidths
        fan = len(self.grid) * self.nfeature_in * (self.b_out ** 4.0) / (self.b_in ** 2.0)
        self.scaling = 1.0 / math.sqrt(fan)
        self.bias = Parameter(torch.zeros(1, nfeature_out, 1, 1, 1))

    def forward(self, x):
        """
        :param x: [batch, feature_in, beta, alpha]
        :return: [batch, feature_out, beta, alpha, gamma]
        """
        assert x.size(1) == self.nfeature_in
        assert x.size(2) == 2 * self.b_in
        assert x.size(3) == 2 * self.b_in
        spectrum = S2_fft_real.apply(x, self.b_out)
        weights = s2_rft(self.kernel * self.scaling, self.b_out, self.grid)
        out = SO3_ifft_real.apply(s2_mm(spectrum, weights))
        return out + self.bias
|
class SO3Convolution(Module):
    """Convolution of an SO(3) signal with a learned SO(3) kernel.

    :param nfeature_in: number of input features
    :param nfeature_out: number of output features
    :param b_in: input bandwidth (precision of the input SOFT grid)
    :param b_out: output bandwidth
    :param grid: kernel support points on SO(3), a tuple of
        (beta, alpha, gamma) tuples
    """

    def __init__(self, nfeature_in, nfeature_out, b_in, b_out, grid):
        super(SO3Convolution, self).__init__()
        self.nfeature_in = nfeature_in
        self.nfeature_out = nfeature_out
        self.b_in = b_in
        self.b_out = b_out
        self.grid = grid
        # one real weight per (input feature, output feature, grid point)
        self.kernel = Parameter(torch.empty(nfeature_in, nfeature_out, len(grid)).uniform_(-1, 1))
        self.bias = Parameter(torch.zeros(1, nfeature_out, 1, 1, 1))
        # normalisation keeping the output scale roughly independent of the
        # kernel size, the fan-in and the bandwidth ratio
        fan = len(self.grid) * self.nfeature_in * (self.b_out ** 3.0) / (self.b_in ** 3.0)
        self.scaling = 1.0 / math.sqrt(fan)

    def forward(self, x):
        """
        :param x: [batch, feature_in, beta, alpha, gamma]
        :return: [batch, feature_out, beta, alpha, gamma]
        """
        assert x.size(1) == self.nfeature_in
        assert x.size(2) == 2 * self.b_in
        assert x.size(3) == 2 * self.b_in
        assert x.size(4) == 2 * self.b_in
        spectrum = SO3_fft_real.apply(x, self.b_out)
        weights = so3_rft(self.kernel * self.scaling, self.b_out, self.grid)
        assert spectrum.size(0) == weights.size(0)
        assert spectrum.size(2) == weights.size(1)
        out = so3_mm(spectrum, weights)
        assert out.size(0) == spectrum.size(0)
        assert out.size(1) == spectrum.size(1)
        assert out.size(2) == weights.size(2)
        return SO3_ifft_real.apply(out) + self.bias
|
class SO3Shortcut(Module):
    """Shortcut branch for SO(3) residual blocks.

    Acts as the identity when the feature count and bandwidth are unchanged;
    otherwise projects with a single-point (1x1-style) SO3Convolution.
    """

    def __init__(self, nfeature_in, nfeature_out, b_in, b_out):
        super(SO3Shortcut, self).__init__()
        assert b_out <= b_in
        if nfeature_in == nfeature_out and b_in == b_out:
            # shapes already match: pure identity
            self.conv = None
        else:
            self.conv = SO3Convolution(nfeature_in=nfeature_in, nfeature_out=nfeature_out, b_in=b_in, b_out=b_out, grid=((0, 0, 0),))

    def forward(self, x):
        """
        :param x: [batch, feature_in, beta, alpha, gamma]
        :return: [batch, feature_out, beta, alpha, gamma]
        """
        return x if self.conv is None else self.conv(x)
|
def so3_integrate(x):
    """Integrate a signal over SO(3) using the Haar measure.

    :param x: [..., beta, alpha, gamma] (..., 2b, 2b, 2b)
    :return: [...] integral of the signal over the three angles
    """
    assert x.size(-1) == x.size(-2)
    assert x.size(-2) == x.size(-3)
    b = x.size(-1) // 2
    # quadrature weights along beta (alpha and gamma are uniformly weighted)
    w = _setup_so3_integrate(b, device_type=x.device.type, device_index=x.device.index)
    # Sum over gamma, then alpha.  FIX: torch.sum(dim=...) already removes the
    # reduced dimension, so the trailing .squeeze(-1) calls in the original
    # were redundant no-ops and have been dropped.
    x = torch.sum(x, dim=-1)
    x = torch.sum(x, dim=-1)
    sz = x.size()
    # weighted sum over beta as one matrix-vector product
    x = x.view(-1, 2 * b)
    w = w.view(2 * b, 1)
    x = torch.mm(x, w).squeeze(-1)
    x = x.view(*sz[:-1])
    return x
|
@lru_cache(maxsize=32)
@show_running
def _setup_so3_integrate(b, device_type, device_index):
    # S3 quadrature weights for bandwidth b, placed on the requested device
    # and memoized per (b, device).
    import lie_learn.spaces.S3 as S3
    return torch.tensor(S3.quadrature_weights(b), dtype=torch.float32, device=torch.device(device_type, device_index))
|
def so3_rotation(x, alpha, beta, gamma):
    """Rotate an SO(3) signal by the Euler angles (alpha, beta, gamma).

    :param x: [..., beta, alpha, gamma] (..., 2b, 2b, 2b)
    :return: rotated signal with the same shape as x
    """
    b = (x.size()[(- 1)] // 2)
    # remember the spatial shape before x is overwritten by its spectrum
    x_size = x.size()
    # Wigner D matrices of the rotation, one per degree l < b
    Us = _setup_so3_rotation(b, alpha, beta, gamma, device_type=x.device.type, device_index=x.device.index)
    # rotate in the spectral domain: each degree-l block is multiplied by U_l
    x = SO3_fft_real.apply(x)
    Fz_list = []
    begin = 0
    for l in range(b):
        L = ((2 * l) + 1)
        size = (L ** 2)
        Fx = x[begin:(begin + size)]
        Fx = Fx.view(L, (- 1), 2)
        U = Us[l].view(L, L, 2)
        # conj(U_l) times the degree-l block
        Fz = complex_mm(U, Fx, conj_x=True)
        Fz = Fz.view(size, (- 1), 2)
        Fz_list.append(Fz)
        begin += size
    Fz = torch.cat(Fz_list, 0)
    z = SO3_ifft_real.apply(Fz)
    z = z.contiguous()
    z = z.view(*x_size)
    return z
|
@cached_dirpklgz('cache/setup_so3_rotation')
def __setup_so3_rotation(b, alpha, beta, gamma):
    # Wigner D matrices of the rotation for all degrees l < b, stored as
    # float32 (real, imag) pairs; cached on disk because wigner_D_matrix is
    # expensive to evaluate.
    from lie_learn.representations.SO3.wigner_d import wigner_D_matrix
    Us = [wigner_D_matrix(l, alpha, beta, gamma, field='complex', normalization='quantum', order='centered', condon_shortley='cs') for l in range(b)]
    # reinterpret complex64 as float32 pairs: trailing axis = (real, imag)
    Us = [Us[l].astype(np.complex64).view(np.float32).reshape((((2 * l) + 1), ((2 * l) + 1), 2)) for l in range(b)]
    return Us
|
@lru_cache(maxsize=32)
def _setup_so3_rotation(b, alpha, beta, gamma, device_type, device_index):
    """Device-placed tensors of __setup_so3_rotation, memoized per device."""
    device = torch.device(device_type, device_index)
    matrices = __setup_so3_rotation(b, alpha, beta, gamma)
    return [torch.tensor(U, dtype=torch.float32, device=device) for U in matrices]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.