code stringlengths 17 6.64M |
|---|
def load_gan(gan_path, n_gan_images):
    """Load GAN-generated image metadata.

    Thin wrapper kept for API compatibility. NOTE: `gan_path` is currently
    ignored — get_gan_data resolves its own paths via the dataset module.
    """
    return get_gan_data(n_gan_images)
|
def get_gan_data(n_gan_images):
    """Sample GAN-generated image names grouped by cluster id.

    Fixes over the original: files are opened with `with` (the handles
    leaked on exception), the redundant second `np.array(labels)` is gone,
    and the leftover debug `print(images)` has been removed.

    Parameters
    ----------
    n_gan_images : int
        Total number of generated images to sample across all clusters.

    Returns
    -------
    (images, labels) : (dict, np.ndarray)
        `images` maps str(cluster_id) -> list of image basenames;
        `labels` is an (N_CLUSTER, n_classes) 0/1 matrix marking which
        identity labels each cluster's GAN list covers.
    """
    images = dict()
    labels = []
    for i in range(N_CLUSTER):
        # Each gan<i>.list holds one identity label per line.
        tmp_labels = np.zeros(shape=dataset.n_classe(), dtype=np.int32)
        with open(os.path.join(dataset.cluster_path(), 'gan%s.list' % i), 'r') as f:
            for line in f:
                tmp_labels[int(line.strip())] = 1
        labels.append(tmp_labels)
    labels = np.array(labels)
    # Oversample per cluster so that, after deduplication and truncation,
    # at least n_gan_images files remain.
    n_gan = int(np.floor((n_gan_images / N_CLUSTER) + 1))
    data_list = None
    for i in range(N_CLUSTER):
        gan_list = glob.glob(os.path.join(dataset.gan_path(), 'gan_%s*.jpg' % i))
        gan_list = gan_list[:n_gan]
        if data_list is None:
            data_list = gan_list
        else:
            data_list = np.concatenate((data_list, gan_list), axis=0)
    data_list = np.unique(data_list)
    np.random.shuffle(data_list)
    data_list = data_list[:n_gan_images]
    assert len(data_list) == n_gan_images
    for filename in data_list:
        img_name = os.path.basename(filename)
        # File name pattern: gan_<cluster>_... -> cluster id is field 1.
        lbl = int(img_name.split('_')[1])
        images.setdefault(str(lbl), []).append(img_name)
    assert np.sum([len(images[i]) for i in images]) == n_gan_images
    assert len(images) == labels.shape[0]
    assert labels.shape[1] == dataset.n_classe()
    return (images, labels)
|
def copyfolder(src, dst):
    """Copy every entry of directory `src` into `dst` (non-recursive).

    Creates `dst` (including missing parents — the original `os.mkdir`
    failed when the parent did not exist) and joins paths portably with
    os.path.join instead of '/' concatenation.
    """
    files = os.listdir(src)
    if not os.path.isdir(dst):
        os.makedirs(dst)
    for entry in files:
        copyfile(os.path.join(src, entry), os.path.join(dst, entry))
|
class dcganDataset(Dataset):
    """Dataset over the folders under `opt.data_dir/<root>`.

    Each sub-folder name is parsed as an integer class id; its files get
    one-hot labels and flag 0. For root == 'train_new', the special folder
    'gen_0000' is instead populated from the module-level
    `generated_images` tuple (GAN outputs with soft labels and flag 1).

    NOTE(review): the `transform`/`targte_transform` (sic — typo kept for
    interface compatibility) arguments are stored but never applied;
    __getitem__ uses the module-level `data_transforms` directly.
    """
    def __init__(self, root, transform=None, targte_transform=None):
        super(dcganDataset, self).__init__()
        self.image_dir = os.path.join(opt.data_dir, root)
        self.samples = []     # sample names of the form '<folder>_<file>'
        self.img_label = []   # per-sample label vectors (length n_classes)
        self.img_flag = []    # 0 = real image, 1 = GAN-generated image
        self.transform = transform
        self.targte_transform = targte_transform
        self.train_val = root
        if (root == 'train_new'):
            for folder in os.listdir(self.image_dir):
                fdir = ((self.image_dir + '/') + folder)
                if (folder == 'gen_0000'):
                    # GAN samples come pre-listed from get_gan_data().
                    (samples, img_labels, flags) = generated_images
                    self.samples = (self.samples + samples)
                    self.img_label = (self.img_label + img_labels)
                    self.img_flag = (self.img_flag + flags)
                else:
                    for files in os.listdir(fdir):
                        temp = ((folder + '_') + files)
                        # Folder name is the integer class id -> one-hot label.
                        lbl = int(folder)
                        label_vec = np.zeros(shape=n_classes)
                        label_vec[lbl] = 1
                        self.img_label.append(label_vec)
                        self.img_flag.append(0)
                        self.samples.append(temp)
        else:
            for folder in os.listdir(self.image_dir):
                fdir = ((self.image_dir + '/') + folder)
                for files in os.listdir(fdir):
                    temp = ((folder + '_') + files)
                    lbl = int(folder)
                    label_vec = np.zeros(shape=n_classes)
                    label_vec[lbl] = 1
                    self.img_label.append(label_vec)
                    self.img_flag.append(0)
                    self.samples.append(temp)
    def __len__(self):
        # Number of registered samples.
        return len(self.samples)
    def __getitem__(self, idx):
        temp = self.samples[idx]
        if (self.img_flag[idx] == 1):
            # Generated sample: 'gen_0000_' prefix is 9 characters.
            foldername = 'gen_0000'
            filename = temp[9:]
        else:
            # Real sample: assumes 4-character folder names plus '_'.
            foldername = temp[:4]
            filename = temp[5:]
        img = default_loader(((((self.image_dir + '/') + foldername) + '/') + filename))
        # Transforms come from the module-level dict, keyed by split.
        if (self.train_val == 'train_new'):
            result = {'img': data_transforms['train'](img), 'label': self.img_label[idx], 'flag': self.img_flag[idx]}
        else:
            result = {'img': data_transforms['val'](img), 'label': self.img_label[idx], 'flag': self.img_flag[idx]}
        return result
|
class SLSloss(nn.Module):
    """Switchable loss for semi-supervised re-ID training.

    For real samples (flag 0) this is cross-entropy against the (one-hot)
    target; for generated samples (flag 1) it is the mean log-probability
    over all classes (an LSRO-style uniform-label term).

    Fixes over the original: `F.log_softmax` is computed once with an
    explicit `dim` (it was computed twice and emitted deprecation
    warnings), the in-place max-subtraction that mutated the CALLER's
    tensor is removed (log_softmax is shift-invariant and numerically
    stable, so the result is identical), and the flag cast uses
    `.float()` instead of hard-coded `torch.cuda.FloatTensor`, so the
    loss also works on CPU.
    """

    def __init__(self):
        super(SLSloss, self).__init__()

    def forward(self, input, target, flg):
        """Compute the mean switched loss.

        input:  (N, C) logits (trailing spatial dims are flattened).
        target: (N, C) label matrix, one-hot for real samples.
        flg:    (N,) 0/1 flags, 1 marking generated samples.
        """
        if input.dim() > 2:
            # Flatten any trailing spatial dimensions to (N*spatial, C).
            input = input.view(input.size(0), input.size(1), -1)
            input = input.transpose(1, 2)
            input = input.contiguous().view(-1, input.size(2))
        logp = F.log_softmax(input, dim=1)
        # Uniform-label term: average log-probability across classes.
        flos = torch.sum(logp, 1) / logp.size(1)
        # Cross-entropy term against the provided target matrix.
        logpt = torch.sum(torch.mul(logp, target), 1, True).view(-1)
        # .float() keeps the flags on whatever device they already occupy.
        flg = flg.view(-1).float()
        loss = (-1 * logpt) * (1 - flg) - flos * flg
        return loss.mean()
|
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train `model` with the switchable loss, keeping the best-val weights.

    Relies on module-level globals: dataloaders, dataset_sizes, use_gpu,
    generated_image_size, y_loss, y_err and save_network. Returns the
    model loaded with the weights of the best validation epoch.
    """
    since = time.time()
    # NOTE(review): state_dict() returns a live reference; subsequent
    # training mutates these "best" weights too — consider copy.deepcopy.
    best_model_wts = model.state_dict()
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, (num_epochs - 1)))
        print(('-' * 10))
        for phase in ['train', 'val']:
            if (phase == 'train'):
                # Old-PyTorch convention: scheduler stepped once per epoch,
                # before the optimizer updates.
                scheduler.step()
                model.train(True)
            else:
                model.train(False)
            running_loss = 0.0
            running_corrects = 0
            for data in dataloaders[phase]:
                inputs = data['img']
                labels = data['label']
                flags = data['flag']  # 1 marks a GAN-generated sample
                # Assumes CUDA is available (hard-coded cuda tensor type).
                labels = labels.type(torch.cuda.FloatTensor)
                if use_gpu:
                    inputs = Variable(inputs.cuda())
                    labels = Variable(labels.cuda())
                    flags = Variable(flags.cuda())
                else:
                    (inputs, labels, flags) = (Variable(inputs), Variable(labels), Variable(flags))
                optimizer.zero_grad()
                outputs = model(inputs)
                (_, preds) = torch.max(outputs.data, 1)
                loss = criterion(outputs, labels, flags)
                if (phase == 'train'):
                    loss.backward()
                    optimizer.step()
                running_loss += loss.item()
                print('Loss {} '.format(loss.item()))
                # Exclude generated samples from the accuracy count by
                # forcing their prediction to an impossible class (-1).
                for temp in range(flags.size()[0]):
                    if (flags.data[temp] == 1):
                        preds[temp] = (- 1)
                indices = torch.argmax(labels, dim=1)
                running_corrects += torch.sum((preds == indices.data))
            epoch_loss = (running_loss / dataset_sizes[phase])
            if (phase == 'train'):
                # Generated images never count as correct, so remove them
                # from the denominator too.
                epoch_acc = (running_corrects / (dataset_sizes[phase] - generated_image_size))
            else:
                epoch_acc = (running_corrects / dataset_sizes[phase])
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            y_loss[phase].append(epoch_loss)
            y_err[phase].append((1.0 - epoch_acc))
            if (phase == 'val'):
                if (epoch_acc > best_acc):
                    best_acc = epoch_acc
                    best_model_wts = model.state_dict()
                if (epoch >= 40):
                    # Checkpoint every epoch once past the warm-up range.
                    save_network(model, epoch)
        print()
    time_elapsed = (time.time() - since)
    print('Training complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60)))
    print('Best val Acc: {:4f}'.format(best_acc))
    model.load_state_dict(best_model_wts)
    save_network(model, 'best')
    return model
|
def save_network(network, epoch_label):
    """Save weights to ./model/<name>/net_<epoch_label>.pth.

    The model is moved to CPU for serialization and moved back to the
    first configured GPU afterwards when CUDA is available.
    """
    save_filename = 'net_%s.pth' % epoch_label
    save_path = os.path.join('./model', name, save_filename)
    torch.save(network.cpu().state_dict(), save_path)
    # BUG FIX: torch.cuda.is_available is a function; without the call the
    # bare function object is always truthy, so this branch ran (and
    # crashed) even on CPU-only machines.
    if torch.cuda.is_available():
        network.cuda(gpu_ids[0])
|
def load_network(network):
    """Load weights from ./model/<name>/net_<opt.which_epoch>.pth into `network`."""
    checkpoint_path = os.path.join('./model', name, ('net_%s.pth' % opt.which_epoch))
    state = torch.load(checkpoint_path)
    network.load_state_dict(state)
    return network
|
def fliplr(img):
    """Flip a 4-D image batch horizontally (reverse the width axis, dim 3)."""
    width = img.size(3)
    reversed_cols = torch.arange(width - 1, -1, -1).long()
    return img.index_select(3, reversed_cols)
|
def extract_feature(model, dataloaders):
    """Extract L2-normalised features for every image in `dataloaders`.

    Each batch is passed through `model` twice — original and horizontally
    flipped — and the two outputs are summed before normalisation.
    Feature width depends on the global `opt`: 1024 (DenseNet), 2048
    (ResNet), or 2048x6 part vectors (PCB). Requires CUDA (hard-coded
    .cuda() call). Returns a CPU FloatTensor of shape (num_images, dim).
    """
    features = torch.FloatTensor()
    count = 0
    for data in dataloaders:
        (img, label) = data
        (n, c, h, w) = img.size()
        count += n
        print(count)
        if opt.use_dense:
            ff = torch.FloatTensor(n, 1024).zero_()
        else:
            ff = torch.FloatTensor(n, 2048).zero_()
        if opt.PCB:
            # PCB overrides: one 2048-d vector per horizontal body part.
            ff = torch.FloatTensor(n, 2048, 6).zero_()
        # Two passes: i == 0 original image, i == 1 horizontal flip.
        for i in range(2):
            if (i == 1):
                img = fliplr(img)
            input_img = Variable(img.cuda())
            outputs = model(input_img)
            f = outputs.data.cpu()
            ff = (ff + f)
        if opt.PCB:
            # Normalise each part vector, then flatten parts into one row.
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
            ff = ff.div(fnorm.expand_as(ff))
            ff = ff.view(ff.size(0), (- 1))
        else:
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
            ff = ff.div(fnorm.expand_as(ff))
        features = torch.cat((features, ff), 0)
    return features
|
def get_id(img_path):
    """Parse Market-1501-style file names into camera ids and person labels.

    Expected basename format: '<pid:4 digits>_c<cam>...', for example
    '0001_c1s1_001.jpg'; junk detections use pid '-1'.

    Uses os.path.basename instead of the original manual split on '/',
    which is more portable and clearer.

    Parameters
    ----------
    img_path : iterable of (path, anything) pairs (e.g. ImageFolder.imgs).

    Returns
    -------
    (camera_id, labels) : two parallel lists of ints; label is -1 for junk.
    """
    camera_id = []
    labels = []
    for (path, v) in img_path:
        filename = os.path.basename(path)
        label = filename[0:4]
        # First character after the first 'c' is the camera index.
        camera = filename.split('c')[1]
        if label[0:2] == '-1':
            labels.append(-1)
        else:
            labels.append(int(label))
        camera_id.append(int(camera[0]))
    return (camera_id, labels)
|
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    """Extract query/gallery features and dump distances to ./result.mat.

    NOTE(review): `ranks` is accepted but never used here — the CMC/mAP
    evaluation presumably consumes result.mat elsewhere; confirm.
    Returns nothing; the side effect is the saved .mat file.
    """
    batch_time = AverageMeter()
    model.eval()
    with torch.no_grad():
        (qf, q_pids, q_camids) = ([], [], [])
        for (batch_idx, (imgs, pids, camids)) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update((time.time() - end))
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print('Extracted features for query set, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))
        (gf, g_pids, g_camids) = ([], [], [])
        end = time.time()
        for (batch_idx, (imgs, pids, camids)) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update((time.time() - end))
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print('Extracted features for gallery set, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))
        print('==> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(batch_time.avg, 32))
        (m, n) = (qf.size(0), gf.size(0))
        # Squared Euclidean distance: ||q||^2 + ||g||^2 broadcast to (m, n)...
        distmat = (torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t())
        # ...minus 2 * q . g^T. NOTE(review): this positional (beta, alpha)
        # addmm_ signature is deprecated in recent PyTorch releases.
        distmat.addmm_(1, (- 2), qf, gf.t())
        distmat = distmat.numpy()
        result = {'distmat': distmat, 'q_pids': q_pids, 'g_pids': g_pids, 'q_camids': q_camids, 'g_camids': g_camids, 'query_feature': qf.numpy(), 'gallery_feature': gf.numpy()}
        print(qf.numpy())
        print(gf.numpy())
        scipy.io.savemat('./result.mat', result)
|
def load_network(network):
    """Populate `network` with the weights stored at opt.model_path."""
    state = torch.load(os.path.join(opt.model_path))
    network.load_state_dict(state)
    return network
|
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Baseline training loop (plain cross-entropy labels, no flags).

    Relies on module-level globals: dataloaders, dataset_sizes, use_gpu,
    y_loss, y_err and save_network. Returns the model loaded with the
    weights of the LAST validation epoch (not the best one).
    """
    since = time.time()
    best_model_wts = model.state_dict()
    # NOTE(review): best_acc is initialised but never updated in this
    # variant; best_model_wts is likewise unused after initialisation.
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, (num_epochs - 1)))
        print(('-' * 10))
        for phase in ['train', 'val']:
            if (phase == 'train'):
                # Old-PyTorch convention: scheduler stepped once per epoch.
                scheduler.step()
                model.train(True)
            else:
                model.train(False)
            running_loss = 0.0
            running_corrects = 0
            for data in dataloaders[phase]:
                (inputs, labels) = data
                if use_gpu:
                    inputs = Variable(inputs.cuda())
                    labels = Variable(labels.cuda())
                else:
                    (inputs, labels) = (Variable(inputs), Variable(labels))
                optimizer.zero_grad()
                outputs = model(inputs)
                (_, preds) = torch.max(outputs.data, 1)
                loss = criterion(outputs, labels)
                if (phase == 'train'):
                    loss.backward()
                    optimizer.step()
                running_loss += loss.item()
                print('Current Loss {}'.format(loss.item()))
                running_corrects += torch.sum((preds == labels.data))
            epoch_loss = (running_loss / dataset_sizes[phase])
            epoch_acc = (running_corrects / dataset_sizes[phase])
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            y_loss[phase].append(epoch_loss)
            y_err[phase].append((1.0 - epoch_acc))
            if (phase == 'val'):
                # NOTE(review): last_model_wts is only bound here, and as a
                # live state_dict reference; if num_epochs == 0 the
                # load_state_dict below raises NameError.
                last_model_wts = model.state_dict()
                save_network(model, epoch)
        print()
    time_elapsed = (time.time() - since)
    print('Training complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60)))
    model.load_state_dict(last_model_wts)
    save_network(model, 'last')
    return model
|
def save_network(network, epoch_label):
    """Save weights to ./model/<name>/net_<epoch_label>.pth.

    The model is moved to CPU for serialization and moved back to the
    first configured GPU afterwards when CUDA is available.
    """
    save_filename = 'net_%s.pth' % epoch_label
    save_path = os.path.join('./model', name, save_filename)
    torch.save(network.cpu().state_dict(), save_path)
    # BUG FIX: torch.cuda.is_available is a function; the original tested
    # the bare function object, which is always truthy.
    if torch.cuda.is_available():
        network.cuda(gpu_ids[0])
|
def get_gan_data(generated_size, n_clusters=3, generated_dir=None, cluster_dir='/home/paul/clustering'):
    """Sample `generated_size` GAN images and attach soft (uniform) labels.

    Fixes over the original: files are opened with `with`, the removed
    NumPy alias `np.float` (gone since NumPy 1.24) is replaced with
    np.float64, and the hard-coded cluster-list directory is now a
    parameter with the old path as its backward-compatible default.

    Parameters
    ----------
    generated_size : int
        Number of generated images to return.
    n_clusters : int
        Number of GAN clusters; 'gan<i>.list' under `cluster_dir` lists
        the identity labels belonging to cluster i.
    generated_dir : str
        Directory holding the generated JPEGs named 'gan_<cluster>*.jpg'.
    cluster_dir : str
        Location of the cluster label lists.

    Returns
    -------
    (images, img_labels, flags)
        images: names prefixed 'gen_0000_' (the form dcganDataset expects);
        img_labels: per-image label distributions; flags: all 1 (generated).
    """
    assert generated_dir is not None
    labels = []
    for i in range(n_clusters):
        # np.float was removed in NumPy 1.24; np.float64 is the same dtype.
        tmp_labels = np.zeros(shape=n_classes, dtype=np.float64)
        with open(os.path.join(cluster_dir, 'gan%s.list' % i), 'r') as f:
            for line in f:
                tmp_labels[int(line.strip())] = 1.0
        # Normalise into a uniform distribution over the cluster's identities.
        tmp_labels = tmp_labels / np.sum(tmp_labels)
        labels.append(tmp_labels)
    labels = np.array(labels)
    # Oversample per cluster so enough files survive dedup + truncation.
    n_gan = int(np.floor((generated_size / n_clusters) + 1))
    data_list = None
    for i in range(n_clusters):
        gan_list = glob.glob(os.path.join(generated_dir, 'gan_%s*.jpg' % i))
        gan_list = gan_list[:n_gan]
        if data_list is None:
            data_list = gan_list
        else:
            data_list = np.concatenate((data_list, gan_list), axis=0)
    data_list = np.unique(data_list)
    np.random.shuffle(data_list)
    assert data_list.shape[0] >= generated_size
    data_list = data_list[:generated_size]
    img_labels = []
    images = []
    flags = []
    for filename in data_list:
        img_name = os.path.basename(filename)
        # 'gan_<cluster>_...' -> cluster id selects the soft label row.
        lbl = int(img_name.split('_')[1])
        img_labels.append(labels[lbl])
        images.append('gen_0000_' + img_name)
        flags.append(1)
    assert len(images) == generated_size
    assert len(images) == len(img_labels) == len(flags)
    return (images, img_labels, flags)
|
class AverageMeter(object):
    """Tracks the most recent value and a running average.

    Adapted from
    https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
|
def read_json(fpath):
    """Deserialize and return the JSON document stored at `fpath`."""
    with open(fpath, 'r') as handle:
        return json.load(handle)
|
def mkdir_if_missing(directory):
    """Create `directory` (and any missing parents); no-op if it exists.

    exist_ok=True closes the check-then-create race in the original: two
    concurrent callers could both pass the exists() test and one would
    then crash in makedirs. The outer exists() check is kept so that a
    path occupied by a regular file is still silently skipped, exactly as
    before.
    """
    if not os.path.exists(directory):
        os.makedirs(directory, exist_ok=True)
|
def write_json(obj, fpath):
    """Serialize `obj` as pretty-printed JSON at `fpath`, creating parent dirs."""
    mkdir_if_missing(os.path.dirname(fpath))
    with open(fpath, 'w') as handle:
        json.dump(obj, handle, indent=4, separators=(',', ': '))
|
def Conv(incoming, num_filters, filter_size=3, stride=(1, 1), pad='same', W=lasagne.init.HeNormal(), b=None, nonlinearity=lasagne.nonlinearities.rectify, **kwargs):
    """ConvLayer wrapper with project defaults (He init, no bias, ReLU)."""
    ensure_set_name('conv', kwargs)
    return ConvLayer(incoming, num_filters, filter_size, stride, pad,
                     W=W, b=b, nonlinearity=nonlinearity, **kwargs)
|
class ConvPrelu(Layer):
    """Convolution followed by a PReLU activation, exposed as one layer."""

    def __init__(self, incoming, num_filters, filter_size=3, stride=(1, 1), pad='same', W=lasagne.init.HeNormal(), b=None, **kwargs):
        ensure_set_name('conv_prelu', kwargs)
        super(ConvPrelu, self).__init__(incoming, **kwargs)
        # Linear convolution; the nonlinearity comes from the PReLU below.
        self.conv = Conv(incoming, num_filters, filter_size, stride, pad=pad, W=W, b=b, nonlinearity=None, **kwargs)
        self.prelu = prelu(self.conv, **kwargs)
        # Expose the parameters of both sub-layers as our own.
        merged = self.conv.params.copy()
        merged.update(self.prelu.params)
        self.params = merged

    def get_output_for(self, input, **kwargs):
        """Apply the convolution, then the PReLU."""
        convolved = self.conv.get_output_for(input)
        return self.prelu.get_output_for(convolved)

    def get_output_shape_for(self, input, **kwargs):
        """The output shape is decided by the convolution alone."""
        return self.conv.get_output_shape_for(input)
|
class ConvAggr(Layer):
    """Linear (activation-free) convolution used to aggregate feature maps."""

    def __init__(self, incoming, num_channels, filter_size=3, stride=(1, 1), pad='same', W=lasagne.init.HeNormal(), b=None, **kwargs):
        ensure_set_name('conv_aggr', kwargs)
        super(ConvAggr, self).__init__(incoming, **kwargs)
        self.conv = Conv(incoming, num_channels, filter_size, stride,
                         pad=pad, W=W, b=b, nonlinearity=None, **kwargs)
        # Adopt the wrapped convolution's parameters as our own.
        self.params = self.conv.params.copy()

    def get_output_for(self, input, **kwargs):
        """Delegate straight to the wrapped convolution."""
        return self.conv.get_output_for(input)

    def get_output_shape_for(self, input_shape):
        """Delegate shape inference to the wrapped convolution."""
        return self.conv.get_output_shape_for(input_shape)
|
def Conv3D(incoming, num_filters, filter_size=3, stride=(1, 1, 1), pad='same', W=lasagne.init.HeNormal(), b=lasagne.init.Constant(), nonlinearity=lasagne.nonlinearities.rectify, **kwargs):
    """Conv3DLayer wrapper with project defaults (He init, constant bias, ReLU)."""
    ensure_set_name('conv3d', kwargs)
    return Conv3DLayer(incoming, num_filters, filter_size, stride, pad,
                       W=W, b=b, nonlinearity=nonlinearity, **kwargs)
|
class Conv3DPrelu(Layer):
    """3D convolution followed by a PReLU activation, exposed as one layer."""

    def __init__(self, incoming, num_filters, filter_size=3, stride=(1, 1, 1), pad='same', W=lasagne.init.HeNormal(), b=None, **kwargs):
        ensure_set_name('conv3d_prelu', kwargs)
        super(Conv3DPrelu, self).__init__(incoming, **kwargs)
        # Linear convolution; the nonlinearity comes from the PReLU below.
        self.conv = Conv3D(incoming, num_filters, filter_size, stride, pad=pad, W=W, b=b, nonlinearity=None, **kwargs)
        self.prelu = prelu(self.conv, **kwargs)
        # Expose the parameters of both sub-layers as our own.
        merged = self.conv.params.copy()
        merged.update(self.prelu.params)
        self.params = merged

    def get_output_for(self, input, **kwargs):
        """Apply the convolution, then the PReLU."""
        convolved = self.conv.get_output_for(input)
        return self.prelu.get_output_for(convolved)

    def get_output_shape_for(self, input, **kwargs):
        """The output shape is decided by the convolution alone."""
        return self.conv.get_output_shape_for(input)
|
class Conv3DAggr(Layer):
    """Linear (activation-free) 3D convolution used to aggregate feature maps."""

    def __init__(self, incoming, num_channels, filter_size=3, stride=(1, 1, 1), pad='same', W=lasagne.init.HeNormal(), b=None, **kwargs):
        ensure_set_name('conv3d_aggr', kwargs)
        super(Conv3DAggr, self).__init__(incoming, **kwargs)
        self.conv = Conv3D(incoming, num_channels, filter_size, stride,
                           pad=pad, W=W, b=b, nonlinearity=None, **kwargs)
        # Adopt the wrapped convolution's parameters as our own.
        self.params = self.conv.params.copy()

    def get_output_for(self, input, **kwargs):
        """Delegate straight to the wrapped convolution."""
        return self.conv.get_output_for(input)

    def get_output_shape_for(self, input_shape):
        """Delegate shape inference to the wrapped convolution."""
        return self.conv.get_output_shape_for(input_shape)
|
class DataConsistencyLayer(MergeLayer):
    """Data consistency layer.

    Blends the network's k-space estimate with the acquired samples:
    hard replacement when no noise level is given, a weighted average
    otherwise.
    """
    def __init__(self, incomings, inv_noise_level=None, **kwargs):
        super(DataConsistencyLayer, self).__init__(incomings, **kwargs)
        # None/0 means noiseless acquisition: samples overwrite the estimate.
        self.inv_noise_level = inv_noise_level
    def get_output_for(self, inputs, **kwargs):
        """Combine the estimate with the sampled values.

        Parameters
        ----------
        inputs : [x, x_sampled]
            two 4d tensors; first is the data estimate, second the acquired
            k-space samples (assumed zero where unsampled).

        Returns
        -------
        4d tensor: the estimate with sampled entries replaced (or blended).
        """
        x = inputs[0]
        x_sampled = inputs[1]
        v = self.inv_noise_level
        if v:
            # Noisy case: weighted average of estimate and samples.
            out = ((x + (v * x_sampled)) / (1 + v))
        else:
            # Noiseless case: derive a binary mask of sampled locations from
            # the nonzero entries, then hard-replace those entries.
            mask = T.set_subtensor(x_sampled[T.neq(x_sampled, 0).nonzero()], 1)
            out = (((1 - mask) * x) + x_sampled)
        return out
    def get_output_shape_for(self, input_shapes, **kwargs):
        # Output keeps the shape of the data input.
        return input_shapes[0]
|
class DataConsistencyWithMaskLayer(MergeLayer):
    """Data consistency layer using an explicit sampling mask.

    Like DataConsistencyLayer, but the sampled locations are given by a
    mask input rather than inferred from nonzero sample values.
    """
    def __init__(self, incomings, inv_noise_level=None, **kwargs):
        super(DataConsistencyWithMaskLayer, self).__init__(incomings, **kwargs)
        # None/0 means noiseless acquisition: samples overwrite the estimate.
        self.inv_noise_level = inv_noise_level
    def get_output_for(self, inputs, **kwargs):
        """Combine the estimate with the sampled values.

        Parameters
        ----------
        inputs : [x, mask, x_sampled]
            three 4d tensors: the data estimate, the binary sampling mask,
            and the acquired k-space samples.

        Returns
        -------
        4d tensor: the estimate with sampled entries replaced (or blended).
        """
        x = inputs[0]
        mask = inputs[1]
        x_sampled = inputs[2]
        v = self.inv_noise_level
        if v:
            # Noisy case: weighted average of estimate and samples.
            out = ((x + (v * x_sampled)) / (1 + v))
        else:
            # Noiseless case: keep the estimate where unsampled, take the
            # acquired values where sampled.
            out = (((1 - mask) * x) + x_sampled)
        return out
    def get_output_shape_for(self, input_shapes, **kwargs):
        # Output keeps the shape of the data input.
        return input_shapes[0]
|
class DCLayer(MergeLayer):
    """Data consistency layer operating in image space.

    FFTs the image-space data, enforces consistency with the sampled
    k-space values via the mask, then inverse-FFTs back to image space.
    """
    def __init__(self, incomings, data_shape, inv_noise_level=None, **kwargs):
        if ('name' not in kwargs):
            kwargs['name'] = 'dc'
        super(DCLayer, self).__init__(incomings, **kwargs)
        self.inv_noise_level = inv_noise_level
        (data, mask, sampled) = incomings
        self.data = data
        self.mask = mask
        self.sampled = sampled
        # Internal sub-graph: FFT -> data consistency -> inverse FFT.
        self.dft2 = FFT2Layer(data, data_shape, name='dc_dft2')
        self.dc = DataConsistencyWithMaskLayer([self.dft2, mask, sampled], name='dc_consistency')
        self.idft2 = FFT2Layer(self.dc, data_shape, inv=True, name='dc_idft2')
    def get_output_for(self, inputs, **kwargs):
        x = inputs[0]
        mask = inputs[1]
        x_sampled = inputs[2]
        # Evaluate the internal sub-graph with the concrete inputs bound to
        # the original incoming layers.
        return get_output(self.idft2, {self.data: x, self.mask: mask, self.sampled: x_sampled})
    def get_output_shape_for(self, input_shapes, **kwargs):
        # Output keeps the shape of the data input.
        return input_shapes[0]
|
def ensure_set_name(default_name, kwargs):
    """Ensure layer kwargs carry a 'name', prefixing with 'pr' when given.

    The original also contained auto-naming branches that drew from a
    global `id_ctr`, but they sat AFTER an unconditional raise for a
    missing 'name' and could never execute; they have been removed.
    Behaviour is unchanged: an explicit name is required.

    Parameters
    ----------
    default_name : string
        Kept for interface compatibility; it was only consumed by the
        unreachable auto-naming branches.
    kwargs : dict
        Keyword arguments destined for a layer constructor. Mutated in
        place: when 'pr' is present, it is prepended to 'name'.

    Returns
    -------
    kwargs : dict

    Raises
    ------
    Warning
        If 'name' is missing from kwargs.
    """
    if 'name' not in kwargs:
        raise Warning("You need to name the layers, otherwise it simply won't work")
    if 'pr' in kwargs:
        kwargs['name'] = kwargs['pr'] + kwargs['name']
    return kwargs
|
def get_dc_input_layers(shape):
    """Create the CNN input layers for 2D or 3D data.

    Returns
    -------
    (input_layer, kspace_input_layer, mask_layer) : three InputLayers
        sharing `shape`, for the image data, the k-space samples and the
        sampling mask respectively.
    """
    # 5D shapes (batch, channel, x, y, t) need 5-axis tensor variables.
    make_var = tensor5 if len(shape) > 4 else T.tensor4
    input_var = make_var('input_var')
    kspace_input_var = make_var('kspace_input_var')
    mask_var = make_var('mask')
    input_layer = InputLayer(shape, input_var=input_var, name='input')
    kspace_input_layer = InputLayer(shape, input_var=kspace_input_var, name='kspace_input')
    mask_layer = InputLayer(shape, input_var=mask_var, name='mask')
    return (input_layer, kspace_input_layer, mask_layer)
|
def roll_and_sum(prior_result, orig):
    """Scan step: add `orig` to the accumulator, then rotate one step along the last axis."""
    return T.roll(prior_result + orig, 1, axis=-1)
|
class KspaceFillNeighbourLayer(MergeLayer):
    """k-space fill layer (periodic temporal boundary).

    The input data is assumed to be on the k-space grid. Missing temporal
    entries are filled with averages over neighbouring frames; the
    temporal axis wraps around circularly. This layer should be invoked
    from AverageInKspaceLayer.
    """
    def __init__(self, incomings, frame_dist=range(5), divide_by_n=False, **kwargs):
        super(KspaceFillNeighbourLayer, self).__init__(incomings, **kwargs)
        self.frame_dist = frame_dist
        # A neighbourhood of distance d spans 2*d + 1 frames.
        n_samples = [(1 + (2 * i)) for i in self.frame_dist]
        self.n_samples = n_samples
        # True: divide by window size; False: divide by nonzero contributions.
        self.divide_by_n = divide_by_n
    def get_output_for(self, inputs, **kwargs):
        """Fill missing k-space lines from neighbouring frames.

        Parameters
        ----------
        inputs : two 5d tensors, [kspace_data, mask], each (n, 2, nx, ny, nt)

        Returns
        -------
        5d tensor with the original plus one filled copy per frame_dist,
        concatenated on the channel axis: (n, (len(frame_dist)+1)*2, nx, ny, nt)
        """
        x = inputs[0]
        mask = inputs[1]
        # After step t, result[t-1] holds a circular running sum of t
        # consecutive frames (built by repeated roll-and-add); likewise for
        # the mask, which counts nonzero contributions per position.
        (result, _) = theano.scan(fn=roll_and_sum, outputs_info=T.zeros_like(x), non_sequences=x, n_steps=T.constant(np.max(self.n_samples)))
        (mask_result, _) = theano.scan(fn=roll_and_sum, outputs_info=T.zeros_like(x), non_sequences=mask, n_steps=T.constant(np.max(self.n_samples)))
        results = [x]
        for (i, t) in enumerate(self.n_samples):
            if self.divide_by_n:
                c = float(t)
            else:
                c = 1.0
            acc = result[(t - 1)]
            mask_acc = mask_result[(t - 1)]
            # Normalise the windowed sum (never dividing by less than c) and
            # roll it back so each window is centred on its own frame.
            avg = T.roll((acc / T.maximum(c, mask_acc)), ((- self.frame_dist[i]) - 1), axis=(- 1))
            # Keep acquired samples untouched; fill only unsampled entries.
            res = ((avg * (1 - mask)) + (x * mask))
            results.append(res)
        return T.concatenate(results, axis=1)
    def get_output_shape_for(self, input_shapes, **kwargs):
        (n, nc, nx, ny, nt) = input_shapes[0]
        # One extra channel group per frame_dist entry, plus the original.
        nc_new = ((len(self.frame_dist) + 1) * nc)
        return (n, nc_new, nx, ny, nt)
|
class KspaceFillNeighbourLayer_Clipped(MergeLayer):
    """k-space fill layer with clipping at the temporal edges.

    Like KspaceFillNeighbourLayer, but averaging windows are truncated at
    t=0 and t=nt instead of wrapping around. This layer should be invoked
    from AverageInKspaceLayer.
    """
    def __init__(self, incomings, nt, frame_dist=range(5), divide_by_n=False, **kwargs):
        super(KspaceFillNeighbourLayer_Clipped, self).__init__(incomings, **kwargs)
        self.frame_dist = frame_dist
        # A neighbourhood of distance d spans 2*d + 1 frames.
        n_samples = [(1 + (2 * i)) for i in self.frame_dist]
        self.n_samples = n_samples
        # True: divide by window size; False: divide by nonzero contributions.
        self.divide_by_n = divide_by_n
        self.nt = nt
    def get_output_for(self, inputs, **kwargs):
        """Fill missing k-space lines from neighbouring frames (no wrap).

        Parameters
        ----------
        inputs : two 5d tensors, [kspace_data, mask], each (n, 2, nx, ny, nt)

        Returns
        -------
        5d tensor with the original plus one filled copy per frame_dist,
        concatenated on the channel axis: (n, (len(frame_dist)+1)*2, nx, ny, nt)
        """
        x = inputs[0]
        mask = inputs[1]
        results = [x]
        for (i, t) in enumerate(self.n_samples):
            # NOTE(review): under Python 3 `t / 2` is a float; this code
            # appears to target Python 2 (xrange is used elsewhere in this
            # library), where it floor-divides — confirm before porting.
            dist = (t / 2)
            if self.divide_by_n:
                c = float(t)
            else:
                c = 1.0
            def fn(i, input):
                # Window clipped to [0, nt): sum the surviving frames.
                s = slice(T.maximum(0, (i - dist)), T.minimum(self.nt, ((i + dist) + 1)))
                return input[(..., s)].sum(axis=(- 1))
            (result, _) = theano.scan(fn, non_sequences=x, sequences=np.arange(self.nt))
            (mask_result, _) = theano.scan(fn, non_sequences=mask, sequences=np.arange(self.nt))
            # scan stacks per-frame results on axis 0; move time back last.
            acc = T.transpose(result, axes=(1, 2, 3, 4, 0))
            mask_acc = T.transpose(mask_result, axes=(1, 2, 3, 4, 0))
            # Normalise the windowed sum (never dividing by less than c).
            avg = (acc / T.maximum(c, mask_acc))
            # Keep acquired samples untouched; fill only unsampled entries.
            res = ((avg * (1 - mask)) + (x * mask))
            results.append(res)
        return T.concatenate(results, axis=1)
    def get_output_shape_for(self, input_shapes, **kwargs):
        (n, nc, nx, ny, nt) = input_shapes[0]
        # One extra channel group per frame_dist entry, plus the original.
        nc_new = ((len(self.frame_dist) + 1) * nc)
        return (n, nc_new, nx, ny, nt)
|
class AverageInKspaceLayer(MergeLayer):
    '\n    Average-in-k-space layer\n\n    First transforms the representation in Fourier domain,\n    then performs averaging along temporal axis, then transforms back to image\n    domain. Works only for 5D tensor (see parameter descriptions).\n\n\n    Parameters\n    -----------------------------\n    incomings: two 5d tensors, [kspace_data, mask], each of shape (n, 2, nx, ny, nt)\n\n    data_shape: shape of the incoming tensors: (n, 2, nx, ny, nt) (This is for convenience)\n\n    frame_dist: a list of distances of neighbours to sample for each averaging channel\n    if frame_dist=[1], samples from [-1, 1] for each temporal frames\n    if frame_dist=[3, 5], samples from [-3,-2,...,0,1,...,3] for one,\n    [-5,-4,...,0,1,...,5] for the second one\n\n    divide_by_n: bool - Decides how averaging will be done.\n    True => divide by number of neighbours (=#2*frame_dist+1)\n    False => divide by number of nonzero contributions\n\n    clipped: bool - By default the layer assumes periodic boundary condition along temporal axis.\n    True => Averaging will be clipped at the boundary, no circular references.\n    False => Averages with circular referencing (i.e. at t=0, gets contribution from t=nt-1, so on).\n\n    Returns\n    ------------------------------\n    output: 5d tensor, missing lines of k-space are filled using neighbouring frames.\n    shape becomes (n* (len(frame_dist)), 2, nx, ny, nt)\n    '
    def __init__(self, incomings, data_shape, frame_dist=[1, 3, 5], divide_by_n=False, clipped=False, **kwargs):
        if ('name' not in kwargs):
            kwargs['name'] = 'kspace_averaging_layer'
        super(AverageInKspaceLayer, self).__init__(incomings, **kwargs)
        (data, mask) = incomings
        (n, nc, nx, ny, nt) = data_shape
        # One channel group per frame_dist entry, plus the original data.
        nc_new = ((len(frame_dist) + 1) * 2)
        self.data = data
        self.mask = mask
        self.frame_dist = frame_dist
        self.divide_by_n = divide_by_n
        # Internal sub-graph: FFT -> neighbour fill -> inverse FFT.
        self.dft2 = FFT2Layer(data, data_shape, name='kavg_dft2')
        if clipped:
            # Windows truncated at the temporal boundaries.
            self.kavg = KspaceFillNeighbourLayer_Clipped([self.dft2, mask], nt, frame_dist, divide_by_n, name='kavg_avg')
        else:
            # Windows wrap around circularly along the temporal axis.
            self.kavg = KspaceFillNeighbourLayer([self.dft2, mask], frame_dist, divide_by_n, name='kavg_avg')
        # Fold the extra channel groups into the batch axis for the inverse
        # FFT, then restore them as channels afterwards.
        self.kavg_tmp = lasagne.layers.reshape(self.kavg, ((- 1), 2, nx, ny, nt))
        self.idft2 = FFT2Layer(self.kavg_tmp, data_shape, inv=True, name='kavg_idft2')
        self.out = lasagne.layers.reshape(self.idft2, ((- 1), nc_new, nx, ny, nt))
    def get_output_for(self, inputs, **kwargs):
        x = inputs[0]
        mask = inputs[1]
        # Evaluate the internal sub-graph with the concrete inputs bound to
        # the original incoming layers.
        res = get_output(self.out, {self.data: x, self.mask: mask})
        return res
    def get_output_shape_for(self, input_shapes, **kwargs):
        # Shape logic lives in the wrapped fill layer.
        return self.kavg.get_output_shape_for(input_shapes)
|
class PoolNDLayer(Layer):
    "\n    ND pooling layer\n\n    Performs ND mean or max-pooling over the trailing axes\n    of a ND input tensor.\n\n    Parameters\n    ----------\n    incoming : a :class:`Layer` instance or tuple\n        The layer feeding into this layer, or the expected input shape.\n\n    pool_size : integer or iterable\n        The length of the pooling region in each dimension. If an integer, it\n        is promoted to a square pooling region. If an iterable, it should have\n        n elements.\n\n    stride : integer, iterable or ``None``\n        The strides between sucessive pooling regions in each dimension.\n        If ``None`` then ``stride = pool_size``.\n\n    pad : integer or iterable\n        Number of elements to be added on each side of the input\n        in each dimension. Each value must be less than\n        the corresponding stride.\n\n    ignore_border : bool\n        If ``True``, partial pooling regions will be ignored.\n        Must be ``True`` if ``pad != (0, 0)``.\n\n    mode : {'max', 'average_inc_pad', 'average_exc_pad'}\n        Pooling mode: max-pooling or mean-pooling including/excluding zeros\n        from partially padded pooling regions. Default is 'max'.\n\n    **kwargs\n        Any additional keyword arguments are passed to the :class:`Layer`\n        superclass.\n\n    See Also\n    --------\n    MaxPool2DLayer : Shortcut for max pooling layer.\n\n    Notes\n    -----\n    The value used to pad the input is chosen to be less than\n    the minimum of the input, so that the output of each pooling region\n    always corresponds to some element in the unpadded input region.\n\n    Using ``ignore_border=False`` prevents Theano from using cuDNN for the\n    operation, so it will fall back to a slower implementation.\n    "
    def __init__(self, incoming, n, pool_size, stride=None, pad=0, ignore_border=True, mode='max', **kwargs):
        super(PoolNDLayer, self).__init__(incoming, **kwargs)
        self.n = n
        self.pool_size = lasagne.utils.as_tuple(pool_size, n)
        if (stride is None):
            self.stride = self.pool_size
        else:
            self.stride = lasagne.utils.as_tuple(stride, n)
        self.pad = lasagne.utils.as_tuple(pad, n)
        self.ignore_border = ignore_border
        self.mode = mode
        # Odd n: append a dummy (size-1, no-op) dimension so the axes can be
        # pooled pairwise via pool_2d in get_output_for.
        if ((n % 2) == 1):
            self.pool_size += (1,)
            self.pad += (0,)
            self.stride += (1,)
    def get_output_shape_for(self, input_shape):
        output_shape = list(input_shape)
        # Only the trailing n axes are pooled; leading axes are untouched.
        tr = (len(output_shape) - self.n)
        for i in xrange(self.n):
            output_shape[(tr + i)] = pool.pool_output_length(input_shape[(tr + i)], pool_size=self.pool_size[i], stride=self.stride[i], pad=self.pad[i], ignore_border=self.ignore_border)
        return tuple(output_shape)
    def get_output_for(self, input, **kwargs):
        ' Uses pool_2d to pool each dimension.'
        input_shape = input.shape
        n = self.n
        if ((n % 2) == 1):
            # Pad with a trailing size-1 axis so dimensions pair up evenly.
            n += 1
            input = T.shape_padright(input, 1)
        n_axis = (input.ndim - n)
        # Pool the trailing axes two at a time; pool_2d always acts on the
        # last two axes, and each pass rotates the pooled axes around via
        # the dimshuffle below.
        for i in np.arange(0, n, 2):
            i1 = (((n - 2) + i) % n)
            i2 = (((n - 1) + i) % n)
            input = pool.pool_2d(input, ds=(self.pool_size[i1], self.pool_size[i2]), st=(self.stride[i1], self.stride[i2]), ignore_border=self.ignore_border, padding=(self.pad[i1], self.pad[i2]), mode=self.mode)
            # Keep the leading axes fixed; rotate the trailing n axes by two
            # so the next pair lands on the last two positions.
            fixed = tuple(np.arange(n_axis))
            perm = tuple(((np.arange(2, (n + 2)) % n) + n_axis))
            shuffle = (fixed + perm)
            input = input.dimshuffle(shuffle)
        # Collapse any dummy axis and restore the expected output shape.
        input = input.reshape(self.get_output_shape_for(input_shape))
        return input
|
class Upscale3DLayer(Layer):
    """3D upscaling layer.

    Performs nearest-neighbour (repeat) upscaling over the three trailing
    axes of a 5D input tensor.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or tuple
        The layer feeding into this layer, or the expected input shape.
    scale_factor : integer or iterable
        The scale factor in each of the three dimensions. An integer is
        promoted to a cubic scale region; an iterable should have three
        elements.
    **kwargs
        Any additional keyword arguments are passed to the :class:`Layer`
        superclass.
    """
    def __init__(self, incoming, scale_factor, **kwargs):
        super(Upscale3DLayer, self).__init__(incoming, **kwargs)
        self.scale_factor = lasagne.utils.as_tuple(scale_factor, 3)
        if ((self.scale_factor[0] < 1) or (self.scale_factor[1] < 1) or (self.scale_factor[2] < 1)):
            raise ValueError('Scale factor must be >= 1, not {0}'.format(self.scale_factor))
    def get_output_shape_for(self, input_shape):
        # Axes 2, 3, 4 scale by factors a, b, c respectively; None (unknown)
        # extents stay None.
        output_shape = list(input_shape)
        if (output_shape[2] is not None):
            output_shape[2] *= self.scale_factor[0]
        if (output_shape[3] is not None):
            output_shape[3] *= self.scale_factor[1]
        if (output_shape[4] is not None):
            output_shape[4] *= self.scale_factor[2]
        return tuple(output_shape)
    def get_output_for(self, input, **kwargs):
        (a, b, c) = self.scale_factor
        upscaled = input
        # BUG FIX: axis 4 must be repeated by the THIRD factor `c`; the
        # original repeated it by `b`, breaking anisotropic scaling and
        # contradicting get_output_shape_for.
        if (c > 1):
            upscaled = T.extra_ops.repeat(upscaled, c, 4)
        if (b > 1):
            upscaled = T.extra_ops.repeat(upscaled, b, 3)
        if (a > 1):
            upscaled = T.extra_ops.repeat(upscaled, a, 2)
        return upscaled
|
class IdLayer(Layer):
    """Identity layer: passes its input through unchanged."""

    def get_output_for(self, input, **kwargs):
        return input
|
class SumLayer(Layer):
    """Sums over the trailing axis, dropping that dimension."""

    def get_output_for(self, input, **kwargs):
        return input.sum(axis=-1)

    def get_output_shape_for(self, input_shape):
        # The summed (last) axis disappears from the shape.
        return input_shape[:-1]
|
class SHLULayer(Layer):
    """Elementwise sgn(x) * max(x - 1, 0).

    Note this is the literal formula from the original implementation:
    the inner max uses the raw (not absolute) value, so all inputs below
    1 map to zero.
    """

    def get_output_for(self, input, **kwargs):
        shifted = T.maximum(input - 1, 0)
        return T.sgn(input) * shifted
|
class ResidualLayer(lasagne.layers.ElemwiseSumLayer):
    """Residual (skip) connection: an element-wise sum that also records
    human-readable names for its inputs (useful for graph inspection).
    """
    def __init__(self, incomings, **kwargs):
        ensure_set_name('res', kwargs)
        super(ResidualLayer, self).__init__(incomings, **kwargs)
        # Collect a display name for each incoming layer, falling back to
        # the input variable's name or repr when the layer is unnamed.
        input_names = []
        for l in incomings:
            if isinstance(l, lasagne.layers.InputLayer):
                input_names.append((l.name if l.name else l.input_var.name))
            elif l.name:
                input_names.append(l.name)
            else:
                input_names.append(str(l))
        self.input_names = input_names
    def get_output_for(self, inputs, **kwargs):
        # NOTE(review): super() is invoked with ElemwiseSumLayer (not
        # ResidualLayer) as the first argument, which skips
        # ElemwiseSumLayer's own get_output_for and dispatches to its base
        # class instead — confirm this bypass is intentional.
        return super(lasagne.layers.ElemwiseSumLayer, self).get_output_for(inputs, **kwargs)
|
def cascade_resnet(pr, net, input_layer, n=5, nf=64, b=lasagne.init.Constant, **kwargs):
    """
    Append one residual CNN stage to ``net``: (n-1) convolutions of ``nf``
    filters, a channel-restoring aggregation convolution, and a skip
    connection back to ``input_layer``.

    All created layers are stored in ``net`` under keys prefixed by ``pr``.
    Returns the updated ``net`` and the stage's output layer.
    """
    n_channel = lasagne.layers.get_output_shape(input_layer)[1]
    net[pr + 'conv1'] = l.Conv(input_layer, nf, 3, b=b(), name=pr + 'conv1')
    for i in xrange(2, n):
        prev_name = pr + ('conv%d' % (i - 1))
        curr_name = pr + ('conv%d' % i)
        net[curr_name] = l.Conv(net[prev_name], nf, 3, b=b(), name=curr_name)
    # Aggregate back to the original channel count before the skip.
    last_conv = net[pr + ('conv%d' % (n - 1))]
    aggr = l.ConvAggr(last_conv, n_channel, 3, b=b(), name=pr + 'conv_aggr')
    net[pr + 'conv_aggr'] = aggr
    output_layer = l.ResidualLayer([aggr, input_layer], name=pr + 'res')
    net[pr + 'res'] = output_layer
    return (net, output_layer)
|
def cascade_resnet_3d_avg(pr, net, input_layer, n=5, nf=64, b=lasagne.init.Constant, frame_dist=range(5), **kwargs):
    """
    Append one 3D residual CNN stage preceded by a k-space data-sharing
    (averaging) layer to ``net``.

    Uses 3x3x3 convolutions; the averaging layer divides by the neighbour
    count for every cascade except the first (``kwargs['cascade_i'] == 0``).
    Returns the updated ``net`` and the stage's output layer.
    """
    shape = lasagne.layers.get_output_shape(input_layer)
    n_channel = shape[1]
    # Only cascades after the first normalise the k-space average.
    divide_by_n = kwargs['cascade_i'] != 0
    k = (3, 3, 3)
    kavg = l.AverageInKspaceLayer([input_layer, net['mask']], shape, frame_dist=frame_dist, divide_by_n=divide_by_n, clipped=False)
    net[pr + 'kavg'] = kavg
    net[pr + 'conv1'] = l.Conv3D(kavg, nf, k, b=b(), name=pr + 'conv1')
    for i in xrange(2, n):
        prev_name = pr + ('conv%d' % (i - 1))
        curr_name = pr + ('conv%d' % i)
        net[curr_name] = l.Conv3D(net[prev_name], nf, k, b=b(), name=curr_name)
    # Aggregate back to the original channel count before the skip.
    last_conv = net[pr + ('conv%d' % (n - 1))]
    aggr = l.Conv3DAggr(last_conv, n_channel, k, b=b(), name=pr + 'conv_aggr')
    net[pr + 'conv_aggr'] = aggr
    output_layer = l.ResidualLayer([aggr, input_layer], name=pr + 'res')
    net[pr + 'res'] = output_layer
    return (net, output_layer)
|
def build_cascade_cnn_from_list(shape, net_meta, lmda=None):
    """
    Create an iterative (cascaded) network with more flexibility.

    Parameters
    ----------
    shape : tuple
        Input data shape, forwarded to ``l.get_dc_input_layers``.
    net_meta : list of (callable, int)
        ``[(model1, cascade1_n), (model2, cascade2_n), ..., (modelm, cascadem_n)]``
        Each callable builds one cascade stage; the int is how many times
        that stage is repeated.
    lmda : float or None
        Inverse noise level forwarded to each data-consistency (DC) layer.

    Returns
    -------
    (net, output_layer) : (OrderedDict, Layer)
        The layer dictionary and the final output layer.

    Raises
    ------
    ValueError
        If ``net_meta`` is empty or None.
    """
    if not net_meta:
        # BUG FIX: was a bare `raise` outside any except block, which raises
        # an uninformative "No active exception to re-raise" RuntimeError.
        raise ValueError('net_meta must be a non-empty list of (model, cascade_n) pairs')
    net = OrderedDict()
    (input_layer, kspace_input_layer, mask_layer) = l.get_dc_input_layers(shape)
    net['input'] = input_layer
    net['kspace_input'] = kspace_input_layer
    net['mask'] = mask_layer
    j = 0
    for (cascade_net, cascade_n) in net_meta:
        for i in xrange(cascade_n):
            pr = ('c%d_' % j)
            # Build one cascade stage, then append a data-consistency layer
            # whose output feeds the next stage.
            (net, output_layer) = cascade_net(pr, net, input_layer, **{'cascade_i': j})
            net[(pr + 'dc')] = l.DCLayer([output_layer, net['mask'], net['kspace_input']], shape, inv_noise_level=lmda)
            input_layer = net[(pr + 'dc')]
            j += 1
    output_layer = input_layer
    return (net, output_layer)
|
def build_d2_c2(shape):
    """Build a cascade network of 2 cascades, each a depth-2 resnet stage."""
    def _depth2_stage(pr, net, input_layer, **kwargs):
        # Depth-2 variant of the default 2D resnet cascade.
        return cascade_resnet(pr, net, input_layer, n=2)
    return build_cascade_cnn_from_list(shape, [(_depth2_stage, 2)])
|
def build_d5_c5(shape):
    """Build a cascade network: 5 cascades of the default depth-5 resnet."""
    net_meta = [(cascade_resnet, 5)]
    return build_cascade_cnn_from_list(shape, net_meta)
|
def build_d2_c2_s(shape):
    """Build a small spatio-temporal cascade: 2 cascades of depth-2 3D
    resnets with k-space averaging (16 filters, frame distances 0-1)."""
    def _small_3d_stage(pr, net, input_layer, **kwargs):
        # Reduced-size 3D cascade stage for the '_s' (small) variant.
        return cascade_resnet_3d_avg(pr, net, input_layer, n=2, nf=16, frame_dist=range(2), **kwargs)
    return build_cascade_cnn_from_list(shape, [(_small_3d_stage, 2)])
|
def build_d5_c10_s(shape):
    """Build a spatio-temporal cascade network: 10 cascades of the default
    depth-5 3D resnet with k-space averaging."""
    net_meta = [(cascade_resnet_3d_avg, 10)]
    return build_cascade_cnn_from_list(shape, net_meta)
|
class FFTOp(gof.Op):
    """
    Theano Op: 1D FFT over the second-to-last axis of a complex-valued input
    simulated in R^2, i.e. a real tensor shaped (..., n, 2) whose trailing
    axis holds (real, imag).
    """
    __props__ = ()
    def output_type(self, inp):
        # Same dtype and rank as the input; nothing broadcastable.
        return T.TensorType(inp.dtype, broadcastable=([False] * inp.type.ndim))
    def make_node(self, a, s=None):
        a = T.as_tensor_variable(a)
        if (a.ndim < 3):
            raise TypeError((('%s: input must have dimension >= 3, with ' % self.__class__.__name__) + 'first dimension batches and last real/imag parts'))
        if (s is None):
            # Default transform length: size of the second-to-last axis
            # (the last axis holds the real/imag pair).
            s = a.shape[(- 2)]
            s = T.as_tensor_variable(s)
        else:
            s = T.as_tensor_variable(s)
            if ((not s.dtype.startswith('int')) and (not s.dtype.startswith('uint'))):
                raise TypeError(('%s: length of the transformed axis must be of type integer' % self.__class__.__name__))
        return gof.Apply(self, [a, s], [self.output_type(a)()])
    def perform(self, node, inputs, output_storage):
        a = inputs[0]
        s = inputs[1]
        # Rebuild complex values from the stacked (real, imag) halves.
        a_in = (a[(..., 0)] + (1j * a[(..., 1)]))
        A = np.fft.fft(a_in)
        # Split the complex result back into a trailing real/imag axis.
        out = np.zeros((A.shape + (2,)), dtype=a.dtype)
        (out[(..., 0)], out[(..., 1)]) = (np.real(A), np.imag(A))
        output_storage[0][0] = out
    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Gradient of the (unnormalized) FFT is the unnormalized IFFT of the
        # output gradient; the length input `s` carries no gradient.
        return [ifft_op(gout, s), DisconnectedType()()]
    def connection_pattern(self, node):
        # Output depends only on the data input, not on the length input.
        return [[True], [False]]
|
class IFFTOp(gof.Op):
    """
    Theano Op: 1D inverse FFT over the second-to-last axis of a
    complex-valued input simulated in R^2 (real tensor shaped (..., n, 2)).
    The result is left UNNORMALIZED: numpy's 1/n factor is undone by
    multiplying by `s` in perform().
    """
    __props__ = ()
    def output_type(self, inp):
        # Same dtype and rank as the input.
        return T.TensorType(inp.dtype, broadcastable=([False] * inp.type.ndim))
    def make_node(self, a, s=None):
        a = T.as_tensor_variable(a)
        if (a.ndim < 3):
            raise TypeError((('%s: input must have dimension >= 3, with ' % self.__class__.__name__) + 'first dimension batches and last real/imag parts'))
        if (s is None):
            # Default transform length: size of the second-to-last axis.
            s = a.shape[(- 2)]
            s = T.as_tensor_variable(s)
        else:
            s = T.as_tensor_variable(s)
            if ((not s.dtype.startswith('int')) and (not s.dtype.startswith('uint'))):
                raise TypeError(('%s: length of the transformed axis must be of type integer' % self.__class__.__name__))
        return gof.Apply(self, [a, s], [self.output_type(a)()])
    def perform(self, node, inputs, output_storage):
        a = inputs[0]
        s = inputs[1]
        # Rebuild complex values from the stacked (real, imag) halves.
        inp = (a[(..., 0)] + (1j * a[(..., 1)]))
        A = np.fft.ifft(inp)
        out = np.zeros((A.shape + (2,)), dtype=a.dtype)
        (out[(..., 0)], out[(..., 1)]) = (np.real(A), np.imag(A))
        # Multiply by s to cancel numpy's 1/n normalization.
        output_storage[0][0] = (out * s).astype(a.dtype)
    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Gradient of the unnormalized IFFT is the forward FFT.
        gf = fft_op(gout, s)
        return [gf, DisconnectedType()()]
    def connection_pattern(self, node):
        # Output depends only on the data input, not on the length input.
        return [[True], [False]]
|
def fft(inp, norm=None):
    """
    Fast Fourier transform of a complex-valued input simulated in R^2.

    ``inp`` is a real tensor of shape (m, ..., n, 2) whose trailing axis
    holds the real and imaginary parts; the transform is taken over the
    axis of size n. The output has the same shape.

    Parameters
    ----------
    inp
        Array of floats of size (m, ..., n, 2).
    norm : {None, 'ortho', 'no_norm'}
        Normalization of the transform. Following numpy, default *None*
        normalizes only the inverse transform by n, 'ortho' yields the
        unitary transform (1/sqrt(n) forward and inverse), and 'no_norm'
        leaves the transform unnormalized.
    """
    n = inp.shape[-2]
    # Validate the norm mode before building the graph.
    mode = _unitary(norm)
    divisor = 1
    if mode == 'ortho':
        divisor = T.sqrt(n.astype(inp.dtype))
    return fft_op(inp, n) / divisor
|
def ifft(inp, norm=None):
    """
    Inverse fast Fourier transform with complex-valued input simulated in R^2.

    ``inp`` is a tensor of shape (m, ..., n, 2) with real and imaginary
    parts on the trailing axis; the inverse transform is taken over the
    axis of size n. The output has the same shape.

    Parameters
    ----------
    inp
        Array of size (m, ..., n, 2), containing m inputs.
    norm : {None, 'ortho', 'no_norm'}
        Normalization of the transform. Following numpy, default *None*
        normalizes only the inverse transform by n, 'ortho' yields the
        unitary transform (1/sqrt(n) forward and inverse), and 'no_norm'
        leaves the transform unnormalized.
    """
    n = inp.shape[-2]
    mode = _unitary(norm)
    if mode is None:
        # Default numpy convention: inverse is scaled by 1/n.
        divisor = n.astype(inp.dtype)
    elif mode == 'ortho':
        divisor = T.sqrt(n.astype(inp.dtype))
    else:
        divisor = 1
    return ifft_op(inp, n) / divisor
|
def _unitary(norm):
if (norm not in (None, 'ortho', 'no_norm')):
raise ValueError(("Invalid value %s for norm, must be None, 'ortho' or 'no norm'" % norm))
return norm
|
class FFT2Op(gof.Op):
    """
    Theano Op: 2D FFT over the (Nx, Ny) axes of a complex-valued input
    simulated in R^2, i.e. a real tensor shaped (..., Nx, Ny, 2) whose
    trailing axis holds (real, imag).
    """
    __props__ = ()
    def output_type(self, inp):
        # Same dtype and rank as the input.
        return T.TensorType(inp.dtype, broadcastable=([False] * inp.type.ndim))
    def make_node(self, a, s=None):
        a = T.as_tensor_variable(a)
        if (a.ndim < 4):
            raise TypeError((('%s: input must have dimension >= 4, with ' % self.__class__.__name__) + 'first dimension batches, then last axes are (Nx, Ny, 2)'))
        if (s is None):
            # Default transform sizes: the two axes before the real/imag axis.
            s = a.shape[(- 3):(- 1)]
            s = T.as_tensor_variable(s)
        else:
            s = T.as_tensor_variable(s)
            if ((not s.dtype.startswith('int')) and (not s.dtype.startswith('uint'))):
                raise TypeError(('%s: length of the transformed axis must be of type integer' % self.__class__.__name__))
        return gof.Apply(self, [a, s], [self.output_type(a)()])
    def perform(self, node, inputs, output_storage):
        a = inputs[0]
        s = inputs[1]
        # Rebuild complex values from the stacked (real, imag) halves.
        a_in = (a[(..., 0)] + (1j * a[(..., 1)]))
        A = np.fft.fft2(a_in)
        out = np.zeros((A.shape + (2,)), dtype=a.dtype)
        (out[(..., 0)], out[(..., 1)]) = (np.real(A), np.imag(A))
        output_storage[0][0] = out
    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Gradient of the unnormalized FFT2 is the unnormalized IFFT2.
        return [ifft2_op(gout, s), DisconnectedType()()]
    def connection_pattern(self, node):
        # Output depends only on the data input, not on the size input.
        return [[True], [False]]
|
class IFFT2Op(gof.Op):
    """
    Theano Op: 2D inverse FFT over the (Nx, Ny) axes of a complex-valued
    input simulated in R^2 (real tensor shaped (..., Nx, Ny, 2)). The result
    is left UNNORMALIZED: numpy's 1/(Nx*Ny) factor is undone by multiplying
    by ``s.prod()`` in perform().
    """
    __props__ = ()
    def output_type(self, inp):
        # Same dtype and rank as the input.
        return T.TensorType(inp.dtype, broadcastable=([False] * inp.type.ndim))
    def make_node(self, a, s=None):
        a = T.as_tensor_variable(a)
        if (a.ndim < 4):
            raise TypeError((('%s: input must have dimension >= 4, with ' % self.__class__.__name__) + 'first dimension batches, then last axes are (Nx, Ny, 2)'))
        if (s is None):
            # Default transform sizes: the two axes before the real/imag axis.
            s = a.shape[(- 3):(- 1)]
            s = T.as_tensor_variable(s)
        else:
            s = T.as_tensor_variable(s)
            if ((not s.dtype.startswith('int')) and (not s.dtype.startswith('uint'))):
                raise TypeError(('%s: length of the transformed axis must be of type integer' % self.__class__.__name__))
        return gof.Apply(self, [a, s], [self.output_type(a)()])
    def perform(self, node, inputs, output_storage):
        a = inputs[0]
        s = inputs[1]
        # Rebuild complex values from the stacked (real, imag) halves.
        inp = (a[(..., 0)] + (1j * a[(..., 1)]))
        A = np.fft.ifft2(inp)
        out = np.zeros((A.shape + (2,)), dtype=a.dtype)
        (out[(..., 0)], out[(..., 1)]) = (np.real(A), np.imag(A))
        # Multiply by prod(s) to cancel numpy's 1/(Nx*Ny) normalization.
        output_storage[0][0] = (out * s.prod()).astype(a.dtype)
    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Gradient of the unnormalized IFFT2 is the forward FFT2.
        gf = fft2_op(gout, s)
        return [gf, DisconnectedType()()]
    def connection_pattern(self, node):
        # Output depends only on the data input, not on the size input.
        return [[True], [False]]
|
def fft2(inp, norm=None):
    """
    2D fast Fourier transform of a complex-valued input simulated in R^2.

    ``inp`` is a real tensor of shape (m, ..., Nx, Ny, 2) whose trailing
    axis holds the real and imaginary parts; the transform covers the two
    axes before it. The output has the same shape.

    Parameters
    ----------
    inp
        Array of floats of size (m, ..., Nx, Ny, 2).
    norm : {None, 'ortho', 'no_norm'}
        Normalization of the transform. Following numpy, default *None*
        normalizes only the inverse transform, 'ortho' yields the unitary
        transform, and 'no_norm' leaves the transform unnormalized.
    """
    sizes = inp.shape[-3:-1]
    mode = _unitary(norm)
    divisor = 1
    if mode == 'ortho':
        divisor = T.sqrt(sizes.prod().astype(inp.dtype))
    return fft2_op(inp, sizes) / divisor
|
def ifft2(inp, norm=None):
    """
    2D inverse fast Fourier transform with complex-valued input simulated in R^2.

    ``inp`` is a tensor of shape (m, ..., Nx, Ny, 2) with real and imaginary
    parts on the trailing axis; the inverse transform covers the two axes
    before it. The output has the same shape.

    Parameters
    ----------
    inp
        Array of size (m, ..., Nx, Ny, 2), containing m inputs.
    norm : {None, 'ortho', 'no_norm'}
        Normalization of the transform. Following numpy, default *None*
        normalizes only the inverse transform, 'ortho' yields the unitary
        transform, and 'no_norm' leaves the transform unnormalized.
    """
    sizes = inp.shape[-3:-1]
    mode = _unitary(norm)
    if mode is None:
        # Default numpy convention: inverse is scaled by 1/(Nx*Ny).
        divisor = sizes.prod().astype(inp.dtype)
    elif mode == 'ortho':
        divisor = T.sqrt(sizes.prod().astype(inp.dtype))
    else:
        divisor = 1
    return ifft2_op(inp, sizes) / divisor
|
def _unitary(norm):
if (norm not in (None, 'ortho', 'no_norm')):
raise ValueError(("Invalid value %s for norm, must be None, 'ortho' or 'no norm'" % norm))
return norm
|
class FFTSHIFTOp(gof.Op):
    """
    Theano Op wrapping np.fft.fftshift. Since fftshift is a pure index
    permutation, its gradient is implemented as ifftshift.
    """
    __props__ = ()
    def output_type(self, inp):
        # Same dtype and rank as the input.
        return T.TensorType(inp.dtype, broadcastable=([False] * inp.type.ndim))
    def make_node(self, x, axes=None):
        x = T.as_tensor_variable(x)
        if (x.ndim < 2):
            raise TypeError((('%s: input must have dimension >= 2. For example,' % self.__class__.__name__) + '(n_batches, 2, nx, ny[, nt])'))
        if (axes is None):
            # Default: shift every axis.
            axes = list(range(x.ndim))
        elif isinstance(axes, int):
            axes = (axes,)
        axes = T.as_tensor_variable(axes)
        if ((not axes.dtype.startswith('int')) and (not axes.dtype.startswith('uint'))):
            raise TypeError(('%s: length of the transformed axis must be of type integer' % self.__class__.__name__))
        return gof.Apply(self, [x, axes], [self.output_type(x)()])
    def perform(self, node, inputs, output_storage):
        x = inputs[0]
        axes = inputs[1]
        out = np.fft.fftshift(x, axes)
        output_storage[0][0] = out
    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # fftshift is a permutation; its adjoint (and inverse) is ifftshift.
        return [ifftshift_op(gout, s), DisconnectedType()()]
    def connection_pattern(self, node):
        # Output depends only on the data input, not on the axes input.
        return [[True], [False]]
|
class IFFTSHIFTOp(gof.Op):
    """
    Theano Op wrapping np.fft.ifftshift. Since ifftshift is a pure index
    permutation, its gradient is implemented as fftshift.
    """
    __props__ = ()
    def output_type(self, inp):
        # Same dtype and rank as the input.
        return T.TensorType(inp.dtype, broadcastable=([False] * inp.type.ndim))
    def make_node(self, x, axes=None):
        x = T.as_tensor_variable(x)
        if (x.ndim < 2):
            raise TypeError((('%s: input must have dimension >= 2. For example' % self.__class__.__name__) + '(n_batches, 2, nx, ny[, nt])'))
        if (axes is None):
            # Default: shift every axis.
            axes = list(range(x.ndim))
        elif isinstance(axes, int):
            axes = (axes,)
        axes = T.as_tensor_variable(axes)
        if ((not axes.dtype.startswith('int')) and (not axes.dtype.startswith('uint'))):
            raise TypeError(('%s: length of the transformed axis must be of type integer' % self.__class__.__name__))
        return gof.Apply(self, [x, axes], [self.output_type(x)()])
    def perform(self, node, inputs, output_storage):
        x = inputs[0]
        axes = inputs[1]
        out = np.fft.ifftshift(x, axes)
        output_storage[0][0] = out
    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # ifftshift is a permutation; its adjoint (and inverse) is fftshift.
        return [fftshift_op(gout, s), DisconnectedType()()]
    def connection_pattern(self, node):
        # Output depends only on the data input, not on the axes input.
        return [[True], [False]]
|
def fftshift(x, axes=None):
    """
    Symbolic np.fft.fftshift; the gradient is implemented as ifftshift.

    Parameters
    ----------
    x : array_like
        Input array.
    axes : int or shape tuple, optional
        Axes over which to shift. Defaults to None, which shifts all axes.

    Returns
    -------
    y : ndarray
        The shifted array.
    """
    shifted = fftshift_op(x, axes)
    return shifted
|
def ifftshift(x, axes=None):
    """
    Symbolic np.fft.ifftshift; the gradient is implemented as fftshift.

    Parameters
    ----------
    x : array_like
        Input array.
    axes : int or shape tuple, optional
        Axes over which to shift. Defaults to None, which shifts all axes.

    Returns
    -------
    y : ndarray
        The shifted array.
    """
    shifted = ifftshift_op(x, axes)
    return shifted
|
class CuFFTOp(Op):
    """
    GPU Op (cuFFT via scikit-cuda): complex-to-complex FFT of a float32
    input whose first axis is the batch and whose last axis of size 2
    holds (real, imag). The transform is unnormalized.
    """
    __props__ = ()
    def output_type(self, inp):
        # GPU array with the same dtype, rank and context as the input.
        return GpuArrayType(inp.dtype, broadcastable=([False] * inp.type.ndim), context_name=inp.type.context_name)
    def make_node(self, inp, s=None):
        if (not scikits_cuda_available):
            raise RuntimeError('skcuda is needed for CuFFTOp')
        if (not pygpu_available):
            raise RuntimeError('pygpu is needed for CuFFTOp')
        if (not pycuda_available):
            raise RuntimeError('pycuda is needed for CuFFTOp')
        inp = basic_ops.gpu_contiguous(basic_ops.as_gpuarray_variable(inp, basic_ops.infer_context_name(inp)))
        if (s is None):
            # Transform sizes: all axes between batch (first) and real/imag (last).
            s = inp.shape[1:(- 1)]
        s = T.as_tensor_variable(s)
        assert (inp.dtype == 'float32')
        assert (s.ndim == 1)
        assert ('int' in s.dtype)
        return theano.Apply(self, [inp, s], [self.output_type(inp)()])
    def make_thunk(self, node, storage_map, _, _2, impl=None):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]
        with node.inputs[0].type.context:
            skcuda.misc.init()
        # Cache the cuFFT plan; it is rebuilt only when the input shape changes.
        plan_input_shape = [None]
        plan = [None]
        def thunk():
            input_shape = inputs[0][0].shape
            s = inputs[1][0]
            # NOTE(review): shape is compared against s[:-1], not all of s —
            # confirm with callers which axes are meant to be transformed.
            assert (input_shape[1:(- 1)] == s[:(- 1)]).all()
            output_shape = input_shape
            z = outputs[0]
            # Reuse the output buffer when its shape still matches.
            if ((z[0] is None) or (z[0].shape != output_shape)):
                z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context, dtype='float32')
            input_pycuda = inputs[0][0]
            output_pycuda = z[0]
            with input_pycuda.context:
                if ((plan[0] is None) or (plan_input_shape[0] != input_shape)):
                    plan_input_shape[0] = input_shape
                    plan[0] = fft.Plan(s, np.complex64, np.complex64, batch=input_shape[0])
                # Ensure pending GPU work on the buffers has finished.
                input_pycuda.sync()
                output_pycuda.sync()
                fft.fft(input_pycuda, output_pycuda, plan[0])
            pycuda.driver.Context.synchronize()
        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False
        return thunk
    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Gradient of the unnormalized FFT is the unnormalized IFFT.
        return [cuifft_op(gout, s), DisconnectedType()()]
    def connection_pattern(self, node):
        # Output depends only on the data input, not on the size input.
        return [[True], [False]]
|
class CuIFFTOp(Op):
    """
    GPU Op (cuFFT via scikit-cuda): complex-to-complex inverse FFT of a
    float32 input whose first axis is the batch and whose last axis of
    size 2 holds (real, imag). The result is left unnormalized (cuFFT
    applies no 1/n factor).
    """
    __props__ = ()
    def output_type(self, inp):
        # GPU array with the same dtype, rank and context as the input.
        return GpuArrayType(inp.dtype, broadcastable=([False] * inp.type.ndim), context_name=inp.type.context_name)
    def make_node(self, inp, s=None):
        if (not scikits_cuda_available):
            raise RuntimeError('skcuda is needed for CuIFFTOp')
        if (not pygpu_available):
            raise RuntimeError('pygpu is needed for CuIFFTOp')
        if (not pycuda_available):
            raise RuntimeError('pycuda is needed for CuIFFTOp')
        inp = basic_ops.gpu_contiguous(basic_ops.as_gpuarray_variable(inp, basic_ops.infer_context_name(inp)))
        if (s is None):
            # Transform sizes: all axes between batch (first) and real/imag (last).
            s = inp.shape[1:(- 1)]
        s = T.as_tensor_variable(s)
        assert (inp.dtype == 'float32')
        assert (s.ndim == 1)
        return theano.Apply(self, [inp, s], [self.output_type(inp)()])
    def make_thunk(self, node, storage_map, _, _2, impl=None):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]
        with node.inputs[0].type.context:
            skcuda.misc.init()
        # Cache the cuFFT plan; it is rebuilt only when the input shape changes.
        plan_input_shape = [None]
        plan = [None]
        def thunk():
            input_shape = inputs[0][0].shape
            s = inputs[1][0]
            output_shape = input_shape
            z = outputs[0]
            # Reuse the output buffer when its shape still matches.
            if ((z[0] is None) or (z[0].shape != output_shape)):
                z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context, dtype='float32')
            input_pycuda = inputs[0][0]
            output_pycuda = z[0]
            with input_pycuda.context:
                if ((plan[0] is None) or (plan_input_shape[0] != input_shape)):
                    plan_input_shape[0] = input_shape
                    plan[0] = fft.Plan(s, np.complex64, np.complex64, batch=output_shape[0])
                # Ensure pending GPU work on the buffers has finished.
                input_pycuda.sync()
                output_pycuda.sync()
                fft.ifft(input_pycuda, output_pycuda, plan[0])
            pycuda.driver.Context.synchronize()
        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False
        return thunk
    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Gradient of the unnormalized IFFT is the forward FFT.
        return [cufft_op(gout, s), DisconnectedType()()]
    def connection_pattern(self, node):
        # Output depends only on the data input, not on the size input.
        return [[True], [False]]
|
def cufft(inp, norm=None):
    """
    Fast Fourier transform of a complex-valued input (simulated in R^2)
    on the GPU.

    ``inp`` is a real-valued float32 tensor of shape (m, ..., n, 2) whose
    trailing axis holds the real and imaginary parts; m batches are
    transformed. The output is a GpuArray of the same shape.

    Parameters
    ----------
    inp
        Array of real-valued float32 of size (m, ..., n, 2).
    norm : {None, 'ortho', 'no_norm'}
        Normalization of the transform. Following numpy, default *None*
        normalizes only the inverse transform by n, 'ortho' yields the
        unitary transform (1/sqrt(n) forward and inverse), and 'no_norm'
        leaves the transform unnormalized.
    """
    sizes = inp.shape[1:-1]
    mode = _unitary(norm)
    divisor = 1
    if mode == 'ortho':
        divisor = T.sqrt(sizes.prod().astype('float32'))
    return cufft_op(inp, sizes) / divisor
|
def cuifft(inp, norm=None, is_odd=False):
    """
    Inverse fast Fourier transform with complex-valued input (simulated in
    R^2) on the GPU.

    ``inp`` is a float32 tensor of shape (m, ..., n, 2) with real and
    imaginary parts on the trailing axis. The output is a GpuArray of the
    same shape giving the inverse FFTs.

    Parameters
    ----------
    inp
        Array of float32 of size (m, ..., n, 2), containing m inputs.
    norm : {None, 'ortho', 'no_norm'}
        Normalization of the transform. Following numpy, default *None*
        normalizes only the inverse transform by n, 'ortho' yields the
        unitary transform (1/sqrt(n) forward and inverse), and 'no_norm'
        leaves the transform unnormalized.
    is_odd : {True, False}
        Set to True to get a real inverse transform output with an odd last
        dimension of length (N-1)*2 + 1 for an input last dimension of
        length N.

    Raises
    ------
    ValueError
        If ``is_odd`` is not a boolean.
    """
    if (is_odd not in (True, False)):
        # BUG FIX: message previously misspelled the parameter as 'id_odd'.
        raise ValueError(('Invalid value %s for is_odd, must be True or False' % is_odd))
    s = inp.shape[1:(- 1)]
    cond_norm = _unitary(norm)
    scaling = 1
    if (cond_norm is None):
        scaling = s.prod().astype('float32')
    elif (cond_norm == 'ortho'):
        scaling = T.sqrt(s.prod().astype('float32'))
    # NOTE(review): `is_odd` is validated but otherwise unused here —
    # confirm whether odd-length handling was intended.
    return (cuifft_op(inp, s) / scaling)
|
def _unitary(norm):
if (norm not in (None, 'ortho', 'no_norm')):
raise ValueError(("Invalid value %s for norm, must be None, 'ortho' or 'no norm'" % norm))
return norm
|
class CuFFT2Op(Op):
    """
    GPU Op (cuFFT via scikit-cuda): complex-to-complex 2D FFT of a float32
    input shaped (..., Nx, Ny, 2), real/imag stacked on the last axis. All
    leading axes are folded into the cuFFT batch. The transform is
    unnormalized.
    """
    __props__ = ()
    def output_type(self, inp):
        # GPU array with the same dtype, rank and context as the input.
        return GpuArrayType(inp.dtype, broadcastable=([False] * inp.type.ndim), context_name=inp.type.context_name)
    def make_node(self, inp, s=None):
        # BUG FIX: the dependency error messages previously named CuFFTOp
        # instead of this class.
        if (not scikits_cuda_available):
            raise RuntimeError('skcuda is needed for CuFFT2Op')
        if (not pygpu_available):
            raise RuntimeError('pygpu is needed for CuFFT2Op')
        if (not pycuda_available):
            raise RuntimeError('pycuda is needed for CuFFT2Op')
        inp = basic_ops.gpu_contiguous(basic_ops.as_gpuarray_variable(inp, basic_ops.infer_context_name(inp)))
        if (s is None):
            # The two transformed axes sit just before the real/imag axis.
            s = inp.shape[(- 3):(- 1)]
        s = T.as_tensor_variable(s)
        assert (inp.dtype == 'float32')
        assert (s.ndim == 1)
        assert ('int' in s.dtype)
        return theano.Apply(self, [inp, s], [self.output_type(inp)()])
    def make_thunk(self, node, storage_map, _, _2, impl=None):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]
        with node.inputs[0].type.context:
            skcuda.misc.init()
        # Cache the cuFFT plan; it is rebuilt only when the input shape changes.
        plan_input_shape = [None]
        plan = [None]
        def thunk():
            input_shape = inputs[0][0].shape
            s = inputs[1][0]
            assert (input_shape[(- 3):(- 1)] == s).all()
            output_shape = input_shape
            z = outputs[0]
            # Reuse the output buffer when its shape still matches.
            if ((z[0] is None) or (z[0].shape != output_shape)):
                z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context, dtype='float32')
            input_pycuda = inputs[0][0]
            output_pycuda = z[0]
            with input_pycuda.context:
                if ((plan[0] is None) or (plan_input_shape[0] != input_shape)):
                    plan_input_shape[0] = input_shape
                    # Fold every leading axis into the cuFFT batch dimension.
                    plan[0] = fft.Plan(s, np.complex64, np.complex64, batch=np.prod(input_shape[:(- 3)]))
                # Ensure pending GPU work on the buffers has finished.
                input_pycuda.sync()
                output_pycuda.sync()
                fft.fft(input_pycuda, output_pycuda, plan[0])
            pycuda.driver.Context.synchronize()
        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False
        return thunk
    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Gradient of the unnormalized FFT2 is the unnormalized IFFT2.
        return [cuifft2_op(gout, s), DisconnectedType()()]
    def connection_pattern(self, node):
        # Output depends only on the data input, not on the size input.
        return [[True], [False]]
|
class CuIFFT2Op(Op):
    """
    GPU Op (cuFFT via scikit-cuda): complex-to-complex 2D inverse FFT of a
    float32 input shaped (..., Nx, Ny, 2), real/imag stacked on the last
    axis. All leading axes are folded into the cuFFT batch. The result is
    left unnormalized (cuFFT applies no 1/(Nx*Ny) factor).
    """
    __props__ = ()
    def output_type(self, inp):
        # GPU array with the same dtype, rank and context as the input.
        return GpuArrayType(inp.dtype, broadcastable=([False] * inp.type.ndim), context_name=inp.type.context_name)
    def make_node(self, inp, s=None):
        # BUG FIX: the dependency error messages previously named CuIFFTOp
        # instead of this class.
        if (not scikits_cuda_available):
            raise RuntimeError('skcuda is needed for CuIFFT2Op')
        if (not pygpu_available):
            raise RuntimeError('pygpu is needed for CuIFFT2Op')
        if (not pycuda_available):
            raise RuntimeError('pycuda is needed for CuIFFT2Op')
        inp = basic_ops.gpu_contiguous(basic_ops.as_gpuarray_variable(inp, basic_ops.infer_context_name(inp)))
        if (s is None):
            # The two transformed axes sit just before the real/imag axis.
            s = inp.shape[(- 3):(- 1)]
        s = T.as_tensor_variable(s)
        assert (inp.dtype == 'float32')
        assert (s.ndim == 1)
        return theano.Apply(self, [inp, s], [self.output_type(inp)()])
    def make_thunk(self, node, storage_map, _, _2, impl=None):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]
        with node.inputs[0].type.context:
            skcuda.misc.init()
        # Cache the cuFFT plan; it is rebuilt only when the input shape changes.
        plan_input_shape = [None]
        plan = [None]
        def thunk():
            input_shape = inputs[0][0].shape
            s = inputs[1][0]
            output_shape = input_shape
            z = outputs[0]
            # Reuse the output buffer when its shape still matches.
            if ((z[0] is None) or (z[0].shape != output_shape)):
                z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context, dtype='float32')
            input_pycuda = inputs[0][0]
            output_pycuda = z[0]
            with input_pycuda.context:
                if ((plan[0] is None) or (plan_input_shape[0] != input_shape)):
                    plan_input_shape[0] = input_shape
                    # Fold every leading axis into the cuFFT batch dimension.
                    plan[0] = fft.Plan(s, np.complex64, np.complex64, batch=np.prod(input_shape[:(- 3)]))
                # Ensure pending GPU work on the buffers has finished.
                input_pycuda.sync()
                output_pycuda.sync()
                fft.ifft(input_pycuda, output_pycuda, plan[0])
            pycuda.driver.Context.synchronize()
        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False
        return thunk
    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Gradient of the unnormalized IFFT2 is the forward FFT2.
        return [cufft2_op(gout, s), DisconnectedType()()]
    def connection_pattern(self, node):
        # Output depends only on the data input, not on the size input.
        return [[True], [False]]
|
def cufft2(inp, norm=None):
    """
    2D fast Fourier transform of a simulated complex-valued input on the GPU.

    ``inp`` is a real-valued float32 tensor of shape (m, ..., nx, ny, 2);
    2D FFTs of size (nx, ny) are performed over m batches. The output is a
    GpuArray of the same shape.

    Parameters
    ----------
    inp
        Array of real-valued float32 of size (m, ..., nx, ny, 2).
    norm : {None, 'ortho', 'no_norm'}
        Normalization of the transform. Following numpy, default *None*
        normalizes only the inverse transform, 'ortho' yields the unitary
        transform, and 'no_norm' leaves the transform unnormalized.
    """
    print('... using GPU implementation for fft2')
    sizes = inp.shape[-3:-1]
    mode = _unitary(norm)
    divisor = 1
    if mode == 'ortho':
        divisor = T.sqrt(sizes.prod().astype('float32'))
    return cufft2_op(inp, sizes) / divisor
|
def cuifft2(inp, norm=None):
    """
    2D inverse fast Fourier transform of a simulated complex-valued input
    on the GPU.

    ``inp`` is a real-valued float32 tensor of shape (m, ..., nx, ny, 2);
    2D IFFTs of size (nx, ny) are performed over m batches. The output is a
    GpuArray of the same shape.

    Parameters
    ----------
    inp
        Array of real-valued float32 of size (m, ..., nx, ny, 2).
    norm : {None, 'ortho', 'no_norm'}
        Normalization of the transform. Following numpy, default *None*
        normalizes only the inverse transform, 'ortho' yields the unitary
        transform, and 'no_norm' leaves the transform unnormalized.
    """
    print('... using GPU implementation for ifft2')
    sizes = inp.shape[-3:-1]
    mode = _unitary(norm)
    if mode is None:
        # Default numpy convention: inverse is scaled by 1/(nx*ny).
        divisor = sizes.prod().astype('float32')
    elif mode == 'ortho':
        divisor = T.sqrt(sizes.prod().astype('float32'))
    else:
        divisor = 1
    return cuifft2_op(inp, sizes) / divisor
|
def _unitary(norm):
if (norm not in (None, 'ortho', 'no_norm')):
raise ValueError(("Invalid value %s for norm, must be None, 'ortho' or 'no norm'" % norm))
return norm
|
class CuRFFTOp(Op):
    """
    GPU Op (cuFFT via scikit-cuda): real-to-complex FFT of a float32 input
    shaped (batch, ...). The output gains a trailing axis of size 2 holding
    (real, imag), and its last transformed axis shrinks to n//2+1
    (Hermitian symmetry). The transform is unnormalized.
    """
    __props__ = ()
    def output_type(self, inp):
        # Rank grows by one: the appended real/imag axis.
        return GpuArrayType(inp.dtype, broadcastable=([False] * (inp.type.ndim + 1)), context_name=inp.type.context_name)
    def make_node(self, inp, s=None):
        # BUG FIX: the dependency error messages previously named CuFFTOp
        # instead of this class.
        if (not scikits_cuda_available):
            raise RuntimeError('skcuda is needed for CuRFFTOp')
        if (not pygpu_available):
            raise RuntimeError('pygpu is needed for CuRFFTOp')
        if (not pycuda_available):
            raise RuntimeError('pycuda is needed for CuRFFTOp')
        inp = basic_ops.gpu_contiguous(basic_ops.as_gpuarray_variable(inp, basic_ops.infer_context_name(inp)))
        if (s is None):
            # Transform over every non-batch axis.
            s = inp.shape[1:]
        s = T.as_tensor_variable(s)
        assert (inp.dtype == 'float32')
        assert (s.ndim == 1)
        assert ('int' in s.dtype)
        return theano.Apply(self, [inp, s], [self.output_type(inp)()])
    def make_thunk(self, node, storage_map, _, _2, impl=None):
        inputs = [storage_map[v] for v in node.inputs]
        outputs = [storage_map[v] for v in node.outputs]
        with node.inputs[0].type.context:
            skcuda.misc.init()
        # Cache the cuFFT plan; it is rebuilt only when the input shape changes.
        plan_input_shape = [None]
        plan = [None]
        def thunk():
            input_shape = inputs[0][0].shape
            s = inputs[1][0]
            assert (input_shape[1:] == s).all()
            # R2C output: last transformed axis shrinks to n//2+1, then a
            # trailing real/imag axis of size 2 is appended.
            output_shape = ([input_shape[0]] + list(s))
            output_shape[(- 1)] = ((output_shape[(- 1)] // 2) + 1)
            output_shape += [2]
            output_shape = tuple(output_shape)
            z = outputs[0]
            # Reuse the output buffer when its shape still matches.
            if ((z[0] is None) or (z[0].shape != output_shape)):
                z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context, dtype='float32')
            input_pycuda = inputs[0][0]
            output_pycuda = z[0]
            with input_pycuda.context:
                if ((plan[0] is None) or (plan_input_shape[0] != input_shape)):
                    plan_input_shape[0] = input_shape
                    plan[0] = fft.Plan(s, np.float32, np.complex64, batch=input_shape[0])
                # Ensure pending GPU work on the buffers has finished.
                input_pycuda.sync()
                output_pycuda.sync()
                fft.fft(input_pycuda, output_pycuda, plan[0])
            pycuda.driver.Context.synchronize()
        thunk.inputs = inputs
        thunk.outputs = outputs
        thunk.lazy = False
        return thunk
    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Halve the interior (doubly-counted Hermitian) frequency bins of the
        # output gradient before the inverse real transform.
        idx = ((([slice(None)] * (gout.ndim - 2)) + [slice(1, ((s[(- 1)] // 2) + (s[(- 1)] % 2)))]) + [slice(None)])
        gout = T.set_subtensor(gout[idx], (gout[idx] * 0.5))
        return [cuirfft_op(gout, s), DisconnectedType()()]
    def connection_pattern(self, node):
        # Output depends only on the data input, not on the size input.
        return [[True], [False]]
|
class CuIRFFTOp(Op):
__props__ = ()
def output_type(self, inp):
return GpuArrayType(inp.dtype, broadcastable=([False] * (inp.type.ndim - 1)), context_name=inp.type.context_name)
def make_node(self, inp, s=None):
if (not scikits_cuda_available):
raise RuntimeError('skcuda is needed for CuIFFTOp')
if (not pygpu_available):
raise RuntimeError('pygpu is needed for CuIFFTOp')
if (not pycuda_available):
raise RuntimeError('pycuda is needed for CuIFFTOp')
inp = basic_ops.gpu_contiguous(basic_ops.as_gpuarray_variable(inp, basic_ops.infer_context_name(inp)))
if (s is None):
s = inp.shape[1:(- 1)]
s = T.set_subtensor(s[(- 1)], ((s[(- 1)] - 1) * 2))
s = T.as_tensor_variable(s)
assert (inp.dtype == 'float32')
assert (s.ndim == 1)
return theano.Apply(self, [inp, s], [self.output_type(inp)()])
def make_thunk(self, node, storage_map, _, _2, impl=None):
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
with node.inputs[0].type.context:
skcuda.misc.init()
plan_input_shape = [None]
plan = [None]
def thunk():
input_shape = inputs[0][0].shape
s = inputs[1][0]
assert (input_shape[1:(- 2)] == s[:(- 1)]).all()
assert ((((input_shape[(- 2)] - 1) * 2) + (s[(- 1)] % 2)) == s[(- 1)]).all()
output_shape = ([input_shape[0]] + list(s))
output_shape = tuple(output_shape)
z = outputs[0]
if ((z[0] is None) or (z[0].shape != output_shape)):
z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context, dtype='float32')
input_pycuda = inputs[0][0]
output_pycuda = z[0]
with input_pycuda.context:
if ((plan[0] is None) or (plan_input_shape[0] != input_shape)):
plan_input_shape[0] = input_shape
plan[0] = fft.Plan(s, np.complex64, np.float32, batch=output_shape[0])
input_pycuda.sync()
output_pycuda.sync()
fft.ifft(input_pycuda, output_pycuda, plan[0])
pycuda.driver.Context.synchronize()
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
def grad(self, inputs, output_grads):
(gout,) = output_grads
s = inputs[1]
gf = curfft_op(gout, s)
idx = ((([slice(None)] * (gf.ndim - 2)) + [slice(1, ((s[(- 1)] // 2) + (s[(- 1)] % 2)))]) + [slice(None)])
gf = T.set_subtensor(gf[idx], (gf[idx] * 2))
return [gf, DisconnectedType()()]
def connection_pattern(self, node):
    # Gradient flows only through the array input; the shape input `s`
    # is disconnected from the output.
    return [[True], [False]]
|
def curfft(inp, norm=None):
    """Fast Fourier transform of a real-valued input on the GPU.

    The input must be a real-valued float32 variable of dimensions
    (m, ..., n); FFTs of size (..., n) are performed on m batches.

    The output is a GpuArray of dimensions (m, ..., n//2+1, 2): the
    second-to-last dimension holds the n//2+1 non-trivial elements of
    the real-valued FFTs, with real and imaginary parts stored as a
    pair of float32 arrays.

    Parameters
    ----------
    inp
        Real-valued float32 array of size (m, ..., n) containing m
        inputs of size (..., n).
    norm : {None, 'ortho', 'no_norm'}
        Normalization of the transform. Following numpy, the default
        *None* normalizes only the inverse transform by n; 'ortho'
        yields the unitary transform (1/sqrt(n) forward and inverse);
        'no_norm' leaves the transform unnormalized.
    """
    transform_shape = inp.shape[1:]
    mode = _unitary(norm)
    if (mode == 'ortho'):
        scale = T.sqrt(transform_shape.prod().astype('float32'))
    else:
        scale = 1
    return (curfft_op(inp, transform_shape) / scale)
|
def cuirfft(inp, norm=None, is_odd=False):
    """Inverse fast Fourier transform with real-valued output on the GPU.

    The input is a float32 variable of dimensions (m, ..., n//2+1, 2)
    holding the non-trivial elements of m real-valued Fourier transforms
    of initial size (..., n), real and imaginary parts stored as a pair
    of float32 arrays. The output is a real-valued float32 variable of
    dimensions (m, ..., n) giving the m inverse FFTs.

    Parameters
    ----------
    inp
        float32 array of size (m, ..., n//2+1, 2).
    norm : {None, 'ortho', 'no_norm'}
        Normalization of the transform. Default *None* normalizes the
        inverse transform by n; 'ortho' yields the unitary transform;
        'no_norm' leaves it unnormalized.
    is_odd : {True, False}
        Set to True for a real output with an odd last dimension of
        length (N-1)*2 + 1 for an input last dimension of length N.
    """
    if (is_odd not in (True, False)):
        raise ValueError(('Invalid value %s for id_odd, must be True or False' % is_odd))
    out_shape = inp.shape[1:(- 1)]
    full_len = ((out_shape[(- 1)] - 1) * 2)
    if is_odd:
        full_len = (full_len + 1)
    out_shape = T.set_subtensor(out_shape[(- 1)], full_len)
    mode = _unitary(norm)
    if (mode is None):
        scale = out_shape.prod().astype('float32')
    elif (mode == 'ortho'):
        scale = T.sqrt(out_shape.prod().astype('float32'))
    else:
        scale = 1
    return (cuirfft_op(inp, out_shape) / scale)
|
def _unitary(norm):
if (norm not in (None, 'ortho', 'no_norm')):
raise ValueError(("Invalid value %s for norm, must be None, 'ortho' or 'no norm'" % norm))
return norm
|
def tensor5(name=None, dtype=None):
    """Return a symbolic 5-dimensional tensor variable.

    Parameters
    ----------
    name : str, optional
        Name for the new variable.
    dtype : str, optional
        Element dtype; defaults to ``theano.config.floatX``.
    """
    if (dtype is None):
        dtype = theano.config.floatX
    # All five dimensions are non-broadcastable.
    tensor_type = T.TensorType(dtype, ((False,) * 5))
    return tensor_type(name)
|
def prep_input(im, acc=4):
    """Undersample the batch, then reformat it into what the network accepts.

    Parameters
    ----------
    im : array
        Batch of fully-sampled ground-truth images.
    acc : number
        Acceleration factor controlling the undersampling rate; higher
        means more undersampling.

    Returns
    -------
    tuple
        (im_und_l, k_und_l, mask_l, im_gnd_l) — undersampled image,
        undersampled k-space, sampling mask and ground truth, all in
        lasagne format.
    """
    sampling_mask = cs.cartesian_mask(im.shape, acc, sample_n=8)
    im_und, k_und = cs.undersample(im, sampling_mask, centred=False, norm='ortho')
    return (to_lasagne_format(im_und),
            to_lasagne_format(k_und),
            to_lasagne_format(sampling_mask, mask=True),
            to_lasagne_format(im))
|
def iterate_minibatch(data, batch_size, shuffle=True):
    """Yield successive mini-batches of ``data``.

    Parameters
    ----------
    data : sequence
        Samples to iterate over.
    batch_size : int
        Number of samples per batch; the last batch may be smaller.
    shuffle : bool
        If True, iterate over a random permutation of ``data``
        (``np.random.permutation`` copies, so the input is untouched).
    """
    n = len(data)
    if shuffle:
        data = np.random.permutation(data)
    # `range` instead of the Python-2-only `xrange`: the original raised
    # NameError on Python 3.
    for i in range(0, n, batch_size):
        yield data[i:(i + batch_size)]
|
def create_dummy_data():
    """Create a dummy dataset from one knee subject for the demo.

    In practice one should use a much bigger dataset, and train & test
    should have similar distributions.

    Source: http://mridata.org/
    """
    volume = loadmat(join(project_root, './data/lustig_knee_p2.mat'))['xn']
    nx, ny, nz, nc = volume.shape
    # Slice the 4-D volume along three different axes to form the splits.
    train = np.transpose(volume, (3, 0, 1, 2)).reshape((-1, ny, nz))
    validate = np.transpose(volume, (3, 1, 0, 2)).reshape((-1, nx, nz))
    test = np.transpose(volume, (3, 2, 0, 1)).reshape((-1, nx, ny))
    return train, validate, test
|
def compile_fn(network, net_config, args):
    """Create the training function and validation function.

    Parameters
    ----------
    network
        Output lasagne layer of the network.
    net_config : dict
        Must provide the 'input', 'mask' and 'kspace_input' layers whose
        input variables are bound into the compiled functions.
    args
        Namespace with one-element lists ``lr`` (learning rate) and
        ``l2`` (weight-decay coefficient; 0 disables it).

    Returns
    -------
    (train_fn, val_fn)
        train_fn(input, mask, kspace, target) -> [loss], applying Adam
        updates; val_fn(input, mask, kspace, target) -> [loss, pred].
    """
    base_lr = float(args.lr[0])
    l2 = float(args.l2[0])
    input_var = net_config['input'].input_var
    mask_var = net_config['mask'].input_var
    kspace_var = net_config['kspace_input'].input_var
    target_var = T.tensor4('targets')
    pred = lasagne.layers.get_output(network)
    loss_sq = (lasagne.objectives.squared_error(target_var, pred).mean() * 2)
    # BUG FIX: `loss` was previously only assigned inside the `if l2:` branch,
    # so a zero weight decay raised NameError below. Default to the data term.
    loss = loss_sq
    if l2:
        l2_penalty = lasagne.regularization.regularize_network_params(network, lasagne.regularization.l2)
        loss = (loss_sq + (l2_penalty * l2))
    update_rule = lasagne.updates.adam
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = update_rule(loss, params, learning_rate=base_lr)
    print(' Compiling ... ')
    t_start = time.time()
    train_fn = theano.function([input_var, mask_var, kspace_var, target_var], [loss], updates=updates, on_unused_input='ignore')
    val_fn = theano.function([input_var, mask_var, kspace_var, target_var], [loss, pred], on_unused_input='ignore')
    t_end = time.time()
    print((' ... Done, took %.4f s' % (t_end - t_start)))
    return (train_fn, val_fn)
|
def mse(x, y):
    """Mean squared error between ``x`` and ``y`` (handles complex input
    via the magnitude of the difference)."""
    diff = np.abs(x - y)
    return np.mean(diff ** 2)
|
def psnr(x, y):
    """Measure the PSNR of reconstruction ``y`` w.r.t. reference ``x``.

    Images must be either integer-valued in (0, 256) or float-valued in
    (0, 1); the peak intensity is 256 for uint8 inputs and 1 otherwise.

    :param x: [m,n] reference image
    :param y: [m,n] reconstructed image
    :return: PSNR in dB
    """
    assert (x.shape == y.shape)
    # np.float was removed in NumPy >= 1.20; np.floating is the abstract
    # float scalar type the original alias resolved to.
    assert ((x.dtype == y.dtype) or (np.issubdtype(x.dtype, np.floating) and np.issubdtype(y.dtype, np.floating)))
    if (x.dtype == np.uint8):
        max_intensity = 256
    else:
        max_intensity = 1
    # Compute the error in float64: subtracting and squaring uint8 arrays
    # wraps modulo 256 and silently corrupted the result.
    diff = (x.astype(np.float64) - y.astype(np.float64))
    err = (np.sum((diff ** 2)) / x.size)
    return ((20 * np.log10(max_intensity)) - (10 * np.log10(err)))
|
def complex_psnr(x, y, peak='normalized'):
    """PSNR for (possibly complex-valued) images.

    x: reference image
    y: reconstructed image
    peak: 'normalized' (peak intensity 1) or 'max' (peak taken as
        max(|x|) of the reference)

    Note that ``abs`` squares, and that the peak intensity comes from
    the reference image — swapping the arguments yields a different
    value.
    """
    err = np.mean(np.abs(x - y) ** 2)
    if (peak == 'max'):
        peak_power = np.max(np.abs(x)) ** 2
        return 10 * np.log10(peak_power / err)
    return 10 * np.log10(1.0 / err)
|
def fftc(x, axis=(- 1), norm='ortho'):
    """Centered 1-D FFT along ``axis`` (expects x as an m*n matrix)."""
    shifted = ifftshift(x, axes=axis)
    transformed = fft(shifted, axis=axis, norm=norm)
    return fftshift(transformed, axes=axis)
|
def ifftc(x, axis=(- 1), norm='ortho'):
    """Centered 1-D inverse FFT along ``axis`` (expects x as an m*n matrix)."""
    shifted = ifftshift(x, axes=axis)
    transformed = ifft(shifted, axis=axis, norm=norm)
    return fftshift(transformed, axes=axis)
|
def fft2c(x):
    """Centered 2-D FFT over the last two axes.

    Works for 2-D input and upward, e.g. 3-D x of shape (n, row, col) or
    4-D x of shape (n, slice, row, col); fft2 acts on the last two axes
    by default.
    """
    planes = ((- 2), (- 1))
    centered = ifftshift(x, axes=planes)
    return fftshift(fft2(centered, norm='ortho'), axes=planes)
|
def ifft2c(x):
    """Centered 2-D inverse FFT over the last two axes.

    Works for 2-D input and upward, e.g. 3-D x of shape (n, row, col) or
    4-D x of shape (n, slice, row, col); ifft2 acts on the last two axes
    by default.
    """
    planes = ((- 2), (- 1))
    centered = ifftshift(x, axes=planes)
    return fftshift(ifft2(centered, norm='ortho'), axes=planes)
|
def fourier_matrix(rows, cols):
    """Return the unitary (rows x cols) Fourier matrix.

    Entry (j, k) is exp(-2*pi*i*j*k / cols) / sqrt(cols), so for
    rows == cols the matrix is unitary.
    """
    exponents = np.outer(np.arange(rows), np.arange(cols))
    return (np.exp(exponents * (((- 2.0) * np.pi * 1j) / cols)) / np.sqrt(cols))
|
def inverse_fourier_matrix(rows, cols):
    """Return the conjugate transpose of the unitary Fourier matrix
    (its inverse when rows == cols).

    Rewritten without ``np.matrix``: the class is deprecated in NumPy,
    and ``.getH()`` is simply the conjugate transpose.
    """
    return fourier_matrix(rows, cols).conj().T.copy()
|
def flip(m, axis):
    """Reverse the order of elements in an array along the given axis.

    Backport of ``np.flip`` (NumPy >= 1.12). The shape is preserved but
    the elements along ``axis`` are reordered; a view is returned, so
    this runs in constant time.

    Parameters
    ----------
    m : array_like
        Input array (converted with ``np.asarray`` if needed).
    axis : int
        Axis whose entries are reversed.

    Raises
    ------
    ValueError
        If ``axis`` is out of range for ``m``.
    """
    if (not hasattr(m, 'ndim')):
        m = np.asarray(m)
    selector = ([slice(None)] * m.ndim)
    try:
        selector[axis] = slice(None, None, (- 1))
    except IndexError:
        raise ValueError(('axis=%i is invalid for the %i-dimensional input array' % (axis, m.ndim)))
    return m[tuple(selector)]
|
def rot90_nd(x, axes=((- 2), (- 1)), k=1):
    """Rotate ``x`` by k * 90 degrees in the plane given by ``axes``."""
    x = np.asanyarray(x)
    if (x.ndim < 2):
        raise ValueError('Input must >= 2-d.')
    quarter_turns = (k % 4)
    if (quarter_turns == 0):
        return x
    if (quarter_turns == 1):
        # Flip along the second axis, then transpose the plane.
        return flip(x, axes[1]).swapaxes(*axes)
    if (quarter_turns == 2):
        # Two quarter turns == flip along both axes of the plane.
        return flip(flip(x, axes[0]), axes[1])
    # Three quarter turns: transpose the plane, then flip.
    return flip(x.swapaxes(*axes), axes[1])
|
def data_loader(file_name='data/google.csv', seq_len=7, missing_rate=0.2):
    """Load complete data and introduce missingness.

    Args:
        - file_name: the location of the CSV file to be loaded (the first
          row is treated as a header and skipped)
        - seq_len: sequence length of each sliding window
        - missing_rate: rate of missing data to be introduced

    Returns:
        - x: data with missing values (NaNs replaced by 0)
        - m: observation indicator (m=1: observed, m=0: missing)
        - t: time information (steps since the previous observed value)
        - ori_x: original data without missing values (for evaluation)
    """
    data = np.loadtxt(file_name, delimiter=',', skiprows=1)
    # Reverse the row order — presumably the source file is newest-first;
    # TODO confirm against the data file.
    data = data[::(- 1)]
    (data, norm_parameters) = MinMaxScaler(data)
    (no, dim) = data.shape
    # Number of complete sliding windows of length seq_len.
    no = (no - seq_len)
    ori_x = list()
    for i in range(no):
        temp_ori_x = data[i:(i + seq_len)]
        ori_x = (ori_x + [temp_ori_x])
    m = list()
    x = list()
    t = list()
    for i in range(no):
        # Bernoulli mask: an entry is observed with probability (1 - missing_rate).
        temp_m = (1 * (np.random.uniform(0, 1, [seq_len, dim]) > missing_rate))
        m = (m + [temp_m])
        temp_x = ori_x[i].copy()
        # Blank out the unobserved entries.
        temp_x[np.where((temp_m == 0))] = np.nan
        x = (x + [temp_x])
        # Per-feature time gap: accumulates while the value stays missing.
        temp_t = np.ones([seq_len, dim])
        for j in range(dim):
            for k in range(1, seq_len):
                if (temp_m[(k, j)] == 0):
                    temp_t[(k, j)] = (temp_t[((k - 1), j)] + 1)
        t = (t + [temp_t])
    x = np.asarray(x)
    m = np.asarray(m)
    t = np.asarray(t)
    ori_x = np.asarray(ori_x)
    # NOTE(review): the positional 0 binds to nan_to_num's `copy` parameter
    # (in-place conversion); NaNs are still replaced by 0.0, the default —
    # confirm the intent was not nan=0.
    x = np.nan_to_num(x, 0)
    return (x, m, t, ori_x)
|
def main(args):
    """MRNN main function.

    Args:
        - args: namespace providing
            - file_name: dataset file name
            - seq_len: sequence length of time-series data
            - missing_rate: the rate of introduced missingness
            - h_dim: hidden state dimensions
            - batch_size: the number of samples in a mini batch
            - iteration: the number of training iterations
            - learning_rate: learning rate of model training
            - metric_name: imputation performance metric (mse, mae, rmse)

    Returns:
        - output: dict with
            - x: original data with missing values
            - ori_x: original data without missing values
            - m: mask matrix
            - t: time matrix
            - imputed_x: imputed data
            - performance: imputation performance
    """
    (x, m, t, ori_x) = data_loader(args.file_name, args.seq_len, args.missing_rate)
    # Remove any stale checkpoint directory before training.
    if os.path.exists('tmp/mrnn_imputation'):
        shutil.rmtree('tmp/mrnn_imputation')
    model_parameters = {'h_dim': args.h_dim, 'batch_size': args.batch_size, 'iteration': args.iteration, 'learning_rate': args.learning_rate}
    mrnn_model = mrnn(x, model_parameters)
    mrnn_model.fit(x, m, t)
    imputed_x = mrnn_model.transform(x, m, t)
    performance = imputation_performance(ori_x, imputed_x, m, args.metric_name)
    print(((args.metric_name + ': ') + str(np.round(performance, 4))))
    output = {'x': x, 'ori_x': ori_x, 'm': m, 't': t, 'imputed_x': imputed_x, 'performance': performance}
    # Clean up the temporary model files created during training.
    if os.path.exists('tmp/mrnn_imputation'):
        shutil.rmtree('tmp/mrnn_imputation')
    return output
|
def MinMaxScaler(data):
    """Normalization tool: column-wise Min-Max scaler.

    Args:
        - data: raw input data

    Returns:
        - normalized_data: min-max normalized data
        - norm_parameters: per-column 'min_val' and 'max_val' needed to
          undo the scaling later
    """
    min_val = np.min(data, axis=0)
    shifted = (data - min_val)
    # The epsilon guards against division by zero for constant columns.
    max_val = (np.max(shifted, axis=0) + 1e-08)
    normalized_data = (shifted / max_val)
    return (normalized_data, {'min_val': min_val, 'max_val': max_val})
|
def imputation_performance(ori_x, imputed_x, m, metric_name):
    """Performance metrics for imputation.

    Args:
        - ori_x: original complete data (without missing values)
        - imputed_x: imputed data from incomplete data
        - m: observation indicator (1 = observed, 0 = missing)
        - metric_name: mae, mse, or rmse

    Returns:
        - performance: error weighted toward the imputed (m == 0)
          entries only
    """
    assert (metric_name in ['mae', 'mse', 'rmse'])
    (no, seq_len, dim) = ori_x.shape
    # Flatten (no, seq_len, dim) -> (no * seq_len, dim) for the sklearn metrics.
    ori_x = np.reshape(ori_x, [(no * seq_len), dim])
    imputed_x = np.reshape(imputed_x, [(no * seq_len), dim])
    m = np.reshape(m, [(no * seq_len), dim])
    # (1 - m) restricts the metric to the imputed entries. Passed by keyword:
    # recent scikit-learn releases reject extra positional arguments after
    # y_pred, which broke the original positional call.
    if (metric_name == 'mae'):
        performance = mean_absolute_error(ori_x, imputed_x, sample_weight=(1 - m))
    elif (metric_name == 'mse'):
        performance = mean_squared_error(ori_x, imputed_x, sample_weight=(1 - m))
    elif (metric_name == 'rmse'):
        performance = np.sqrt(mean_squared_error(ori_x, imputed_x, sample_weight=(1 - m)))
    return performance
|
def define_classification_model():
    """Select and define the model used for the classifier.

    The choice is driven by the global ``config['model_type']``.

    Returns
    -------
    A scikit-learn classifier instance.

    Raises
    ------
    ValueError
        If ``config['model_type']`` is not one of the supported values
        (previously an unknown type silently returned None, deferring
        the failure to the first use of the "model").
    """
    model_type = config['model_type']
    if (model_type == 'linearSVM'):
        return LinearSVC(C=1)
    elif (model_type == 'SVM'):
        return SVC(C=1, kernel='rbf', gamma='scale')
    elif (model_type == 'kNN'):
        return KNeighborsClassifier(n_neighbors=1, metric='cosine')
    elif (model_type == 'perceptron'):
        return SGDClassifier(max_iter=600, verbose=0.5, loss='log', learning_rate='optimal')
    elif (model_type == 'MLP'):
        return MLPClassifier(hidden_layer_sizes=(20,), max_iter=600, verbose=10, solver='sgd', learning_rate='constant', learning_rate_init=0.001)
    raise ValueError('Unknown model_type: %s' % model_type)
|
def extract_vggish_features(paths, path2gt, model):
    """Extract VGGish features and their corresponding ground_truth and identifiers (the path).

    VGGish features are extracted from non-overlapping audio patches of 0.96 seconds,
    where each audio patch covers 64 mel bands and 96 frames of 10 ms each.

    We repeat ground_truth and identifiers to fit the number of extracted VGGish features.

    Note: the ``model`` argument is currently unused — the checkpoint
    'vggish_model.ckpt' is always loaded below.
    """
    first_audio = True
    for p in paths:
        if first_audio:
            input_data = vggish_input.wavfile_to_examples((config['audio_folder'] + p))
            # One label / identifier per extracted patch.
            ground_truth = np.repeat(path2gt[p], input_data.shape[0], axis=0)
            identifiers = np.repeat(p, input_data.shape[0], axis=0)
            first_audio = False
        else:
            tmp_in = vggish_input.wavfile_to_examples((config['audio_folder'] + p))
            input_data = np.concatenate((input_data, tmp_in), axis=0)
            tmp_gt = np.repeat(path2gt[p], tmp_in.shape[0], axis=0)
            ground_truth = np.concatenate((ground_truth, tmp_gt), axis=0)
            tmp_id = np.repeat(p, tmp_in.shape[0], axis=0)
            identifiers = np.concatenate((identifiers, tmp_id), axis=0)
    # Run all patches through the pre-trained VGGish graph in a single batch.
    with tf.Graph().as_default(), tf.Session() as sess:
        vggish_slim.define_vggish_slim(training=False)
        vggish_slim.load_vggish_slim_checkpoint(sess, 'vggish_model.ckpt')
        features_tensor = sess.graph.get_tensor_by_name(vggish_params.INPUT_TENSOR_NAME)
        embedding_tensor = sess.graph.get_tensor_by_name(vggish_params.OUTPUT_TENSOR_NAME)
        extracted_feat = sess.run([embedding_tensor], feed_dict={features_tensor: input_data})
        # Drop the singleton dimension added by the one-element fetch list.
        feature = np.squeeze(np.asarray(extracted_feat))
    return [feature, ground_truth, identifiers]
|
def extract_other_features(paths, path2gt, model_type):
    """Extract MusiCNN or OpenL3 features and their corresponding ground_truth and identifiers (the path).

    OpenL3 features are extracted from non-overlapping audio patches of 1 second,
    where each audio patch covers 128 mel bands.

    MusiCNN features are extracted from non-overlapping audio patches of 1 second,
    where each audio patch covers 96 mel bands.

    We repeat ground_truth and identifiers to fit the number of extracted OpenL3 features.
    """
    if (model_type == 'openl3'):
        # Load the OpenL3 embedding model once, outside the per-file loop.
        model = openl3.models.load_embedding_model(input_repr='mel128', content_type='music', embedding_size=512)
    first_audio = True
    for p in paths:
        if (model_type == 'musicnn'):
            (taggram, tags, extracted_features) = extractor((config['audio_folder'] + p), model='MSD_musicnn', extract_features=True, input_overlap=1)
            emb = extracted_features['max_pool']
        elif (model_type == 'openl3'):
            (wave, sr) = wavefile_to_waveform((config['audio_folder'] + p), 'openl3')
            (emb, _) = openl3.get_embedding(wave, sr, hop_size=1, model=model, verbose=False)
        if first_audio:
            features = emb
            # One label / identifier per extracted feature frame.
            ground_truth = np.repeat(path2gt[p], features.shape[0], axis=0)
            identifiers = np.repeat(p, features.shape[0], axis=0)
            first_audio = False
        else:
            features = np.concatenate((features, emb), axis=0)
            tmp_gt = np.repeat(path2gt[p], emb.shape[0], axis=0)
            ground_truth = np.concatenate((ground_truth, tmp_gt), axis=0)
            tmp_id = np.repeat(p, emb.shape[0], axis=0)
            identifiers = np.concatenate((identifiers, tmp_id), axis=0)
    return [features, ground_truth, identifiers]
|
# NOTE(review): the following lines are stray scraped web-page text, not code;
# converted to comments so the module still parses.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.