code
stringlengths
17
6.64M
def sample_discretized_logistic(mean, logscale, inverse_bin_width):
    """Draw a logistic sample and snap it to the grid with spacing
    1 / inverse_bin_width."""
    continuous = sample_logistic(mean, logscale)
    return torch.round(continuous * inverse_bin_width) / inverse_bin_width
def normal_cdf(value, loc, std):
    """Gaussian CDF of `value` under N(loc, std), via the error function."""
    z = (value - loc) * std.reciprocal() / math.sqrt(2)
    return 0.5 * (1 + torch.erf(z))
def log_discretized_normal(x, mean, logvar, inverse_bin_width):
    """Log-probability of x under a normal discretized to bins of width
    1 / inverse_bin_width (CDF difference across the bin)."""
    std = torch.exp(0.5 * logvar)
    half_bin = 0.5 / inverse_bin_width
    bin_prob = normal_cdf(x + half_bin, mean, std) - normal_cdf(x - half_bin, mean, std)
    return torch.log(bin_prob + 1e-07)
def log_mixture_discretized_normal(x, mean, logvar, pi, inverse_bin_width):
    """Log-probability of x under a mixture of discretized normals.

    `mean`, `logvar` and `pi` carry a trailing mixture dimension; x is
    broadcast against it."""
    std = torch.exp(0.5 * logvar)
    half_bin = 0.5 / inverse_bin_width
    x = x.view(x.size(0), x.size(1), x.size(2), x.size(3), 1)
    per_component = normal_cdf(x + half_bin, mean, std) - normal_cdf(x - half_bin, mean, std)
    mixture_prob = torch.sum(per_component * pi, dim=-1)
    return torch.log(mixture_prob + 1e-08)
def sample_discretized_normal(mean, logvar, inverse_bin_width):
    """Sample from N(mean, exp(logvar)) and round to the discretization grid."""
    noise = torch.randn_like(mean)
    continuous = mean + torch.exp(0.5 * logvar) * noise
    return torch.round(continuous * inverse_bin_width) / inverse_bin_width
def log_mixture_discretized_logistic(x, mean, logscale, pi, inverse_bin_width):
    """Log-probability of x under a mixture of discretized logistics
    (sigmoid CDF difference across each bin, weighted by pi)."""
    scale = torch.exp(logscale)
    half_bin = 0.5 / inverse_bin_width
    x = x.view(x.size(0), x.size(1), x.size(2), x.size(3), 1)
    upper = torch.sigmoid((x + half_bin - mean) / scale)
    lower = torch.sigmoid((x - half_bin - mean) / scale)
    mixture_prob = torch.sum((upper - lower) * pi, dim=-1)
    return torch.log(mixture_prob + 1e-08)
def mixture_discretized_logistic_cdf(x, mean, logscale, pi, inverse_bin_width):
    """Mixture-of-discretized-logistics CDF evaluated at the right edge of
    the bin containing x."""
    scale = torch.exp(logscale)
    right_edge = x[..., None] + 0.5 / inverse_bin_width
    component_cdfs = torch.sigmoid((right_edge - mean) / scale)
    return torch.sum(component_cdfs * pi, dim=-1)
def sample_mixture_discretized_logistic(mean, logs, pi, inverse_bin_width):
    """Sample from a mixture of discretized logistics.

    Picks one mixture component per location via multinomial sampling,
    draws a logistic sample from that component (inverse-CDF trick), and
    rounds to the grid."""
    b, c, h, w, n_mixtures = tuple(map(int, pi.size()))
    flat = b * c * h * w
    component = torch.multinomial(pi.view(flat, n_mixtures), num_samples=1).view(-1)
    rows = torch.arange(flat)
    mean = mean.view(flat, n_mixtures)[rows, component].view(b, c, h, w)
    logs = logs.view(flat, n_mixtures)[rows, component].view(b, c, h, w)
    u = torch.rand_like(mean)
    continuous = mean + torch.exp(logs) * torch.log(u / (1 - u))
    return torch.round(continuous * inverse_bin_width) / inverse_bin_width
def log_multinomial(logits, targets):
    """Per-element log-likelihood of `targets` under categorical `logits`
    (the negated unreduced cross-entropy)."""
    nll = F.cross_entropy(logits, targets, reduction='none')
    return -nll
def sample_multinomial(logits):
    """Sample one class index per spatial location from logits shaped
    (batch, n_categories, C, H, W); returns a (batch, C, H, W) tensor."""
    b, n_categories, c, h, w = logits.size()
    probs = F.softmax(logits.permute(0, 2, 3, 4, 1), dim=-1)
    probs = probs.reshape(b * c * h * w, n_categories)
    return torch.multinomial(probs, num_samples=1).view(b, c, h, w)
class ToTensorNoNorm():
    """Convert an HWC image (PIL image or array) to a CHW torch tensor
    without rescaling the values to [0, 1]."""

    def __call__(self, X_i):
        # Share memory with the source array when possible.
        as_array = np.array(X_i, copy=False)
        return torch.from_numpy(as_array).permute(2, 0, 1)
class PadToMultiple(object):
    """Pad a PIL image on the right/bottom so width and height become
    multiples of `multiple`.

    Args:
        multiple (int): target divisor for both image sides.
        fill: fill value for constant padding.
        padding_mode (str): 'constant', 'edge', 'reflect' or 'symmetric'.
    """

    def __init__(self, multiple, fill=0, padding_mode='constant'):
        assert isinstance(multiple, numbers.Number)
        assert isinstance(fill, (numbers.Number, str, tuple))
        assert (padding_mode in ['constant', 'edge', 'reflect', 'symmetric'])
        self.multiple = multiple
        self.fill = fill
        self.padding_mode = padding_mode

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be padded.
        Returns:
            PIL Image: Padded image.
        """
        (w, h) = img.size
        m = self.multiple
        # Round each dimension up to the next multiple of m.
        nw = (((w // m) + int(((w % m) != 0))) * m)
        nh = (((h // m) + int(((h % m) != 0))) * m)
        padw = (nw - w)
        padh = (nh - h)
        # Pad only right and bottom so the original content stays anchored
        # at the top-left corner.
        out = vf.pad(img, (0, 0, padw, padh), self.fill, self.padding_mode)
        return out

    def __repr__(self):
        # BUG FIX: previously formatted `self.mulitple` (misspelled), which
        # raised AttributeError whenever the transform was printed.
        return (self.__class__.__name__
                + '(multiple={0}, fill={1}, padding_mode={2})'.format(
                    self.multiple, self.fill, self.padding_mode))
class CustomTensorDataset(Dataset):
    """Dataset wrapping tensors.

    Each sample will be retrieved by indexing tensors along the first dimension.

    Arguments:
        *tensors (Tensor): tensors that have the same size of the first dimension.
        transform: optional callable applied to each input sample; may
            return a PIL image, which is converted back to a CHW tensor.
    """

    def __init__(self, *tensors, transform=None):
        assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
        self.tensors = tensors
        self.transform = transform

    def __getitem__(self, index):
        # BUG FIX / cleanup: removed an unused `from PIL import Image`
        # executed on every item fetch; it added per-call overhead and a
        # hard Pillow dependency without ever using the name.
        X, y = self.tensors
        X_i, y_i = X[index], y[index]
        if self.transform:
            X_i = self.transform(X_i)
        # HWC (array or PIL image) -> CHW tensor, no normalization.
        X_i = torch.from_numpy(np.array(X_i, copy=False))
        X_i = X_i.permute(2, 0, 1)
        return (X_i, y_i)

    def __len__(self):
        return self.tensors[0].size(0)
def load_cifar10(args, **kwargs):
    """Build CIFAR-10 train/val/test loaders.

    The last 10k training images become the validation split; augmentation
    strength is selected by args.data_augmentation_level (0, 1 or 2)."""
    args.input_size = [3, 32, 32]
    args.input_type = 'continuous'
    args.dynamic_binarization = False

    from keras.datasets import cifar10
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    # Keras delivers NHWC; the models expect NCHW.
    x_train = x_train.transpose(0, 3, 1, 2)
    x_test = x_test.transpose(0, 3, 1, 2)

    import math
    if args.data_augmentation_level == 2:
        data_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomHorizontalFlip(),
            transforms.Pad(int(math.ceil(32 * 0.05)), padding_mode='edge'),
            transforms.RandomAffine(degrees=0, translate=(0.05, 0.05)),
            transforms.CenterCrop(32),
        ])
    elif args.data_augmentation_level == 1:
        data_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomHorizontalFlip(),
        ])
    else:
        data_transform = transforms.Compose([transforms.ToPILImage()])

    # Hold out the final 10k training images for validation.
    x_val, y_val = x_train[-10000:], y_train[-10000:]
    x_train, y_train = x_train[:-10000], y_train[:-10000]

    train = CustomTensorDataset(torch.from_numpy(x_train),
                                torch.from_numpy(y_train),
                                transform=data_transform)
    train_loader = data_utils.DataLoader(train, batch_size=args.batch_size,
                                         shuffle=True, **kwargs)

    validation = data_utils.TensorDataset(torch.from_numpy(x_val),
                                          torch.from_numpy(y_val))
    val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size,
                                       shuffle=False, **kwargs)

    test = data_utils.TensorDataset(torch.from_numpy(x_test),
                                    torch.from_numpy(y_test))
    test_loader = data_utils.DataLoader(test, batch_size=args.batch_size,
                                        shuffle=False, **kwargs)

    return (train_loader, val_loader, test_loader, args)
def extract_tar(tarpath):
    """Extract `tarpath` into '<tarpath minus .tar>/', splitting the files
    into subdirectories 'images0', 'images1', ... of at most 50000 files.

    Directory members are skipped; each file is extracted under its base
    name only. Returns the extraction root. If the root already exists,
    extraction is assumed done and skipped.
    """
    assert tarpath.endswith('.tar')
    startdir = tarpath[:-4] + '/'
    if os.path.exists(startdir):
        return startdir
    print('Extracting', tarpath)
    with tarfile.open(name=tarpath) as tar:
        t = 0
        done = False
        while not done:
            path = join(startdir, 'images{}'.format(t))
            os.makedirs(path, exist_ok=True)
            print(path)
            for i in range(50000):
                member = tar.next()
                # BUG FIX: if the archive ended with a directory entry, the
                # old code left member=None after the isdir-skip loop and
                # crashed on member.name. Skip directories first, then
                # re-check for exhaustion.
                while member is not None and member.isdir():
                    member = tar.next()
                if member is None:
                    done = True
                    break
                # Flatten the archive's internal paths to base names.
                member.name = member.name.split('/')[-1]
                tar.extract(member, path=path)
            t += 1
    return startdir
def load_imagenet(resolution, args, **kwargs):
    """Build downsampled-ImageNet (32x32 or 64x64) loaders.

    20k random training images are split off for validation; the official
    validation archive serves as the test set."""
    assert resolution in (32, 64)
    args.input_size = [3, resolution, resolution]

    trainpath = '../imagenet{res}/train_{res}x{res}.tar'.format(res=resolution)
    valpath = '../imagenet{res}/valid_{res}x{res}.tar'.format(res=resolution)
    trainpath = extract_tar(trainpath)
    valpath = extract_tar(valpath)

    data_transform = transforms.Compose([ToTensorNoNorm()])

    print('Starting loading ImageNet')
    imagenet_data = torchvision.datasets.ImageFolder(trainpath, transform=data_transform)
    print('Number of data images', len(imagenet_data))

    # Random 20k-image validation split out of the training folder.
    val_idcs = np.random.choice(len(imagenet_data), size=20000, replace=False)
    train_idcs = np.setdiff1d(np.arange(len(imagenet_data)), val_idcs)
    train_dataset = torch.utils.data.dataset.Subset(imagenet_data, train_idcs)
    val_dataset = torch.utils.data.dataset.Subset(imagenet_data, val_idcs)

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=args.batch_size, shuffle=False, **kwargs)

    test_dataset = torchvision.datasets.ImageFolder(valpath, transform=data_transform)
    print('Number of val images:', len(test_dataset))
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=args.batch_size, shuffle=False, **kwargs)

    return (train_loader, val_loader, test_loader, args)
def load_dataset(args, **kwargs):
    """Dispatch to the loader selected by args.dataset
    ('cifar10', 'imagenet32' or 'imagenet64')."""
    if args.dataset == 'cifar10':
        loaders = load_cifar10(args, **kwargs)
    elif args.dataset == 'imagenet32':
        loaders = load_imagenet(32, args, **kwargs)
    elif args.dataset == 'imagenet64':
        loaders = load_imagenet(64, args, **kwargs)
    else:
        raise Exception('Wrong name of the dataset!')
    # loaders is (train_loader, val_loader, test_loader, args)
    return loaders
def calculate_likelihood(X, model, args, S=5000, MB=500):
    # Importance-sampling estimate of the per-example marginal
    # log-likelihood, using S samples evaluated in minibatches of MB.
    #   X: test inputs; reshaped to args.input_size per example.
    #   model: returns (x_mean, z_mu, z_var, ldj, z0, zk) for a batch.
    #   Returns (nll, bpd): mean negative log-likelihood and bits/dim.
    N_test = X.size(0)
    X = X.view((- 1), *args.input_size)
    likelihood_test = []
    # Split S importance samples into R repeats of size MB each.
    if (S <= MB):
        R = 1
    else:
        R = (S // MB)
        S = MB
    for j in range(N_test):
        if ((j % 100) == 0):
            print('Progress: {:.2f}%'.format(((j / (1.0 * N_test)) * 100)))
        x_single = X[j].unsqueeze(0)
        a = []
        for r in range(0, R):
            # Replicate the single example S times and score each replica.
            x = x_single.expand(S, *x_single.size()[1:]).contiguous()
            (x_mean, z_mu, z_var, ldj, z0, zk) = model(x)
            a_tmp = calculate_loss_array(x_mean, x, z_mu, z_var, z0, zk, ldj, args)
            # Negative loss = per-sample log importance weight.
            a.append((- a_tmp.cpu().data.numpy()))
        a = np.asarray(a)
        a = np.reshape(a, ((a.shape[0] * a.shape[1]), 1))
        # log p(x) ~= logsumexp(log w) - log(#samples)
        likelihood_x = logsumexp(a)
        likelihood_test.append((likelihood_x - np.log(len(a))))
    likelihood_test = np.array(likelihood_test)
    nll = (- np.mean(likelihood_test))
    if (args.input_type == 'multinomial'):
        # Convert nats to bits per dimension.
        bpd = (nll / (np.prod(args.input_size) * np.log(2.0)))
    elif (args.input_type == 'binary'):
        bpd = 0.0
    else:
        raise ValueError('invalid input type!')
    return (nll, bpd)
def plot_training_curve(train_loss, validation_loss, fname='training_curve.pdf', labels=None):
    """
    Plots train_loss and validation loss as a function of optimization iteration
    :param train_loss: np.array of train_loss (1D or 2D)
    :param validation_loss: np.array of validation loss (1D or 2D)
    :param fname: output file name
    :param labels: if train_loss and validation loss are 2D, then labels indicate which variable is varied
    accross training curves.
    :return: None
    """
    plt.close()
    matplotlib.rcParams.update({'font.size': 14})
    matplotlib.rcParams['mathtext.fontset'] = 'stix'
    matplotlib.rcParams['font.family'] = 'STIXGeneral'
    if len(train_loss.shape) == 1:
        fig, ax = plt.subplots(nrows=1, ncols=1)
        figsize = (6, 4)
        if train_loss.shape[0] == validation_loss.shape[0]:
            x = np.arange(train_loss.shape[0])
            ax.plot(x, train_loss, '-', lw=2.0, color='black', label='train')
            ax.plot(x, validation_loss, '-', lw=2.0, color='blue', label='val')
        elif train_loss.shape[0] % validation_loss.shape[0] == 0:
            x = np.arange(train_loss.shape[0])
            ax.plot(x, train_loss, '-', lw=2.0, color='black', label='train')
            # Validation was evaluated less often: spread its points evenly
            # over the training iterations.
            x = np.arange(validation_loss.shape[0])
            x = (x + 1) * train_loss.shape[0] / validation_loss.shape[0]
            ax.plot(x, validation_loss, '-', lw=2.0, color='blue', label='val')
        else:
            raise ValueError('Length of train_loss and validation_loss must be equal or divisible')
        miny = np.minimum(validation_loss.min(), train_loss.min()) - 20.0
        maxy = np.maximum(validation_loss.max(), train_loss.max()) + 30.0
        ax.set_ylim([miny, maxy])
    elif len(train_loss.shape) == 2:
        cmap = plt.cm.brg
        cNorm = matplotlib.colors.Normalize(vmin=0, vmax=train_loss.shape[0])
        scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=cmap)
        fig, ax = plt.subplots(nrows=1, ncols=1)
        figsize = (6, 4)
        if labels is None:
            labels = ['%d' % i for i in range(train_loss.shape[0])]
        if train_loss.shape[1] == validation_loss.shape[1]:
            for i in range(train_loss.shape[0]):
                color_val = scalarMap.to_rgba(i)
                # BUG FIX: x must span the number of iterations (shape[1]),
                # not the number of curves (shape[0]); otherwise x and
                # train_loss[i] have mismatched lengths.
                x = np.arange(train_loss.shape[1])
                ax.plot(x, train_loss[i], '-', lw=2.0, color=color_val, label=labels[i])
                ax.plot(x, validation_loss[i], '--', lw=2.0, color=color_val)
        elif train_loss.shape[1] % validation_loss.shape[1] == 0:
            for i in range(train_loss.shape[0]):
                color_val = scalarMap.to_rgba(i)
                x = np.arange(train_loss.shape[1])
                ax.plot(x, train_loss[i], '-', lw=2.0, color=color_val, label=labels[i])
                x = np.arange(validation_loss.shape[1])
                x = (x + 1) * train_loss.shape[1] / validation_loss.shape[1]
                # Dashed validation line, consistent with the equal-length
                # branch above.
                ax.plot(x, validation_loss[i], '--', lw=2.0, color=color_val)
        miny = np.minimum(validation_loss.min(), train_loss.min()) - 20.0
        maxy = np.maximum(validation_loss.max(), train_loss.max()) + 30.0
        ax.set_ylim([miny, maxy])
    else:
        raise ValueError('train_loss and validation_loss must be 1D or 2D arrays')
    ax.set_xlabel('iteration')
    ax.set_ylabel('loss')
    plt.title('Training and validation loss')
    fig.set_size_inches(figsize)
    fig.subplots_adjust(hspace=0.1)
    plt.savefig(fname, bbox_inches='tight')
    plt.close()
class LiviaSoftmax(HyperDenseNetConvLayer):
    ' Final Classification layer with Softmax '

    def __init__(self, rng, layerID, inputSample_Train, inputSample_Test, inputToLayerShapeTrain, inputToLayerShapeTest, filterShape, applyBatchNorm, applyBatchNormNumberEpochs, maxPoolingParameters, weights_initialization, weights, activationType=0, dropoutRate=0.0, softmaxTemperature=1.0):
        # Build the underlying conv layer first; this populates
        # outputTrain/outputTest and their shapes used below.
        HyperDenseNetConvLayer.__init__(self, rng, layerID, inputSample_Train, inputSample_Test, inputToLayerShapeTrain, inputToLayerShapeTest, filterShape, applyBatchNorm, applyBatchNormNumberEpochs, maxPoolingParameters, weights_initialization, weights, activationType, dropoutRate)
        self._numberOfOutputClasses = None
        self._bClassLayer = None
        self._softmaxTemperature = None
        # Number of output classes equals the number of conv filters here.
        self._numberOfOutputClasses = filterShape[0]
        self._softmaxTemperature = softmaxTemperature
        outputOfConvTrain = self.outputTrain
        outputOfConvTest = self.outputTest
        outputOfConvShapeTrain = self.outputShapeTrain
        outputOfConvShapeTest = self.outputShapeTest
        # Per-feature-map bias added before the softmax.
        b_values = np.zeros(self._numberOfFeatureMaps, dtype='float32')
        self._bClassLayer = theano.shared(value=b_values, borrow=True)
        inputToSoftmaxTrain = applyBiasToFeatureMaps(self._bClassLayer, outputOfConvTrain)
        inputToSoftmaxTest = applyBiasToFeatureMaps(self._bClassLayer, outputOfConvTest)
        # Register the bias as a trainable parameter of this layer.
        self.params = (self.params + [self._bClassLayer])
        # Class posteriors and hard predictions for train and test graphs.
        (self.p_y_given_x_train, self.y_pred_train) = applySoftMax(inputToSoftmaxTrain, outputOfConvShapeTrain, self._numberOfOutputClasses, softmaxTemperature)
        (self.p_y_given_x_test, self.y_pred_test) = applySoftMax(inputToSoftmaxTest, outputOfConvShapeTest, self._numberOfOutputClasses, softmaxTemperature)

    def negativeLogLikelihoodWeighted(self, y, weightPerClass):
        # Class-weighted negative log-likelihood over the 5D prediction
        # volume; y holds the target class index per voxel.
        # Guard against log(0) by adding a tiny probability where needed.
        e1 = np.finfo(np.float32).tiny
        addTinyProbMatrix = (T.lt(self.p_y_given_x_train, (4 * e1)) * e1)
        # Broadcast the per-class weights over batch and spatial axes.
        weights = weightPerClass.dimshuffle('x', 0, 'x', 'x', 'x')
        log_p_y_given_x_train = T.log((self.p_y_given_x_train + addTinyProbMatrix))
        weighted_log_probs = (log_p_y_given_x_train * weights)
        wShape = weighted_log_probs.shape
        # Advanced indexing that picks, for every voxel, the log-prob of
        # its ground-truth class y.
        idx0 = T.arange(wShape[0]).dimshuffle(0, 'x', 'x', 'x')
        idx2 = T.arange(wShape[2]).dimshuffle('x', 0, 'x', 'x')
        idx3 = T.arange(wShape[3]).dimshuffle('x', 'x', 0, 'x')
        idx4 = T.arange(wShape[4]).dimshuffle('x', 'x', 'x', 0)
        return (- T.mean(weighted_log_probs[(idx0, y, idx2, idx3, idx4)]))

    def predictionProbabilities(self):
        # Test-time class posterior probabilities.
        return self.p_y_given_x_test
def computeDice(autoSeg, groundTruth):
    """Per-class Dice coefficients between a segmentation and ground truth.

    Returns
    -------
    DiceArray : list of floats
        One Dice coefficient per foreground class (class 0 / background is
        skipped), each in [0, 1]: 1 = maximum similarity, 0 = none.
    """
    n_classes = int(np.max(groundTruth) + 1)
    DiceArray = []
    # FIX: range() replaces the Python-2-only xrange(); the builtin bool
    # replaces the np.bool alias removed in NumPy 1.24.
    for c_i in range(1, n_classes):
        idx_Auto = np.where(autoSeg.flatten() == c_i)[0]
        idx_GT = np.where(groundTruth.flatten() == c_i)[0]
        autoArray = np.zeros(autoSeg.size, dtype=bool)
        autoArray[idx_Auto] = 1
        gtArray = np.zeros(autoSeg.size, dtype=bool)
        gtArray[idx_GT] = 1
        dsc = dice(autoArray, gtArray)
        DiceArray.append(dsc)
    return DiceArray
def dice(im1, im2):
    """
    Computes the Dice coefficient
    ----------
    im1 : boolean array
    im2 : boolean array

    If they are not boolean, they will be converted.

    -------
    It returns the Dice coefficient as a float on the range [0,1].
    1: Perfect overlapping
    0: Not overlapping
    """
    # FIX: np.bool was removed in NumPy 1.24; the builtin bool is the
    # correct dtype argument.
    im1 = np.asarray(im1).astype(bool)
    im2 = np.asarray(im2).astype(bool)
    if im1.size != im2.size:
        raise ValueError('Size mismatch between input arrays!!!')
    im_sum = im1.sum() + im2.sum()
    if im_sum == 0:
        # Both masks empty: define this as perfect overlap.
        return 1.0
    intersection = np.logical_and(im1, im2)
    return (2.0 * intersection.sum()) / im_sum
def applyActivationFunction_Sigmoid(inputData):
    """Element-wise logistic sigmoid.

    inputData is a 5D tensor shaped
    (batchSize, numberOfFeatureMaps, x, y, z)."""
    return T.nnet.sigmoid(inputData)
def applyActivationFunction_Tanh(inputData):
    """Element-wise tanh over a 5D tensor
    (batchSize, numberOfFeatureMaps, x, y, z)."""
    return T.tanh(inputData)
def applyActivationFunction_ReLU_v1(inputData):
    """ReLU via element-wise maximum with zero.

    inputData is a 5D tensor (batchSize, featMaps, x, y, z)."""
    return T.maximum(inputData, 0)
def applyActivationFunction_ReLU_v2(inputData):
    """ReLU via a symbolic switch: zero for negatives, identity otherwise."""
    return T.switch(inputData < 0.0, 0.0, inputData)
def applyActivationFunction_ReLU_v3(inputData):
    """ReLU via the identity max(x, 0) == (x + |x|) / 2."""
    mirrored = abs(inputData)
    return (inputData + mirrored) / 2.0
def applyActivationFunction_ReLU_v4(inputData):
    """ReLU via the sign trick: (sgn(x) + 1) * x / 2."""
    gate = T.sgn(inputData) + 1
    return gate * inputData * 0.5
def applyActivationFunction_LeakyReLU(inputData, leakiness):
    """Leaky rectifier.

    leakiness : float
        Slope for negative input, usually between 0 and 1. A leakiness of
        0 gives the standard rectifier, 1 a linear activation, and values
        in between a leaky rectifier.

    [1] Maas et al. (2013): Rectifier Nonlinearities Improve Neural
    Network Acoustic Models.

    Input is a tensor of shape (batchSize, FeatMaps, xDim, yDim, zDim).
    """
    # f(x) = pos*x + neg*|x| equals x for x > 0 and leakiness*x for x < 0.
    pos_coeff = 0.5 * (1 + leakiness)
    neg_coeff = 0.5 * (1 - leakiness)
    return pos_coeff * inputData + neg_coeff * abs(inputData)
def applyActivationFunction_PReLU(inputData, PreluActivations):
    """Parametric Rectified Linear Unit:
    f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha is a
    learned per-feature-map vector.

    Input is a tensor of shape (batchSize, FeatMaps, xDim, yDim, zDim).
    """
    # Broadcast the per-channel alphas over batch and spatial dims.
    alphas = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
    positive_part = T.maximum(0, inputData)
    negative_part = alphas * (inputData - abs(inputData)) * 0.5
    return positive_part + negative_part
def applyActivationFunction_PReLU_v2(inputData, PreluActivations):
    """Parametric ReLU written via the (x +/- |x|)/2 decomposition.

    inputData is a 5D tensor (batchSize, featMaps, x, y, z)."""
    alphas = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
    positive_part = (inputData + abs(inputData)) / 2.0
    negative_part = alphas * ((inputData - abs(inputData)) / 2.0)
    return positive_part + negative_part
def applyActivationFunction_PReLU_v3(inputData, PreluActivations):
    """Parametric ReLU in the pos/neg-coefficient form used by LeakyReLU,
    with a learned per-channel slope.

    inputData is a 5D tensor (batchSize, featMaps, x, y, z)."""
    alphas = PreluActivations.dimshuffle('x', 0, 'x', 'x', 'x')
    pos_coeff = 0.5 * (1 + alphas)
    neg_coeff = 0.5 * (1 - alphas)
    return pos_coeff * inputData + neg_coeff * abs(inputData)
def apply_Dropout(rng, dropoutRate, inputShape, inputData, task):
    """Apply dropout to a layer's activations.

    Task: 0 = training (random binary mask), 1 = validation / 2 = testing
    (activations are scaled by the keep probability instead of masked).
    Rates at or below 0.001 are treated as no dropout."""
    outputData = inputData
    if dropoutRate > 0.001:
        keep_prob = 1 - dropoutRate
        srng = T.shared_randomstreams.RandomStreams(rng.randint(999999))
        mask = srng.binomial(n=1, size=inputShape, p=keep_prob,
                             dtype=theano.config.floatX)
        if task == 0:
            outputData = inputData * mask
        else:
            # Expected-value scaling at validation/test time.
            outputData = inputData * keep_prob
    return outputData
def convolveWithKernel(W, filter_shape, inputSample, inputSampleShape):
    """3D 'valid' convolution of a 5D input with kernel W.

    Both W and the input carry channels in dim 4; they are shuffled to the
    layout conv3d expects and the result is shuffled back. Returns
    (output, outputShape)."""
    # (nKernels, x, y, z, channels) -> conv3d kernel layout.
    kernel = W.dimshuffle(0, 4, 1, 2, 3)
    kernel_shape = (filter_shape[0], filter_shape[4],
                    filter_shape[1], filter_shape[2], filter_shape[3])
    sample = inputSample.dimshuffle(0, 4, 1, 2, 3)
    sample_shape = (inputSampleShape[0], inputSampleShape[4],
                    inputSampleShape[1], inputSampleShape[2], inputSampleShape[3])
    convolved = T.nnet.conv3d2d.conv3d(sample, kernel, sample_shape,
                                       kernel_shape, border_mode='valid')
    output = convolved.dimshuffle(0, 2, 3, 4, 1)
    # 'valid' convolution shrinks each spatial dim by (filter - 1).
    outputShape = [inputSampleShape[0],
                   filter_shape[0],
                   inputSampleShape[2] - filter_shape[2] + 1,
                   inputSampleShape[3] - filter_shape[3] + 1,
                   inputSampleShape[4] - filter_shape[4] + 1]
    return (output, outputShape)
def applyBn(numberEpochApplyRolling, inputTrain, inputTest, inputShapeTrain):
    # Batch normalization over a 5D tensor (batch, channels, x, y, z).
    # Training normalizes with per-batch statistics; testing uses a
    # rolling average over the last `numberEpochApplyRolling` epochs kept
    # in shared variables. Returns the normalized train/test tensors plus
    # all shared variables the caller needs to update the rolling stats.
    numberOfChannels = inputShapeTrain[1]
    # Learned scale (gamma) and shift (beta), one value per channel.
    gBn_values = np.ones(numberOfChannels, dtype='float32')
    gBn = theano.shared(value=gBn_values, borrow=True)
    bBn_values = np.zeros(numberOfChannels, dtype='float32')
    bBn = theano.shared(value=bBn_values, borrow=True)
    # Buffers of per-epoch batch means/variances for the rolling averages.
    muArray = theano.shared(np.zeros((numberEpochApplyRolling, numberOfChannels), dtype='float32'), borrow=True)
    varArray = theano.shared(np.ones((numberEpochApplyRolling, numberOfChannels), dtype='float32'), borrow=True)
    # Latest batch statistics, stored so the caller can push them into the
    # rolling buffers after each epoch.
    sharedNewMu_B = theano.shared(np.zeros(numberOfChannels, dtype='float32'), borrow=True)
    sharedNewVar_B = theano.shared(np.ones(numberOfChannels, dtype='float32'), borrow=True)
    # Tiny epsilon to keep the variance strictly positive.
    e1 = np.finfo(np.float32).tiny
    # Batch mean/variance per channel (reduce batch and spatial axes).
    mu_B = inputTrain.mean(axis=[0, 2, 3, 4])
    mu_B = T.unbroadcast(mu_B, 0)
    var_B = inputTrain.var(axis=[0, 2, 3, 4])
    var_B = T.unbroadcast(var_B, 0)
    var_B_plusE = (var_B + e1)
    mu_RollingAverage = muArray.mean(axis=0)
    # Bessel-style correction using the number of samples per channel.
    effectiveSize = (((inputShapeTrain[0] * inputShapeTrain[2]) * inputShapeTrain[3]) * inputShapeTrain[4])
    var_RollingAverage = ((effectiveSize / (effectiveSize - 1)) * varArray.mean(axis=0))
    var_RollingAverage_plusE = (var_RollingAverage + e1)
    # Normalize, then apply the learned affine transform (train graph).
    normXi_train = ((inputTrain - mu_B.dimshuffle('x', 0, 'x', 'x', 'x')) / T.sqrt(var_B_plusE.dimshuffle('x', 0, 'x', 'x', 'x')))
    normYi_train = ((gBn.dimshuffle('x', 0, 'x', 'x', 'x') * normXi_train) + bBn.dimshuffle('x', 0, 'x', 'x', 'x'))
    # Test graph uses the rolling statistics instead of batch statistics.
    normXi_test = ((inputTest - mu_RollingAverage.dimshuffle('x', 0, 'x', 'x', 'x')) / T.sqrt(var_RollingAverage_plusE.dimshuffle('x', 0, 'x', 'x', 'x')))
    normYi_test = ((gBn.dimshuffle('x', 0, 'x', 'x', 'x') * normXi_test) + bBn.dimshuffle('x', 0, 'x', 'x', 'x'))
    return (normYi_train, normYi_test, gBn, bBn, muArray, varArray, sharedNewMu_B, sharedNewVar_B, mu_B, var_B)
def applySoftMax(inputSample, inputSampleShape, numClasses, softmaxTemperature):
    """Temperature-scaled softmax over the channel axis of a 5D tensor.

    Returns (p_y_given_x, y_pred): the class posteriors with channels back
    in dim 1, and the per-voxel argmax prediction."""
    # (b, classes, x, y, z) -> (b, x, y, z, classes), then flatten to 2D
    # rows so T.nnet.softmax can be applied row-wise.
    channelsLast = inputSample.dimshuffle(0, 2, 3, 4, 1)
    flattened = channelsLast.flatten(1)
    voxelsPerSample = inputSampleShape[2] * inputSampleShape[3] * inputSampleShape[4]
    rows = inputSampleShape[0] * voxelsPerSample
    logits2d = flattened.reshape((rows, numClasses))
    probs2d = T.nnet.softmax(logits2d / softmaxTemperature)
    probsChannelsLast = probs2d.reshape((inputSampleShape[0],
                                         inputSampleShape[2],
                                         inputSampleShape[3],
                                         inputSampleShape[4],
                                         inputSampleShape[1]))
    p_y_given_x = probsChannelsLast.dimshuffle(0, 4, 1, 2, 3)
    y_pred = T.argmax(p_y_given_x, axis=1)
    return (p_y_given_x, y_pred)
def applyBiasToFeatureMaps(bias, featMaps):
    """Add a per-channel bias to 5D feature maps, broadcasting over the
    batch and spatial dimensions."""
    broadcastBias = bias.dimshuffle('x', 0, 'x', 'x', 'x')
    return featMaps + broadcastBias
class parserConfigIni(object):
    # Reads network creation / training / testing parameters from an INI
    # configuration file (Python-2-style ConfigParser; list/number values
    # are json-encoded in the file). Attributes are populated by the
    # readModel*_params methods below.

    def __init__(_self):
        _self.networkName = []

    def readConfigIniFile(_self, fileName, task):
        # Dispatch on task: 0 = create model, 1 = train, 2 = test.
        def createModel():
            print(' --- Creating model (Reading parameters...)')
            _self.readModelCreation_params(fileName)

        def trainModel():
            print(' --- Training model (Reading parameters...)')
            _self.readModelTraining_params(fileName)

        def testModel():
            print(' --- Testing model (Reading parameters...)')
            _self.readModelTesting_params(fileName)
        optionsParser = {0: createModel, 1: trainModel, 2: testModel}
        optionsParser[task]()

    def readModelCreation_params(_self, fileName):
        # Architecture plus training hyper-parameters needed when the
        # network object is first created.
        ConfigIni = ConfigParser.ConfigParser()
        ConfigIni.read(fileName)
        _self.networkName = ConfigIni.get('General', 'networkName')
        _self.folderName = ConfigIni.get('General', 'folderName')
        _self.n_classes = json.loads(ConfigIni.get('CNN_Architecture', 'n_classes'))
        _self.layers = json.loads(ConfigIni.get('CNN_Architecture', 'numkernelsperlayer'))
        _self.kernels = json.loads(ConfigIni.get('CNN_Architecture', 'kernelshapes'))
        _self.intermediate_ConnectedLayers = json.loads(ConfigIni.get('CNN_Architecture', 'intermediateConnectedLayers'))
        _self.pooling_scales = json.loads(ConfigIni.get('CNN_Architecture', 'pooling_scales'))
        _self.dropout_Rates = json.loads(ConfigIni.get('CNN_Architecture', 'dropout_Rates'))
        _self.activationType = json.loads(ConfigIni.get('CNN_Architecture', 'activationType'))
        _self.weight_Initialization_CNN = json.loads(ConfigIni.get('CNN_Architecture', 'weight_Initialization_CNN'))
        _self.weight_Initialization_FCN = json.loads(ConfigIni.get('CNN_Architecture', 'weight_Initialization_FCN'))
        _self.weightsFolderName = ConfigIni.get('CNN_Architecture', 'weights folderName')
        _self.weightsTrainedIdx = json.loads(ConfigIni.get('CNN_Architecture', 'weights trained indexes'))
        _self.batch_size = json.loads(ConfigIni.get('Training Parameters', 'batch_size'))
        _self.sampleSize_Train = json.loads(ConfigIni.get('Training Parameters', 'sampleSize_Train'))
        _self.sampleSize_Test = json.loads(ConfigIni.get('Training Parameters', 'sampleSize_Test'))
        _self.costFunction = json.loads(ConfigIni.get('Training Parameters', 'costFunction'))
        _self.L1_reg_C = json.loads(ConfigIni.get('Training Parameters', 'L1 Regularization Constant'))
        _self.L2_reg_C = json.loads(ConfigIni.get('Training Parameters', 'L2 Regularization Constant'))
        # NOTE(review): the key 'Leraning Rate' is misspelled but must match
        # the .ini files as written — do not "fix" it here alone.
        _self.learning_rate = json.loads(ConfigIni.get('Training Parameters', 'Leraning Rate'))
        _self.momentumType = json.loads(ConfigIni.get('Training Parameters', 'Momentum Type'))
        _self.momentumValue = json.loads(ConfigIni.get('Training Parameters', 'Momentum Value'))
        _self.momentumNormalized = json.loads(ConfigIni.get('Training Parameters', 'momentumNormalized'))
        _self.optimizerType = json.loads(ConfigIni.get('Training Parameters', 'Optimizer Type'))
        _self.rho_RMSProp = json.loads(ConfigIni.get('Training Parameters', 'Rho RMSProp'))
        _self.epsilon_RMSProp = json.loads(ConfigIni.get('Training Parameters', 'Epsilon RMSProp'))
        applyBatchNorm = json.loads(ConfigIni.get('Training Parameters', 'applyBatchNormalization'))
        # The ini stores 1/0; convert to a proper boolean.
        if (applyBatchNorm == 1):
            _self.applyBatchNorm = True
        else:
            _self.applyBatchNorm = False
        _self.BatchNormEpochs = json.loads(ConfigIni.get('Training Parameters', 'BatchNormEpochs'))
        _self.tempSoftMax = json.loads(ConfigIni.get('Training Parameters', 'SoftMax temperature'))

    def readModelTraining_params(_self, fileName):
        # Image folders, case indexes and schedule used during training.
        ConfigIni = ConfigParser.ConfigParser()
        ConfigIni.read(fileName)
        _self.imagesFolder = ConfigIni.get('Training Images', 'imagesFolder')
        _self.imagesFolder_Bottom = ConfigIni.get('Training Images', 'imagesFolder_Bottom')
        _self.GroundTruthFolder = ConfigIni.get('Training Images', 'GroundTruthFolder')
        _self.ROIFolder = ConfigIni.get('Training Images', 'ROIFolder')
        _self.indexesForTraining = json.loads(ConfigIni.get('Training Images', 'indexesForTraining'))
        _self.indexesForValidation = json.loads(ConfigIni.get('Training Images', 'indexesForValidation'))
        _self.imageTypesTrain = json.loads(ConfigIni.get('Training Images', 'imageTypes'))
        _self.numberOfEpochs = json.loads(ConfigIni.get('Training Parameters', 'number of Epochs'))
        _self.numberOfSubEpochs = json.loads(ConfigIni.get('Training Parameters', 'number of SubEpochs'))
        _self.numberOfSamplesSupEpoch = json.loads(ConfigIni.get('Training Parameters', 'number of samples at each SubEpoch Train'))
        _self.firstEpochChangeLR = json.loads(ConfigIni.get('Training Parameters', 'First Epoch Change LR'))
        _self.frequencyChangeLR = json.loads(ConfigIni.get('Training Parameters', 'Frequency Change LR'))
        _self.applyPadding = json.loads(ConfigIni.get('Training Parameters', 'applyPadding'))

    def readModelTesting_params(_self, fileName):
        # Image folders and case indexes used at segmentation time.
        ConfigIni = ConfigParser.ConfigParser()
        ConfigIni.read(fileName)
        _self.imagesFolder = ConfigIni.get('Segmentation Images', 'imagesFolder')
        _self.imagesFolder_Bottom = ConfigIni.get('Segmentation Images', 'imagesFolder_Bottom')
        _self.GroundTruthFolder = ConfigIni.get('Segmentation Images', 'GroundTruthFolder')
        _self.ROIFolder = ConfigIni.get('Segmentation Images', 'ROIFolder')
        _self.imageTypes = json.loads(ConfigIni.get('Segmentation Images', 'imageTypes'))
        _self.indexesToSegment = json.loads(ConfigIni.get('Segmentation Images', 'indexesToSegment'))
        _self.applyPadding = json.loads(ConfigIni.get('Segmentation Images', 'applyPadding'))
def printUsage(error_type):
    """Print an error banner (1 = too few arguments, otherwise missing
    network name) followed by the command-line usage help."""
    if error_type == 1:
        banner = ' ** ERROR!!: Few parameters used.'
    else:
        banner = ' ** ERROR!!: Asked to start with an already created network but its name is not specified.'
    for line in (banner,
                 ' ******** USAGE ******** ',
                 ' --- argv 1: Name of the configIni file.',
                 ' --- argv 2: Network model name'):
        print(line)
def networkSegmentation(argv):
    """Entry point for segmentation.

    argv = [configIniName, networkModelName]; exits with a usage message
    when fewer than two arguments are given."""
    if len(argv) < 2:
        printUsage(1)
        sys.exit()
    configIniName, networkModelName = argv[0], argv[1]
    startTesting(networkModelName, configIniName)
    print(' ***************** SEGMENTATION DONE!!! ***************** ')
def pytest_ignore_collect(path, config):
    """Ignore paths that would otherwise be collected by the doctest
    plugin and lead to ImportError due to missing dependencies.
    """
    return any(path.fnmatch(pattern) for pattern in ignore_test_paths)
class Initializer(object):
    """Base class for parameter tensor initializers.

    An :class:`Initializer` represents a weight initializer used to
    initialize weight parameters in a neural network layer. Subclass it
    when implementing new types of weight initializers.
    """

    def __call__(self, shape):
        """Calling an initializer instance delegates to :meth:`sample`."""
        return self.sample(shape)

    def sample(self, shape):
        """Return a theano.tensor of the given shape with dtype
        theano.config.floatX.

        Parameters
        ----------
        shape : tuple or int
            Integer or tuple specifying the size of the returned matrix.
        """
        raise NotImplementedError()
class Normal(Initializer):
    """Sample initial weights from the Gaussian distribution N(mean, std).

    Parameters
    ----------
    std : float
        Std of initial parameters.
    mean : float
        Mean of initial parameters.
    """

    def __init__(self, std=0.01, mean=0.0):
        self.std = std
        self.mean = mean

    def sample(self, shape):
        draws = get_rng().normal(self.mean, self.std, size=shape)
        return floatX(draws)
class Uniform(Initializer):
    """Sample initial weights from the uniform distribution U(a, b).

    Parameters
    ----------
    range : float or tuple
        When std is None, range determines a, b: a float gives
        U(-range, range), a tuple gives U(range[0], range[1]).
    std : float or None
        If given, weights are sampled from
        U(mean - sqrt(3)*std, mean + sqrt(3)*std).
    mean : float
        See std for description.
    """

    def __init__(self, range=0.01, std=None, mean=0.0):
        if std is not None:
            # Bounds chosen so the uniform draw has the requested std.
            half_width = np.sqrt(3) * std
            a, b = mean - half_width, mean + half_width
        else:
            try:
                a, b = range
            except TypeError:
                a, b = -range, range
        self.range = (a, b)

    def sample(self, shape):
        low, high = self.range
        return floatX(get_rng().uniform(low=low, high=high, size=shape))
class Glorot(Initializer):
    """Glorot (a.k.a. Xavier) weight initialization [1]_.

    Weights are drawn with standard deviation
    ``gain * sqrt(2 / ((fan_in + fan_out)))`` where, for convolutional
    shapes, the fans are multiplied by the receptive-field size.

    Parameters
    ----------
    initializer : lasagne.init.Initializer
        Distribution to sample from; its constructor must accept ``std``.
    gain : float or 'relu'
        Scaling factor for the weights; ``'relu'`` is shorthand for
        ``sqrt(2)``.
    c01b : bool
        Set to ``True`` for c01b-ordered filter tensors (as used by
        ``Conv2DCCLayer`` with ``dimshuffle=False``) so fan-in/fan-out
        are computed on the correct axes.

    References
    ----------
    .. [1] Xavier Glorot and Yoshua Bengio (2010): Understanding the
       difficulty of training deep feedforward neural networks.
       International conference on artificial intelligence and statistics.

    See Also
    --------
    GlorotNormal : Shortcut with Gaussian initializer.
    GlorotUniform : Shortcut with uniform initializer.
    """

    def __init__(self, initializer, gain=1.0, c01b=False):
        self.initializer = initializer
        self.gain = np.sqrt(2) if gain == 'relu' else gain
        self.c01b = c01b

    def sample(self, shape):
        if self.c01b:
            # c01b layout: (channels, rows, columns, filters).
            if len(shape) != 4:
                raise RuntimeError(
                    'If c01b is True, only shapes of length 4 are accepted')
            n1, n2 = shape[0], shape[3]
            receptive_field_size = shape[1] * shape[2]
        else:
            if len(shape) < 2:
                raise RuntimeError(
                    'This initializer only works with shapes of length >= 2')
            n1, n2 = shape[:2]
            receptive_field_size = np.prod(shape[2:])
        std = self.gain * np.sqrt(2.0 / ((n1 + n2) * receptive_field_size))
        return self.initializer(std=std).sample(shape)
class GlorotNormal(Glorot):
    """Glorot initialization backed by the Normal distribution.

    See :class:`Glorot` for a description of the parameters.
    """

    def __init__(self, gain=1.0, c01b=False):
        super(GlorotNormal, self).__init__(Normal, gain=gain, c01b=c01b)
class GlorotUniform(Glorot):
    """Glorot initialization backed by the Uniform distribution.

    See :class:`Glorot` for a description of the parameters.
    """

    def __init__(self, gain=1.0, c01b=False):
        super(GlorotUniform, self).__init__(Uniform, gain=gain, c01b=c01b)
class He(Initializer):
    """He weight initialization [1]_.

    Weights are drawn with standard deviation
    ``gain * sqrt(1 / fan_in)``.

    Parameters
    ----------
    initializer : lasagne.init.Initializer
        Distribution to sample from; its constructor must accept ``std``.
    gain : float or 'relu'
        Scaling factor for the weights; ``'relu'`` is shorthand for
        ``sqrt(2)``.
    c01b : bool
        Set to ``True`` for c01b-ordered filter tensors (as used by
        ``Conv2DCCLayer`` with ``dimshuffle=False``) so the fan-in is
        computed on the correct axes.

    References
    ----------
    .. [1] Kaiming He et al. (2015): Delving deep into rectifiers:
       Surpassing human-level performance on imagenet classification.
       arXiv preprint arXiv:1502.01852.

    See Also
    --------
    HeNormal : Shortcut with Gaussian initializer.
    HeUniform : Shortcut with uniform initializer.
    """

    def __init__(self, initializer, gain=1.0, c01b=False):
        self.initializer = initializer
        self.gain = np.sqrt(2) if gain == 'relu' else gain
        self.c01b = c01b

    def sample(self, shape):
        ndim = len(shape)
        if self.c01b:
            # c01b layout: fan-in spans the first three axes.
            if ndim != 4:
                raise RuntimeError(
                    'If c01b is True, only shapes of length 4 are accepted')
            fan_in = np.prod(shape[:3])
        elif ndim == 2:
            fan_in = shape[0]
        elif ndim > 2:
            # Convolutional weights: fan-in spans all trailing axes.
            fan_in = np.prod(shape[1:])
        else:
            raise RuntimeError(
                'This initializer only works with shapes of length >= 2')
        std = self.gain * np.sqrt(1.0 / fan_in)
        return self.initializer(std=std).sample(shape)
class HeNormal(He):
    """He initialization backed by the Normal distribution.

    See :class:`He` for a description of the parameters.
    """

    def __init__(self, gain=1.0, c01b=False):
        super(HeNormal, self).__init__(Normal, gain=gain, c01b=c01b)
class HeUniform(He):
    """He initialization backed by the Uniform distribution.

    See :class:`He` for a description of the parameters.
    """

    def __init__(self, gain=1.0, c01b=False):
        super(HeUniform, self).__init__(Uniform, gain=gain, c01b=c01b)
class Constant(Initializer):
    """Initializer that fills weights with a single constant value.

    Parameters
    ----------
    val : float
        Value assigned to every weight.
    """

    def __init__(self, val=0.0):
        self.val = val

    def sample(self, shape):
        """Return a floatX array of ``shape`` filled with ``val``."""
        return floatX(np.full(shape, self.val, dtype=float))
class Sparse(Initializer):
    """Initializer producing a sparse weight matrix.

    Each column receives exactly ``int(sparsity * n_inputs)`` non-zero
    entries; all remaining entries are zero.

    Parameters
    ----------
    sparsity : float
        Exact fraction of non-zero values per column.  Larger values give
        less sparsity.
    std : float
        Non-zero weights are sampled from N(0, std).
    """

    def __init__(self, sparsity=0.1, std=0.01):
        self.sparsity = sparsity
        self.std = std

    def sample(self, shape):
        if len(shape) != 2:
            raise RuntimeError(
                'sparse initializer only works with shapes of length 2')
        n_inputs, n_outputs = shape
        weights = floatX(np.zeros(shape))
        n_nonzero = int(self.sparsity * n_inputs)
        for column in range(n_outputs):
            # Pick ``n_nonzero`` distinct row indices for this column.
            row_order = np.arange(n_inputs)
            get_rng().shuffle(row_order)
            chosen = row_order[:n_nonzero]
            weights[chosen, column] = floatX(
                get_rng().normal(0.0, self.std, size=n_nonzero))
        return weights
class Orthogonal(Initializer):
    """Initialize weights as an orthogonal matrix [1]_.

    For n-dimensional shapes with n > 2 the n-1 trailing axes are
    flattened; for convolutional layers this corresponds to the fan-in,
    so the initializer works for both dense and convolutional layers.

    Parameters
    ----------
    gain : float or 'relu'
        Scaling factor for the weights; ``'relu'`` is shorthand for
        ``sqrt(2)``.

    References
    ----------
    .. [1] Saxe, Andrew M., James L. McClelland, and Surya Ganguli.
       "Exact solutions to the nonlinear dynamics of learning in deep
       linear neural networks." arXiv preprint arXiv:1312.6120 (2013).
    """

    def __init__(self, gain=1.0):
        self.gain = np.sqrt(2) if gain == 'relu' else gain

    def sample(self, shape):
        if len(shape) < 2:
            raise RuntimeError('Only shapes of length 2 or more are supported.')
        flat_shape = (shape[0], np.prod(shape[1:]))
        gaussian = get_rng().normal(0.0, 1.0, flat_shape)
        u, _, v = np.linalg.svd(gaussian, full_matrices=False)
        # Exactly one of u, v has the requested (flattened) shape.
        q = u if u.shape == flat_shape else v
        return floatX(self.gain * q.reshape(shape))
class Layer(object):
    """A single layer of a neural network.

    Subclass this when implementing new layer types.  Because each layer
    keeps track of the layer feeding into it, a network's output
    :class:`Layer` instance doubles as a handle on the full network.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape.
    name : a string or None
        An optional name to attach to this layer.
    """

    def __init__(self, incoming, name=None):
        if isinstance(incoming, tuple):
            # Given a shape directly rather than a predecessor layer.
            self.input_shape = incoming
            self.input_layer = None
        else:
            self.input_shape = incoming.output_shape
            self.input_layer = incoming
        self.name = name
        self.params = OrderedDict()
        # Reject impossible shapes early (None means "unknown" and is OK).
        if any(d is not None and d <= 0 for d in self.input_shape):
            raise ValueError(
                'Cannot create Layer with a non-positive input_shape '
                'dimension. input_shape=%r, self.name=%r'
                % (self.input_shape, self.name))

    @property
    def output_shape(self):
        """Shape of this layer's output, derived from its input shape."""
        return self.get_output_shape_for(self.input_shape)

    def get_params(self, **tags):
        """Return the Theano shared variables that parameterize the layer.

        By default every parameter registered via :meth:`add_param` is
        returned, in registration order.  Keyword arguments filter by
        tag: ``tag1=True`` keeps only parameters tagged with ``tag1``;
        ``tag1=False`` keeps only parameters without it.  Commonly used
        tags are ``regularizable`` and ``trainable``.

        Parameters that were set to Theano expressions contribute the
        shared variables contained in the expression (all of which are
        considered to carry the expression's tags).

        Returns
        -------
        list of Theano shared variables
            The variables that parameterize the layer (empty for layers
            without parameters).
        """
        selected = list(self.params.keys())
        required = set(tag for tag, value in tags.items() if value)
        if required:
            selected = [p for p in selected
                        if not (required - self.params[p])]
        forbidden = set(tag for tag, value in tags.items() if not value)
        if forbidden:
            selected = [p for p in selected
                        if not (self.params[p] & forbidden)]
        return utils.collect_shared_vars(selected)

    def get_output_shape_for(self, input_shape):
        """Compute this layer's output shape for a given input shape.

        Parameters
        ----------
        input_shape : tuple
            One element per input dimension; integers or `None`.

        Returns
        -------
        tuple
            One element per output dimension; integers or `None`.

        Notes
        -----
        The default implementation returns the input shape unchanged,
        which is correct for any layer that does not alter the shape
        (e.g. elementwise operations); shape-changing layers override it.
        """
        return input_shape

    def get_output_for(self, input, **kwargs):
        """Propagate ``input`` through this layer (and only this layer).

        Called by the base :func:`lasagne.layers.get_output` to push data
        through a network; subclasses must override it.

        Parameters
        ----------
        input : Theano expression
            The expression to propagate through this layer.

        Returns
        -------
        Theano expression
            The output of this layer given the input.
        """
        raise NotImplementedError

    def add_param(self, spec, shape, name=None, **tags):
        """Register (and possibly initialize) a parameter tensor.

        Registered parameters are stored with their tags in the ordered
        dictionary :attr:`Layer.params` and can be retrieved, optionally
        filtered by tag, via :meth:`get_params`.

        Parameters
        ----------
        spec : Theano shared variable, expression, numpy array or callable
            Initial value, expression or initializer for this parameter
            (see :func:`lasagne.utils.create_param`).
        shape : tuple of int
            Desired shape of the parameter tensor.
        name : str (optional)
            Descriptive name for the variable; prefixed with the layer's
            name if any, as ``'layer_name.param_name'``.  Ignored when
            ``spec`` is already a shared variable or expression.
        **tags (optional)
            Tags to associate with the parameter (``tag1=True``).  The
            tags ``trainable`` and ``regularizable`` are attached by
            default unless explicitly passed as False.

        Returns
        -------
        Theano shared variable or Theano expression
            The resulting parameter variable or expression, typically
            assigned to a layer attribute, e.g.
            ``self.W = self.add_param(W, (2, 3), name='W')``.
        """
        if name is not None and self.name is not None:
            # Qualify the parameter name with the layer's name.
            name = '%s.%s' % (self.name, name)
        param = utils.create_param(spec, shape, name)
        tags['trainable'] = tags.get('trainable', True)
        tags['regularizable'] = tags.get('regularizable', True)
        self.params[param] = set(tag for tag, value in tags.items() if value)
        return param
class MergeLayer(Layer):
    """A layer that aggregates input from multiple layers.

    Subclass this when implementing layer types that obtain their input
    from several layers.

    Parameters
    ----------
    incomings : a list of :class:`Layer` instances or tuples
        The layers feeding into this layer, or expected input shapes.
    name : a string or None
        An optional name to attach to this layer.
    """

    def __init__(self, incomings, name=None):
        # Record, per input, either the declared shape (tuple) or the
        # predecessor layer's output shape; layer slots hold None for
        # shape-only inputs.
        self.input_shapes = []
        self.input_layers = []
        for incoming in incomings:
            if isinstance(incoming, tuple):
                self.input_shapes.append(incoming)
                self.input_layers.append(None)
            else:
                self.input_shapes.append(incoming.output_shape)
                self.input_layers.append(incoming)
        self.name = name
        self.params = OrderedDict()

    @Layer.output_shape.getter
    def output_shape(self):
        """Shape of this layer's output, derived from all input shapes."""
        return self.get_output_shape_for(self.input_shapes)

    def get_output_shape_for(self, input_shapes):
        """Compute the output shape given a list of input shapes.

        Parameters
        ----------
        input_shapes : list of tuple
            One tuple per input (in order); each has one element per
            dimension, integers or `None`.

        Returns
        -------
        tuple
            One element per output dimension; integers or `None`.

        Notes
        -----
        Must be overridden by subclasses with multiple inputs; raises
        `NotImplementedError` by default.
        """
        raise NotImplementedError

    def get_output_for(self, inputs, **kwargs):
        """Propagate ``inputs`` through this layer (and only this layer).

        Called by the base :func:`lasagne.layers.get_output`; must be
        overridden by subclasses with multiple inputs.

        Parameters
        ----------
        inputs : list of Theano expressions
            The expressions to propagate through this layer.

        Returns
        -------
        Theano expression
            The output of this layer given the inputs.
        """
        raise NotImplementedError
def conv_output_length(input_length, filter_size, stride, pad=0):
    """Compute the output size of a convolution along a single axis.

    This computes the length for a 1D convolution; apply it per axis for
    higher-dimensional convolutions.

    Parameters
    ----------
    input_length : int or None
        The size of the input.  `None` (unknown size) propagates to the
        result.

    filter_size : int
        The size of the filter.

    stride : int
        The stride of the convolution operation.

    pad : int, 'full', 'same' or 'valid' (default: 0)
        By default, the convolution is only computed where the input and
        the filter fully overlap (a valid convolution).  When
        ``stride=1``, this yields an output that is smaller than the
        input by ``filter_size - 1``.  The `pad` argument allows you to
        implicitly pad the input with zeros, extending the output size.

        A single integer results in symmetric zero-padding of the given
        size on both borders.

        ``'full'`` pads with one less than the filter size on both
        sides.  This is equivalent to computing the convolution wherever
        the input and the filter overlap by at least one position.

        ``'same'`` pads with half the filter size on both sides (one
        less on the second side for an even filter size).  When
        ``stride=1``, this results in an output size equal to the input
        size.

        ``'valid'`` is an alias for ``0`` (no padding).

    Returns
    -------
    int or None
        The output size corresponding to the given convolution
        parameters, or `None` when `input_length` is `None`.

    Raises
    ------
    ValueError
        When an invalid padding is specified.
    """
    if input_length is None:
        return None
    if pad == 'valid':
        output_length = input_length - filter_size + 1
    elif pad == 'full':
        output_length = input_length + filter_size - 1
    elif pad == 'same':
        output_length = input_length
    elif isinstance(pad, int):
        output_length = input_length + 2 * pad - filter_size + 1
    else:
        raise ValueError('Invalid pad: {0}'.format(pad))
    # Ceil-divide by the stride: the first valid position plus however
    # many further strides fit in the remaining length.
    output_length = (output_length + stride - 1) // stride
    return output_length
class BaseConvLayer(Layer):
    """Base class for `n`-dimensional convolutional layers.

    Performs an `n`-dimensional convolution on its input, optionally
    adding a bias and applying an elementwise nonlinearity.  This class
    cannot be used in a network directly; use a subclass such as
    :class:`Conv1DLayer` or :class:`Conv2DLayer`, which implement
    :meth:`convolve`.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        Input layer or expected input shape, a tensor of 2+`n`
        dimensions: ``(batch_size, num_input_channels, <n spatial
        dimensions>)``.
    num_filters : int
        Number of learnable convolutional filters.
    filter_size : int or iterable of int
        Size of the filters (scalar or `n`-element tuple).
    stride : int or iterable of int
        Stride of the convolution (scalar or `n`-element tuple).
    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        Implicit zero-padding: an int (or `n`-tuple) of symmetric
        padding, ``'full'`` (filter_size - 1 on both sides), ``'same'``
        (output size equals input size for stride 1; odd filter sizes
        only) or ``'valid'`` (alias for 0).
    untie_biases : bool (default: False)
        If False, one bias per channel (``b`` is 1D); if True, separate
        biases per channel and position (``b`` is `n`+1-dimensional).
    W : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the weights, of
        shape ``(num_filters, num_input_channels, <n spatial
        dimensions>)``.
    b : Theano shared variable, expression, numpy array, callable or None
        Initial value, expression or initializer for the biases; None
        for no biases.  Shape ``(num_filters,)`` for tied biases,
        ``(num_filters, <n spatial dimensions>)`` otherwise.
    nonlinearity : callable or None
        Nonlinearity applied to the activations; None makes the layer
        linear.
    flip_filters : bool (default: True)
        Flip the filters before sliding (convolution) rather than not
        (correlation).
    n : int or None
        Dimensionality of the convolution; inferred from the input
        shape when None.
    **kwargs
        Passed on to the :class:`Layer` superclass.

    Attributes
    ----------
    W : Theano shared variable or expression
        The filter weights.
    b : Theano shared variable or expression
        The biases.
    """

    def __init__(self, incoming, num_filters, filter_size, stride=1, pad=0,
                 untie_biases=False, W=init.GlorotUniform(),
                 b=init.Constant(0.0), nonlinearity=nonlinearities.rectify,
                 flip_filters=True, n=None, **kwargs):
        super(BaseConvLayer, self).__init__(incoming, **kwargs)
        # None means "linear layer": use the identity nonlinearity.
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)
        expected_n = len(self.input_shape) - 2
        if n is None:
            n = expected_n
        elif n != expected_n:
            raise ValueError('Tried to create a %dD convolution layer with input shape %r. Expected %d input dimensions (batchsize, channels, %d spatial dimensions).' % (n, self.input_shape, n + 2, n))
        self.n = n
        self.num_filters = num_filters
        self.filter_size = as_tuple(filter_size, n, int)
        self.flip_filters = flip_filters
        self.stride = as_tuple(stride, n, int)
        self.untie_biases = untie_biases
        if pad == 'same':
            # 'same' needs a well-defined centre pixel in the filter.
            if any(s % 2 == 0 for s in self.filter_size):
                raise NotImplementedError(
                    '`same` padding requires odd filter size.')
        if pad == 'valid':
            self.pad = as_tuple(0, n)
        elif pad in ('full', 'same'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, n, int)
        self.W = self.add_param(W, self.get_W_shape(), name='W')
        if b is None:
            self.b = None
        else:
            if self.untie_biases:
                biases_shape = (num_filters,) + self.output_shape[2:]
            else:
                biases_shape = (num_filters,)
            self.b = self.add_param(b, biases_shape, name='b',
                                    regularizable=False)

    def get_W_shape(self):
        """Return the shape of the weight tensor `W`.

        Returns
        -------
        tuple of int
            ``(num_filters, num_input_channels, <n spatial dimensions>)``.
        """
        return ((self.num_filters, self.input_shape[1])
                + self.filter_size)

    def get_output_shape_for(self, input_shape):
        # Normalize string paddings ('full'/'same') to one value per axis.
        if isinstance(self.pad, tuple):
            pad = self.pad
        else:
            pad = (self.pad,) * self.n
        spatial = tuple(
            conv_output_length(length, size, stride, p)
            for length, size, stride, p
            in zip(input_shape[2:], self.filter_size, self.stride, pad))
        return (input_shape[0], self.num_filters) + spatial

    def get_output_for(self, input, **kwargs):
        conved = self.convolve(input, **kwargs)
        if self.b is None:
            activation = conved
        elif self.untie_biases:
            # Per-position biases: broadcast over the batch axis only.
            activation = conved + T.shape_padleft(self.b, 1)
        else:
            # Per-channel biases: broadcast over batch and all spatial axes.
            activation = conved + self.b.dimshuffle(
                ('x', 0) + ('x',) * self.n)
        return self.nonlinearity(activation)

    def convolve(self, input, **kwargs):
        """Symbolically convolve `input` with ``self.W``.

        To be implemented by subclasses; must produce an output of shape
        ``self.output_shape`` without bias or nonlinearity applied.

        Parameters
        ----------
        input : Theano tensor
            The input minibatch to convolve.
        **kwargs
            Any additional keyword arguments from :meth:`get_output_for`.

        Returns
        -------
        Theano tensor
            `input` convolved according to this layer's configuration.
        """
        raise NotImplementedError('BaseConvLayer does not implement the convolve() method. You will want to use a subclass such as Conv2DLayer.')
class Conv1DLayer(BaseConvLayer):
    """1D convolutional layer.

    Performs a 1D convolution on its input and optionally adds a bias
    and applies an elementwise nonlinearity.  The input is a 3D tensor
    of shape ``(batch_size, num_input_channels, input_length)``; see
    :class:`BaseConvLayer` for the parameters shared with the other
    convolutional layers (here ``W`` has shape
    ``(num_filters, num_input_channels, filter_length)`` and untied
    biases have shape ``(num_filters, input_length)``).

    Parameters
    ----------
    convolution : callable
        The convolution implementation to use.  The
        ``lasagne.theano_extensions.conv`` module provides alternative
        1D implementations, since the Theano API only offers a 2D
        convolution; the default is usually fine.  Note that not all
        implementations support all settings for `pad` and `subsample`.

    Attributes
    ----------
    W : Theano shared variable or expression
        The filter weights.
    b : Theano shared variable or expression
        The biases.
    """

    def __init__(self, incoming, num_filters, filter_size, stride=1, pad=0,
                 untie_biases=False, W=init.GlorotUniform(),
                 b=init.Constant(0.0), nonlinearity=nonlinearities.rectify,
                 flip_filters=True, convolution=conv.conv1d_mc0, **kwargs):
        super(Conv1DLayer, self).__init__(incoming, num_filters,
                                          filter_size, stride, pad,
                                          untie_biases, W, b, nonlinearity,
                                          flip_filters, n=1, **kwargs)
        self.convolution = convolution

    def convolve(self, input, **kwargs):
        # Theano spells 'same' padding as border_mode='half'.
        border_mode = 'half' if self.pad == 'same' else self.pad
        return self.convolution(input, self.W, self.input_shape,
                                self.get_W_shape(),
                                subsample=self.stride,
                                border_mode=border_mode,
                                filter_flip=self.flip_filters)
class Conv2DLayer(BaseConvLayer):
    """2D convolutional layer.

    Performs a 2D convolution on a 4D input tensor of shape
    ``(batch_size, num_input_channels, input_rows, input_columns)`` and
    optionally adds a bias and applies an elementwise nonlinearity.

    Parameters
    ----------
    incoming : :class:`Layer` instance or tuple
        The layer feeding into this layer, or the expected input shape (4D).
    num_filters : int
        Number of learnable convolutional filters.
    filter_size : int or iterable of int
        Size of the filters (an int is promoted to a square filter).
    stride : int or iterable of int
        Stride of the convolution operation.
    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        Implicit zero-padding. ``'full'`` pads with ``filter_size - 1`` on
        both sides; ``'same'`` pads with half the filter size (rounded
        down), keeping the spatial size for ``stride=1`` (even filter
        sizes are not supported); ``'valid'`` is an alias for ``0``.
    untie_biases : bool (default: False)
        If False, one bias per channel (`b` is 1D); if True, a separate
        bias for every position in every channel (`b` is a 3D tensor).
    W : Theano shared variable, expression, numpy array or callable
        Initializer for the weights; a 4D tensor with shape
        ``(num_filters, num_input_channels, filter_rows, filter_columns)``.
    b : Theano shared variable, expression, numpy array, callable or None
        Initializer for the biases, or ``None`` for no biases.
    nonlinearity : callable or None
        Elementwise nonlinearity; ``None`` means a linear layer.
    flip_filters : bool (default: True)
        If True, flip the filters (true convolution); otherwise perform a
        correlation.
    convolution : callable
        The convolution implementation to use.
    **kwargs
        Passed on to the :class:`Layer` superclass.

    Attributes
    ----------
    W : Theano shared variable or expression
        The filter weights.
    b : Theano shared variable or expression
        The biases.
    """

    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 pad=0, untie_biases=False, W=init.GlorotUniform(),
                 b=init.Constant(0.0), nonlinearity=nonlinearities.rectify,
                 flip_filters=True, convolution=T.nnet.conv2d, **kwargs):
        super(Conv2DLayer, self).__init__(
            incoming, num_filters, filter_size, stride, pad, untie_biases,
            W, b, nonlinearity, flip_filters, n=2, **kwargs)
        self.convolution = convolution

    def convolve(self, input, **kwargs):
        # Theano spells 'same' padding as border_mode='half'.
        mode = 'half' if self.pad == 'same' else self.pad
        return self.convolution(input, self.W, self.input_shape,
                                self.get_W_shape(), subsample=self.stride,
                                border_mode=mode,
                                filter_flip=self.flip_filters)
class DenseLayer(Layer):
    """A fully connected layer.

    Computes ``nonlinearity(input.dot(W) + b)``. If the input has more
    than two axes, the trailing axes are flattened first (so no explicit
    :class:`FlattenLayer` is needed after a convolutional layer).

    Parameters
    ----------
    incoming : :class:`Layer` instance or tuple
        The layer feeding into this layer, or the expected input shape.
    num_units : int
        The number of units of the layer.
    W : Theano shared variable, expression, numpy array or callable
        Initializer for the weights; a matrix of shape
        ``(num_inputs, num_units)``.
    b : Theano shared variable, expression, numpy array, callable or None
        Initializer for the biases (shape ``(num_units,)``), or ``None``
        for no biases.
    nonlinearity : callable or None
        Elementwise nonlinearity; ``None`` means a linear layer.

    Examples
    --------
    >>> from lasagne.layers import InputLayer, DenseLayer
    >>> l_in = InputLayer((100, 20))
    >>> l1 = DenseLayer(l_in, num_units=50)
    """

    def __init__(self, incoming, num_units, W=init.GlorotUniform(),
                 b=init.Constant(0.0), nonlinearity=nonlinearities.rectify,
                 **kwargs):
        super(DenseLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
        self.num_units = num_units
        # Trailing axes are flattened, so the fan-in is their product.
        num_inputs = int(np.prod(self.input_shape[1:]))
        self.W = self.add_param(W, (num_inputs, num_units), name='W')
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_units,), name='b',
                                    regularizable=False)

    def get_output_shape_for(self, input_shape):
        return (input_shape[0], self.num_units)

    def get_output_for(self, input, **kwargs):
        if input.ndim > 2:
            # Collapse all trailing axes into one feature dimension.
            input = input.flatten(2)
        activation = T.dot(input, self.W)
        if self.b is not None:
            # Broadcast the bias over the batch dimension.
            activation = activation + self.b.dimshuffle('x', 0)
        return self.nonlinearity(activation)
class NINLayer(Layer):
    """Network-in-network layer.

    Like :class:`DenseLayer`, but broadcasting across all trailing
    dimensions beyond the second. This is equivalent to a convolution
    with filter size 1 over any number of trailing dimensions, so it can
    implement 1D, 2D, 3D, ... size-1 convolutions.

    Parameters
    ----------
    incoming : :class:`Layer` instance or tuple
        The layer feeding into this layer, or the expected input shape.
    num_units : int
        The number of units (output channels) of the layer.
    untie_biases : bool
        If False, a single bias vector as in a dense layer; if True, a
        separate bias for each trailing position.
    W : Theano shared variable, expression, numpy array or callable
        Initializer for the weights; a matrix of shape
        ``(num_input_channels, num_units)``.
    b : Theano shared variable, expression, numpy array, callable or None
        Initializer for the biases; shape ``(num_units,)`` when tied,
        ``(num_units,) + input_shape[2:]`` when untied, or ``None`` for
        no biases.
    nonlinearity : callable or None
        Elementwise nonlinearity; ``None`` means a linear layer.

    Examples
    --------
    >>> from lasagne.layers import InputLayer, NINLayer
    >>> l_in = InputLayer((100, 20, 10, 3))
    >>> l1 = NINLayer(l_in, num_units=5)

    References
    ----------
    .. [1] Lin, Min, Qiang Chen, and Shuicheng Yan (2013):
           Network in network. arXiv preprint arXiv:1312.4400.
    """

    def __init__(self, incoming, num_units, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.0),
                 nonlinearity=nonlinearities.rectify, **kwargs):
        super(NINLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearity if nonlinearity is not None
                             else nonlinearities.identity)
        self.num_units = num_units
        self.untie_biases = untie_biases
        num_input_channels = self.input_shape[1]
        self.W = self.add_param(W, (num_input_channels, num_units), name='W')
        if b is None:
            self.b = None
        else:
            if self.untie_biases:
                biases_shape = (num_units,) + self.output_shape[2:]
            else:
                biases_shape = (num_units,)
            self.b = self.add_param(b, biases_shape, name='b',
                                    regularizable=False)

    def get_output_shape_for(self, input_shape):
        return (input_shape[0], self.num_units) + input_shape[2:]

    def get_output_for(self, input, **kwargs):
        # Contract over the channel axis, then move the units axis back
        # to position 1 (tensordot puts it in front).
        out_r = T.tensordot(self.W, input, axes=[[0], [1]])
        trailing = range(2, input.ndim)
        out = out_r.dimshuffle(1, 0, *trailing)
        if self.b is None:
            return self.nonlinearity(out)
        if self.untie_biases:
            # One bias per trailing position: broadcast over batch only.
            bias_dims = range(1, input.ndim - 1)
        else:
            # Tied biases: broadcast over batch and all trailing axes.
            bias_dims = ['x'] * (input.ndim - 2)
        b_shuffled = self.b.dimshuffle('x', 0, *bias_dims)
        return self.nonlinearity(out + b_shuffled)
class Pool2DDNNLayer(Layer):
    """2D pooling layer backed by cuDNN.

    Performs 2D mean- or max-pooling over the two trailing axes of a 4D
    input tensor, using ``theano.sandbox.cuda.dnn.dnn_pool`` directly.
    Drop-in replacement for :class:`lasagne.layers.MaxPool2DLayer`,
    except that ``ignore_border=False`` is not supported.

    Parameters
    ----------
    incoming : :class:`Layer` instance or tuple
        The layer feeding into this layer, or the expected input shape.
    pool_size : integer or iterable
        Pooling region length per dimension (an int is promoted to a
        square region).
    stride : integer, iterable or None
        Strides between successive pooling regions; ``None`` means
        ``stride = pool_size``.
    pad : integer or iterable
        Padding added on each side of the input per dimension; each value
        must be less than the corresponding stride.
    ignore_border : bool (default: True)
        Must be True; exists only for interface compatibility with
        :class:`lasagne.layers.MaxPool2DLayer`.
    mode : string
        One of 'max', 'average_inc_pad' or 'average_exc_pad'.
    **kwargs
        Passed on to the :class:`Layer` superclass.
    """

    def __init__(self, incoming, pool_size, stride=None, pad=(0, 0),
                 ignore_border=True, mode='max', **kwargs):
        super(Pool2DDNNLayer, self).__init__(incoming, **kwargs)
        if len(self.input_shape) != 4:
            raise ValueError(
                'Tried to create a 2D pooling layer with input shape %r. '
                'Expected 4 input dimensions (batchsize, channels, '
                '2 spatial dimensions).' % (self.input_shape,))
        self.pool_size = as_tuple(pool_size, 2)
        self.stride = (self.pool_size if stride is None
                       else as_tuple(stride, 2))
        self.pad = as_tuple(pad, 2)
        self.mode = mode
        if not ignore_border:
            raise NotImplementedError('Pool2DDNNLayer does not support '
                                      'ignore_border=False.')

    def get_output_shape_for(self, input_shape):
        shape = list(input_shape)
        # Pool each of the two trailing (spatial) axes independently.
        for i in (0, 1):
            shape[2 + i] = pool_output_length(input_shape[2 + i],
                                              pool_size=self.pool_size[i],
                                              stride=self.stride[i],
                                              pad=self.pad[i],
                                              ignore_border=True)
        return tuple(shape)

    def get_output_for(self, input, **kwargs):
        return dnn.dnn_pool(input, self.pool_size, self.stride, self.mode,
                            self.pad)
class MaxPool2DDNNLayer(Pool2DDNNLayer):
    """2D max-pooling layer.

    A :class:`Pool2DDNNLayer` with ``mode`` fixed to ``'max'``, provided
    for compatibility with other ``MaxPool2DLayer`` classes.
    """

    def __init__(self, incoming, pool_size, stride=None, pad=(0, 0),
                 ignore_border=True, **kwargs):
        super(MaxPool2DDNNLayer, self).__init__(incoming, pool_size,
                                                stride, pad, ignore_border,
                                                mode='max', **kwargs)
class Pool3DDNNLayer(Layer):
    """3D pooling layer backed by cuDNN.

    Performs 3D mean- or max-pooling over the three trailing axes of a 5D
    input tensor, using ``theano.sandbox.cuda.dnn.dnn_pool`` directly.
    ``ignore_border=False`` is not supported.

    Parameters
    ----------
    incoming : :class:`Layer` instance or tuple
        The layer feeding into this layer, or the expected input shape.
    pool_size : integer or iterable
        Pooling region length per dimension (an int is promoted to a
        cubic region; an iterable should have three elements).
    stride : integer, iterable or None
        Strides between successive pooling regions; ``None`` means
        ``stride = pool_size``.
    pad : integer or iterable
        Padding added on each side of the input per dimension; each value
        must be less than the corresponding stride.
    ignore_border : bool (default: True)
        Must be True; exists only for interface compatibility with
        :class:`lasagne.layers.MaxPool2DLayer`.
    mode : string
        One of 'max', 'average_inc_pad' or 'average_exc_pad'.
    **kwargs
        Passed on to the :class:`Layer` superclass.
    """

    def __init__(self, incoming, pool_size, stride=None, pad=(0, 0, 0),
                 ignore_border=True, mode='max', **kwargs):
        super(Pool3DDNNLayer, self).__init__(incoming, **kwargs)
        if len(self.input_shape) != 5:
            raise ValueError(
                'Tried to create a 3D pooling layer with input shape %r. '
                'Expected 5 input dimensions (batchsize, channels, '
                '3 spatial dimensions).' % (self.input_shape,))
        self.pool_size = as_tuple(pool_size, 3)
        self.stride = (self.pool_size if stride is None
                       else as_tuple(stride, 3))
        self.pad = as_tuple(pad, 3)
        self.mode = mode
        if not ignore_border:
            raise NotImplementedError('Pool3DDNNLayer does not support '
                                      'ignore_border=False.')

    def get_output_shape_for(self, input_shape):
        shape = list(input_shape)
        # Pool each of the three trailing (spatial) axes independently.
        for i in (0, 1, 2):
            shape[2 + i] = pool_output_length(input_shape[2 + i],
                                              pool_size=self.pool_size[i],
                                              stride=self.stride[i],
                                              pad=self.pad[i],
                                              ignore_border=True)
        return tuple(shape)

    def get_output_for(self, input, **kwargs):
        return dnn.dnn_pool(input, self.pool_size, self.stride, self.mode,
                            self.pad)
class MaxPool3DDNNLayer(Pool3DDNNLayer):
    """3D max-pooling layer.

    A :class:`Pool3DDNNLayer` with ``mode`` fixed to ``'max'``, provided
    for consistency with the ``MaxPool2DLayer`` classes.
    """

    def __init__(self, incoming, pool_size, stride=None, pad=(0, 0, 0),
                 ignore_border=True, **kwargs):
        super(MaxPool3DDNNLayer, self).__init__(incoming, pool_size,
                                                stride, pad, ignore_border,
                                                mode='max', **kwargs)
class Conv2DDNNLayer(BaseConvLayer):
    """2D convolutional layer backed by cuDNN.

    Performs a 2D convolution on a 4D input tensor of shape
    ``(batch_size, num_input_channels, input_rows, input_columns)`` and
    optionally adds a bias and applies an elementwise nonlinearity, using
    ``theano.sandbox.cuda.dnn.dnn_conv`` directly.

    Parameters
    ----------
    incoming : :class:`Layer` instance or tuple
        The layer feeding into this layer, or the expected input shape (4D).
    num_filters : int
        Number of learnable convolutional filters.
    filter_size : int or iterable of int
        Size of the filters (an int is promoted to a square filter).
    stride : int or iterable of int
        Stride of the convolution operation.
    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        Implicit zero-padding. ``'full'`` pads with ``filter_size - 1`` on
        both sides; ``'same'`` pads with half the filter size (rounded
        down), keeping the spatial size for ``stride=1`` (even filter
        sizes are not supported); ``'valid'`` is an alias for ``0``.
    untie_biases : bool (default: False)
        If False, one bias per channel (`b` is 1D); if True, a separate
        bias for every position in every channel (`b` is a 3D tensor).
    W : Theano shared variable, expression, numpy array or callable
        Initializer for the weights; a 4D tensor with shape
        ``(num_filters, num_input_channels, filter_rows, filter_columns)``.
    b : Theano shared variable, expression, numpy array, callable or None
        Initializer for the biases, or ``None`` for no biases.
    nonlinearity : callable or None
        Elementwise nonlinearity; ``None`` means a linear layer.
    flip_filters : bool (default: False)
        If True, flip the filters (true convolution); disabled by default
        since flipping adds overhead and rarely matters for learned
        filters. Set to True when loading weights learned by a regular
        :class:`lasagne.layers.Conv2DLayer`.
    **kwargs
        Passed on to the :class:`Layer` superclass.

    Attributes
    ----------
    W : Theano shared variable or expression
        The filter weights.
    b : Theano shared variable or expression
        The biases.
    """

    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 pad=0, untie_biases=False, W=init.GlorotUniform(),
                 b=init.Constant(0.0), nonlinearity=nonlinearities.rectify,
                 flip_filters=False, **kwargs):
        super(Conv2DDNNLayer, self).__init__(
            incoming, num_filters, filter_size, stride, pad, untie_biases,
            W, b, nonlinearity, flip_filters, n=2, **kwargs)

    def convolve(self, input, **kwargs):
        # cuDNN distinguishes true convolution from cross-correlation.
        conv_mode = 'conv' if self.flip_filters else 'cross'
        pad = self.pad
        if pad == 'same':
            # 'same' is expressed as explicit half-filter padding.
            pad = tuple(s // 2 for s in self.filter_size)
        return dnn.dnn_conv(img=input, kerns=self.W, subsample=self.stride,
                            border_mode=pad, conv_mode=conv_mode)
class Conv3DDNNLayer(BaseConvLayer):
    """3D convolutional layer backed by cuDNN.

    Performs a 3D convolution on a 5D input tensor of shape
    ``(batch_size, num_input_channels, input_rows, input_columns,
    input_depth)`` and optionally adds a bias and applies an elementwise
    nonlinearity, using ``theano.sandbox.cuda.dnn.dnn_conv3d`` directly.

    Parameters
    ----------
    incoming : :class:`Layer` instance or tuple
        The layer feeding into this layer, or the expected input shape (5D).
    num_filters : int
        Number of learnable convolutional filters.
    filter_size : int or iterable of int
        Size of the filters (an int is promoted to a cubic filter).
    stride : int or iterable of int
        Stride of the convolution operation.
    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        Implicit zero-padding. ``'full'`` pads with ``filter_size - 1`` on
        both sides; ``'same'`` pads with half the filter size (rounded
        down), keeping the spatial size for ``stride=1`` (even filter
        sizes are not supported); ``'valid'`` is an alias for ``0``.
    untie_biases : bool (default: False)
        If False, one bias per channel (`b` is 1D); if True, a separate
        bias for every position in every channel (`b` is a 4D tensor).
    W : Theano shared variable, expression, numpy array or callable
        Initializer for the weights; a 5D tensor with shape
        ``(num_filters, num_input_channels, filter_rows, filter_columns,
        filter_depth)``.
    b : Theano shared variable, expression, numpy array, callable or None
        Initializer for the biases, or ``None`` for no biases.
    nonlinearity : callable or None
        Elementwise nonlinearity; ``None`` means a linear layer.
    flip_filters : bool (default: False)
        If True, flip the filters (true convolution); disabled by default
        since flipping adds overhead and rarely matters for learned
        filters. Take care when computing predictions with pre-trained
        weights that may need flipping.
    **kwargs
        Passed on to the :class:`Layer` superclass.

    Attributes
    ----------
    W : Theano shared variable or expression
        The filter weights.
    b : Theano shared variable or expression
        The biases.
    """

    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1, 1),
                 pad=0, untie_biases=False, W=init.GlorotUniform(),
                 b=init.Constant(0.0), nonlinearity=nonlinearities.rectify,
                 flip_filters=False, **kwargs):
        super(Conv3DDNNLayer, self).__init__(
            incoming, num_filters, filter_size, stride, pad, untie_biases,
            W, b, nonlinearity, flip_filters, n=3, **kwargs)

    def convolve(self, input, **kwargs):
        # cuDNN distinguishes true convolution from cross-correlation.
        conv_mode = 'conv' if self.flip_filters else 'cross'
        pad = self.pad
        if pad == 'same':
            # 'same' is expressed as explicit half-filter padding.
            pad = tuple(s // 2 for s in self.filter_size)
        return dnn.dnn_conv3d(img=input, kerns=self.W,
                              subsample=self.stride, border_mode=pad,
                              conv_mode=conv_mode)
class EmbeddingLayer(Layer):
    """A layer for word embeddings.

    The input should be an integer-typed tensor variable; each integer
    indexes a row of the embedding matrix `W`, appending a trailing axis
    of size `output_size` to the input shape.

    Parameters
    ----------
    incoming : :class:`Layer` instance or tuple
        The layer feeding into this layer, or the expected input shape.
    input_size : int
        The number of different embeddings; the last embedding has index
        ``input_size - 1``.
    output_size : int
        The size of each embedding.
    W : Theano shared variable, expression, numpy array or callable
        Initializer for the embedding matrix; shape
        ``(input_size, output_size)``.

    Examples
    --------
    >>> from lasagne.layers import EmbeddingLayer, InputLayer, get_output
    >>> import theano
    >>> x = T.imatrix()
    >>> l_in = InputLayer((3, ))
    >>> W = np.arange(3*5).reshape((3, 5)).astype('float32')
    >>> l1 = EmbeddingLayer(l_in, input_size=3, output_size=5, W=W)
    >>> output = get_output(l1, x)
    >>> f = theano.function([x], output)
    >>> x_test = np.array([[0, 2], [1, 2]]).astype('int32')
    >>> f(x_test)
    array([[[  0.,   1.,   2.,   3.,   4.],
            [ 10.,  11.,  12.,  13.,  14.]],
    <BLANKLINE>
           [[  5.,   6.,   7.,   8.,   9.],
            [ 10.,  11.,  12.,  13.,  14.]]], dtype=float32)
    """

    def __init__(self, incoming, input_size, output_size, W=init.Normal(),
                 **kwargs):
        super(EmbeddingLayer, self).__init__(incoming, **kwargs)
        self.input_size = input_size
        self.output_size = output_size
        self.W = self.add_param(W, (input_size, output_size), name='W')

    def get_output_shape_for(self, input_shape):
        # Each index is replaced by an embedding vector.
        return input_shape + (self.output_size,)

    def get_output_for(self, input, **kwargs):
        # Integer advanced indexing selects one row of W per input element.
        return self.W[input]
def get_all_layers(layer, treat_as_input=None):
    """Gather all layers below one or more given :class:`Layer` instances.

    Collects every layer feeding (directly or indirectly) into the given
    layer(s), including the given layer(s) themselves. The result is in
    topological order: every layer is preceded by all layers its input
    depends on.

    Parameters
    ----------
    layer : Layer or list
        The :class:`Layer` instance(s) whose ancestry to gather.
    treat_as_input : None or iterable
        :class:`Layer` instances to treat as inputs with nothing feeding
        into them. They appear in the result, but their incoming layers
        are not collected (unless required by other layers).

    Returns
    -------
    list
        All collected :class:`Layer` instances, topologically ordered.

    Examples
    --------
    >>> from lasagne.layers import InputLayer, DenseLayer
    >>> l_in = InputLayer((100, 20))
    >>> l1 = DenseLayer(l_in, num_units=50)
    >>> get_all_layers(l1) == [l_in, l1]
    True
    >>> l2 = DenseLayer(l_in, num_units=10)
    >>> get_all_layers([l2, l1]) == [l_in, l2, l1]
    True
    >>> get_all_layers([l1, l2]) == [l_in, l1, l2]
    True
    >>> l3 = DenseLayer(l2, num_units=20)
    >>> get_all_layers(l3) == [l_in, l2, l3]
    True
    >>> get_all_layers(l3, treat_as_input=[l2]) == [l2, l3]
    True
    """
    # Accept either a single layer or an iterable of layers.
    try:
        pending = deque(layer)
    except TypeError:
        pending = deque([layer])

    # `expanded` holds layers whose inputs were already pushed (or that we
    # must not expand); `emitted` holds layers already in the result.
    expanded = set()
    emitted = set()
    ordered = []
    if treat_as_input is not None:
        expanded.update(treat_as_input)

    # Iterative depth-first traversal: a layer is left at the front of the
    # queue while its inputs are processed, and emitted when it comes back
    # to the front a second time.
    while pending:
        current = pending[0]
        if current is None:
            # Some layers use None placeholders for unused inputs.
            pending.popleft()
        elif current not in expanded:
            expanded.add(current)
            if hasattr(current, 'input_layers'):
                # extendleft reverses, so pre-reverse to keep input order.
                pending.extendleft(reversed(current.input_layers))
            elif hasattr(current, 'input_layer'):
                pending.appendleft(current.input_layer)
        else:
            pending.popleft()
            if current not in emitted:
                ordered.append(current)
                emitted.add(current)

    return ordered
def get_output(layer_or_layers, inputs=None, **kwargs):
    """
    Computes the output of the network at one or more given layers.
    Optionally, you can define the input(s) to propagate through the network
    instead of using the input variable(s) associated with the network's
    input layer(s).

    Parameters
    ----------
    layer_or_layers : Layer or list
        the :class:`Layer` instance for which to compute the output
        expressions, or a list of :class:`Layer` instances.

    inputs : None, Theano expression, numpy array, or dict
        If None, uses the input variables associated with the
        :class:`InputLayer` instances.
        If a Theano expression, this defines the input for a single
        :class:`InputLayer` instance. Will throw a ValueError if there
        are multiple :class:`InputLayer` instances.
        If a numpy array, this will be wrapped as a Theano constant
        and used just like a Theano expression.
        If a dictionary, any :class:`Layer` instance (including the
        input layers) can be mapped to a Theano expression or numpy
        array to use instead of its regular output.

    Returns
    -------
    output : Theano expression or list
        the output of the given layer(s) for the given network input

    Notes
    -----
    Depending on your network architecture, `get_output([l1, l2])` may
    be crucially different from `[get_output(l1), get_output(l2)]`. Only
    the former ensures that the output expressions depend on the same
    intermediate expressions. For example, when `l1` and `l2` depend on
    a common dropout layer, the former will use the same dropout mask for
    both, while the latter will use two different dropout masks.
    """
    # Imported here (not at module level) to avoid circular imports.
    from .input import InputLayer
    from .base import MergeLayer
    # Layers appearing as keys of an `inputs` dict are treated as graph
    # inputs: their own incoming layers are not traversed.
    treat_as_input = (inputs.keys() if isinstance(inputs, dict) else [])
    # All layers the requested outputs depend on, in topological order.
    all_layers = get_all_layers(layer_or_layers, treat_as_input)
    # Seed the layer -> expression map with the input variables of every
    # InputLayer that was not explicitly overridden via `inputs`.
    all_outputs = dict(((layer, layer.input_var)
                        for layer in all_layers
                        if (isinstance(layer, InputLayer) and
                            (layer not in treat_as_input))))
    # Merge in user-supplied expressions, wrapping plain numpy arrays as
    # Theano constants where necessary.
    if isinstance(inputs, dict):
        all_outputs.update(((layer, utils.as_theano_expression(expr))
                            for (layer, expr) in inputs.items()))
    elif (inputs is not None):
        # A single non-dict input is only unambiguous when the network has
        # exactly one input layer.
        if (len(all_outputs) > 1):
            raise ValueError('get_output() was called with a single input expression on a network with multiple input layers. Please call it with a dictionary of input expressions instead.')
        for input_layer in all_outputs:
            all_outputs[input_layer] = utils.as_theano_expression(inputs)
    # Walk the layers in topological order so that each layer's incoming
    # expressions are already present in `all_outputs` when it is reached.
    for layer in all_layers:
        if (layer not in all_outputs):
            try:
                if isinstance(layer, MergeLayer):
                    layer_inputs = [all_outputs[input_layer]
                                    for input_layer in layer.input_layers]
                else:
                    layer_inputs = all_outputs[layer.input_layer]
            except KeyError:
                # A missing entry means a dependency was cut off by
                # `treat_as_input` without a replacement expression.
                raise ValueError(('get_output() was called without giving an input expression for the free-floating layer %r. Please call it with a dictionary mapping this layer to an input expression.' % layer))
            all_outputs[layer] = layer.get_output_for(layer_inputs, **kwargs)
    # Mirror the argument form: list in -> list out, single in -> single out.
    try:
        return [all_outputs[layer] for layer in layer_or_layers]
    except TypeError:
        return all_outputs[layer_or_layers]
def get_output_shape(layer_or_layers, input_shapes=None):
    """Compute the output shape(s) of the network at the given layer(s).

    Parameters
    ----------
    layer_or_layers : Layer or list
        the :class:`Layer` instance(s) for which to compute output shapes.

    input_shapes : None, tuple, or dict
        If None, the shapes declared on the :class:`InputLayer` instances
        are used. A single tuple overrides the shape of a single input
        layer (a ValueError is raised for multi-input networks). A dict
        may map any :class:`Layer` instance to a shape tuple to use in
        place of its regular output shape.

    Returns
    -------
    tuple or list
        the output shape(s) of the given layer(s).
    """
    # Without overrides, every layer already caches its output shape.
    if input_shapes is None or input_shapes == {}:
        try:
            return [layer.output_shape for layer in layer_or_layers]
        except TypeError:
            return layer_or_layers.output_shape
    # Imported here (not at module level) to avoid circular imports.
    from .input import InputLayer
    from .base import MergeLayer
    if isinstance(input_shapes, dict):
        treat_as_input = input_shapes.keys()
    else:
        treat_as_input = []
    all_layers = get_all_layers(layer_or_layers, treat_as_input)
    # Seed the layer -> shape map with the declared shapes of every input
    # layer that was not explicitly overridden.
    shape_map = dict((layer, layer.shape) for layer in all_layers
                     if isinstance(layer, InputLayer) and
                     layer not in treat_as_input)
    if isinstance(input_shapes, dict):
        shape_map.update(input_shapes)
    else:
        # A single tuple only makes sense for a single-input network.
        if len(shape_map) > 1:
            raise ValueError('get_output_shape() was called with a single input shape on a network with multiple input layers. Please call it with a dictionary of input shapes instead.')
        for input_layer in shape_map:
            shape_map[input_layer] = input_shapes
    # Propagate shapes through the network in topological order.
    for layer in all_layers:
        if layer in shape_map:
            continue
        if isinstance(layer, MergeLayer):
            incoming_shapes = [shape_map[input_layer]
                               for input_layer in layer.input_layers]
        else:
            incoming_shapes = shape_map[layer.input_layer]
        shape_map[layer] = layer.get_output_shape_for(incoming_shapes)
    # Mirror the argument form: list in -> list out, single in -> single out.
    try:
        return [shape_map[layer] for layer in layer_or_layers]
    except TypeError:
        return shape_map[layer_or_layers]
def get_all_params(layer, **tags):
    """Return the Theano shared variables parameterizing the network.

    Gathers the parameters of all layers below (and including) the given
    layer(s), in the order produced by :func:`get_all_layers`, with
    duplicates removed. Keyword tags filter the result: ``tag1=True``
    keeps only parameters tagged with ``tag1``; ``tag1=False`` keeps only
    parameters without that tag. Common tags are ``trainable`` and
    ``regularizable``.

    Parameters
    ----------
    layer : Layer or list
        The :class:`Layer` instance(s) whose network to collect
        parameters from.

    **tags (optional)
        Tag filters as described above.

    Returns
    -------
    params : list
        A list of Theano shared variables representing the parameters.

    Notes
    -----
    If a layer's parameter was set to a Theano expression rather than a
    shared variable, the shared variables involved in that expression are
    returned instead of the expression itself; tag filtering treats all
    variables within one expression as identically tagged.
    """
    collected = []
    for current in get_all_layers(layer):
        collected.extend(current.get_params(**tags))
    # Preserve order while dropping parameters shared between layers.
    return utils.unique(collected)
def count_params(layer, **tags): "\n This function counts all parameters (i.e., the number of scalar\n values) of all layers below one or more given :class:`Layer` instances,\n including the layer(s) itself.\n\n This is useful to compare the capacity of various network architectures.\n All parameters returned by the :class:`Layer`s' `get_params` methods are\n counted.\n\n Parameters\n ----------\n layer : Layer or list\n The :class:`Layer` instance for which to count the parameters, or a\n list of :class:`Layer` instances.\n\n **tags (optional)\n tags can be specified to filter the list of parameter variables that\n will be included in the count. Specifying ``tag1=True``\n will limit the list to parameters that are tagged with ``tag1``.\n Specifying ``tag1=False`` will limit the list to parameters that\n are not tagged with ``tag1``. Commonly used tags are\n ``regularizable`` and ``trainable``.\n\n Returns\n -------\n int\n The total number of learnable parameters.\n\n Examples\n --------\n >>> from lasagne.layers import InputLayer, DenseLayer\n >>> l_in = InputLayer((100, 20))\n >>> l1 = DenseLayer(l_in, num_units=50)\n >>> param_count = count_params(l1)\n >>> param_count\n 1050\n >>> param_count == 20 * 50 + 50 # 20 input * 50 units + 50 biases\n True\n " params = get_all_params(layer, **tags) shapes = [p.get_value().shape for p in params] counts = [np.prod(shape) for shape in shapes] return sum(counts)
def get_all_param_values(layer, **tags): '\n This function returns the values of the parameters of all layers below one\n or more given :class:`Layer` instances, including the layer(s) itself.\n\n This function can be used in conjunction with set_all_param_values to save\n and restore model parameters.\n\n Parameters\n ----------\n layer : Layer or list\n The :class:`Layer` instance for which to gather all parameter values,\n or a list of :class:`Layer` instances.\n\n **tags (optional)\n tags can be specified to filter the list. Specifying ``tag1=True``\n will limit the list to parameters that are tagged with ``tag1``.\n Specifying ``tag1=False`` will limit the list to parameters that\n are not tagged with ``tag1``. Commonly used tags are\n ``regularizable`` and ``trainable``.\n\n Returns\n -------\n list of numpy.array\n A list of numpy arrays representing the parameter values.\n\n Examples\n --------\n >>> from lasagne.layers import InputLayer, DenseLayer\n >>> l_in = InputLayer((100, 20))\n >>> l1 = DenseLayer(l_in, num_units=50)\n >>> all_param_values = get_all_param_values(l1)\n >>> (all_param_values[0] == l1.W.get_value()).all()\n True\n >>> (all_param_values[1] == l1.b.get_value()).all()\n True\n ' params = get_all_params(layer, **tags) return [p.get_value() for p in params]
def set_all_param_values(layer, values, **tags): "\n Given a list of numpy arrays, this function sets the parameters of all\n layers below one or more given :class:`Layer` instances (including the\n layer(s) itself) to the given values.\n\n This function can be used in conjunction with get_all_param_values to save\n and restore model parameters.\n\n Parameters\n ----------\n layer : Layer or list\n The :class:`Layer` instance for which to set all parameter values, or a\n list of :class:`Layer` instances.\n\n values : list of numpy.array\n A list of numpy arrays representing the parameter values, must match\n the number of parameters.\n Every parameter's shape must match the shape of its new value.\n\n **tags (optional)\n tags can be specified to filter the list of parameters to be set.\n Specifying ``tag1=True`` will limit the list to parameters that are\n tagged with ``tag1``.\n Specifying ``tag1=False`` will limit the list to parameters that\n are not tagged with ``tag1``. Commonly used tags are\n ``regularizable`` and ``trainable``.\n\n Raises\n ------\n ValueError\n If the number of values is not equal to the number of params, or\n if a parameter's shape does not match the shape of its new value.\n\n Examples\n --------\n >>> from lasagne.layers import InputLayer, DenseLayer\n >>> l_in = InputLayer((100, 20))\n >>> l1 = DenseLayer(l_in, num_units=50)\n >>> all_param_values = get_all_param_values(l1)\n >>> # all_param_values is now [l1.W.get_value(), l1.b.get_value()]\n >>> # ...\n >>> set_all_param_values(l1, all_param_values)\n >>> # the parameter values are restored.\n " params = get_all_params(layer, **tags) if (len(params) != len(values)): raise ValueError(('mismatch: got %d values to set %d parameters' % (len(values), len(params)))) for (p, v) in zip(params, values): if (p.get_value().shape != v.shape): raise ValueError(('mismatch: parameter has shape %r but value to set has shape %r' % (p.get_value().shape, v.shape))) else: p.set_value(v)
class InputLayer(Layer): '\n This layer holds a symbolic variable that represents a network input. A\n variable can be specified when the layer is instantiated, else it is\n created.\n\n Parameters\n ----------\n shape : tuple of `int` or `None` elements\n The shape of the input. Any element can be `None` to indicate that the\n size of that dimension is not fixed at compile time.\n\n input_var : Theano symbolic variable or `None` (default: `None`)\n A variable representing a network input. If it is not provided, a\n variable will be created.\n\n Raises\n ------\n ValueError\n If the dimension of `input_var` is not equal to `len(shape)`\n\n Notes\n -----\n The first dimension usually indicates the batch size. If you specify it,\n Theano may apply more optimizations while compiling the training or\n prediction function, but the compiled function will not accept data of a\n different batch size at runtime. To compile for a variable batch size, set\n the first shape element to `None` instead.\n\n Examples\n --------\n >>> from lasagne.layers import InputLayer\n >>> l_in = InputLayer((100, 20))\n ' def __init__(self, shape, input_var=None, name=None, **kwargs): self.shape = shape if any((((d is not None) and (d <= 0)) for d in self.shape)): raise ValueError(('Cannot create InputLayer with a non-positive shape dimension. shape=%r, self.name=%r' % (self.shape, name))) ndim = len(shape) if (input_var is None): input_var_type = T.TensorType(theano.config.floatX, ([False] * ndim)) var_name = (('%s.input' % name) if (name is not None) else 'input') input_var = input_var_type(var_name) elif (input_var.ndim != ndim): raise ValueError(('shape has %d dimensions, but variable has %d' % (ndim, input_var.ndim))) self.input_var = input_var self.name = name self.params = OrderedDict() @Layer.output_shape.getter def output_shape(self): return self.shape
def autocrop(inputs, cropping):
    """
    Crops the given input arrays so they can be merged element-wise.

    For every axis with cropping enabled, the minimum size of that axis
    over all inputs is computed and every input is cropped to that size.
    The per-axis cropping modes are:

    `None`: this axis is not cropped, inputs are unchanged in this axis

    `'lower'`: inputs are cropped choosing the lower portion in this axis
    (``a[:crop_size, ...]``)

    `'upper'`: inputs are cropped choosing the upper portion in this axis
    (``a[-crop_size:, ...]``)

    `'center'`: inputs are cropped choosing the central portion in this axis
    (``a[offset:offset+crop_size, ...]`` where
    ``offset = (a.shape[0]-crop_size)//2``)

    Parameters
    ----------
    inputs : list of Theano expressions
        The input arrays in the form of a list of Theano expressions

    cropping : list of cropping modes
        Cropping modes, one for each axis. If the length of `cropping` is
        less than the number of axes in the inputs, it is padded with
        `None`. If `cropping` is None, `inputs` is returned as is.

    Returns
    -------
    list of Theano expressions
        each expression is the cropped version of the corresponding input
    """
    if (cropping is None):
        # No cropping configured: pass the inputs through untouched.
        return inputs
    else:
        # All inputs must have the same rank for per-axis cropping.
        ndim = inputs[0].ndim
        if (not all(((input.ndim == ndim) for input in inputs))):
            raise ValueError('Not all inputs are of the same dimensionality. Got {0} inputs of dimensionalities {1}.'.format(len(inputs), [input.ndim for input in inputs]))
        # Symbolic per-input shapes, and the element-wise minimum shape
        # over all inputs (the target size for every cropped axis).
        shapes = [input.shape for input in inputs]
        shapes_tensor = T.as_tensor_variable(shapes)
        min_shape = T.min(shapes_tensor, axis=0)
        # One list of per-axis slice objects per input.
        slices_by_input = [[] for i in range(len(inputs))]
        # Pad the cropping spec with None so it covers every axis.
        cropping = list(cropping)
        if (ndim > len(cropping)):
            cropping = (list(cropping) + ([None] * (ndim - len(cropping))))
        for (dim, cr) in enumerate(cropping):
            if (cr is None):
                # Keep this axis untouched for every input.
                slice_all = slice(None)
                for slices in slices_by_input:
                    slices.append(slice_all)
            else:
                # Symbolic minimum size of this axis over all inputs.
                sz = min_shape[dim]
                if (cr == 'lower'):
                    # Take the first `sz` elements.
                    slc_lower = slice(None, sz)
                    for slices in slices_by_input:
                        slices.append(slc_lower)
                elif (cr == 'upper'):
                    # Take the last `sz` elements.
                    slc_upper = slice((- sz), None)
                    for slices in slices_by_input:
                        slices.append(slc_upper)
                elif (cr == 'center'):
                    # Take `sz` elements centered per input, so the offset
                    # depends on each input's own size in this axis.
                    for (sh, slices) in zip(shapes, slices_by_input):
                        offset = ((sh[dim] - sz) // 2)
                        slices.append(slice(offset, (offset + sz)))
                else:
                    raise ValueError("Unknown crop mode '{0}'".format(cr))
        # Apply the accumulated slice tuples to each input.
        return [input[slices] for (input, slices) in zip(inputs, slices_by_input)]
def autocrop_array_shapes(input_shapes, cropping):
    """Compute the shapes the inputs would have after :func:`autocrop`.

    For every axis whose cropping mode is ``'lower'``, ``'center'`` or
    ``'upper'``, the resulting size is the minimum of that axis over all
    input shapes; axes with mode ``None`` are left unchanged.

    Parameters
    ----------
    input_shapes : list of tuples
        the shapes of the input arrays prior to cropping.

    cropping : list of cropping modes or None
        one mode per axis; padded with ``None`` if shorter than the number
        of axes. If ``cropping`` is None, ``input_shapes`` is returned
        unchanged. See :func:`autocrop` for the mode semantics.

    Returns
    -------
    list of tuples
        the cropped counterpart of each shape in `input_shapes`.
    """
    if cropping is None:
        return input_shapes
    ndim = len(input_shapes[0])
    dim_counts = [len(shape) for shape in input_shapes]
    if any(count != ndim for count in dim_counts):
        raise ValueError('Not all inputs are of the same dimensionality. Got {0} inputs of dimensionalities {1}.'.format(len(input_shapes), dim_counts))
    # Pad the cropping spec with None so it covers every axis.
    modes = list(cropping)
    modes += [None] * (ndim - len(modes))
    # Work axis by axis: each entry of `per_axis` holds the resulting
    # sizes of one axis across all inputs.
    per_axis = []
    for axis_sizes, mode in zip(zip(*input_shapes), modes):
        if mode is None:
            per_axis.append(axis_sizes)
        elif mode in ('lower', 'center', 'upper'):
            # Every cropping mode reduces this axis to the minimum size.
            per_axis.append([min(axis_sizes)] * len(axis_sizes))
        else:
            raise ValueError("Unknown crop mode '{0}'".format(mode))
    # Transpose back from per-axis lists to per-input shape tuples.
    return [tuple(shape) for shape in zip(*per_axis)]
class ConcatLayer(MergeLayer): '\n Concatenates multiple inputs along the specified axis. Inputs should have\n the same shape except for the dimension specified in axis, which can have\n different sizes.\n\n Parameters\n -----------\n incomings : a list of :class:`Layer` instances or tuples\n The layers feeding into this layer, or expected input shapes\n\n axis : int\n Axis which inputs are joined over\n\n cropping : None or [crop]\n Cropping for each input axis. Cropping is described in the docstring\n for :func:`autocrop`. Cropping is always disabled for `axis`.\n ' def __init__(self, incomings, axis=1, cropping=None, **kwargs): super(ConcatLayer, self).__init__(incomings, **kwargs) self.axis = axis if (cropping is not None): cropping = list(cropping) cropping[axis] = None self.cropping = cropping def get_output_shape_for(self, input_shapes): input_shapes = autocrop_array_shapes(input_shapes, self.cropping) output_shape = [next((s for s in sizes if (s is not None)), None) for sizes in zip(*input_shapes)] def match(shape1, shape2): return ((len(shape1) == len(shape2)) and all((((i == self.axis) or (s1 is None) or (s2 is None) or (s1 == s2)) for (i, (s1, s2)) in enumerate(zip(shape1, shape2))))) if (not all((match(shape, output_shape) for shape in input_shapes))): raise ValueError('Mismatch: input shapes must be the same except in the concatenation axis') sizes = [input_shape[self.axis] for input_shape in input_shapes] concat_size = (None if any(((s is None) for s in sizes)) else sum(sizes)) output_shape[self.axis] = concat_size return tuple(output_shape) def get_output_for(self, inputs, **kwargs): inputs = autocrop(inputs, self.cropping) return T.concatenate(inputs, axis=self.axis)
class ElemwiseMergeLayer(MergeLayer): '\n This layer performs an elementwise merge of its input layers.\n It requires all input layers to have the same output shape.\n\n Parameters\n ----------\n incomings : a list of :class:`Layer` instances or tuples\n the layers feeding into this layer, or expected input shapes,\n with all incoming shapes being equal\n\n merge_function : callable\n the merge function to use. Should take two arguments and return the\n updated value. Some possible merge functions are ``theano.tensor``:\n ``mul``, ``add``, ``maximum`` and ``minimum``.\n\n cropping : None or [crop]\n Cropping for each input axis. Cropping is described in the docstring\n for :func:`autocrop`\n\n See Also\n --------\n ElemwiseSumLayer : Shortcut for sum layer.\n ' def __init__(self, incomings, merge_function, cropping=None, **kwargs): super(ElemwiseMergeLayer, self).__init__(incomings, **kwargs) self.merge_function = merge_function self.cropping = cropping def get_output_shape_for(self, input_shapes): input_shapes = autocrop_array_shapes(input_shapes, self.cropping) output_shape = tuple((next((s for s in sizes if (s is not None)), None) for sizes in zip(*input_shapes))) def match(shape1, shape2): return ((len(shape1) == len(shape2)) and all((((s1 is None) or (s2 is None) or (s1 == s2)) for (s1, s2) in zip(shape1, shape2)))) if (not all((match(shape, output_shape) for shape in input_shapes))): raise ValueError('Mismatch: not all input shapes are the same') return output_shape def get_output_for(self, inputs, **kwargs): inputs = autocrop(inputs, self.cropping) output = None for input in inputs: if (output is not None): output = self.merge_function(output, input) else: output = input return output
class ElemwiseSumLayer(ElemwiseMergeLayer): '\n This layer performs an elementwise sum of its input layers.\n It requires all input layers to have the same output shape.\n\n Parameters\n ----------\n incomings : a list of :class:`Layer` instances or tuples\n the layers feeding into this layer, or expected input shapes,\n with all incoming shapes being equal\n\n coeffs: list or scalar\n A same-sized list of coefficients, or a single coefficient that\n is to be applied to all instances. By default, these will not\n be included in the learnable parameters of this layer.\n\n cropping : None or [crop]\n Cropping for each input axis. Cropping is described in the docstring\n for :func:`autocrop`\n\n Notes\n -----\n Depending on your architecture, this can be used to avoid the more\n costly :class:`ConcatLayer`. For example, instead of concatenating layers\n before a :class:`DenseLayer`, insert separate :class:`DenseLayer` instances\n of the same number of output units and add them up afterwards. (This avoids\n the copy operations in concatenation, but splits up the dot product.)\n ' def __init__(self, incomings, coeffs=1, cropping=None, **kwargs): super(ElemwiseSumLayer, self).__init__(incomings, T.add, cropping=cropping, **kwargs) if isinstance(coeffs, list): if (len(coeffs) != len(incomings)): raise ValueError(('Mismatch: got %d coeffs for %d incomings' % (len(coeffs), len(incomings)))) else: coeffs = ([coeffs] * len(incomings)) self.coeffs = coeffs def get_output_for(self, inputs, **kwargs): inputs = [((input * coeff) if (coeff != 1) else input) for (coeff, input) in zip(self.coeffs, inputs)] return super(ElemwiseSumLayer, self).get_output_for(inputs, **kwargs)
class DropoutLayer(Layer):
    """Dropout layer.

    Randomly sets input values to zero with probability ``p`` as a
    regularizer [1]_, [2]_. Pass ``deterministic=True`` at test time to
    disable the noise.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        the layer feeding into this layer, or the expected input shape
    p : float or scalar tensor
        The probability of setting a value to zero
    rescale : bool
        If true the input is rescaled with input / (1-p) when
        deterministic is False.

    Notes
    -----
    With ``rescale`` enabled, the surviving values are scaled by
    ``1 / (1 - p)`` at training time ("inverted dropout"), so no
    compensation is needed at test time.

    References
    ----------
    .. [1] Hinton, G., Srivastava, N., Krizhevsky, A., Sutskever, I.,
       Salakhutdinov, R. R. (2012):
       Improving neural networks by preventing co-adaptation of feature
       detectors. arXiv preprint arXiv:1207.0580.

    .. [2] Srivastava Nitish, Hinton, G., Krizhevsky, A., Sutskever,
       I., & Salakhutdinov, R. R. (2014):
       Dropout: A Simple Way to Prevent Neural Networks from Overfitting.
       Journal of Machine Learning Research, 5(Jun)(2), 1929-1958.
    """

    def __init__(self, incoming, p=0.5, rescale=True, **kwargs):
        super(DropoutLayer, self).__init__(incoming, **kwargs)
        self._srng = RandomStreams(get_rng().randint(1, 2147462579))
        self.p = p
        self.rescale = rescale

    def get_output_for(self, input, deterministic=False, **kwargs):
        """Return `input` with dropout applied (identity if deterministic)."""
        if deterministic or self.p == 0:
            return input
        keep_prob = 1 - self.p
        if self.rescale:
            input = input / keep_prob
        # Use the static shape when fully known, otherwise fall back to
        # the symbolic runtime shape for the dropout mask.
        mask_shape = self.input_shape
        if any(size is None for size in mask_shape):
            mask_shape = input.shape
        mask = self._srng.binomial(mask_shape, p=keep_prob,
                                   dtype=theano.config.floatX)
        return input * mask
class GaussianNoiseLayer(Layer): 'Gaussian noise layer.\n\n Add zero-mean Gaussian noise of given standard deviation to the input [1]_.\n\n Parameters\n ----------\n incoming : a :class:`Layer` instance or a tuple\n the layer feeding into this layer, or the expected input shape\n sigma : float or tensor scalar\n Standard deviation of added Gaussian noise\n\n Notes\n -----\n The Gaussian noise layer is a regularizer. During training you should set\n deterministic to false and during testing you should set deterministic to\n true.\n\n References\n ----------\n .. [1] K.-C. Jim, C. Giles, and B. Horne (1996):\n An analysis of noise in recurrent neural networks: convergence and\n generalization.\n IEEE Transactions on Neural Networks, 7(6):1424-1438.\n ' def __init__(self, incoming, sigma=0.1, **kwargs): super(GaussianNoiseLayer, self).__init__(incoming, **kwargs) self._srng = RandomStreams(get_rng().randint(1, 2147462579)) self.sigma = sigma def get_output_for(self, input, deterministic=False, **kwargs): '\n Parameters\n ----------\n input : tensor\n output from the previous layer\n deterministic : bool\n If true noise is disabled, see notes\n ' if (deterministic or (self.sigma == 0)): return input else: return (input + self._srng.normal(input.shape, avg=0.0, std=self.sigma))
class FlattenLayer(Layer): '\n A layer that flattens its input. The leading ``outdim-1`` dimensions of\n the output will have the same shape as the input. The remaining dimensions\n are collapsed into the last dimension.\n\n Parameters\n ----------\n incoming : a :class:`Layer` instance or a tuple\n The layer feeding into this layer, or the expected input shape.\n outdim : int\n The number of dimensions in the output.\n\n See Also\n --------\n flatten : Shortcut\n ' def __init__(self, incoming, outdim=2, **kwargs): super(FlattenLayer, self).__init__(incoming, **kwargs) self.outdim = outdim if (outdim < 1): raise ValueError('Dim must be >0, was %i', outdim) def get_output_shape_for(self, input_shape): shp = [input_shape[i] for i in range((self.outdim - 1))] shp += [int(np.prod(input_shape[(self.outdim - 1):]))] return tuple(shp) def get_output_for(self, input, **kwargs): return input.flatten(self.outdim)
class ReshapeLayer(Layer):
    """
    A layer reshaping its input tensor to another tensor of the same total
    number of elements.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape

    shape : tuple
        The target shape specification. Each element can be one of:

        * ``i``, a positive integer directly giving the size of the
          dimension
        * ``[i]``, a single-element list of int, denoting to use the size
          of the ``i`` th input dimension
        * ``-1``, denoting to infer the size for this dimension to match
          the total number of elements in the input tensor (cannot be used
          more than once in a specification)
        * TensorVariable directly giving the size of the dimension

    Examples
    --------
    >>> from lasagne.layers import InputLayer, ReshapeLayer
    >>> l_in = InputLayer((32, 100, 20))
    >>> l1 = ReshapeLayer(l_in, ((32, 50, 40)))
    >>> l1.output_shape
    (32, 50, 40)
    >>> l_in = InputLayer((None, 100, 20))
    >>> l1 = ReshapeLayer(l_in, ([0], [1], 5, -1))
    >>> l1.output_shape
    (None, 100, 5, 4)

    Notes
    -----
    The tensor elements will be fetched and placed in C-like order. That
    is, reshaping `[1,2,3,4,5,6]` to shape `(2,3)` will result in a matrix
    `[[1,2,3],[4,5,6]]`, not in `[[1,3,5],[2,4,6]]` (Fortran-like order),
    regardless of the memory layout of the input tensor. For C-contiguous
    input, reshaping is cheap, for others it may require copying the data.
    """

    def __init__(self, incoming, shape, **kwargs):
        super(ReshapeLayer, self).__init__(incoming, **kwargs)
        shape = tuple(shape)
        # Validate every element of the shape specification up front.
        for s in shape:
            if isinstance(s, int):
                if ((s == 0) or (s < (- 1))):
                    raise ValueError('`shape` integers must be positive or -1')
            elif isinstance(s, list):
                # [i] is a reference to the size of input dimension i.
                if ((len(s) != 1) or (not isinstance(s[0], int)) or (s[0] < 0)):
                    raise ValueError('`shape` input references must be single-element lists of int >= 0')
            elif isinstance(s, T.TensorVariable):
                if (s.ndim != 0):
                    raise ValueError(('A symbolic variable in a shape specification must be a scalar, but had %i dimensions' % s.ndim))
            else:
                raise ValueError('`shape` must be a tuple of int and/or [int]')
        # At most one dimension may be inferred via -1.
        if (sum(((s == (- 1)) for s in shape)) > 1):
            raise ValueError('`shape` cannot contain multiple -1')
        self.shape = shape
        # Validate the specification against the input shape right away,
        # so incompatibilities fail at construction time.
        self.get_output_shape_for(self.input_shape)

    def get_output_shape_for(self, input_shape, **kwargs):
        # `output_shape` is the spec being resolved in place. The two
        # "masked" copies are used only for counting elements: unknown
        # (None) sizes that are matched by an input reference [i] are
        # replaced by 1 on BOTH sides so they cancel out of the size check.
        output_shape = list(self.shape)
        masked_input_shape = list(input_shape)
        masked_output_shape = list(output_shape)
        # First pass: resolve [i] references to actual input sizes.
        for (dim, o) in enumerate(output_shape):
            if isinstance(o, list):
                if (o[0] >= len(input_shape)):
                    raise ValueError(('specification contains [%d], but input shape has %d dimensions only' % (o[0], len(input_shape))))
                output_shape[dim] = input_shape[o[0]]
                masked_output_shape[dim] = input_shape[o[0]]
                if ((input_shape[o[0]] is None) and (masked_input_shape[o[0]] is None)):
                    # Unknown size forwarded verbatim: exclude it from the
                    # element-count comparison on both sides.
                    masked_input_shape[o[0]] = 1
                    masked_output_shape[dim] = 1
        # Second pass: symbolic sizes are unknown at compile time.
        for (dim, o) in enumerate(output_shape):
            if isinstance(o, T.TensorVariable):
                output_shape[dim] = None
                masked_output_shape[dim] = None
        # Total element counts (None if any masked size is unknown).
        input_size = (None if any(((x is None) for x in masked_input_shape)) else np.prod(masked_input_shape))
        output_size = (None if any(((x is None) for x in masked_output_shape)) else np.prod(masked_output_shape))
        del masked_input_shape, masked_output_shape
        # Infer the size of a -1 dimension from the remaining sizes.
        if ((- 1) in output_shape):
            dim = output_shape.index((- 1))
            if ((input_size is None) or (output_size is None)):
                output_shape[dim] = None
                output_size = None
            else:
                # output_size currently includes the -1 factor; flip the
                # sign, divide it out, then fold the inferred size back in.
                output_size *= (- 1)
                output_shape[dim] = (input_size // output_size)
                output_size *= output_shape[dim]
        # Both totals known but different: the spec cannot be satisfied.
        if ((input_size is not None) and (output_size is not None) and (input_size != output_size)):
            raise ValueError(('%s cannot be reshaped to specification %s. The total size mismatches.' % (input_shape, self.shape)))
        return tuple(output_shape)

    def get_output_for(self, input, **kwargs):
        # Replace [i] references with the symbolic runtime sizes; ints,
        # -1 and scalar TensorVariables are passed to reshape as-is.
        output_shape = list(self.shape)
        for (dim, o) in enumerate(output_shape):
            if isinstance(o, list):
                output_shape[dim] = input.shape[o[0]]
        return input.reshape(tuple(output_shape))
class DimshuffleLayer(Layer):
    """
    A layer that rearranges the dimensions of its input tensor, maintaining
    the same total number of elements.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        the layer feeding into this layer, or the expected input shape

    pattern : tuple
        The new dimension order, with each element giving the index
        of the dimension in the input tensor or `'x'` to broadcast it.
        For example `(3,2,1,0)` will reverse the order of a 4-dimensional
        tensor. Use `'x'` to broadcast, e.g. `(3,2,1,'x',0)` will
        take a 4 tensor of shape `(2,3,5,7)` as input and produce a
        tensor of shape `(7,5,3,1,2)` with the 4th dimension being
        broadcast-able. In general, all dimensions in the input tensor
        must be used to generate the output tensor. Omitting a dimension
        attempts to collapse it; this can only be done to broadcast-able
        dimensions, e.g. a 5-tensor of shape `(7,5,3,1,2)` with the 4th
        being broadcast-able can be shuffled with the pattern `(4,2,1,0)`
        collapsing the 4th dimension resulting in a tensor of shape
        `(2,3,5,7)`.

    Examples
    --------
    >>> from lasagne.layers import InputLayer, DimshuffleLayer
    >>> l_in = InputLayer((2, 3, 5, 7))
    >>> l1 = DimshuffleLayer(l_in, (3, 2, 1, 'x', 0))
    >>> l1.output_shape
    (7, 5, 3, 1, 2)
    >>> l2 = DimshuffleLayer(l1, (4, 2, 1, 0))
    >>> l2.output_shape
    (2, 3, 5, 7)
    """
    def __init__(self, incoming, pattern, **kwargs):
        super(DimshuffleLayer, self).__init__(incoming, **kwargs)

        # Validate the pattern up front: each input dimension index may be
        # used at most once, and only ints or 'x' (broadcast) are allowed.
        used_dims = set()
        for p in pattern:
            if isinstance(p, int):
                if p in used_dims:
                    raise ValueError("pattern contains dimension {0} more "
                                     "than once".format(p))
                used_dims.add(p)
            elif p == 'x':
                pass
            else:
                # Bug fix: the two concatenated literals previously produced
                # "dimensionindices" (missing space between them).
                raise ValueError("pattern should only contain dimension "
                                 "indices or 'x', not {0}".format(p))

        self.pattern = pattern

        # Try computing the output shape once, so an invalid pattern for
        # this input shape raises here rather than later at compile time.
        self.get_output_shape_for(self.input_shape)

    def get_output_shape_for(self, input_shape):
        # Build the output shape by reading off the input dimensions in
        # pattern order; 'x' inserts a new broadcastable axis of size 1.
        output_shape = []
        dims_used = [False] * len(input_shape)
        for p in self.pattern:
            if isinstance(p, int):
                if p < 0 or p >= len(input_shape):
                    raise ValueError("pattern contains {0}, but input shape "
                                     "has {1} dimensions "
                                     "only".format(p, len(input_shape)))
                # Dimension p
                o = input_shape[p]
                dims_used[p] = True
            elif p == 'x':
                # Broadcast; will be of size 1
                o = 1
            output_shape.append(o)

        # Any input dimension omitted from the pattern is collapsed; that is
        # only valid if it is broadcastable (size 1) or unknown (None).
        for i, (dim_size, used) in enumerate(zip(input_shape, dims_used)):
            if not used and dim_size != 1 and dim_size is not None:
                # Bug fix: the concatenated literals previously produced
                # "notbroadcastable" (missing space between them).
                raise ValueError(
                    "pattern attempted to collapse dimension {0} of size "
                    "{1}; dimensions with size != 1/None are not "
                    "broadcastable and cannot be "
                    "collapsed".format(i, dim_size))

        return tuple(output_shape)

    def get_output_for(self, input, **kwargs):
        return input.dimshuffle(self.pattern)
class PadLayer(Layer): '\n Pad all dimensions except the first ``batch_ndim`` with ``width``\n zeros on both sides, or with another value specified in ``val``.\n Individual padding for each dimension or edge can be specified\n using a tuple or list of tuples for ``width``.\n\n Parameters\n ----------\n incoming : a :class:`Layer` instance or a tuple\n The layer feeding into this layer, or the expected input shape\n\n width : int, iterable of int, or iterable of tuple\n Padding width. If an int, pads each axis symmetrically with the same\n amount in the beginning and end. If an iterable of int, defines the\n symmetric padding width separately for each axis. If an iterable of\n tuples of two ints, defines a seperate padding width for each beginning\n and end of each axis.\n\n val : float\n Value used for padding\n\n batch_ndim : int\n Dimensions up to this value are not padded. For padding convolutional\n layers this should be set to 2 so the sample and filter dimensions are\n not padded\n ' def __init__(self, incoming, width, val=0, batch_ndim=2, **kwargs): super(PadLayer, self).__init__(incoming, **kwargs) self.width = width self.val = val self.batch_ndim = batch_ndim def get_output_shape_for(self, input_shape): output_shape = list(input_shape) if isinstance(self.width, int): widths = ([self.width] * (len(input_shape) - self.batch_ndim)) else: widths = self.width for (k, w) in enumerate(widths): if (output_shape[(k + self.batch_ndim)] is None): continue else: try: (l, r) = w except TypeError: l = r = w output_shape[(k + self.batch_ndim)] += (l + r) return tuple(output_shape) def get_output_for(self, input, **kwargs): return padding.pad(input, self.width, self.val, self.batch_ndim)
class SliceLayer(Layer): '\n Slices the input at a specific axis and at specific indices.\n\n Parameters\n ----------\n incoming : a :class:`Layer` instance or a tuple\n The layer feeding into this layer, or the expected input shape\n\n indices : int or slice instance\n If an ``int``, selects a single element from the given axis, dropping\n the axis. If a slice, selects all elements in the given range, keeping\n the axis.\n\n axis : int\n Specifies the axis from which the indices are selected.\n\n Examples\n --------\n >>> from lasagne.layers import SliceLayer, InputLayer\n >>> l_in = InputLayer((2, 3, 4))\n >>> SliceLayer(l_in, indices=0, axis=1).output_shape\n ... # equals input[:, 0]\n (2, 4)\n >>> SliceLayer(l_in, indices=slice(0, 1), axis=1).output_shape\n ... # equals input[:, 0:1]\n (2, 1, 4)\n >>> SliceLayer(l_in, indices=slice(-2, None), axis=-1).output_shape\n ... # equals input[..., -2:]\n (2, 3, 2)\n ' def __init__(self, incoming, indices, axis=(- 1), **kwargs): super(SliceLayer, self).__init__(incoming, **kwargs) self.slice = indices self.axis = axis def get_output_shape_for(self, input_shape): output_shape = list(input_shape) if isinstance(self.slice, int): del output_shape[self.axis] elif (input_shape[self.axis] is not None): output_shape[self.axis] = len(range(*self.slice.indices(input_shape[self.axis]))) else: output_shape[self.axis] = None return tuple(output_shape) def get_output_for(self, input, **kwargs): axis = self.axis if (axis < 0): axis += input.ndim return input[(((slice(None),) * axis) + (self.slice,))]
def sigmoid(x): 'Sigmoid activation function :math:`\\varphi(x) = \\frac{1}{1 + e^{-x}}`\n\n Parameters\n ----------\n x : float32\n The activation (the summed, weighted input of a neuron).\n\n Returns\n -------\n float32 in [0, 1]\n The output of the sigmoid function applied to the activation.\n ' return theano.tensor.nnet.sigmoid(x)
def softmax(x): 'Softmax activation function\n :math:`\\varphi(\\mathbf{x})_j =\n \\frac{e^{\\mathbf{x}_j}}{\\sum_{k=1}^K e^{\\mathbf{x}_k}}`\n where :math:`K` is the total number of neurons in the layer. This\n activation function gets applied row-wise.\n\n Parameters\n ----------\n x : float32\n The activation (the summed, weighted input of a neuron).\n\n Returns\n -------\n float32 where the sum of the row is 1 and each single value is in [0, 1]\n The output of the softmax function applied to the activation.\n ' return theano.tensor.nnet.softmax(x)
def tanh(x): 'Tanh activation function :math:`\\varphi(x) = \\tanh(x)`\n\n Parameters\n ----------\n x : float32\n The activation (the summed, weighted input of a neuron).\n\n Returns\n -------\n float32 in [-1, 1]\n The output of the tanh function applied to the activation.\n ' return theano.tensor.tanh(x)
class ScaledTanH(object):
    """Scaled tanh :math:`\\varphi(x) = \\tanh(\\alpha \\cdot x) \\cdot \\beta`

    A modified tanh that rescales both the input and the output of the
    activation.

    Scaling the input down widens the (near-)linear regime of the tanh by
    lowering its maximum slope; scaling the input up steepens it toward a
    step function. Scaling the output changes the output interval to
    :math:`[-\\beta,\\beta]`.

    Parameters
    ----------
    scale_in : float32
        The scale parameter :math:`\\alpha` for the input

    scale_out : float32
        The scale parameter :math:`\\beta` for the output

    Methods
    -------
    __call__(x)
        Apply the scaled tanh function to the activation `x`.

    Examples
    --------
    In contrast to other activation functions in this module, this is
    a class that needs to be instantiated to obtain a callable:

    >>> from lasagne.layers import InputLayer, DenseLayer
    >>> l_in = InputLayer((None, 100))
    >>> from lasagne.nonlinearities import ScaledTanH
    >>> scaled_tanh = ScaledTanH(scale_in=0.5, scale_out=2.27)
    >>> l1 = DenseLayer(l_in, num_units=200, nonlinearity=scaled_tanh)

    Notes
    -----
    LeCun et al. (in [1]_, Section 4.4) suggest ``scale_in=2./3`` and
    ``scale_out=1.7159``, which has :math:`\\varphi(\\pm 1) = \\pm 1`,
    maximum second derivative at 1, and an effective gain close to 1.

    By carefully matching :math:`\\alpha` and :math:`\\beta`, the
    nonlinearity can also be tuned to preserve the mean and variance of its
    input:

    * ``scale_in=0.5``, ``scale_out=2.4``: If the input is a random normal
      variable, the output will have zero mean and unit variance.
    * ``scale_in=1``, ``scale_out=1.6``: Same property, but with a smaller
      linear regime in input space.
    * ``scale_in=0.5``, ``scale_out=2.27``: If the input is a uniform normal
      variable, the output will have zero mean and unit variance.
    * ``scale_in=1``, ``scale_out=1.48``: Same property, but with a smaller
      linear regime in input space.

    References
    ----------
    .. [1] LeCun, Yann A., et al. (1998):
       Efficient BackProp,
       http://link.springer.com/chapter/10.1007/3-540-49430-8_2,
       http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
    .. [2] Masci, Jonathan, et al. (2011):
       Stacked Convolutional Auto-Encoders for Hierarchical Feature
       Extraction,
       http://link.springer.com/chapter/10.1007/978-3-642-21735-7_7,
       http://people.idsia.ch/~ciresan/data/icann2011.pdf
    """
    def __init__(self, scale_in=1, scale_out=1):
        self.scale_in = scale_in
        self.scale_out = scale_out

    def __call__(self, x):
        # beta * tanh(alpha * x); multiplication order is irrelevant
        # for elementwise tensor products.
        return self.scale_out * theano.tensor.tanh(self.scale_in * x)
def rectify(x): 'Rectify activation function :math:`\\varphi(x) = \\max(0, x)`\n\n Parameters\n ----------\n x : float32\n The activation (the summed, weighted input of a neuron).\n\n Returns\n -------\n float32\n The output of the rectify function applied to the activation.\n ' return theano.tensor.nnet.relu(x)
class LeakyRectify(object):
    """Leaky rectifier :math:`\\varphi(x) = \\max(\\alpha \\cdot x, x)`

    The leaky rectifier was introduced in [1]_. Compared to the standard
    rectifier :func:`rectify`, it has a nonzero gradient for negative input,
    which often helps convergence.

    Parameters
    ----------
    leakiness : float
        Slope for negative input, usually between 0 and 1.
        A leakiness of 0 will lead to the standard rectifier,
        a leakiness of 1 will lead to a linear activation function,
        and any value in between will give a leaky rectifier.

    Methods
    -------
    __call__(x)
        Apply the leaky rectify function to the activation `x`.

    Examples
    --------
    In contrast to other activation functions in this module, this is
    a class that needs to be instantiated to obtain a callable:

    >>> from lasagne.layers import InputLayer, DenseLayer
    >>> l_in = InputLayer((None, 100))
    >>> from lasagne.nonlinearities import LeakyRectify
    >>> custom_rectify = LeakyRectify(0.1)
    >>> l1 = DenseLayer(l_in, num_units=200, nonlinearity=custom_rectify)

    Alternatively, you can use the provided instance for leakiness=0.01:

    >>> from lasagne.nonlinearities import leaky_rectify
    >>> l2 = DenseLayer(l_in, num_units=200, nonlinearity=leaky_rectify)

    Or the one for a high leakiness of 1/3:

    >>> from lasagne.nonlinearities import very_leaky_rectify
    >>> l3 = DenseLayer(l_in, num_units=200, nonlinearity=very_leaky_rectify)

    See Also
    --------
    leaky_rectify: Instance with default leakiness of 0.01, as in [1]_.
    very_leaky_rectify: Instance with high leakiness of 1/3, as in [2]_.

    References
    ----------
    .. [1] Maas et al. (2013):
       Rectifier Nonlinearities Improve Neural Network Acoustic Models,
       http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf
    .. [2] Graham, Benjamin (2014):
       Spatially-sparse convolutional neural networks,
       http://arxiv.org/abs/1409.6070
    """
    def __init__(self, leakiness=0.01):
        self.leakiness = leakiness

    def __call__(self, x):
        # Theano's relu takes the negative-side slope as its second argument.
        return theano.tensor.nnet.relu(x, self.leakiness)
def elu(x): 'Exponential Linear Unit :math:`\\varphi(x) = (x > 0) ? x : e^x - 1`\n\n The Exponential Linear Unit (EUL) was introduced in [1]_. Compared to the\n linear rectifier :func:`rectify`, it has a mean activation closer to zero\n and nonzero gradient for negative input, which can help convergence.\n Compared to the leaky rectifier :class:`LeakyRectify`, it saturates for\n highly negative inputs.\n\n Parameters\n ----------\n x : float32\n The activation (the summed, weighed input of a neuron).\n\n Returns\n -------\n float32\n The output of the exponential linear unit for the activation.\n\n Notes\n -----\n In [1]_, an additional parameter :math:`\\alpha` controls the (negative)\n saturation value for negative inputs, but is set to 1 for all experiments.\n It is omitted here.\n\n References\n ----------\n .. [1] Djork-Arné Clevert, Thomas Unterthiner, Sepp Hochreiter (2015):\n Fast and Accurate Deep Network Learning by Exponential Linear Units\n (ELUs), http://arxiv.org/abs/1511.07289\n ' return theano.tensor.switch((x > 0), x, (theano.tensor.exp(x) - 1))
def softplus(x): 'Softplus activation function :math:`\\varphi(x) = \\log(1 + e^x)`\n\n Parameters\n ----------\n x : float32\n The activation (the summed, weighted input of a neuron).\n\n Returns\n -------\n float32\n The output of the softplus function applied to the activation.\n ' return theano.tensor.nnet.softplus(x)
def linear(x): 'Linear activation function :math:`\\varphi(x) = x`\n\n Parameters\n ----------\n x : float32\n The activation (the summed, weighted input of a neuron).\n\n Returns\n -------\n float32\n The output of the identity applied to the activation.\n ' return x
def binary_crossentropy(predictions, targets): 'Computes the binary cross-entropy between predictions and targets.\n\n .. math:: L = -t \\log(p) - (1 - t) \\log(1 - p)\n\n Parameters\n ----------\n predictions : Theano tensor\n Predictions in (0, 1), such as sigmoidal output of a neural network.\n targets : Theano tensor\n Targets in [0, 1], such as ground truth labels.\n\n Returns\n -------\n Theano tensor\n An expression for the element-wise binary cross-entropy.\n\n Notes\n -----\n This is the loss function of choice for binary classification problems\n and sigmoid output units.\n ' return theano.tensor.nnet.binary_crossentropy(predictions, targets)
def categorical_crossentropy(predictions, targets): 'Computes the categorical cross-entropy between predictions and targets.\n\n .. math:: L_i = - \\sum_j{t_{i,j} \\log(p_{i,j})}\n\n Parameters\n ----------\n predictions : Theano 2D tensor\n Predictions in (0, 1), such as softmax output of a neural network,\n with data points in rows and class probabilities in columns.\n targets : Theano 2D tensor or 1D tensor\n Either targets in [0, 1] matching the layout of `predictions`, or\n a vector of int giving the correct class index per data point.\n\n Returns\n -------\n Theano 1D tensor\n An expression for the item-wise categorical cross-entropy.\n\n Notes\n -----\n This is the loss function of choice for multi-class classification\n problems and softmax output units. For hard targets, i.e., targets\n that assign all of the probability to a single class per data point,\n providing a vector of int for the targets is usually slightly more\n efficient than providing a matrix with a single 1.0 per row.\n ' return theano.tensor.nnet.categorical_crossentropy(predictions, targets)
def squared_error(a, b): 'Computes the element-wise squared difference between two tensors.\n\n .. math:: L = (p - t)^2\n\n Parameters\n ----------\n a, b : Theano tensor\n The tensors to compute the squared difference between.\n\n Returns\n -------\n Theano tensor\n An expression for the item-wise squared difference.\n\n Notes\n -----\n This is the loss function of choice for many regression problems\n or auto-encoders with linear output units.\n ' return ((a - b) ** 2)