code stringlengths 17 6.64M |
|---|
class Glorot(Initializer):
    """Glorot (Xavier) weight initialization [1]_.

    Samples weights from a distribution whose standard deviation is
    ``gain * sqrt(2 / (fan_in + fan_out))``, with the fan values derived
    from the parameter shape.

    Parameters
    ----------
    initializer : lasagne.init.Initializer
        Initializer class used to sample the weights; its constructor must
        accept a `std` keyword to sample from a distribution with a given
        standard deviation.
    gain : float or 'relu'
        Scaling factor for the weights. Use ``1.0`` for linear and sigmoid
        units, ``'relu'`` or ``sqrt(2)`` for rectified linear units, and
        ``sqrt(2/(1+alpha**2))`` for leaky rectifiers with leakiness
        ``alpha``. Other transfer functions may need different factors.
    c01b : bool
        Set to ``True`` for ``c01b``-ordered convolution weights (e.g. a
        :class:`lasagne.layers.cuda_convnet.Conv2DCCLayer` constructed with
        ``dimshuffle=False``) so fan-in and fan-out are computed correctly.

    References
    ----------
    .. [1] Xavier Glorot and Yoshua Bengio (2010):
           Understanding the difficulty of training deep feedforward
           neural networks. International conference on artificial
           intelligence and statistics.

    See Also
    --------
    GlorotNormal : Shortcut with Gaussian initializer.
    GlorotUniform : Shortcut with uniform initializer.
    """

    def __init__(self, initializer, gain=1.0, c01b=False):
        # 'relu' is a convenience alias for the sqrt(2) gain.
        self.initializer = initializer
        self.gain = np.sqrt(2) if gain == 'relu' else gain
        self.c01b = c01b

    def sample(self, shape):
        """Sample a tensor of the given shape with Glorot-scaled std."""
        if self.c01b:
            if len(shape) != 4:
                raise RuntimeError(
                    'If c01b is True, only shapes of length 4 are accepted')
            # c01b layout: fan axes sit at positions 0 and 3, the two
            # middle axes form the receptive field.
            n1, n2 = shape[0], shape[3]
            receptive_field_size = shape[1] * shape[2]
        else:
            if len(shape) < 2:
                raise RuntimeError(
                    'This initializer only works with shapes of length >= 2')
            n1, n2 = shape[:2]
            # Trailing axes form the receptive field (empty product = 1
            # for dense layers).
            receptive_field_size = np.prod(shape[2:])
        std = self.gain * np.sqrt(2.0 / ((n1 + n2) * receptive_field_size))
        return self.initializer(std=std).sample(shape)
|
class GlorotNormal(Glorot):
    """Glorot initialization with weights sampled from a Normal distribution.

    See :class:`Glorot` for a description of the parameters.
    """

    def __init__(self, gain=1.0, c01b=False):
        super(GlorotNormal, self).__init__(Normal, gain=gain, c01b=c01b)
|
class GlorotUniform(Glorot):
    """Glorot initialization with weights sampled from a Uniform distribution.

    See :class:`Glorot` for a description of the parameters.
    """

    def __init__(self, gain=1.0, c01b=False):
        super(GlorotUniform, self).__init__(Uniform, gain=gain, c01b=c01b)
|
class He(Initializer):
    r"""He weight initialization [1]_.

    Weights are sampled with a standard deviation of
    :math:`\sigma = gain \sqrt{\frac{1}{fan_{in}}}`.

    Parameters
    ----------
    initializer : lasagne.init.Initializer
        Initializer class used to sample the weights; its constructor must
        accept a `std` keyword to sample from a distribution with a given
        standard deviation.
    gain : float or 'relu'
        Scaling factor for the weights. Use ``1.0`` for linear and sigmoid
        units, ``'relu'`` or ``sqrt(2)`` for rectified linear units, and
        ``sqrt(2/(1+alpha**2))`` for leaky rectifiers with leakiness
        ``alpha``. Other transfer functions may need different factors.
    c01b : bool
        Set to ``True`` for ``c01b``-ordered convolution weights (e.g. a
        :class:`lasagne.layers.cuda_convnet.Conv2DCCLayer` constructed with
        ``dimshuffle=False``) so fan-in is computed correctly.

    References
    ----------
    .. [1] Kaiming He et al. (2015):
           Delving deep into rectifiers: Surpassing human-level performance
           on imagenet classification. arXiv preprint arXiv:1502.01852.

    See Also
    ----------
    HeNormal : Shortcut with Gaussian initializer.
    HeUniform : Shortcut with uniform initializer.
    """

    def __init__(self, initializer, gain=1.0, c01b=False):
        # 'relu' is a convenience alias for the sqrt(2) gain.
        self.initializer = initializer
        self.gain = np.sqrt(2) if gain == 'relu' else gain
        self.c01b = c01b

    def sample(self, shape):
        """Sample a tensor of the given shape with He-scaled std."""
        if self.c01b:
            if len(shape) != 4:
                raise RuntimeError(
                    'If c01b is True, only shapes of length 4 are accepted')
            # c01b layout: everything but the trailing filter axis is fan-in.
            fan_in = np.prod(shape[:3])
        elif len(shape) == 2:
            fan_in = shape[0]
        elif len(shape) > 2:
            # All axes after the filter axis contribute to fan-in.
            fan_in = np.prod(shape[1:])
        else:
            raise RuntimeError(
                'This initializer only works with shapes of length >= 2')
        std = self.gain * np.sqrt(1.0 / fan_in)
        return self.initializer(std=std).sample(shape)
|
class HeNormal(He):
    """He initialization with weights sampled from a Normal distribution.

    See :class:`He` for a description of the parameters.
    """

    def __init__(self, gain=1.0, c01b=False):
        super(HeNormal, self).__init__(Normal, gain=gain, c01b=c01b)
|
class HeUniform(He):
    """He initialization with weights sampled from a Uniform distribution.

    See :class:`He` for a description of the parameters.
    """

    def __init__(self, gain=1.0, c01b=False):
        super(HeUniform, self).__init__(Uniform, gain=gain, c01b=c01b)
|
class Constant(Initializer):
    """Initialize all weights to a single constant value.

    Parameters
    ----------
    val : float
        Constant value for the weights.
    """

    def __init__(self, val=0.0):
        self.val = val

    def sample(self, shape):
        """Return an array of the given shape filled with ``self.val``."""
        return floatX(self.val * np.ones(shape))
|
class Sparse(Initializer):
    """Initialize weights as a sparse matrix.

    Parameters
    ----------
    sparsity : float
        Exact fraction of non-zero values per column. Larger values give
        less sparsity.
    std : float
        Non-zero weights are sampled from N(0, std).
    """

    def __init__(self, sparsity=0.1, std=0.01):
        self.sparsity = sparsity
        self.std = std

    def sample(self, shape):
        """Return a (n_inputs, n_outputs) matrix with sparse columns."""
        if len(shape) != 2:
            raise RuntimeError(
                'sparse initializer only works with shapes of length 2')
        n_inputs, n_outputs = shape
        # Number of non-zero entries per column.
        n_nonzero = int(self.sparsity * n_inputs)
        weights = floatX(np.zeros(shape))
        for col in range(n_outputs):
            # Pick a random subset of rows for this column, then fill
            # them with Gaussian noise.
            row_order = np.arange(n_inputs)
            get_rng().shuffle(row_order)
            chosen_rows = row_order[:n_nonzero]
            weights[chosen_rows, col] = floatX(
                get_rng().normal(0.0, self.std, size=n_nonzero))
        return weights
|
class Orthogonal(Initializer):
    """Initialize weights as an orthogonal matrix.

    Orthogonal matrix initialization [1]_. For n-dimensional shapes where
    n > 2, the n-1 trailing axes are flattened. For convolutional layers,
    this corresponds to the fan-in, so this makes the initialization usable
    for both dense and convolutional layers.

    Parameters
    ----------
    gain : float or 'relu'
        Scaling factor for the weights. Use ``1.0`` for linear and sigmoid
        units, ``'relu'`` or ``sqrt(2)`` for rectified linear units, and
        ``sqrt(2/(1+alpha**2))`` for leaky rectifiers with leakiness
        ``alpha``. Other transfer functions may need different factors.

    References
    ----------
    .. [1] Saxe, Andrew M., James L. McClelland, and Surya Ganguli.
           "Exact solutions to the nonlinear dynamics of learning in deep
           linear neural networks." arXiv preprint arXiv:1312.6120 (2013).
    """

    def __init__(self, gain=1.0):
        # 'relu' is a convenience alias for the sqrt(2) gain.
        self.gain = np.sqrt(2) if gain == 'relu' else gain

    def sample(self, shape):
        """Sample an orthogonal weight tensor of the given shape."""
        if len(shape) < 2:
            raise RuntimeError('Only shapes of length 2 or more are '
                               'supported.')
        # Flatten the trailing axes so the SVD sees a 2D matrix.
        flat_shape = (shape[0], np.prod(shape[1:]))
        gaussian = get_rng().normal(0.0, 1.0, flat_shape)
        u, _, v = np.linalg.svd(gaussian, full_matrices=False)
        # Either u or v has the requested shape, depending on which
        # dimension is larger; both factors are orthogonal.
        q = u if u.shape == flat_shape else v
        return floatX(self.gain * q.reshape(shape))
|
class Layer(object):
    """A single layer of a neural network.

    Subclass this when implementing new types of layers. Because each layer
    keeps track of the layer(s) feeding into it, a network's output
    :class:`Layer` instance doubles as a handle to the full network.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape.
    name : a string or None
        An optional name to attach to this layer.
    """

    def __init__(self, incoming, name=None):
        if isinstance(incoming, tuple):
            # A shape tuple means this layer has no actual input layer.
            self.input_shape = incoming
            self.input_layer = None
        else:
            self.input_shape = incoming.output_shape
            self.input_layer = incoming
        self.name = name
        self.params = OrderedDict()
        # Reject shapes such as (0, 5) or (-1, 5); None dims (unknown
        # batch size etc.) are allowed.
        if any(dim is not None and dim <= 0 for dim in self.input_shape):
            raise ValueError(
                'Cannot create Layer with a non-positive input_shape '
                'dimension. input_shape=%r, self.name=%r'
                % (self.input_shape, self.name))

    @property
    def output_shape(self):
        """Shape of this layer's output, derived from the input shape."""
        return self.get_output_shape_for(self.input_shape)

    def get_params(self, **tags):
        """Return the Theano shared variables that parameterize the layer.

        By default, all shared variables that participate in the forward
        pass are returned, in the order they were registered via
        :meth:`add_param`. The list can be filtered by tags: ``tag1=True``
        keeps only parameters tagged with ``tag1``, ``tag1=False`` keeps
        only parameters *not* tagged with ``tag1``. Commonly used tags are
        ``regularizable`` and ``trainable``.

        If a parameter was set to a Theano expression rather than a shared
        variable, the shared variables involved in that expression are
        returned instead of the expression itself; tag filtering treats
        all variables within an expression as tagged the same.

        Parameters
        ----------
        **tags (optional)
            Boolean tag filters as described above.

        Returns
        -------
        list of Theano shared variables
            A list of variables that parameterize the layer. Empty for
            layers without parameters.
        """
        candidates = list(self.params.keys())
        required = {tag for tag, value in tags.items() if value}
        if required:
            candidates = [p for p in candidates
                          if required.issubset(self.params[p])]
        forbidden = {tag for tag, value in tags.items() if not value}
        if forbidden:
            candidates = [p for p in candidates
                          if not (self.params[p] & forbidden)]
        return utils.collect_shared_vars(candidates)

    def get_output_shape_for(self, input_shape):
        """Compute the output shape of this layer given an input shape.

        Parameters
        ----------
        input_shape : tuple
            Shape of the input; one element per input dimension, each an
            integer or `None`.

        Returns
        -------
        tuple
            Shape of this layer's output; each element an integer or
            `None`.

        Notes
        -----
        Typically overridden when implementing a new :class:`Layer`. The
        default returns the input shape unchanged, which is correct for
        any layer that applies an elementwise operation.
        """
        return input_shape

    def get_output_for(self, input, **kwargs):
        """Propagate the given input through this layer (and only this
        layer).

        Parameters
        ----------
        input : Theano expression
            The expression to propagate through this layer.

        Returns
        -------
        output : Theano expression
            The output of this layer given the input to this layer.

        Notes
        -----
        Called by :meth:`lasagne.layers.get_output()` to propagate data
        through a network. Must be overridden when implementing a new
        :class:`Layer`; the default raises `NotImplementedError`.
        """
        raise NotImplementedError

    def add_param(self, spec, shape, name=None, **tags):
        """Register and possibly initialize a parameter tensor.

        Called in a layer's constructor to declare its parameters: their
        shapes, how they are initialized, and which tags they carry. This
        transparently supports initialization from numpy arrays and
        callables, as well as existing Theano shared variables or
        expressions. All registered parameters are stored with their tags
        in :attr:`Layer.params` and retrieved via :meth:`get_params`.

        Parameters
        ----------
        spec : Theano shared variable, expression, numpy array or callable
            Initial value, expression or initializer for this parameter.
            See :func:`lasagne.utils.create_param` for more information.
        shape : tuple of int
            Desired shape of the parameter tensor.
        name : str (optional)
            Descriptive name for the parameter variable, prefixed by the
            layer's name if any (``'layer_name.param_name'``). Ignored if
            `spec` is already a shared variable or expression.
        **tags (optional)
            Boolean tags to associate with the parameter. The tags
            ``trainable`` and ``regularizable`` default to ``True``; pass
            ``False`` to prevent this.

        Returns
        -------
        Theano shared variable or Theano expression
            The resulting parameter variable or expression. Assign it to a
            layer attribute for easy access, e.g.
            ``self.W = self.add_param(W, (2, 3), name='W')``.
        """
        if name is not None and self.name is not None:
            # Qualify the parameter name with the layer name.
            name = '%s.%s' % (self.name, name)
        param = utils.create_param(spec, shape, name)
        tags.setdefault('trainable', True)
        tags.setdefault('regularizable', True)
        # Only the tags set to a truthy value are stored.
        self.params[param] = {tag for tag, value in tags.items() if value}
        return param
|
class MergeLayer(Layer):
    """A layer that aggregates input from multiple layers.

    Subclass this when implementing new types of layers that obtain their
    input from multiple layers.

    Parameters
    ----------
    incomings : a list of :class:`Layer` instances or tuples
        The layers feeding into this layer, or expected input shapes.
    name : a string or None
        An optional name to attach to this layer.
    """

    def __init__(self, incomings, name=None):
        # Each incoming is either a shape tuple (no actual layer) or a
        # Layer instance providing its own output shape.
        self.input_shapes = []
        self.input_layers = []
        for incoming in incomings:
            if isinstance(incoming, tuple):
                self.input_shapes.append(incoming)
                self.input_layers.append(None)
            else:
                self.input_shapes.append(incoming.output_shape)
                self.input_layers.append(incoming)
        self.name = name
        self.params = OrderedDict()

    @Layer.output_shape.getter
    def output_shape(self):
        """Shape of this layer's output, derived from all input shapes."""
        return self.get_output_shape_for(self.input_shapes)

    def get_output_shape_for(self, input_shapes):
        """Compute the output shape of this layer given a list of input
        shapes.

        Parameters
        ----------
        input_shapes : list of tuple
            One shape tuple per input, in order; each element an integer
            or `None`.

        Returns
        -------
        tuple
            Shape of this layer's output; each element an integer or
            `None`.

        Notes
        -----
        Must be overridden when implementing a new multi-input
        :class:`Layer`; the default raises `NotImplementedError`.
        """
        raise NotImplementedError

    def get_output_for(self, inputs, **kwargs):
        """Propagate the given inputs through this layer (and only this
        layer).

        Parameters
        ----------
        inputs : list of Theano expressions
            The Theano expressions to propagate through this layer.

        Returns
        -------
        Theano expressions
            The output of this layer given the inputs to this layer.

        Notes
        -----
        Called by :meth:`lasagne.layers.get_output()` to propagate data
        through a network. Must be overridden when implementing a new
        multi-input :class:`Layer`; the default raises
        `NotImplementedError`.
        """
        raise NotImplementedError
|
def conv_output_length(input_length, filter_size, stride, pad=0):
    """Helper function to compute the output size of a convolution
    operation.

    This function computes the length along a single axis, which
    corresponds to a 1D convolution. It can also be used for convolutions
    with higher dimensionalities by applying it to each axis individually.

    Parameters
    ----------
    input_length : int or None
        The size of the input. If ``None`` (unknown size), ``None`` is
        returned.

    filter_size : int
        The size of the filter.

    stride : int
        The stride of the convolution operation.

    pad : int, 'full', 'same' or 'valid' (default: 0)
        By default, the convolution is only computed where the input and
        the filter fully overlap (a valid convolution). When ``stride=1``,
        this yields an output that is smaller than the input by
        ``filter_size - 1``. The `pad` argument allows you to implicitly
        pad the input with zeros, extending the output size.

        A single integer results in symmetric zero-padding of the given
        size on both borders.

        ``'full'`` pads with one less than the filter size on both sides.
        This is equivalent to computing the convolution wherever the input
        and the filter overlap by at least one position.

        ``'same'`` pads with half the filter size on both sides (one less
        on the second side for an even filter size). When ``stride=1``,
        this results in an output size equal to the input size.

        ``'valid'`` is an alias for ``0`` (no padding).

    Returns
    -------
    int or None
        The output size corresponding to the given convolution parameters,
        or ``None`` if `input_length` was ``None``.

    Raises
    ------
    ValueError
        When an invalid padding is specified.
    """
    if input_length is None:
        return None
    if pad == 'valid':
        output_length = input_length - filter_size + 1
    elif pad == 'full':
        output_length = input_length + filter_size - 1
    elif pad == 'same':
        output_length = input_length
    elif isinstance(pad, int):
        output_length = input_length + 2 * pad - filter_size + 1
    else:
        raise ValueError('Invalid pad: {0}'.format(pad))
    # Ceil-divide by the stride: count how many filter placements fit.
    output_length = (output_length + stride - 1) // stride
    return output_length
|
class BaseConvLayer(Layer):
    """
    lasagne.layers.BaseConvLayer(incoming, num_filters, filter_size,
    stride=1, pad=0, untie_biases=False,
    W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
    nonlinearity=lasagne.nonlinearities.rectify, flip_filters=True,
    n=None, **kwargs)

    Convolutional layer base class.

    Base class for performing an `n`-dimensional convolution on its input,
    optionally adding a bias and applying an elementwise nonlinearity. This
    class cannot be used in a Lasagne network directly; use a subclass such
    as :class:`Conv1DLayer` or :class:`Conv2DLayer`.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape: a
        tensor of 2+`n` dimensions,
        ``(batch_size, num_input_channels, <n spatial dimensions>)``.
    num_filters : int
        The number of learnable convolutional filters this layer has.
    filter_size : int or iterable of int
        An integer or an `n`-element tuple specifying the filter size.
    stride : int or iterable of int
        An integer or an `n`-element tuple specifying the stride.
    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        Implicit zero-padding of the input. An integer (or `n`-tuple of
        integers) pads symmetrically on all borders. ``'full'`` pads with
        one less than the filter size on both sides; ``'same'`` pads with
        half the filter size (rounded down) on both sides, giving an output
        equal to the input size when ``stride=1`` (even filter sizes are
        not supported); ``'valid'`` is an alias for ``0``.
    untie_biases : bool (default: False)
        If ``False``, one bias per channel shared across all positions
        (`b` is 1D). If ``True``, a separate bias per channel and position
        (`b` is `n`+1 dimensional).
    W : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the weights, of shape
        ``(num_filters, num_input_channels, <n spatial dimensions>)``.
        See :func:`lasagne.utils.create_param` for more information.
    b : Theano shared variable, expression, numpy array, callable or None
        Initial value, expression or initializer for the biases, or
        ``None`` for no biases. Shape ``(num_filters,)`` when
        `untie_biases` is ``False``, otherwise
        ``(num_filters, <n spatial dimensions>)``.
        See :func:`lasagne.utils.create_param` for more information.
    nonlinearity : callable or None
        The nonlinearity applied to the layer activations; ``None`` makes
        the layer linear.
    flip_filters : bool (default: True)
        Whether to flip the filters before sliding them over the input
        (a convolution, the default) or not (a correlation).
    n : int or None
        The dimensionality of the convolution. If ``None``, inferred from
        the input shape.
    **kwargs
        Any additional keyword arguments are passed to the `Layer`
        superclass.

    Attributes
    ----------
    W : Theano shared variable or expression
        Variable or expression representing the filter weights.
    b : Theano shared variable or expression
        Variable or expression representing the biases.
    """

    def __init__(self, incoming, num_filters, filter_size, stride=1,
                 pad=0, untie_biases=False, W=init.GlorotUniform(),
                 b=init.Constant(0.0), nonlinearity=nonlinearities.rectify,
                 flip_filters=True, n=None, **kwargs):
        super(BaseConvLayer, self).__init__(incoming, **kwargs)
        # None means a linear layer: substitute the identity nonlinearity.
        self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                             else nonlinearity)
        spatial_dims = len(self.input_shape) - 2
        if n is None:
            n = spatial_dims
        elif n != spatial_dims:
            raise ValueError(
                'Tried to create a %dD convolution layer with input shape '
                '%r. Expected %d input dimensions (batchsize, channels, %d '
                'spatial dimensions).' % (n, self.input_shape, n + 2, n))
        self.n = n
        self.num_filters = num_filters
        self.filter_size = as_tuple(filter_size, n, int)
        self.flip_filters = flip_filters
        self.stride = as_tuple(stride, n, int)
        self.untie_biases = untie_biases
        if pad == 'same' and any(s % 2 == 0 for s in self.filter_size):
            raise NotImplementedError(
                '`same` padding requires odd filter size.')
        # Normalize padding: 'valid' becomes a zero tuple, the string modes
        # 'full'/'same' are kept as-is, everything else becomes an n-tuple.
        if pad == 'valid':
            self.pad = as_tuple(0, n)
        elif pad in ('full', 'same'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, n, int)
        self.W = self.add_param(W, self.get_W_shape(), name='W')
        if b is None:
            self.b = None
        else:
            # Untied biases need one bias per output position; this reads
            # self.output_shape, so W must already be registered above.
            if self.untie_biases:
                biases_shape = (num_filters,) + self.output_shape[2:]
            else:
                biases_shape = (num_filters,)
            self.b = self.add_param(b, biases_shape, name='b',
                                    regularizable=False)

    def get_W_shape(self):
        """Get the shape of the weight tensor `W`.

        Returns
        -------
        tuple of int
            ``(num_filters, num_input_channels) + filter_size``.
        """
        return (self.num_filters, self.input_shape[1]) + self.filter_size

    def get_output_shape_for(self, input_shape):
        # String padding modes are broadcast to one entry per spatial axis
        # so they can be zipped below.
        if isinstance(self.pad, tuple):
            pad = self.pad
        else:
            pad = (self.pad,) * self.n
        spatial = tuple(conv_output_length(length, size, stride, p)
                        for length, size, stride, p
                        in zip(input_shape[2:], self.filter_size,
                               self.stride, pad))
        return (input_shape[0], self.num_filters) + spatial

    def get_output_for(self, input, **kwargs):
        conved = self.convolve(input, **kwargs)
        if self.b is None:
            activation = conved
        elif self.untie_biases:
            # One bias per channel and position: broadcast over batch only.
            activation = conved + T.shape_padleft(self.b, 1)
        else:
            # One bias per channel: broadcast over batch and spatial axes.
            activation = conved + self.b.dimshuffle(
                ('x', 0) + ('x',) * self.n)
        return self.nonlinearity(activation)

    def convolve(self, input, **kwargs):
        """
        Symbolically convolve `input` with ``self.W``, producing an output
        of shape ``self.output_shape``. To be implemented by subclasses.

        Parameters
        ----------
        input : Theano tensor
            The input minibatch to convolve
        **kwargs
            Any additional keyword arguments from :meth:`get_output_for`

        Returns
        -------
        Theano tensor
            `input` convolved according to the configuration of this
            layer, without any bias or nonlinearity applied.
        """
        raise NotImplementedError(
            'BaseConvLayer does not implement the convolve() method. You '
            'will want to use a subclass such as Conv2DLayer.')
|
class Conv1DLayer(BaseConvLayer):
    """
    lasagne.layers.Conv1DLayer(incoming, num_filters, filter_size,
    stride=1, pad=0, untie_biases=False, W=lasagne.init.GlorotUniform(),
    b=lasagne.init.Constant(0.),
    nonlinearity=lasagne.nonlinearities.rectify, flip_filters=True,
    convolution=lasagne.theano_extensions.conv.conv1d_mc0, **kwargs)

    1D convolutional layer.

    Performs a 1D convolution on its input and optionally adds a bias and
    applies an elementwise nonlinearity. The input is expected to be a 3D
    tensor of shape ``(batch_size, num_input_channels, input_length)``.
    See :class:`BaseConvLayer` for a description of the shared parameters.

    Parameters
    ----------
    convolution : callable
        The convolution implementation to use. The
        `lasagne.theano_extensions.conv` module provides alternative
        implementations for 1D convolutions, because the Theano API only
        features a 2D convolution implementation. Usually the default is
        fine; note that not all implementations support all settings for
        `pad` and `subsample`.

    Attributes
    ----------
    W : Theano shared variable or expression
        Variable or expression representing the filter weights.
    b : Theano shared variable or expression
        Variable or expression representing the biases.
    """

    def __init__(self, incoming, num_filters, filter_size, stride=1, pad=0,
                 untie_biases=False, W=init.GlorotUniform(),
                 b=init.Constant(0.0), nonlinearity=nonlinearities.rectify,
                 flip_filters=True, convolution=conv.conv1d_mc0, **kwargs):
        super(Conv1DLayer, self).__init__(
            incoming, num_filters, filter_size, stride, pad, untie_biases,
            W, b, nonlinearity, flip_filters, n=1, **kwargs)
        self.convolution = convolution

    def convolve(self, input, **kwargs):
        # Theano spells 'same' padding as 'half'; all other pad values are
        # passed through unchanged.
        if self.pad == 'same':
            border_mode = 'half'
        else:
            border_mode = self.pad
        return self.convolution(input, self.W, self.input_shape,
                                self.get_W_shape(),
                                subsample=self.stride,
                                border_mode=border_mode,
                                filter_flip=self.flip_filters)
|
class Conv2DLayer(BaseConvLayer):
    """
    2D convolutional layer.

    Convolves the input with a set of learned 2D filters, then optionally
    adds a bias and applies an elementwise nonlinearity.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape: a
        4D tensor shaped
        ``(batch_size, num_input_channels, input_rows, input_columns)``.
    num_filters : int
        The number of learnable convolutional filters this layer has.
    filter_size : int or iterable of int
        Filter size, an integer or a 2-element tuple.
    stride : int or iterable of int
        Stride of the convolution, an integer or a 2-element tuple.
    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        Implicit zero-padding of the input. ``'full'`` pads with one less
        than the filter size on both sides; ``'same'`` pads with half the
        filter size (rounded down) on both sides, keeping the output size
        equal to the input size when ``stride=1`` (even filter sizes are
        not supported); ``'valid'`` is an alias for ``0``.
    untie_biases : bool (default: False)
        If ``False``, one shared bias per channel (`b` is 1D); if ``True``,
        a separate bias per channel and position (`b` is a 3D tensor).
    W : Theano shared variable, expression, numpy array or callable
        Initializer for the weights, a 4D tensor with shape
        ``(num_filters, num_input_channels, filter_rows, filter_columns)``.
    b : Theano shared variable, expression, numpy array, callable or None
        Initializer for the biases (``None`` for no biases); 1D of shape
        ``(num_filters,)``, or ``(num_filters, output_rows,
        output_columns)`` when `untie_biases` is ``True``.
    nonlinearity : callable or None
        Nonlinearity applied to the activations (``None`` for linear).
    flip_filters : bool (default: True)
        Whether to flip the filters before sliding them over the input
        (a true convolution) or not (a correlation).
    convolution : callable
        The convolution implementation to use.
    **kwargs
        Any additional keyword arguments are passed to the `Layer`
        superclass.

    Attributes
    ----------
    W : Theano shared variable or expression
        The filter weights.
    b : Theano shared variable or expression
        The biases.
    """

    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 pad=0, untie_biases=False, W=init.GlorotUniform(),
                 b=init.Constant(0.0), nonlinearity=nonlinearities.rectify,
                 flip_filters=True, convolution=T.nnet.conv2d, **kwargs):
        # BaseConvLayer handles all parameter/shape logic; n=2 marks two
        # spatial dimensions.
        super(Conv2DLayer, self).__init__(
            incoming, num_filters, filter_size, stride, pad, untie_biases,
            W, b, nonlinearity, flip_filters, n=2, **kwargs)
        self.convolution = convolution

    def convolve(self, input, **kwargs):
        """Run the 2D convolution on `input` and return the raw result."""
        # Theano spells 'same' padding as 'half'.
        if self.pad == 'same':
            border_mode = 'half'
        else:
            border_mode = self.pad
        return self.convolution(input, self.W, self.input_shape,
                                self.get_W_shape(), subsample=self.stride,
                                border_mode=border_mode,
                                filter_flip=self.flip_filters)
|
class DenseLayer(Layer):
    """
    A fully connected layer.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape.
    num_units : int
        The number of units of the layer.
    W : Theano shared variable, expression, numpy array or callable
        Initializer for the weights, a matrix with shape
        ``(num_inputs, num_units)``.
    b : Theano shared variable, expression, numpy array, callable or None
        Initializer for the biases, a 1D array with shape ``(num_units,)``;
        ``None`` for no biases.
    nonlinearity : callable or None
        Nonlinearity applied to the activations (``None`` for linear).

    Examples
    --------
    >>> from lasagne.layers import InputLayer, DenseLayer
    >>> l_in = InputLayer((100, 20))
    >>> l1 = DenseLayer(l_in, num_units=50)

    Notes
    -----
    Inputs with more than two axes are flattened along their trailing
    axes, so a dense layer can directly follow e.g. a convolutional layer
    without an intermediate :class:`FlattenLayer`.
    """

    def __init__(self, incoming, num_units, W=init.GlorotUniform(),
                 b=init.Constant(0.0), nonlinearity=nonlinearities.rectify,
                 **kwargs):
        super(DenseLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
        self.num_units = num_units
        # Trailing axes are flattened in get_output_for, so the fan-in is
        # the product of all non-batch input dimensions.
        num_inputs = int(np.prod(self.input_shape[1:]))
        self.W = self.add_param(W, (num_inputs, num_units), name='W')
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_units,), name='b',
                                    regularizable=False)

    def get_output_shape_for(self, input_shape):
        return (input_shape[0], self.num_units)

    def get_output_for(self, input, **kwargs):
        # Collapse any trailing axes (e.g. conv feature maps) into one.
        if input.ndim > 2:
            input = input.flatten(2)
        z = T.dot(input, self.W)
        if self.b is not None:
            # Broadcast the bias vector over the batch dimension.
            z = z + self.b.dimshuffle('x', 0)
        return self.nonlinearity(z)
|
class NINLayer(Layer):
    """
    Network-in-network layer.

    Like :class:`DenseLayer`, but the weight matrix is applied to the
    second axis only and broadcast across all trailing dimensions. This is
    equivalent to a convolution with filter size 1 on all trailing
    dimensions; any number of trailing dimensions is supported, so
    NINLayer can be used to implement 1D, 2D, 3D, ... convolutions.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape.
    num_units : int
        The number of units of the layer.
    untie_biases : bool
        If false the network has a single bias vector similar to a dense
        layer. If true a separate bias vector is used for each trailing
        dimension beyond the 2nd.
    W : Theano shared variable, expression, numpy array or callable
        Initializer for the weights, a matrix with shape
        ``(num_inputs, num_units)``, where ``num_inputs`` is the size of
        the second dimension of the input.
    b : Theano shared variable, expression, numpy array, callable or None
        Initializer for the biases (``None`` for no biases); shape
        ``(num_units,)`` for ``untie_biases=False``, and
        ``(num_units, input_shape[2], ..., input_shape[-1])`` for
        ``untie_biases=True``.
    nonlinearity : callable or None
        Nonlinearity applied to the activations (``None`` for linear).

    Examples
    --------
    >>> from lasagne.layers import InputLayer, NINLayer
    >>> l_in = InputLayer((100, 20, 10, 3))
    >>> l1 = NINLayer(l_in, num_units=5)

    References
    ----------
    .. [1] Lin, Min, Qiang Chen, and Shuicheng Yan (2013):
           Network in network. arXiv preprint arXiv:1312.4400.
    """
    def __init__(self, incoming, num_units, untie_biases=False, W=init.GlorotUniform(), b=init.Constant(0.0), nonlinearity=nonlinearities.rectify, **kwargs):
        super(NINLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity if (nonlinearity is None) else nonlinearity)
        self.num_units = num_units
        self.untie_biases = untie_biases
        # The weight matrix only mixes the channel axis (axis 1).
        num_input_channels = self.input_shape[1]
        self.W = self.add_param(W, (num_input_channels, num_units), name='W')
        if (b is None):
            self.b = None
        else:
            if self.untie_biases:
                # One bias per unit AND per trailing position.
                biases_shape = ((num_units,) + self.output_shape[2:])
            else:
                biases_shape = (num_units,)
            self.b = self.add_param(b, biases_shape, name='b', regularizable=False)
    def get_output_shape_for(self, input_shape):
        # Only the channel axis changes; batch and trailing dims pass through.
        return ((input_shape[0], self.num_units) + input_shape[2:])
    def get_output_for(self, input, **kwargs):
        # Contract W's first axis (input channels) with the input's axis 1;
        # result has shape (num_units, batch, trailing...).
        out_r = T.tensordot(self.W, input, axes=[[0], [1]])
        # Move the batch axis back to the front:
        # (batch, num_units, trailing...).
        remaining_dims = range(2, input.ndim)
        out = out_r.dimshuffle(1, 0, *remaining_dims)
        if (self.b is None):
            activation = out
        else:
            if self.untie_biases:
                # b is (num_units, trailing...); broadcast over batch only.
                remaining_dims_biases = range(1, (input.ndim - 1))
            else:
                # b is (num_units,); broadcast over batch and all trailing
                # dimensions.
                remaining_dims_biases = (['x'] * (input.ndim - 2))
            b_shuffled = self.b.dimshuffle('x', 0, *remaining_dims_biases)
            activation = (out + b_shuffled)
        return self.nonlinearity(activation)
|
class Pool2DDNNLayer(Layer):
    """
    2D pooling layer.

    Performs 2D mean- or max-pooling over the two trailing axes of a 4D
    input tensor. This is an alternative implementation which uses
    ``theano.sandbox.cuda.dnn.dnn_pool`` directly.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or tuple
        The layer feeding into this layer, or the expected input shape.
    pool_size : integer or iterable
        The length of the pooling region in each dimension. An integer is
        promoted to a square pooling region; an iterable should have two
        elements.
    stride : integer, iterable or ``None``
        The strides between successive pooling regions in each dimension.
        If ``None`` then ``stride = pool_size``.
    pad : integer or iterable
        Number of elements to be added on each side of the input in each
        dimension. Each value must be less than the corresponding stride.
    ignore_border : bool (default: True)
        This implementation never includes partial pooling regions, so this
        argument must always be set to True. It exists only to keep the
        interface compatible with :class:`lasagne.layers.MaxPool2DLayer`.
    mode : string
        Pooling mode, one of 'max', 'average_inc_pad' or 'average_exc_pad'.
        Defaults to 'max'.
    **kwargs
        Any additional keyword arguments are passed to the :class:`Layer`
        superclass.

    Notes
    -----
    The value used to pad the input is chosen to be less than the minimum
    of the input, so that the output of each pooling region always
    corresponds to some element in the unpadded input region.

    This is a drop-in replacement for :class:`lasagne.layers.MaxPool2DLayer`
    except that it does not support ``ignore_border=False``.
    """

    def __init__(self, incoming, pool_size, stride=None, pad=(0, 0),
                 ignore_border=True, mode='max', **kwargs):
        super(Pool2DDNNLayer, self).__init__(incoming, **kwargs)
        # A 4D input is required: (batch, channels, 2 spatial dims).
        if len(self.input_shape) != 4:
            raise ValueError('Tried to create a 2D pooling layer with input shape %r. Expected 4 input dimensions (batchsize, channels, 2 spatial dimensions).' % (self.input_shape,))
        self.pool_size = as_tuple(pool_size, 2)
        # Default the stride to the pool size (non-overlapping pooling).
        self.stride = (self.pool_size if stride is None
                       else as_tuple(stride, 2))
        self.pad = as_tuple(pad, 2)
        self.mode = mode
        # cuDNN pooling always excludes partial regions at the border.
        if not ignore_border:
            raise NotImplementedError('Pool2DDNNLayer does not support ignore_border=False.')

    def get_output_shape_for(self, input_shape):
        # Both spatial axes shrink according to their pooling parameters;
        # batch size and channels are unchanged.
        shape = list(input_shape)
        for i, axis in enumerate((2, 3)):
            shape[axis] = pool_output_length(input_shape[axis],
                                             pool_size=self.pool_size[i],
                                             stride=self.stride[i],
                                             pad=self.pad[i],
                                             ignore_border=True)
        return tuple(shape)

    def get_output_for(self, input, **kwargs):
        return dnn.dnn_pool(input, self.pool_size, self.stride, self.mode,
                            self.pad)
|
class MaxPool2DDNNLayer(Pool2DDNNLayer):
    """
    2D max-pooling layer.

    :class:`Pool2DDNNLayer` with ``mode`` fixed to ``'max'``, provided for
    compatibility to other ``MaxPool2DLayer`` classes.
    """

    def __init__(self, incoming, pool_size, stride=None, pad=(0, 0),
                 ignore_border=True, **kwargs):
        super(MaxPool2DDNNLayer, self).__init__(incoming, pool_size,
                                                stride=stride, pad=pad,
                                                ignore_border=ignore_border,
                                                mode='max', **kwargs)
|
class Pool3DDNNLayer(Layer):
    """
    3D pooling layer.

    Performs 3D mean- or max-pooling over the 3 trailing axes of a 5D input
    tensor. This is an alternative implementation which uses
    ``theano.sandbox.cuda.dnn.dnn_pool`` directly.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or tuple
        The layer feeding into this layer, or the expected input shape.
    pool_size : integer or iterable
        The length of the pooling region in each dimension. An integer is
        promoted to a cubic pooling region; an iterable should have three
        elements.
    stride : integer, iterable or ``None``
        The strides between successive pooling regions in each dimension.
        If ``None`` then ``stride = pool_size``.
    pad : integer or iterable
        Number of elements to be added on each side of the input in each
        dimension. Each value must be less than the corresponding stride.
    ignore_border : bool (default: True)
        This implementation never includes partial pooling regions, so this
        argument must always be set to True. It exists only for interface
        compatibility with the other pooling layers.
    mode : string
        Pooling mode, one of 'max', 'average_inc_pad' or 'average_exc_pad'.
        Defaults to 'max'.
    **kwargs
        Any additional keyword arguments are passed to the :class:`Layer`
        superclass.

    Notes
    -----
    The value used to pad the input is chosen to be less than the minimum
    of the input, so that the output of each pooling region always
    corresponds to some element in the unpadded input region.
    """
    def __init__(self, incoming, pool_size, stride=None, pad=(0, 0, 0), ignore_border=True, mode='max', **kwargs):
        super(Pool3DDNNLayer, self).__init__(incoming, **kwargs)
        # A 5D input is required: (batch, channels, 3 spatial dims).
        if (len(self.input_shape) != 5):
            raise ValueError(('Tried to create a 3D pooling layer with input shape %r. Expected 5 input dimensions (batchsize, channels, 3 spatial dimensions).' % (self.input_shape,)))
        self.pool_size = as_tuple(pool_size, 3)
        # Default the stride to the pool size (non-overlapping pooling).
        if (stride is None):
            self.stride = self.pool_size
        else:
            self.stride = as_tuple(stride, 3)
        self.pad = as_tuple(pad, 3)
        self.mode = mode
        # cuDNN pooling always excludes partial regions at the border.
        if (not ignore_border):
            raise NotImplementedError('Pool3DDNNLayer does not support ignore_border=False.')
    def get_output_shape_for(self, input_shape):
        # Each of the three trailing axes shrinks according to its pooling
        # parameters; batch size and channels are unchanged.
        output_shape = list(input_shape)
        output_shape[2] = pool_output_length(input_shape[2], pool_size=self.pool_size[0], stride=self.stride[0], pad=self.pad[0], ignore_border=True)
        output_shape[3] = pool_output_length(input_shape[3], pool_size=self.pool_size[1], stride=self.stride[1], pad=self.pad[1], ignore_border=True)
        output_shape[4] = pool_output_length(input_shape[4], pool_size=self.pool_size[2], stride=self.stride[2], pad=self.pad[2], ignore_border=True)
        return tuple(output_shape)
    def get_output_for(self, input, **kwargs):
        return dnn.dnn_pool(input, self.pool_size, self.stride, self.mode, self.pad)
|
class MaxPool3DDNNLayer(Pool3DDNNLayer):
    """
    3D max-pooling layer.

    :class:`Pool3DDNNLayer` with ``mode`` fixed to ``'max'``, provided for
    consistency to ``MaxPool2DLayer`` classes.
    """

    def __init__(self, incoming, pool_size, stride=None, pad=(0, 0, 0),
                 ignore_border=True, **kwargs):
        super(MaxPool3DDNNLayer, self).__init__(incoming, pool_size,
                                                stride=stride, pad=pad,
                                                ignore_border=ignore_border,
                                                mode='max', **kwargs)
|
class Conv2DDNNLayer(BaseConvLayer):
    """
    2D convolutional layer.

    Performs a 2D convolution on its input and optionally adds a bias and
    applies an elementwise nonlinearity. This is an alternative
    implementation which uses ``theano.sandbox.cuda.dnn.dnn_conv``
    directly.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape: a
        4D tensor shaped
        ``(batch_size, num_input_channels, input_rows, input_columns)``.
    num_filters : int
        The number of learnable convolutional filters this layer has.
    filter_size : int or iterable of int
        Filter size, an integer or a 2-element tuple.
    stride : int or iterable of int
        Stride of the convolution, an integer or a 2-element tuple.
    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        Implicit zero-padding of the input. ``'full'`` pads with one less
        than the filter size on both sides; ``'same'`` pads with half the
        filter size (rounded down) on both sides, keeping the output size
        equal to the input size when ``stride=1`` (even filter sizes are
        not supported); ``'valid'`` is an alias for ``0``.
    untie_biases : bool (default: False)
        If ``False``, one shared bias per channel (`b` is 1D); if ``True``,
        a separate bias per channel and position (`b` is a 3D tensor).
    W : Theano shared variable, expression, numpy array or callable
        Initializer for the weights, a 4D tensor with shape
        ``(num_filters, num_input_channels, filter_rows, filter_columns)``.
    b : Theano shared variable, expression, numpy array, callable or None
        Initializer for the biases (``None`` for no biases); 1D of shape
        ``(num_filters,)``, or ``(num_filters, output_rows,
        output_columns)`` when `untie_biases` is ``True``.
    nonlinearity : callable or None
        Nonlinearity applied to the activations (``None`` for linear).
    flip_filters : bool (default: False)
        Whether to flip the filters (a true convolution) or not (a
        correlation). Flipping adds a bit of overhead, so it is disabled
        by default; set it to ``True`` when loading weights that were
        learned by a regular :class:`lasagne.layers.Conv2DLayer`.
    **kwargs
        Any additional keyword arguments are passed to the `Layer`
        superclass.

    Attributes
    ----------
    W : Theano shared variable or expression
        The filter weights.
    b : Theano shared variable or expression
        The biases.
    """

    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 pad=0, untie_biases=False, W=init.GlorotUniform(),
                 b=init.Constant(0.0), nonlinearity=nonlinearities.rectify,
                 flip_filters=False, **kwargs):
        super(Conv2DDNNLayer, self).__init__(
            incoming, num_filters, filter_size, stride, pad, untie_biases,
            W, b, nonlinearity, flip_filters, n=2, **kwargs)

    def convolve(self, input, **kwargs):
        """Run the 2D cuDNN convolution on `input`."""
        # dnn_conv performs a correlation unless asked to flip the kernels.
        conv_mode = 'conv' if self.flip_filters else 'cross'
        if self.pad == 'same':
            # 'same' = pad by half the filter size (rounded down) per dim.
            border_mode = tuple(s // 2 for s in self.filter_size)
        else:
            border_mode = self.pad
        return dnn.dnn_conv(img=input, kerns=self.W, subsample=self.stride,
                            border_mode=border_mode, conv_mode=conv_mode)
|
class Conv3DDNNLayer(BaseConvLayer):
    """
    3D convolutional layer.

    Performs a 3D convolution on its input and optionally adds a bias and
    applies an elementwise nonlinearity. This implementation uses
    ``theano.sandbox.cuda.dnn.dnn_conv3d`` directly.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape: a
        5D tensor shaped ``(batch_size, num_input_channels, input_rows,
        input_columns, input_depth)``.
    num_filters : int
        The number of learnable convolutional filters this layer has.
    filter_size : int or iterable of int
        Filter size, an integer or a 3-element tuple.
    stride : int or iterable of int
        Stride of the convolution, an integer or a 3-element tuple.
    pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
        Implicit zero-padding of the input. ``'full'`` pads with one less
        than the filter size on both sides; ``'same'`` pads with half the
        filter size (rounded down) on both sides, keeping the output size
        equal to the input size when ``stride=1`` (even filter sizes are
        not supported); ``'valid'`` is an alias for ``0``.
    untie_biases : bool (default: False)
        If ``False``, one shared bias per channel (`b` is 1D); if ``True``,
        a separate bias per channel and position (`b` is a 4D tensor).
    W : Theano shared variable, expression, numpy array or callable
        Initializer for the weights, a 5D tensor with shape
        ``(num_filters, num_input_channels, filter_rows, filter_columns,
        filter_depth)``.
    b : Theano shared variable, expression, numpy array, callable or None
        Initializer for the biases (``None`` for no biases); 1D of shape
        ``(num_filters,)``, or ``(num_filters, output_rows,
        output_columns, output_depth)`` when `untie_biases` is ``True``.
    nonlinearity : callable or None
        Nonlinearity applied to the activations (``None`` for linear).
    flip_filters : bool (default: False)
        Whether to flip the filters (a true convolution) or not (a
        correlation). Flipping adds overhead and is disabled by default;
        take care when computing predictions with pre-trained weights that
        may need flipping.
    **kwargs
        Any additional keyword arguments are passed to the `Layer`
        superclass.

    Attributes
    ----------
    W : Theano shared variable or expression
        The filter weights.
    b : Theano shared variable or expression
        The biases.
    """

    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1, 1),
                 pad=0, untie_biases=False, W=init.GlorotUniform(),
                 b=init.Constant(0.0), nonlinearity=nonlinearities.rectify,
                 flip_filters=False, **kwargs):
        super(Conv3DDNNLayer, self).__init__(
            incoming, num_filters, filter_size, stride, pad, untie_biases,
            W, b, nonlinearity, flip_filters, n=3, **kwargs)

    def convolve(self, input, **kwargs):
        """Run the 3D cuDNN convolution on `input`."""
        # dnn_conv3d performs a correlation unless asked to flip.
        conv_mode = 'conv' if self.flip_filters else 'cross'
        if self.pad == 'same':
            # 'same' = pad by half the filter size (rounded down) per dim.
            border_mode = tuple(s // 2 for s in self.filter_size)
        else:
            border_mode = self.pad
        return dnn.dnn_conv3d(img=input, kerns=self.W,
                              subsample=self.stride,
                              border_mode=border_mode, conv_mode=conv_mode)
|
class EmbeddingLayer(Layer):
    """
    A layer for word embeddings. The input should be an integer type
    Tensor variable.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape.
    input_size : int
        The number of different embeddings. The last embedding will have
        index ``input_size - 1``.
    output_size : int
        The size of each embedding.
    W : Theano shared variable, expression, numpy array or callable
        Initializer for the embedding matrix, a matrix with shape
        ``(input_size, output_size)``.

    Examples
    --------
    >>> from lasagne.layers import EmbeddingLayer, InputLayer, get_output
    >>> import theano
    >>> x = T.imatrix()
    >>> l_in = InputLayer((3, ))
    >>> W = np.arange(3*5).reshape((3, 5)).astype('float32')
    >>> l1 = EmbeddingLayer(l_in, input_size=3, output_size=5, W=W)
    >>> output = get_output(l1, x)
    >>> f = theano.function([x], output)
    >>> x_test = np.array([[0, 2], [1, 2]]).astype('int32')
    >>> f(x_test)
    array([[[  0.,   1.,   2.,   3.,   4.],
            [ 10.,  11.,  12.,  13.,  14.]],
    <BLANKLINE>
           [[  5.,   6.,   7.,   8.,   9.],
            [ 10.,  11.,  12.,  13.,  14.]]], dtype=float32)
    """

    def __init__(self, incoming, input_size, output_size, W=init.Normal(),
                 **kwargs):
        super(EmbeddingLayer, self).__init__(incoming, **kwargs)
        self.input_size = input_size
        self.output_size = output_size
        self.W = self.add_param(W, (input_size, output_size), name='W')

    def get_output_shape_for(self, input_shape):
        # Each integer index is replaced by a vector of output_size values.
        return input_shape + (self.output_size,)

    def get_output_for(self, input, **kwargs):
        # Advanced indexing performs the embedding lookup row by row.
        return self.W[input]
|
def get_all_layers(layer, treat_as_input=None):
    """
    Gather all layers below one or more given :class:`Layer` instances,
    including the given layer(s) themselves.

    Its main use is to collect all layers of a network just given the
    output layer(s). The layers are guaranteed to be returned in
    topological order: a layer in the result list is always preceded by
    all layers its input depends on.

    Parameters
    ----------
    layer : Layer or list
        the :class:`Layer` instance for which to gather all layers feeding
        into it, or a list of :class:`Layer` instances.
    treat_as_input : None or iterable
        an iterable of :class:`Layer` instances to treat as input layers
        with no layers feeding into them. They will show up in the result
        list, but their incoming layers will not be collected (unless they
        are required for other layers as well).

    Returns
    -------
    list
        a list of :class:`Layer` instances feeding into the given
        instance(s) either directly or indirectly, and the given
        instance(s) themselves, in topological order.

    Examples
    --------
    >>> from lasagne.layers import InputLayer, DenseLayer
    >>> l_in = InputLayer((100, 20))
    >>> l1 = DenseLayer(l_in, num_units=50)
    >>> get_all_layers(l1) == [l_in, l1]
    True
    >>> l2 = DenseLayer(l_in, num_units=10)
    >>> get_all_layers([l2, l1]) == [l_in, l2, l1]
    True
    >>> get_all_layers([l1, l2]) == [l_in, l1, l2]
    True
    >>> l3 = DenseLayer(l2, num_units=20)
    >>> get_all_layers(l3) == [l_in, l2, l3]
    True
    >>> get_all_layers(l3, treat_as_input=[l2]) == [l2, l3]
    True
    """
    # Accept either a single layer or any iterable of layers.
    try:
        pending = deque(layer)
    except TypeError:
        pending = deque([layer])
    seen = set()
    done = set()
    result = []
    if treat_as_input is not None:
        # Pretend these layers have no incomings: marking them as seen up
        # front means their inputs are never pushed onto the queue.
        seen.update(treat_as_input)
    # Iterative depth-first traversal. Each layer is visited twice: the
    # first visit pushes its inputs in front of it, the second (once all
    # inputs have been emitted) appends it to the result.
    while pending:
        current = pending[0]
        if current is None:
            # Placeholder for an unconnected input slot; just drop it.
            pending.popleft()
        elif current not in seen:
            # First visit: queue the layer's inputs ahead of it so they
            # are emitted first, preserving topological order.
            seen.add(current)
            if hasattr(current, 'input_layers'):
                pending.extendleft(reversed(current.input_layers))
            elif hasattr(current, 'input_layer'):
                pending.appendleft(current.input_layer)
        else:
            # Second visit: all inputs are handled; emit the layer once.
            pending.popleft()
            if current not in done:
                result.append(current)
                done.add(current)
    return result
|
def get_output(layer_or_layers, inputs=None, **kwargs):
    """
    Compute the output of the network at one or more given layers.

    Optionally, you can define the input(s) to propagate through the
    network instead of using the input variable(s) associated with the
    network's input layer(s).

    Parameters
    ----------
    layer_or_layers : Layer or list
        the :class:`Layer` instance for which to compute the output
        expressions, or a list of :class:`Layer` instances.
    inputs : None, Theano expression, numpy array, or dict
        If None, uses the input variables associated with the
        :class:`InputLayer` instances. If a Theano expression, this
        defines the input for a single :class:`InputLayer` instance; a
        ValueError is raised if there are multiple :class:`InputLayer`
        instances. If a numpy array, it is wrapped as a Theano constant.
        If a dictionary, any :class:`Layer` instance (including the input
        layers) can be mapped to a Theano expression or numpy array to use
        instead of its regular output.

    Returns
    -------
    output : Theano expression or list
        the output of the given layer(s) for the given network input

    Notes
    -----
    Depending on your network architecture, `get_output([l1, l2])` may be
    crucially different from `[get_output(l1), get_output(l2)]`. Only the
    former ensures that the output expressions depend on the same
    intermediate expressions (e.g. the same dropout masks for a shared
    dropout layer).
    """
    # Imported here to avoid circular imports at module load time.
    from .input import InputLayer
    from .base import MergeLayer
    # Layers explicitly mapped in `inputs` act as network inputs: the
    # traversal must not descend past them.
    treat_as_input = inputs.keys() if isinstance(inputs, dict) else []
    all_layers = get_all_layers(layer_or_layers, treat_as_input)
    # Seed the table of known expressions with the variables of the
    # network's own input layers (unless overridden via `inputs`).
    computed = {}
    for candidate in all_layers:
        if (isinstance(candidate, InputLayer) and
                candidate not in treat_as_input):
            computed[candidate] = candidate.input_var
    if isinstance(inputs, dict):
        # Explicit mapping: override/extend with user-supplied expressions.
        for mapped_layer, expr in inputs.items():
            computed[mapped_layer] = utils.as_theano_expression(expr)
    elif inputs is not None:
        # A single expression only makes sense for a single input layer.
        if len(computed) > 1:
            raise ValueError('get_output() was called with a single input expression on a network with multiple input layers. Please call it with a dictionary of input expressions instead.')
        for input_layer in computed:
            computed[input_layer] = utils.as_theano_expression(inputs)
    # Walk the layers in topological order, so every layer's inputs are
    # available by the time it is processed.
    for layer in all_layers:
        if layer in computed:
            continue
        try:
            if isinstance(layer, MergeLayer):
                layer_inputs = [computed[input_layer]
                                for input_layer in layer.input_layers]
            else:
                layer_inputs = computed[layer.input_layer]
        except KeyError:
            # A layer whose input was never provided nor computable.
            raise ValueError(('get_output() was called without giving an input expression for the free-floating layer %r. Please call it with a dictionary mapping this layer to an input expression.' % layer))
        computed[layer] = layer.get_output_for(layer_inputs, **kwargs)
    # Return a list iff a list was passed in.
    try:
        return [computed[layer] for layer in layer_or_layers]
    except TypeError:
        return computed[layer_or_layers]
|
def get_output_shape(layer_or_layers, input_shapes=None):
    """
    Computes the output shape of the network at one or more given layers.

    Parameters
    ----------
    layer_or_layers : Layer or list
        the :class:`Layer` instance for which to compute the output
        shapes, or a list of :class:`Layer` instances.

    input_shapes : None, tuple, or dict
        If None, uses the input shapes associated with the
        :class:`InputLayer` instances.
        If a tuple, this defines the input shape for a single
        :class:`InputLayer` instance; a ValueError is raised if there
        are multiple :class:`InputLayer` instances.
        If a dictionary, any :class:`Layer` instance (including the
        input layers) can be mapped to a shape tuple to use instead of
        its regular output shape.

    Returns
    -------
    tuple or list
        the output shape of the given layer(s) for the given network input
    """
    # shortcut: with no shape overrides, the cached output shapes suffice
    if (input_shapes is None) or (input_shapes == {}):
        try:
            return [layer.output_shape for layer in layer_or_layers]
        except TypeError:
            return layer_or_layers.output_shape
    from .input import InputLayer
    from .base import MergeLayer
    # any layer the caller supplied a shape for acts as a network input
    treat_as_input = input_shapes.keys() if isinstance(input_shapes, dict) else []
    all_layers = get_all_layers(layer_or_layers, treat_as_input)
    # seed the shape map with the declared shapes of every InputLayer
    # that was not overridden by the caller
    all_shapes = {layer: layer.shape
                  for layer in all_layers
                  if isinstance(layer, InputLayer) and
                  layer not in treat_as_input}
    if isinstance(input_shapes, dict):
        all_shapes.update(input_shapes)
    elif input_shapes is not None:
        if len(all_shapes) > 1:
            raise ValueError('get_output_shape() was called with a single input shape on a network with multiple input layers. Please call it with a dictionary of input shapes instead.')
        for input_layer in all_shapes:
            all_shapes[input_layer] = input_shapes
    # propagate shapes through the network in topological order
    for layer in all_layers:
        if layer in all_shapes:
            continue
        if isinstance(layer, MergeLayer):
            shapes_in = [all_shapes[l_in] for l_in in layer.input_layers]
        else:
            shapes_in = all_shapes[layer.input_layer]
        all_shapes[layer] = layer.get_output_shape_for(shapes_in)
    try:
        return [all_shapes[layer] for layer in layer_or_layers]
    except TypeError:
        # a single layer was given rather than a list
        return all_shapes[layer_or_layers]
|
def get_all_params(layer, **tags):
    """
    Returns a list of Theano shared variables that parameterize the layer.

    Gathers the parameter variables of all layers below one or more given
    :class:`Layer` instances, including the layer(s) itself. Its main use
    is to collect all parameters of a network just given the output
    layer(s).

    By default, all shared variables that participate in the forward pass
    are returned. The list can optionally be filtered by specifying tags
    as keyword arguments: for example, ``trainable=True`` will only return
    trainable parameters, and ``regularizable=True`` only parameters that
    can be regularized.

    Parameters
    ----------
    layer : Layer or list
        The :class:`Layer` instance for which to gather all parameters,
        or a list of :class:`Layer` instances.

    **tags (optional)
        ``tag1=True`` limits the list to parameters tagged with ``tag1``;
        ``tag1=False`` to parameters not tagged with ``tag1``. Commonly
        used tags are ``regularizable`` and ``trainable``.

    Returns
    -------
    params : list
        A list of Theano shared variables representing the parameters.

    Notes
    -----
    If any layer's parameter was set to a Theano expression instead of a
    shared variable, the shared variables involved in that expression are
    returned rather than the expression itself; tag filtering then treats
    all variables within the expression as tagged the same.
    """
    collected = []
    for sub_layer in get_all_layers(layer):
        collected.extend(sub_layer.get_params(**tags))
    # drop duplicates while keeping first-seen order
    return utils.unique(collected)
|
def count_params(layer, **tags):
    """
    Counts all parameters (i.e., the number of scalar values) of all
    layers below one or more given :class:`Layer` instances, including
    the layer(s) itself. Useful to compare the capacity of various
    network architectures.

    Parameters
    ----------
    layer : Layer or list
        The :class:`Layer` instance for which to count the parameters,
        or a list of :class:`Layer` instances.

    **tags (optional)
        ``tag1=True`` limits the count to parameters tagged with
        ``tag1``; ``tag1=False`` to parameters not tagged with ``tag1``.
        Commonly used tags are ``regularizable`` and ``trainable``.

    Returns
    -------
    int
        The total number of learnable parameters.
    """
    total = 0
    for param in get_all_params(layer, **tags):
        # number of scalars in this parameter = product of its dimensions
        total += np.prod(param.get_value().shape)
    return total
|
def get_all_param_values(layer, **tags):
    """
    Returns the values of the parameters of all layers below one or more
    given :class:`Layer` instances, including the layer(s) itself.

    Can be used in conjunction with :func:`set_all_param_values` to save
    and restore model parameters.

    Parameters
    ----------
    layer : Layer or list
        The :class:`Layer` instance for which to gather all parameter
        values, or a list of :class:`Layer` instances.

    **tags (optional)
        ``tag1=True`` limits the list to parameters tagged with ``tag1``;
        ``tag1=False`` to parameters not tagged with ``tag1``. Commonly
        used tags are ``regularizable`` and ``trainable``.

    Returns
    -------
    list of numpy.array
        A list of numpy arrays representing the parameter values.
    """
    return [param.get_value() for param in get_all_params(layer, **tags)]
|
def set_all_param_values(layer, values, **tags):
    """
    Given a list of numpy arrays, sets the parameters of all layers below
    one or more given :class:`Layer` instances (including the layer(s)
    itself) to the given values.

    Can be used in conjunction with :func:`get_all_param_values` to save
    and restore model parameters.

    Parameters
    ----------
    layer : Layer or list
        The :class:`Layer` instance for which to set all parameter
        values, or a list of :class:`Layer` instances.

    values : list of numpy.array
        A list of numpy arrays representing the parameter values; must
        match the number of parameters, and every value's shape must
        match the shape of the parameter it replaces.

    **tags (optional)
        ``tag1=True`` limits the affected parameters to those tagged with
        ``tag1``; ``tag1=False`` to those not tagged with ``tag1``.
        Commonly used tags are ``regularizable`` and ``trainable``.

    Raises
    ------
    ValueError
        If the number of values does not equal the number of parameters,
        or if a parameter's shape does not match its new value's shape.
    """
    params = get_all_params(layer, **tags)
    if len(values) != len(params):
        raise ValueError(('mismatch: got %d values to set %d parameters' % (len(values), len(params))))
    for param, value in zip(params, values):
        # validate before mutating, so a bad list leaves nothing half-set
        current_shape = param.get_value().shape
        if current_shape != value.shape:
            raise ValueError(('mismatch: parameter has shape %r but value to set has shape %r' % (current_shape, value.shape)))
        param.set_value(value)
|
class InputLayer(Layer):
    """
    This layer holds a symbolic variable that represents a network input.
    A variable can be specified when the layer is instantiated, else it
    is created.

    Parameters
    ----------
    shape : tuple of `int` or `None` elements
        The shape of the input. Any element can be `None` to indicate
        that the size of that dimension is not fixed at compile time.

    input_var : Theano symbolic variable or `None` (default: `None`)
        A variable representing a network input. If it is not provided,
        a variable will be created.

    Raises
    ------
    ValueError
        If the dimension of `input_var` is not equal to `len(shape)`

    Notes
    -----
    The first dimension usually indicates the batch size. If you specify
    it, Theano may apply more optimizations while compiling the training
    or prediction function, but the compiled function will not accept
    data of a different batch size at runtime. To compile for a variable
    batch size, set the first shape element to `None` instead.
    """
    def __init__(self, shape, input_var=None, name=None, **kwargs):
        self.shape = shape
        # reject zero or negative sizes; None (unknown size) is allowed
        if any(d is not None and d <= 0 for d in self.shape):
            raise ValueError(('Cannot create InputLayer with a non-positive shape dimension. shape=%r, self.name=%r' % (self.shape, name)))
        ndim = len(shape)
        if input_var is None:
            # create a fresh symbolic variable of matching dimensionality
            var_name = '%s.input' % name if name is not None else 'input'
            var_type = T.TensorType(theano.config.floatX, [False] * ndim)
            input_var = var_type(var_name)
        elif input_var.ndim != ndim:
            raise ValueError(('shape has %d dimensions, but variable has %d' % (ndim, input_var.ndim)))
        self.input_var = input_var
        self.name = name
        self.params = OrderedDict()

    @Layer.output_shape.getter
    def output_shape(self):
        # an input layer's output shape is simply its declared shape
        return self.shape
|
def autocrop(inputs, cropping):
    """
    Crops the given input arrays so that they can be combined
    element-wise.

    Cropping takes a sequence of inputs and crops them per-axis: if
    cropping is enabled for a specific axis, the minimum size in that
    axis over all inputs is computed, and all inputs are cropped to that
    size.

    The per-axis cropping modes are:

    `None`: this axis is not cropped, inputs are unchanged in this axis

    `'lower'`: inputs are cropped choosing the lower portion in this axis
    (`a[:crop_size, ...]`)

    `'upper'`: inputs are cropped choosing the upper portion in this axis
    (`a[-crop_size:, ...]`)

    `'center'`: inputs are cropped choosing the central portion in this
    axis (``a[offset:offset+crop_size, ...]`` where
    ``offset = (a.shape[0]-crop_size)//2)``

    Parameters
    ----------
    inputs : list of Theano expressions
        The input arrays in the form of a list of Theano expressions

    cropping : list of cropping modes
        Cropping modes, one for each axis. If shorter than the number of
        axes, it is padded with `None`. If `cropping` is None, the inputs
        are returned unchanged.

    Returns
    -------
    list of Theano expressions
        each expression is the cropped version of the corresponding input
    """
    if cropping is None:
        return inputs
    ndim = inputs[0].ndim
    # all inputs must agree in dimensionality for per-axis cropping
    if not all(inp.ndim == ndim for inp in inputs):
        raise ValueError('Not all inputs are of the same dimensionality. Got {0} inputs of dimensionalities {1}.'.format(len(inputs), [inp.ndim for inp in inputs]))
    shapes = [inp.shape for inp in inputs]
    # symbolic per-axis minimum size over all inputs
    min_shape = T.min(T.as_tensor_variable(shapes), axis=0)
    slices_by_input = [[] for _ in inputs]
    # pad the cropping spec with None up to the input dimensionality
    cropping = list(cropping)
    cropping += [None] * (ndim - len(cropping))
    for dim, cr in enumerate(cropping):
        if cr is None:
            # leave this axis untouched for every input
            for slices in slices_by_input:
                slices.append(slice(None))
            continue
        sz = min_shape[dim]
        if cr == 'lower':
            for slices in slices_by_input:
                slices.append(slice(None, sz))
        elif cr == 'upper':
            for slices in slices_by_input:
                slices.append(slice((- sz), None))
        elif cr == 'center':
            # the offset depends on each input's own size in this axis
            for sh, slices in zip(shapes, slices_by_input):
                offset = (sh[dim] - sz) // 2
                slices.append(slice(offset, offset + sz))
        else:
            raise ValueError("Unknown crop mode '{0}'".format(cr))
    return [inp[slices] for inp, slices in zip(inputs, slices_by_input)]
|
def autocrop_array_shapes(input_shapes, cropping):
    """
    Computes the shapes of the given arrays after auto-cropping is
    applied.

    For more information on cropping, see the :func:`autocrop` function
    documentation.

    Parameters
    ----------
    input_shapes : list of tuples
        the shapes of the input arrays prior to cropping

    cropping : list of cropping modes
        one mode per axis; if shorter than the number of axes, it is
        padded with `None`. If `cropping` is None, `input_shapes` is
        returned as is. See :func:`autocrop` for the mode semantics.

    Returns
    -------
    list of tuples
        each tuple is the cropped version of the corresponding input
        shape tuple in `input_shapes`
    """
    if cropping is None:
        return input_shapes
    ndim = len(input_shapes[0])
    # all shapes must agree in dimensionality for per-axis cropping
    if not all(len(sh) == ndim for sh in input_shapes):
        raise ValueError('Not all inputs are of the same dimensionality. Got {0} inputs of dimensionalities {1}.'.format(len(input_shapes), [len(sh) for sh in input_shapes]))
    # pad the cropping spec with None up to the input dimensionality
    cropping = list(cropping)
    cropping += [None] * (ndim - len(cropping))
    per_axis = []
    # work axis-by-axis: zip(*input_shapes) yields the sizes of one axis
    # across all inputs
    for sizes, mode in zip(zip(*input_shapes), cropping):
        if mode is None:
            per_axis.append(sizes)
        elif mode in ('lower', 'center', 'upper'):
            # every cropping mode reduces all inputs to the minimum size
            per_axis.append((min(sizes),) * len(sizes))
        else:
            raise ValueError("Unknown crop mode '{0}'".format(mode))
    # transpose back from per-axis lists to per-input shape tuples
    return [tuple(sh) for sh in zip(*per_axis)]
|
class ConcatLayer(MergeLayer):
    """
    Concatenates multiple inputs along the specified axis. Inputs should
    have the same shape except for the dimension specified in axis, which
    can have different sizes.

    Parameters
    -----------
    incomings : a list of :class:`Layer` instances or tuples
        The layers feeding into this layer, or expected input shapes

    axis : int
        Axis which inputs are joined over

    cropping : None or [crop]
        Cropping for each input axis. Cropping is described in the
        docstring for :func:`autocrop`. Cropping is always disabled for
        `axis`.
    """
    def __init__(self, incomings, axis=1, cropping=None, **kwargs):
        super(ConcatLayer, self).__init__(incomings, **kwargs)
        self.axis = axis
        if cropping is not None:
            # never crop along the concatenation axis itself
            cropping = list(cropping)
            cropping[axis] = None
        self.cropping = cropping

    def get_output_shape_for(self, input_shapes):
        input_shapes = autocrop_array_shapes(input_shapes, self.cropping)
        # take the first known (non-None) size for every axis
        output_shape = [next((s for s in sizes if s is not None), None)
                        for sizes in zip(*input_shapes)]

        def compatible(shape_a, shape_b):
            # shapes must agree everywhere except on the concat axis;
            # None (unknown) matches anything
            if len(shape_a) != len(shape_b):
                return False
            return all(i == self.axis or sa is None or sb is None or sa == sb
                       for i, (sa, sb) in enumerate(zip(shape_a, shape_b)))

        if not all(compatible(shape, output_shape) for shape in input_shapes):
            raise ValueError('Mismatch: input shapes must be the same except in the concatenation axis')
        sizes = [shape[self.axis] for shape in input_shapes]
        # the concatenated size is only known if every part is known
        output_shape[self.axis] = (None if any(s is None for s in sizes)
                                   else sum(sizes))
        return tuple(output_shape)

    def get_output_for(self, inputs, **kwargs):
        return T.concatenate(autocrop(inputs, self.cropping), axis=self.axis)
|
class ElemwiseMergeLayer(MergeLayer):
    """
    This layer performs an elementwise merge of its input layers.
    It requires all input layers to have the same output shape.

    Parameters
    ----------
    incomings : a list of :class:`Layer` instances or tuples
        the layers feeding into this layer, or expected input shapes,
        with all incoming shapes being equal

    merge_function : callable
        the merge function to use. Should take two arguments and return
        the updated value. Some possible merge functions are
        ``theano.tensor``: ``mul``, ``add``, ``maximum`` and ``minimum``.

    cropping : None or [crop]
        Cropping for each input axis. Cropping is described in the
        docstring for :func:`autocrop`

    See Also
    --------
    ElemwiseSumLayer : Shortcut for sum layer.
    """
    def __init__(self, incomings, merge_function, cropping=None, **kwargs):
        super(ElemwiseMergeLayer, self).__init__(incomings, **kwargs)
        self.merge_function = merge_function
        self.cropping = cropping

    def get_output_shape_for(self, input_shapes):
        input_shapes = autocrop_array_shapes(input_shapes, self.cropping)
        # take the first known (non-None) size for every axis
        output_shape = tuple(next((s for s in sizes if s is not None), None)
                             for sizes in zip(*input_shapes))

        def compatible(shape_a, shape_b):
            # shapes must agree axis-by-axis; None (unknown) matches anything
            if len(shape_a) != len(shape_b):
                return False
            return all(sa is None or sb is None or sa == sb
                       for sa, sb in zip(shape_a, shape_b))

        if not all(compatible(shape, output_shape) for shape in input_shapes):
            raise ValueError('Mismatch: not all input shapes are the same')
        return output_shape

    def get_output_for(self, inputs, **kwargs):
        inputs = autocrop(inputs, self.cropping)
        # left-fold the merge function over the inputs
        merged = inputs[0] if inputs else None
        for extra in inputs[1:]:
            merged = self.merge_function(merged, extra)
        return merged
|
class ElemwiseSumLayer(ElemwiseMergeLayer):
    """
    This layer performs an elementwise sum of its input layers.
    It requires all input layers to have the same output shape.

    Parameters
    ----------
    incomings : a list of :class:`Layer` instances or tuples
        the layers feeding into this layer, or expected input shapes,
        with all incoming shapes being equal

    coeffs: list or scalar
        A same-sized list of coefficients, or a single coefficient that
        is to be applied to all instances. By default, these will not
        be included in the learnable parameters of this layer.

    cropping : None or [crop]
        Cropping for each input axis. Cropping is described in the
        docstring for :func:`autocrop`

    Notes
    -----
    Depending on your architecture, this can be used to avoid the more
    costly :class:`ConcatLayer`. For example, instead of concatenating
    layers before a :class:`DenseLayer`, insert separate
    :class:`DenseLayer` instances of the same number of output units and
    add them up afterwards. (This avoids the copy operations in
    concatenation, but splits up the dot product.)
    """
    def __init__(self, incomings, coeffs=1, cropping=None, **kwargs):
        super(ElemwiseSumLayer, self).__init__(incomings, T.add,
                                               cropping=cropping, **kwargs)
        if not isinstance(coeffs, list):
            # broadcast a single scalar coefficient to all incomings
            coeffs = [coeffs] * len(incomings)
        elif len(coeffs) != len(incomings):
            raise ValueError(('Mismatch: got %d coeffs for %d incomings' % (len(coeffs), len(incomings))))
        self.coeffs = coeffs

    def get_output_for(self, inputs, **kwargs):
        # scale each input by its coefficient (skip the no-op case)
        scaled = [inp if coeff == 1 else inp * coeff
                  for coeff, inp in zip(self.coeffs, inputs)]
        return super(ElemwiseSumLayer, self).get_output_for(scaled, **kwargs)
|
class DropoutLayer(Layer):
    """Dropout layer

    Sets values to zero with probability p. See notes for disabling
    dropout during testing.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        the layer feeding into this layer, or the expected input shape
    p : float or scalar tensor
        The probability of setting a value to zero
    rescale : bool
        If true the input is rescaled with input / (1-p) when
        deterministic is False.

    Notes
    -----
    The dropout layer is a regularizer that randomly sets input values to
    zero; see [1]_, [2]_ for why this might improve generalization.
    During training you should set deterministic to false and during
    testing you should set deterministic to true.

    If rescale is true the input is scaled with input / (1-p) when
    deterministic is false, see references for further discussion. Note
    that this implementation scales the input at training time.

    References
    ----------
    .. [1] Hinton, G., Srivastava, N., Krizhevsky, A., Sutskever, I.,
           Salakhutdinov, R. R. (2012):
           Improving neural networks by preventing co-adaptation of
           feature detectors. arXiv preprint arXiv:1207.0580.

    .. [2] Srivastava Nitish, Hinton, G., Krizhevsky, A., Sutskever,
           I., & Salakhutdinov, R. R. (2014):
           Dropout: A Simple Way to Prevent Neural Networks from
           Overfitting. Journal of Machine Learning Research,
           5(Jun)(2), 1929-1958.
    """
    def __init__(self, incoming, p=0.5, rescale=True, **kwargs):
        super(DropoutLayer, self).__init__(incoming, **kwargs)
        self._srng = RandomStreams(get_rng().randint(1, 2147462579))
        self.p = p
        self.rescale = rescale

    def get_output_for(self, input, deterministic=False, **kwargs):
        """
        Parameters
        ----------
        input : tensor
            output from the previous layer
        deterministic : bool
            If true dropout and scaling is disabled, see notes
        """
        if deterministic or self.p == 0:
            return input
        retain_prob = 1 - self.p
        if self.rescale:
            # scale at training time so no rescaling is needed at test time
            input = input / retain_prob
        mask_shape = self.input_shape
        if any(s is None for s in mask_shape):
            # fall back to the symbolic shape when any dim is unknown
            mask_shape = input.shape
        mask = self._srng.binomial(mask_shape, p=retain_prob,
                                   dtype=theano.config.floatX)
        return input * mask
|
class GaussianNoiseLayer(Layer):
    """Gaussian noise layer.

    Add zero-mean Gaussian noise of given standard deviation to the
    input [1]_.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        the layer feeding into this layer, or the expected input shape
    sigma : float or tensor scalar
        Standard deviation of added Gaussian noise

    Notes
    -----
    The Gaussian noise layer is a regularizer. During training you should
    set deterministic to false and during testing you should set
    deterministic to true.

    References
    ----------
    .. [1] K.-C. Jim, C. Giles, and B. Horne (1996):
           An analysis of noise in recurrent neural networks: convergence
           and generalization.
           IEEE Transactions on Neural Networks, 7(6):1424-1438.
    """
    def __init__(self, incoming, sigma=0.1, **kwargs):
        super(GaussianNoiseLayer, self).__init__(incoming, **kwargs)
        self._srng = RandomStreams(get_rng().randint(1, 2147462579))
        self.sigma = sigma

    def get_output_for(self, input, deterministic=False, **kwargs):
        """
        Parameters
        ----------
        input : tensor
            output from the previous layer
        deterministic : bool
            If true noise is disabled, see notes
        """
        if deterministic or self.sigma == 0:
            return input
        noise = self._srng.normal(input.shape, avg=0.0, std=self.sigma)
        return input + noise
|
class FlattenLayer(Layer):
    """
    A layer that flattens its input. The leading ``outdim-1`` dimensions
    of the output will have the same shape as the input. The remaining
    dimensions are collapsed into the last dimension.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape.
    outdim : int
        The number of dimensions in the output.

    See Also
    --------
    flatten : Shortcut
    """
    def __init__(self, incoming, outdim=2, **kwargs):
        super(FlattenLayer, self).__init__(incoming, **kwargs)
        self.outdim = outdim
        if outdim < 1:
            # BUGFIX: the format string was previously passed to
            # ValueError as a separate argument ('... %i', outdim), so
            # it was never interpolated; apply it with % instead.
            raise ValueError('Dim must be >0, was %i' % outdim)

    def get_output_shape_for(self, input_shape):
        # keep the first outdim-1 axes unchanged ...
        shp = [input_shape[i] for i in range(self.outdim - 1)]
        # ... and collapse all remaining axes into a single last axis
        shp += [int(np.prod(input_shape[(self.outdim - 1):]))]
        return tuple(shp)

    def get_output_for(self, input, **kwargs):
        return input.flatten(self.outdim)
|
class ReshapeLayer(Layer):
    """
    A layer reshaping its input tensor to another tensor of the same total
    number of elements.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape

    shape : tuple
        The target shape specification. Each element can be one of:

        * ``i``, a positive integer directly giving the size of the
          dimension
        * ``[i]``, a single-element list of int, denoting to use the size
          of the ``i`` th input dimension
        * ``-1``, denoting to infer the size for this dimension to match
          the total number of elements in the input tensor (cannot be
          used more than once in a specification)
        * TensorVariable directly giving the size of the dimension

    Examples
    --------
    >>> from lasagne.layers import InputLayer, ReshapeLayer
    >>> l_in = InputLayer((32, 100, 20))
    >>> l1 = ReshapeLayer(l_in, ((32, 50, 40)))
    >>> l1.output_shape
    (32, 50, 40)
    >>> l_in = InputLayer((None, 100, 20))
    >>> l1 = ReshapeLayer(l_in, ([0], [1], 5, -1))
    >>> l1.output_shape
    (None, 100, 5, 4)

    Notes
    -----
    The tensor elements will be fetched and placed in C-like order. That
    is, reshaping `[1,2,3,4,5,6]` to shape `(2,3)` will result in a matrix
    `[[1,2,3],[4,5,6]]`, not in `[[1,3,5],[2,4,6]]` (Fortran-like order),
    regardless of the memory layout of the input tensor. For C-contiguous
    input, reshaping is cheap, for others it may require copying the data.
    """
    def __init__(self, incoming, shape, **kwargs):
        super(ReshapeLayer, self).__init__(incoming, **kwargs)
        shape = tuple(shape)
        # validate every element of the target shape specification
        for s in shape:
            if isinstance(s, int):
                # ints must be positive sizes, or -1 meaning "infer"
                if ((s == 0) or (s < (- 1))):
                    raise ValueError('`shape` integers must be positive or -1')
            elif isinstance(s, list):
                # [i] references input dimension i; must be one non-negative int
                if ((len(s) != 1) or (not isinstance(s[0], int)) or (s[0] < 0)):
                    raise ValueError('`shape` input references must be single-element lists of int >= 0')
            elif isinstance(s, T.TensorVariable):
                # a symbolic size must be a scalar expression
                if (s.ndim != 0):
                    raise ValueError(('A symbolic variable in a shape specification must be a scalar, but had %i dimensions' % s.ndim))
            else:
                raise ValueError('`shape` must be a tuple of int and/or [int]')
        # at most one dimension may be inferred from the total element count
        if (sum(((s == (- 1)) for s in shape)) > 1):
            raise ValueError('`shape` cannot contain multiple -1')
        self.shape = shape
        # fail early on specs incompatible with the input shape
        self.get_output_shape_for(self.input_shape)
    def get_output_shape_for(self, input_shape, **kwargs):
        # output_shape is the spec being resolved in place; the masked_*
        # copies substitute 1 for sizes that are unknown (None) but tied
        # between input and output, so element counts can still be compared
        output_shape = list(self.shape)
        masked_input_shape = list(input_shape)
        masked_output_shape = list(output_shape)
        # first pass: resolve [i] references against the input shape
        for (dim, o) in enumerate(output_shape):
            if isinstance(o, list):
                if (o[0] >= len(input_shape)):
                    raise ValueError(('specification contains [%d], but input shape has %d dimensions only' % (o[0], len(input_shape))))
                output_shape[dim] = input_shape[o[0]]
                masked_output_shape[dim] = input_shape[o[0]]
                if ((input_shape[o[0]] is None) and (masked_input_shape[o[0]] is None)):
                    # unknown size referenced by the output: mask it as 1 on
                    # both sides so it cancels out of the size comparison
                    masked_input_shape[o[0]] = 1
                    masked_output_shape[dim] = 1
        # second pass: symbolic sizes are unknown at compile time
        for (dim, o) in enumerate(output_shape):
            if isinstance(o, T.TensorVariable):
                output_shape[dim] = None
                masked_output_shape[dim] = None
        # total element counts, or None if any masked size is unknown
        input_size = (None if any(((x is None) for x in masked_input_shape)) else np.prod(masked_input_shape))
        output_size = (None if any(((x is None) for x in masked_output_shape)) else np.prod(masked_output_shape))
        del masked_input_shape, masked_output_shape
        # infer the size of a -1 dimension from the remaining element count
        if ((- 1) in output_shape):
            dim = output_shape.index((- 1))
            if ((input_size is None) or (output_size is None)):
                output_shape[dim] = None
                output_size = None
            else:
                # output_size currently includes the -1 factor; flip its
                # sign to get the product of the known dimensions
                output_size *= (- 1)
                output_shape[dim] = (input_size // output_size)
                output_size *= output_shape[dim]
        # if both totals are known, they must match exactly
        if ((input_size is not None) and (output_size is not None) and (input_size != output_size)):
            raise ValueError(('%s cannot be reshaped to specification %s. The total size mismatches.' % (input_shape, self.shape)))
        return tuple(output_shape)
    def get_output_for(self, input, **kwargs):
        output_shape = list(self.shape)
        # resolve [i] references symbolically against the runtime shape;
        # -1 and scalar TensorVariables are handled by reshape itself
        for (dim, o) in enumerate(output_shape):
            if isinstance(o, list):
                output_shape[dim] = input.shape[o[0]]
        return input.reshape(tuple(output_shape))
|
class DimshuffleLayer(Layer):
    """
    A layer that rearranges the dimension of its input tensor,
    maintaining the same same total number of elements.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        the layer feeding into this layer, or the expected input shape

    pattern : tuple
        The new dimension order, with each element giving the index
        of the dimension in the input tensor or `'x'` to broadcast it.
        For example `(3,2,1,0)` will reverse the order of a 4-dimensional
        tensor. Use `'x'` to broadcast, e.g. `(3,2,1,'x',0)` will
        take a 4 tensor of shape `(2,3,5,7)` as input and produce a
        tensor of shape `(7,5,3,1,2)` with the 4th dimension being
        broadcast-able. In general, all dimensions in the input tensor
        must be used to generate the output tensor. Omitting a dimension
        attempts to collapse it; this can only be done to broadcast-able
        dimensions, e.g. a 5-tensor of shape `(7,5,3,1,2)` with the 4th
        being broadcast-able can be shuffled with the pattern `(4,2,1,0)`
        collapsing the 4th dimension resulting in a tensor of shape
        `(2,3,5,7)`.

    Examples
    --------
    >>> from lasagne.layers import InputLayer, DimshuffleLayer
    >>> l_in = InputLayer((2, 3, 5, 7))
    >>> l1 = DimshuffleLayer(l_in, (3, 2, 1, 'x', 0))
    >>> l1.output_shape
    (7, 5, 3, 1, 2)
    >>> l2 = DimshuffleLayer(l1, (4, 2, 1, 0))
    >>> l2.output_shape
    (2, 3, 5, 7)
    """
    def __init__(self, incoming, pattern, **kwargs):
        super(DimshuffleLayer, self).__init__(incoming, **kwargs)
        # validate: pattern may only contain unique ints and 'x' markers
        used_dims = set()
        for p in pattern:
            if isinstance(p, int):
                if (p in used_dims):
                    raise ValueError('pattern contains dimension {0} more than once'.format(p))
                used_dims.add(p)
            elif (p == 'x'):
                # 'x' inserts a new broadcastable axis; may appear repeatedly
                pass
            else:
                raise ValueError("pattern should only contain dimensionindices or 'x', not {0}".format(p))
        self.pattern = pattern
        # fail early on patterns incompatible with the input shape
        self.get_output_shape_for(self.input_shape)
    def get_output_shape_for(self, input_shape):
        output_shape = []
        # track which input dimensions the pattern consumes
        dims_used = ([False] * len(input_shape))
        for p in self.pattern:
            if isinstance(p, int):
                if ((p < 0) or (p >= len(input_shape))):
                    raise ValueError('pattern contains {0}, but input shape has {1} dimensions only'.format(p, len(input_shape)))
                o = input_shape[p]
                dims_used[p] = True
            elif (p == 'x'):
                # broadcast axes always have size 1
                # (no else branch needed: __init__ already rejected any
                # pattern entry that is neither an int nor 'x')
                o = 1
            output_shape.append(o)
        # any input dimension not mentioned in the pattern is collapsed,
        # which is only valid if its size is 1 (or unknown, i.e. None)
        for (i, (dim_size, used)) in enumerate(zip(input_shape, dims_used)):
            if ((not used) and (dim_size != 1) and (dim_size is not None)):
                raise ValueError('pattern attempted to collapse dimension {0} of size {1}; dimensions with size != 1/None are notbroadcastable and cannot be collapsed'.format(i, dim_size))
        return tuple(output_shape)
    def get_output_for(self, input, **kwargs):
        # theano's dimshuffle performs the actual reordering/broadcasting
        return input.dimshuffle(self.pattern)
|
class PadLayer(Layer):
    """
    Pad all dimensions except the first ``batch_ndim`` with ``width`` zeros
    on both sides, or with another value specified in ``val``. Individual
    padding for each dimension or edge can be specified using a tuple or
    list of tuples for ``width``.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape

    width : int, iterable of int, or iterable of tuple
        Padding width. If an int, pads each axis symmetrically with the same
        amount in the beginning and end. If an iterable of int, defines the
        symmetric padding width separately for each axis. If an iterable of
        tuples of two ints, defines a separate padding width for each
        beginning and end of each axis.

    val : float
        Value used for padding

    batch_ndim : int
        Dimensions up to this value are not padded. For padding
        convolutional layers this should be set to 2 so the sample and
        filter dimensions are not padded
    """

    def __init__(self, incoming, width, val=0, batch_ndim=2, **kwargs):
        super(PadLayer, self).__init__(incoming, **kwargs)
        self.width = width
        self.val = val
        self.batch_ndim = batch_ndim

    def get_output_shape_for(self, input_shape):
        shape = list(input_shape)
        # A scalar width applies symmetrically to every padded axis.
        if isinstance(self.width, int):
            per_axis = [self.width] * (len(input_shape) - self.batch_ndim)
        else:
            per_axis = self.width
        for (offset, pad) in enumerate(per_axis):
            axis = offset + self.batch_ndim
            if shape[axis] is None:
                # Unknown sizes stay unknown after padding.
                continue
            try:
                (before, after) = pad
            except TypeError:
                # A plain int pads both edges equally.
                before = after = pad
            shape[axis] += before + after
        return tuple(shape)

    def get_output_for(self, input, **kwargs):
        return padding.pad(input, self.width, self.val, self.batch_ndim)
|
class SliceLayer(Layer):
    """
    Slices the input at a specific axis and at specific indices.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape

    indices : int or slice instance
        If an ``int``, selects a single element from the given axis,
        dropping the axis. If a slice, selects all elements in the given
        range, keeping the axis.

    axis : int
        Specifies the axis from which the indices are selected.

    Examples
    --------
    >>> from lasagne.layers import SliceLayer, InputLayer
    >>> l_in = InputLayer((2, 3, 4))
    >>> SliceLayer(l_in, indices=0, axis=1).output_shape
    ... # equals input[:, 0]
    (2, 4)
    >>> SliceLayer(l_in, indices=slice(0, 1), axis=1).output_shape
    ... # equals input[:, 0:1]
    (2, 1, 4)
    >>> SliceLayer(l_in, indices=slice(-2, None), axis=-1).output_shape
    ... # equals input[..., -2:]
    (2, 3, 2)
    """

    def __init__(self, incoming, indices, axis=-1, **kwargs):
        super(SliceLayer, self).__init__(incoming, **kwargs)
        self.slice = indices
        self.axis = axis

    def get_output_shape_for(self, input_shape):
        shape = list(input_shape)
        if isinstance(self.slice, int):
            # An integer index drops the sliced axis entirely.
            del shape[self.axis]
        elif input_shape[self.axis] is None:
            # Cannot resolve the slice length for an unknown size.
            shape[self.axis] = None
        else:
            shape[self.axis] = len(
                range(*self.slice.indices(input_shape[self.axis])))
        return tuple(shape)

    def get_output_for(self, input, **kwargs):
        axis = self.axis if self.axis >= 0 else self.axis + input.ndim
        return input[(slice(None),) * axis + (self.slice,)]
|
def sigmoid(x):
    r"""Logistic sigmoid activation
    :math:`\varphi(x) = \frac{1}{1 + e^{-x}}`.

    Parameters
    ----------
    x : float32
        The activation (the summed, weighted input of a neuron).

    Returns
    -------
    float32 in [0, 1]
        The output of the sigmoid function applied to the activation.
    """
    return theano.tensor.nnet.sigmoid(x)
|
def softmax(x):
    r"""Softmax activation function
    :math:`\varphi(\mathbf{x})_j =
    \frac{e^{\mathbf{x}_j}}{\sum_{k=1}^K e^{\mathbf{x}_k}}`
    where :math:`K` is the total number of neurons in the layer. Applied
    row-wise.

    Parameters
    ----------
    x : float32
        The activation (the summed, weighted input of a neuron).

    Returns
    -------
    float32 where the sum of the row is 1 and each single value is in [0, 1]
        The output of the softmax function applied to the activation.
    """
    return theano.tensor.nnet.softmax(x)
|
def tanh(x):
    r"""Hyperbolic tangent activation :math:`\varphi(x) = \tanh(x)`.

    Parameters
    ----------
    x : float32
        The activation (the summed, weighted input of a neuron).

    Returns
    -------
    float32 in [-1, 1]
        The output of the tanh function applied to the activation.
    """
    return theano.tensor.tanh(x)
|
class ScaledTanH(object):
    r"""Scaled tanh :math:`\varphi(x) = \tanh(\alpha \cdot x) \cdot \beta`.

    A modified tanh that rescales both the input and the output of the
    activation. Scaling the input down widens the near-linear regime;
    scaling it up brings the function closer to a step. Scaling the
    output changes the output interval to :math:`[-\beta, \beta]`.

    Parameters
    ----------
    scale_in : float32
        The scale parameter :math:`\alpha` for the input

    scale_out : float32
        The scale parameter :math:`\beta` for the output

    Methods
    -------
    __call__(x)
        Apply the scaled tanh function to the activation `x`.

    Examples
    --------
    This is a class that needs to be instantiated to obtain a callable:

    >>> from lasagne.layers import InputLayer, DenseLayer
    >>> l_in = InputLayer((None, 100))
    >>> from lasagne.nonlinearities import ScaledTanH
    >>> scaled_tanh = ScaledTanH(scale_in=0.5, scale_out=2.27)
    >>> l1 = DenseLayer(l_in, num_units=200, nonlinearity=scaled_tanh)

    Notes
    -----
    LeCun et al. (in [1]_, Section 4.4) suggest ``scale_in=2./3`` and
    ``scale_out=1.7159``, which has :math:`\varphi(\pm 1) = \pm 1`,
    maximum second derivative at 1, and an effective gain close to 1.

    By matching :math:`\alpha` and :math:`\beta`, the nonlinearity can be
    tuned to preserve the mean and variance of its input, e.g.
    ``scale_in=0.5, scale_out=2.4`` (normal input) or
    ``scale_in=0.5, scale_out=2.27`` (uniform input).

    References
    ----------
    .. [1] LeCun, Yann A., et al. (1998):
       Efficient BackProp,
       http://link.springer.com/chapter/10.1007/3-540-49430-8_2,
       http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
    .. [2] Masci, Jonathan, et al. (2011):
       Stacked Convolutional Auto-Encoders for Hierarchical Feature
       Extraction,
       http://link.springer.com/chapter/10.1007/978-3-642-21735-7_7,
       http://people.idsia.ch/~ciresan/data/icann2011.pdf
    """

    def __init__(self, scale_in=1, scale_out=1):
        self.scale_in = scale_in
        self.scale_out = scale_out

    def __call__(self, x):
        scaled_input = x * self.scale_in
        return theano.tensor.tanh(scaled_input) * self.scale_out
|
def rectify(x):
    r"""Rectified linear activation :math:`\varphi(x) = \max(0, x)`.

    Parameters
    ----------
    x : float32
        The activation (the summed, weighted input of a neuron).

    Returns
    -------
    float32
        The output of the rectify function applied to the activation.
    """
    return theano.tensor.nnet.relu(x)
|
class LeakyRectify(object):
    r"""Leaky rectifier :math:`\varphi(x) = \max(\alpha \cdot x, x)`.

    Introduced in [1]_. Compared to the standard rectifier
    :func:`rectify`, it has a nonzero gradient for negative input, which
    often helps convergence.

    Parameters
    ----------
    leakiness : float
        Slope for negative input, usually between 0 and 1.
        0 gives the standard rectifier, 1 a linear activation, and any
        value in between a leaky rectifier.

    Methods
    -------
    __call__(x)
        Apply the leaky rectify function to the activation `x`.

    Examples
    --------
    This is a class that needs to be instantiated to obtain a callable:

    >>> from lasagne.layers import InputLayer, DenseLayer
    >>> l_in = InputLayer((None, 100))
    >>> from lasagne.nonlinearities import LeakyRectify
    >>> custom_rectify = LeakyRectify(0.1)
    >>> l1 = DenseLayer(l_in, num_units=200, nonlinearity=custom_rectify)

    Alternatively, use the provided instance for leakiness=0.01
    (``leaky_rectify``) or for 1/3 (``very_leaky_rectify``).

    See Also
    --------
    leaky_rectify: Instance with default leakiness of 0.01, as in [1]_.
    very_leaky_rectify: Instance with high leakiness of 1/3, as in [2]_.

    References
    ----------
    .. [1] Maas et al. (2013):
       Rectifier Nonlinearities Improve Neural Network Acoustic Models,
       http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf
    .. [2] Graham, Benjamin (2014):
       Spatially-sparse convolutional neural networks,
       http://arxiv.org/abs/1409.6070
    """

    def __init__(self, leakiness=0.01):
        self.leakiness = leakiness

    def __call__(self, x):
        return theano.tensor.nnet.relu(x, self.leakiness)
|
def elu(x):
    r"""Exponential Linear Unit
    :math:`\varphi(x) = (x > 0) ? x : e^x - 1`.

    Introduced in [1]_. Compared to :func:`rectify`, it has a mean
    activation closer to zero and a nonzero gradient for negative input;
    compared to :class:`LeakyRectify`, it saturates for highly negative
    inputs.

    Parameters
    ----------
    x : float32
        The activation (the summed, weighted input of a neuron).

    Returns
    -------
    float32
        The output of the exponential linear unit for the activation.

    Notes
    -----
    In [1]_, an additional parameter :math:`\alpha` controls the
    (negative) saturation value, but is set to 1 for all experiments and
    is omitted here.

    References
    ----------
    .. [1] Djork-Arné Clevert, Thomas Unterthiner, Sepp Hochreiter (2015):
       Fast and Accurate Deep Network Learning by Exponential Linear Units
       (ELUs), http://arxiv.org/abs/1511.07289
    """
    return theano.tensor.switch(x > 0, x, theano.tensor.exp(x) - 1)
|
def softplus(x):
    r"""Softplus activation :math:`\varphi(x) = \log(1 + e^x)`.

    Parameters
    ----------
    x : float32
        The activation (the summed, weighted input of a neuron).

    Returns
    -------
    float32
        The output of the softplus function applied to the activation.
    """
    return theano.tensor.nnet.softplus(x)
|
def linear(x):
    r"""Identity activation :math:`\varphi(x) = x`.

    Parameters
    ----------
    x : float32
        The activation (the summed, weighted input of a neuron).

    Returns
    -------
    float32
        The activation, unchanged.
    """
    return x
|
def binary_crossentropy(predictions, targets):
    r"""Binary cross-entropy between predictions and targets.

    .. math:: L = -t \log(p) - (1 - t) \log(1 - p)

    Parameters
    ----------
    predictions : Theano tensor
        Predictions in (0, 1), such as sigmoidal output of a neural
        network.
    targets : Theano tensor
        Targets in [0, 1], such as ground truth labels.

    Returns
    -------
    Theano tensor
        An expression for the element-wise binary cross-entropy.

    Notes
    -----
    This is the loss function of choice for binary classification
    problems and sigmoid output units.
    """
    return theano.tensor.nnet.binary_crossentropy(predictions, targets)
|
def categorical_crossentropy(predictions, targets):
    r"""Categorical cross-entropy between predictions and targets.

    .. math:: L_i = - \sum_j{t_{i,j} \log(p_{i,j})}

    Parameters
    ----------
    predictions : Theano 2D tensor
        Predictions in (0, 1), such as softmax output of a neural network,
        with data points in rows and class probabilities in columns.
    targets : Theano 2D tensor or 1D tensor
        Either targets in [0, 1] matching the layout of `predictions`, or
        a vector of int giving the correct class index per data point.

    Returns
    -------
    Theano 1D tensor
        An expression for the item-wise categorical cross-entropy.

    Notes
    -----
    This is the loss function of choice for multi-class classification
    with softmax outputs. For hard targets, a vector of int is usually
    slightly more efficient than a matrix with a single 1.0 per row.
    """
    return theano.tensor.nnet.categorical_crossentropy(predictions, targets)
|
def squared_error(a, b):
    r"""Element-wise squared difference between two tensors.

    .. math:: L = (p - t)^2

    Parameters
    ----------
    a, b : Theano tensor
        The tensors to compute the squared difference between.

    Returns
    -------
    Theano tensor
        An expression for the item-wise squared difference.

    Notes
    -----
    The loss of choice for many regression problems or auto-encoders
    with linear output units.
    """
    difference = a - b
    return difference ** 2
|
def aggregate(loss, weights=None, mode='mean'):
    """Aggregates an element- or item-wise loss to a scalar loss.

    Parameters
    ----------
    loss : Theano tensor
        The loss expression to aggregate.
    weights : Theano tensor, optional
        The weights for each element or item, must be broadcastable to
        the same shape as `loss` if given. If omitted, all elements are
        weighted equally.
    mode : {'mean', 'sum', 'normalized_sum'}
        Whether to aggregate by averaging, by summing, or by summing and
        dividing by the total weights (requires `weights`).

    Returns
    -------
    Theano scalar
        A scalar loss expression suitable for differentiation.

    Raises
    ------
    ValueError
        If `mode` is unknown, or 'normalized_sum' is used without
        `weights`.

    Notes
    -----
    Binary weights (0/1) can mask out particular entries; masked entries
    still need to be valid values, as NaNs propagate through. With
    'normalized_sum', the loss per batch has similar magnitude regardless
    of the weights, but data points sharing a batch with low-weighted or
    masked points contribute more than those sharing one with
    high-weighted points.
    """
    weighted = loss if weights is None else loss * weights
    if mode == 'mean':
        return weighted.mean()
    if mode == 'sum':
        return weighted.sum()
    if mode == 'normalized_sum':
        if weights is None:
            raise ValueError("require weights for mode='normalized_sum'")
        return weighted.sum() / weights.sum()
    raise ValueError(
        "mode must be 'mean', 'sum' or 'normalized_sum', got %r" % mode)
|
def binary_hinge_loss(predictions, targets, binary=True, delta=1):
    r"""Binary hinge loss between predictions and targets.

    .. math:: L_i = \max(0, \delta - t_i p_i)

    Parameters
    ----------
    predictions : Theano tensor
        Predictions in (0, 1), such as sigmoidal output of a neural
        network.
    targets : Theano tensor
        Targets in {0, 1} (or in {-1, 1} depending on `binary`), such as
        ground truth labels.
    binary : bool, default True
        ``True`` if targets are in {0, 1}, ``False`` if they are in
        {-1, 1}
    delta : scalar, default 1
        The hinge loss margin

    Returns
    -------
    Theano tensor
        An expression for the element-wise binary hinge loss

    Notes
    -----
    An alternative to the binary cross-entropy loss for binary
    classification problems.
    """
    if binary:
        # Map {0, 1} labels onto {-1, 1} so the margin formula applies.
        targets = 2 * targets - 1
    margins = delta - predictions * targets
    return theano.tensor.nnet.relu(margins)
|
def multiclass_hinge_loss(predictions, targets, delta=1):
    'Computes the multi-class hinge loss between predictions and targets.\n\n .. math:: L_i = \\max_{j \\not = p_i} (0, t_j - t_{p_i} + \\delta)\n\n Parameters\n ----------\n predictions : Theano 2D tensor\n Predictions in (0, 1), such as softmax output of a neural network,\n with data points in rows and class probabilities in columns.\n targets : Theano 2D tensor or 1D tensor\n Either a vector of int giving the correct class index per data point\n or a 2D tensor of one-hot encoding of the correct class in the same\n layout as predictions (non-binary targets in [0, 1] do not work!)\n delta : scalar, default 1\n The hinge loss margin\n\n Returns\n -------\n Theano 1D tensor\n An expression for the item-wise multi-class hinge loss\n\n Notes\n -----\n This is an alternative to the categorical cross-entropy loss for\n multi-class classification problems\n '
    num_cls = predictions.shape[1]
    # A 1D vector of class indices is expanded into a one-hot matrix so
    # both target formats can be handled uniformly below.
    if (targets.ndim == (predictions.ndim - 1)):
        targets = theano.tensor.extra_ops.to_one_hot(targets, num_cls)
    elif (targets.ndim != predictions.ndim):
        raise TypeError('rank mismatch between targets and predictions')
    # Score of the correct class, one entry per data point.
    corrects = predictions[targets.nonzero()]
    # Scores of all the wrong classes, regrouped to (rows, num_cls - 1).
    rest = theano.tensor.reshape(predictions[(1 - targets).nonzero()], ((- 1), (num_cls - 1)))
    # Strongest competing (wrong) class per data point.
    rest = theano.tensor.max(rest, axis=1)
    # Hinge: penalize when the best wrong class comes within `delta` of
    # the correct-class score.
    return theano.tensor.nnet.relu(((rest - corrects) + delta))
|
def binary_accuracy(predictions, targets, threshold=0.5):
    r"""Binary accuracy between predictions and targets.

    .. math:: L_i = \mathbb{I}(t_i = \mathbb{I}(p_i \ge \alpha))

    Parameters
    ----------
    predictions : Theano tensor
        Predictions in [0, 1], such as a sigmoidal output of a neural
        network, giving the probability of the positive class
    targets : Theano tensor
        Targets in {0, 1}, such as ground truth labels.
    threshold : scalar, default: 0.5
        Specifies at what threshold to consider the predictions being of
        the positive class

    Returns
    -------
    Theano tensor
        An expression for the element-wise binary accuracy in {0, 1}

    Notes
    -----
    This objective function should not be used with a gradient
    calculation; its gradient is zero everywhere. It is intended as a
    convenience for validation and testing, not training.

    To obtain the average accuracy, call :func:`theano.tensor.mean()` on
    the result, passing ``dtype=theano.config.floatX`` to compute the
    mean on GPU.
    """
    hard_predictions = theano.tensor.ge(predictions, threshold)
    return theano.tensor.eq(hard_predictions, targets)
|
def categorical_accuracy(predictions, targets, top_k=1):
    'Computes the categorical accuracy between predictions and targets.\n\n .. math:: L_i = \\mathbb{I}(t_i = \\operatorname{argmax}_c p_{i,c})\n\n Can be relaxed to allow matches among the top :math:`k` predictions:\n\n .. math::\n L_i = \\mathbb{I}(t_i \\in \\operatorname{argsort}_c (-p_{i,c})_{:k})\n\n Parameters\n ----------\n predictions : Theano 2D tensor\n Predictions in (0, 1), such as softmax output of a neural network,\n with data points in rows and class probabilities in columns.\n targets : Theano 2D tensor or 1D tensor\n Either a vector of int giving the correct class index per data point\n or a 2D tensor of 1 hot encoding of the correct class in the same\n layout as predictions\n top_k : int\n Regard a prediction to be correct if the target class is among the\n `top_k` largest class probabilities. For the default value of 1, a\n prediction is correct only if the target class is the most probable.\n\n Returns\n -------\n Theano 1D tensor\n An expression for the item-wise categorical accuracy in {0, 1}\n\n Notes\n -----\n This is a strictly non differential function as it includes an argmax.\n This objective function should never be used with a gradient calculation.\n It is intended as a convenience for validation and testing not training.\n\n To obtain the average accuracy, call :func:`theano.tensor.mean()` on the\n result, passing ``dtype=theano.config.floatX`` to compute the mean on GPU.\n '
    # One-hot targets are reduced to a vector of class indices.
    if (targets.ndim == predictions.ndim):
        targets = theano.tensor.argmax(targets, axis=(- 1))
    elif (targets.ndim != (predictions.ndim - 1)):
        raise TypeError('rank mismatch between targets and predictions')
    if (top_k == 1):
        # Fast path: exact match against the single most probable class.
        top = theano.tensor.argmax(predictions, axis=(- 1))
        return theano.tensor.eq(top, targets)
    else:
        # Sort class indices by ascending probability, then keep only the
        # last `top_k` entries of the final axis (the k most probable).
        top = theano.tensor.argsort(predictions, axis=(- 1))
        top = top[([slice(None) for _ in range((top.ndim - 1))] + [slice((- top_k), None)])]
        # Broadcast targets against the top-k candidates and check whether
        # any of them matches.
        targets = theano.tensor.shape_padaxis(targets, axis=(- 1))
        return theano.tensor.any(theano.tensor.eq(top, targets), axis=(- 1))
|
def get_rng():
    """Get the package-level random number generator.

    Returns
    -------
    :class:`numpy.random.RandomState` instance
        The :class:`numpy.random.RandomState` instance passed to the
        most recent call of :func:`set_rng`, or ``numpy.random`` if
        :func:`set_rng` has never been called.
    """
    return _rng
|
def set_rng(new_rng):
    """Set the package-level random number generator.

    Parameters
    ----------
    new_rng : ``numpy.random`` or a :class:`numpy.random.RandomState`
        The random number generator to use.
    """
    global _rng
    _rng = new_rng
|
def l1(x):
    """Computes the L1 norm of a tensor.

    Parameters
    ----------
    x : Theano tensor

    Returns
    -------
    Theano scalar
        l1 norm (sum of absolute values of elements)
    """
    return T.sum(abs(x))
|
def l2(x):
    """Computes the squared L2 norm of a tensor.

    Parameters
    ----------
    x : Theano tensor

    Returns
    -------
    Theano scalar
        squared l2 norm (sum of squared values of elements)
    """
    return T.sum(x ** 2)
|
def apply_penalty(tensor_or_tensors, penalty, **kwargs):
    """
    Computes the total cost for applying a specified penalty
    to a tensor or group of tensors.

    Parameters
    ----------
    tensor_or_tensors : Theano tensor or list of tensors
    penalty : callable
    **kwargs
        keyword arguments passed to penalty.

    Returns
    -------
    Theano scalar
        a scalar expression for the total penalty cost
    """
    try:
        # Treat the argument as an iterable of tensors first (EAFP).
        return sum(penalty(tensor, **kwargs)
                   for tensor in tensor_or_tensors)
    except (TypeError, ValueError):
        # Not iterable: apply the penalty to the single tensor directly.
        return penalty(tensor_or_tensors, **kwargs)
|
def regularize_layer_params(layer, penalty, tags=None, **kwargs):
    """
    Computes a regularization cost by applying a penalty to the parameters
    of a layer or group of layers.

    Parameters
    ----------
    layer : a :class:`Layer` instances or list of layers.
    penalty : callable
    tags: dict, optional
        Tag specifications which filter the parameters of the layer or
        layers. Defaults to ``{'regularizable': True}``, i.e. only
        parameters with the `regularizable` tag are included.
    **kwargs
        keyword arguments passed to penalty.

    Returns
    -------
    Theano scalar
        a scalar expression for the cost
    """
    # None stands in for the historical default of
    # {'regularizable': True}, avoiding a mutable default argument.
    if tags is None:
        tags = {'regularizable': True}
    layers = [layer] if isinstance(layer, Layer) else layer
    all_params = []
    for lyr in layers:  # avoid shadowing the `layer` parameter
        all_params += lyr.get_params(**tags)
    return apply_penalty(all_params, penalty, **kwargs)
|
def regularize_layer_params_weighted(layers, penalty, tags=None, **kwargs):
    """
    Computes a regularization cost by applying a penalty to the parameters
    of a layer or group of layers, weighted by a coefficient for each
    layer.

    Parameters
    ----------
    layers : dict
        A mapping from :class:`Layer` instances to coefficients.
    penalty : callable
    tags: dict, optional
        Tag specifications which filter the parameters of the layer or
        layers. Defaults to ``{'regularizable': True}``, i.e. only
        parameters with the `regularizable` tag are included.
    **kwargs
        keyword arguments passed to penalty.

    Returns
    -------
    Theano scalar
        a scalar expression for the cost
    """
    # None stands in for the historical default of
    # {'regularizable': True}, avoiding a mutable default argument.
    if tags is None:
        tags = {'regularizable': True}
    return sum(coeff * apply_penalty(layer.get_params(**tags),
                                     penalty, **kwargs)
               for (layer, coeff) in layers.items())
|
def regularize_network_params(layer, penalty, tags=None, **kwargs):
    """
    Computes a regularization cost by applying a penalty to the parameters
    of all layers in a network.

    Parameters
    ----------
    layer : a :class:`Layer` instance.
        Parameters of this layer and all layers below it will be
        penalized.
    penalty : callable
    tags: dict, optional
        Tag specifications which filter the parameters of the layer or
        layers. Defaults to ``{'regularizable': True}``, i.e. only
        parameters with the `regularizable` tag are included.
    **kwargs
        keyword arguments passed to penalty.

    Returns
    -------
    Theano scalar
        a scalar expression for the cost
    """
    # None stands in for the historical default of
    # {'regularizable': True}, avoiding a mutable default argument.
    if tags is None:
        tags = {'regularizable': True}
    return apply_penalty(get_all_params(layer, **tags), penalty, **kwargs)
|
def pytest_addoption(parser):
    """Register the ``--runslow`` command-line flag with pytest."""
    parser.addoption('--runslow', action='store_true',
                     help='run slow tests')
|
def pytest_runtest_setup(item):
    """Skip tests marked ``slow`` unless ``--runslow`` was supplied."""
    if 'slow' not in item.keywords:
        return
    # Only query the option for slow tests (preserves short-circuiting).
    if item.config.getoption('--runslow'):
        return
    pytest.skip('need --runslow option to run')
|
@pytest.fixture
def dummy_input_layer():
    """A Mock specced on a real (2, 3, 4) InputLayer, mirroring its
    shape, input_var and output_shape attributes."""
    from lasagne.layers.input import InputLayer
    real_layer = InputLayer((2, 3, 4))
    fake = Mock(real_layer)
    # Copy the attributes that tests read from the real layer.
    fake.shape = real_layer.shape
    fake.output_shape = real_layer.output_shape
    fake.input_var = real_layer.input_var
    return fake
|
class TestLayer():
    """Unit tests for lasagne.layers.base.Layer: construction, shapes,
    parameter registration and tag-based parameter filtering."""
    @pytest.fixture
    def layer(self):
        # A Layer on a mocked incoming layer with a 1D output shape.
        from lasagne.layers.base import Layer
        return Layer(Mock(output_shape=(None,)))
    @pytest.fixture
    def named_layer(self):
        # Same as `layer`, but carrying an explicit name.
        from lasagne.layers.base import Layer
        return Layer(Mock(output_shape=(None,)), name='layer_name')
    def test_input_shape(self, layer):
        assert (layer.input_shape == layer.input_layer.output_shape)
    def test_get_output_shape_for(self, layer):
        # The base class is shape-preserving: output shape == input shape.
        shape = Mock()
        assert (layer.get_output_shape_for(shape) == shape)
    @pytest.fixture
    def layer_from_shape(self):
        # A Layer constructed from a plain shape tuple, without an
        # incoming layer instance.
        from lasagne.layers.base import Layer
        return Layer((None, 20))
    def test_layer_from_shape(self, layer_from_shape):
        layer = layer_from_shape
        assert (layer.input_layer is None)
        assert (layer.input_shape == (None, 20))
    def test_named_layer(self, named_layer):
        assert (named_layer.name == 'layer_name')
    def test_get_params(self, layer):
        # A fresh layer has no registered parameters.
        assert (layer.get_params() == [])
    def test_get_params_tags(self, layer):
        # Parameters can be filtered by arbitrary boolean tags; a tag
        # set to True selects params carrying it, False excludes them.
        a_shape = (20, 50)
        a = numpy.random.normal(0, 1, a_shape)
        A = layer.add_param(a, a_shape, name='A', tag1=True, tag2=False)
        b_shape = (30, 20)
        b = numpy.random.normal(0, 1, b_shape)
        B = layer.add_param(b, b_shape, name='B', tag1=True, tag2=True)
        c_shape = (40, 10)
        c = numpy.random.normal(0, 1, c_shape)
        C = layer.add_param(c, c_shape, name='C', tag2=True)
        assert (layer.get_params() == [A, B, C])
        assert (layer.get_params(tag1=True) == [A, B])
        assert (layer.get_params(tag1=False) == [C])
        assert (layer.get_params(tag2=True) == [B, C])
        assert (layer.get_params(tag2=False) == [A])
        assert (layer.get_params(tag1=True, tag2=True) == [B])
    def test_get_params_expressions(self, layer):
        # Parameters given as Theano expressions are registered as the
        # expression, but get_params() returns the underlying shared
        # variables they depend on.
        (x, y, z) = (theano.shared(0, name=n) for n in 'xyz')
        W1 = layer.add_param(((x ** 2) + theano.tensor.log(y)), (), tag1=True)
        W2 = layer.add_param(theano.tensor.matrix(), (10, 10), tag1=True)
        W3 = layer.add_param(z.T, (), tag2=True)
        assert (list(layer.params.keys()) == [W1, W2, W3])
        assert (layer.get_params() == [x, y, z])
        assert (layer.get_params(tag1=True) == [x, y])
        assert (layer.get_params(tag2=True) == [z])
    def test_add_param_tags(self, layer):
        # 'trainable' and 'regularizable' default to present; passing
        # trainable=False removes that tag; extra tags are recorded.
        a_shape = (20, 50)
        a = numpy.random.normal(0, 1, a_shape)
        A = layer.add_param(a, a_shape)
        assert (A in layer.params)
        assert ('trainable' in layer.params[A])
        assert ('regularizable' in layer.params[A])
        b_shape = (30, 20)
        b = numpy.random.normal(0, 1, b_shape)
        B = layer.add_param(b, b_shape, trainable=False)
        assert (B in layer.params)
        assert ('trainable' not in layer.params[B])
        assert ('regularizable' in layer.params[B])
        c_shape = (40, 10)
        c = numpy.random.normal(0, 1, c_shape)
        C = layer.add_param(c, c_shape, tag1=True)
        assert (C in layer.params)
        assert ('trainable' in layer.params[C])
        assert ('regularizable' in layer.params[C])
        assert ('tag1' in layer.params[C])
    def test_add_param_name(self, layer):
        a_shape = (20, 50)
        a = numpy.random.normal(0, 1, a_shape)
        A = layer.add_param(a, a_shape, name='A')
        assert (A.name == 'A')
    def test_add_param_named_layer_name(self, named_layer):
        # Layer names are prefixed onto parameter names.
        a_shape = (20, 50)
        a = numpy.random.normal(0, 1, a_shape)
        A = named_layer.add_param(a, a_shape, name='A')
        assert (A.name == 'layer_name.A')
    def test_get_output_for_notimplemented(self, layer):
        # The base class leaves get_output_for to subclasses.
        with pytest.raises(NotImplementedError):
            layer.get_output_for(Mock())
    def test_nonpositive_input_dims_raises_value_error(self, layer):
        # Shapes with negative or zero sizes are rejected at
        # construction; positive sizes are accepted.
        from lasagne.layers.base import Layer
        neg_input_layer = Mock(output_shape=(None, (- 1), (- 1)))
        zero_input_layer = Mock(output_shape=(None, 0, 0))
        pos_input_layer = Mock(output_shape=(None, 1, 1))
        with pytest.raises(ValueError):
            Layer(neg_input_layer)
        with pytest.raises(ValueError):
            Layer(zero_input_layer)
        Layer(pos_input_layer)
|
class TestMergeLayer():
    """Unit tests for lasagne.layers.base.MergeLayer: multiple incoming
    layers, shape-tuple inputs, and unimplemented base methods."""
    @pytest.fixture
    def layer(self):
        # A MergeLayer fed by two mocked incoming layers.
        from lasagne.layers.base import MergeLayer
        return MergeLayer([Mock(), Mock()])
    def test_input_shapes(self, layer):
        assert (layer.input_shapes == [l.output_shape for l in layer.input_layers])
    @pytest.fixture
    def layer_from_shape(self):
        # One input given as a plain shape tuple, the other as a layer.
        from lasagne.layers.input import InputLayer
        from lasagne.layers.base import MergeLayer
        return MergeLayer([(None, 20), Mock(InputLayer((None,)), output_shape=(None,))])
    def test_layer_from_shape(self, layer_from_shape):
        # A shape-tuple input yields a None input_layer entry but still
        # contributes its shape.
        layer = layer_from_shape
        assert (layer.input_layers[0] is None)
        assert (layer.input_shapes[0] == (None, 20))
        assert (layer.input_layers[1] is not None)
        assert (layer.input_shapes[1] == layer.input_layers[1].output_shape)
    def test_get_params(self, layer):
        assert (layer.get_params() == [])
    def test_get_output_shape_for_notimplemented(self, layer):
        with pytest.raises(NotImplementedError):
            layer.get_output_shape_for(Mock())
    def test_get_output_for_notimplemented(self, layer):
        with pytest.raises(NotImplementedError):
            layer.get_output_for(Mock())
|
def test_embedding_2D_input():
    """EmbeddingLayer on a 2D integer matrix must behave like W[x]."""
    import numpy as np
    import theano
    import theano.tensor as T
    from lasagne.layers import EmbeddingLayer, InputLayer, helper
    (batch_size, seq_len, emb_size, vocab_size) = (2, 3, 5, 3)
    ids = T.imatrix()
    l_in = InputLayer((None, seq_len))
    weights = np.arange(vocab_size * emb_size).reshape(
        (vocab_size, emb_size)).astype('float32')
    l_emb = EmbeddingLayer(l_in, input_size=vocab_size,
                           output_size=emb_size, W=weights)
    # Shape inference: one embedding vector per token position.
    assert (helper.get_output_shape(l_emb, (batch_size, seq_len)) ==
            (batch_size, seq_len, emb_size))
    lookup = theano.function([ids], helper.get_output(l_emb, ids))
    ids_test = np.array([[0, 1, 2], [0, 0, 2]], dtype='int32')
    np.testing.assert_array_almost_equal(lookup(ids_test),
                                         weights[ids_test])
|
def test_embedding_1D_input():
    """EmbeddingLayer on a 1D integer vector must behave like W[x]."""
    import numpy as np
    import theano
    import theano.tensor as T
    from lasagne.layers import EmbeddingLayer, InputLayer, helper
    (batch_size, emb_size, vocab_size) = (2, 10, 3)
    ids = T.ivector()
    l_in = InputLayer((None,))
    weights = np.arange(vocab_size * emb_size).reshape(
        (vocab_size, emb_size)).astype('float32')
    l_emb = EmbeddingLayer(l_in, input_size=vocab_size,
                           output_size=emb_size, W=weights)
    # Shape inference: one embedding vector per input index.
    assert (helper.get_output_shape(l_emb, (batch_size,)) ==
            (batch_size, emb_size))
    lookup = theano.function([ids], helper.get_output(l_emb, ids))
    ids_test = np.array([0, 1, 2], dtype='int32')
    np.testing.assert_array_almost_equal(lookup(ids_test),
                                         weights[ids_test])
|
class TestGetAllLayers():
    """Tests for get_all_layers(): collecting a network's layers in
    topological (input-to-output) order from one or more query layers."""
    def test_stack(self):
        """A linear stack l1 -> l2 -> l3, queried in every subset/order."""
        from lasagne.layers import InputLayer, DenseLayer, get_all_layers
        from itertools import permutations
        l1 = InputLayer((10, 20))
        l2 = DenseLayer(l1, 30)
        l3 = DenseLayer(l2, 40)
        # Whatever the query, the result is the ancestors of the deepest
        # queried layer, ordered from input to output.
        for count in (0, 1, 2, 3):
            for query in permutations([l1, l2, l3], count):
                if (l3 in query):
                    expected = [l1, l2, l3]
                elif (l2 in query):
                    expected = [l1, l2]
                elif (l1 in query):
                    expected = [l1]
                else:
                    expected = []
                assert (get_all_layers(query) == expected)
        # treat_as_input stops the upward traversal at l2.
        assert (get_all_layers(l3, treat_as_input=[l2]) == [l2, l3])
    def test_merge(self):
        """Two branches joined by an ElemwiseSumLayer; result order follows
        the query order (earlier queried layers and their ancestors first)."""
        from lasagne.layers import InputLayer, DenseLayer, ElemwiseSumLayer, get_all_layers
        l1 = InputLayer((10, 20))
        l2 = DenseLayer(l1, 30)
        l3 = DenseLayer(l2, 40)
        l4 = InputLayer((10, 30))
        l5 = DenseLayer(l4, 40)
        l6 = ElemwiseSumLayer([l3, l5])
        assert (get_all_layers(l6) == [l1, l2, l3, l4, l5, l6])
        assert (get_all_layers([l4, l6]) == [l4, l1, l2, l3, l5, l6])
        assert (get_all_layers([l5, l6]) == [l4, l5, l1, l2, l3, l6])
        assert (get_all_layers([l4, l2, l5, l6]) == [l4, l1, l2, l5, l3, l6])
        # treat_as_input keeps the named layers but hides their ancestors.
        assert (get_all_layers(l6, treat_as_input=[l2]) == [l2, l3, l4, l5, l6])
        assert (get_all_layers(l6, treat_as_input=[l3, l5]) == [l3, l5, l6])
        assert (get_all_layers([l6, l2], treat_as_input=[l6]) == [l6, l1, l2])
    def test_split(self):
        """A single input feeding two independent branches (l3 and l4)."""
        from lasagne.layers import InputLayer, DenseLayer, get_all_layers
        l1 = InputLayer((10, 20))
        l2 = DenseLayer(l1, 30)
        l3 = DenseLayer(l2, 40)
        l4 = DenseLayer(l1, 50)
        assert (get_all_layers(l3) == [l1, l2, l3])
        assert (get_all_layers(l4) == [l1, l4])
        assert (get_all_layers([l3, l4]) == [l1, l2, l3, l4])
        assert (get_all_layers([l4, l3]) == [l1, l4, l2, l3])
        assert (get_all_layers(l3, treat_as_input=[l2]) == [l2, l3])
        assert (get_all_layers([l3, l4], treat_as_input=[l2]) == [l2, l3, l1, l4])
    def test_bridge(self):
        """A residual-style graph: l2 feeds both l3 and the merge layer l4."""
        from lasagne.layers import InputLayer, DenseLayer, ElemwiseSumLayer, get_all_layers
        l1 = InputLayer((10, 20))
        l2 = DenseLayer(l1, 30)
        l3 = DenseLayer(l2, 30)
        l4 = ElemwiseSumLayer([l2, l3])
        l5 = DenseLayer(l4, 40)
        assert (get_all_layers(l5) == [l1, l2, l3, l4, l5])
        assert (get_all_layers(l5, treat_as_input=[l4]) == [l4, l5])
        # Cutting at l3 alone does not hide l1/l2: they remain reachable
        # through the other (direct) branch into the merge layer.
        assert (get_all_layers(l5, treat_as_input=[l3]) == [l1, l2, l3, l4, l5])
|
class TestGetOutput_InputLayer():
    """get_output() behaviour on a lone InputLayer."""
    @pytest.fixture
    def get_output(self):
        from lasagne.layers.helper import get_output
        return get_output
    @pytest.fixture
    def layer(self):
        from lasagne.layers.input import InputLayer
        return InputLayer((3, 2))
    def test_get_output_without_arguments(self, layer, get_output):
        # With no explicit input, the layer's own variable comes back as-is.
        assert get_output(layer) is layer.input_var
    def test_get_output_input_is_variable(self, layer, get_output):
        # A supplied theano variable replaces the layer's input variable.
        replacement = theano.Variable('myvariable')
        assert get_output(layer, replacement) is replacement
    def test_get_output_input_is_array(self, layer, get_output):
        # Raw array data is wrapped into a constant expression.
        data = [[1, 2, 3]]
        expression = get_output(layer, data)
        assert numpy.all(expression.eval() == data)
    def test_get_output_input_is_a_mapping(self, layer, get_output):
        # A {layer: expression} mapping substitutes the given expression.
        mapping = {layer: theano.tensor.matrix()}
        assert get_output(layer, mapping) is mapping[layer]
|
class TestGetOutput_Layer():
    """get_output() through a chain of mocked single-input layers,
    verifying propagation, substitution, and that substituted layers are
    never invoked."""
    @pytest.fixture
    def get_output(self):
        from lasagne.layers.helper import get_output
        return get_output
    @pytest.fixture
    def layers(self):
        """Mocked chain l1 (input) -> l2 -> l3; get_output_for is a Mock
        on each, so calls and return values can be asserted."""
        from lasagne.layers.base import Layer
        from lasagne.layers.input import InputLayer
        l1 = Mock(InputLayer((None,)), output_shape=(None,))
        l2 = Mock(Layer(l1), output_shape=(None,))
        l2.input_layer = l1
        l3 = Mock(Layer(l2), output_shape=(None,))
        l3.input_layer = l2
        return (l1, l2, l3)
    def test_get_output_without_arguments(self, layers, get_output):
        """Each layer is fed its predecessor's output, seeded by l1's var."""
        (l1, l2, l3) = layers
        output = get_output(l3)
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with(l2.get_output_for.return_value)
        l2.get_output_for.assert_called_with(l1.input_var)
    def test_get_output_with_single_argument(self, layers, get_output):
        """A single expression replaces the input; kwargs are forwarded."""
        (l1, l2, l3) = layers
        (inputs, kwarg) = (theano.tensor.matrix(), object())
        output = get_output(l3, inputs, kwarg=kwarg)
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with(l2.get_output_for.return_value, kwarg=kwarg)
        l2.get_output_for.assert_called_with(inputs, kwarg=kwarg)
    def test_get_output_input_is_a_mapping(self, layers, get_output):
        """Mapping the output layer itself short-circuits everything."""
        (l1, l2, l3) = layers
        # PropertyMock on input_var lets us assert it was never even read.
        p = PropertyMock()
        type(l1).input_var = p
        inputs = {l3: theano.tensor.matrix()}
        assert (get_output(l3, inputs) is inputs[l3])
        assert (l3.get_output_for.call_count == 0)
        assert (l2.get_output_for.call_count == 0)
        assert (p.call_count == 0)
    def test_get_output_input_is_a_mapping_no_key(self, layers, get_output):
        """An empty mapping behaves like no mapping at all."""
        (l1, l2, l3) = layers
        output = get_output(l3, {})
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with(l2.get_output_for.return_value)
        l2.get_output_for.assert_called_with(l1.input_var)
    def test_get_output_input_is_a_mapping_to_array(self, layers, get_output):
        """Mapping the output layer to raw data wraps it as a constant."""
        (l1, l2, l3) = layers
        p = PropertyMock()
        type(l1).input_var = p
        inputs = {l3: [[1, 2, 3]]}
        output = get_output(l3, inputs)
        assert numpy.all((output.eval() == inputs[l3]))
        assert (l3.get_output_for.call_count == 0)
        assert (l2.get_output_for.call_count == 0)
        assert (p.call_count == 0)
    def test_get_output_input_is_a_mapping_for_layer(self, layers, get_output):
        """Mapping an intermediate layer skips it and its ancestors."""
        (l1, l2, l3) = layers
        p = PropertyMock()
        type(l1).input_var = p
        (input_expr, kwarg) = (theano.tensor.matrix(), object())
        inputs = {l2: input_expr}
        output = get_output(l3, inputs, kwarg=kwarg)
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with(input_expr, kwarg=kwarg)
        assert (l2.get_output_for.call_count == 0)
        assert (p.call_count == 0)
    def test_get_output_input_is_a_mapping_for_input_layer(self, layers, get_output):
        """Mapping the input layer replaces its variable only; the rest of
        the chain still runs."""
        (l1, l2, l3) = layers
        p = PropertyMock()
        type(l1).input_var = p
        (input_expr, kwarg) = (theano.tensor.matrix(), object())
        inputs = {l1: input_expr}
        output = get_output(l3, inputs, kwarg=kwarg)
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with(l2.get_output_for.return_value, kwarg=kwarg)
        l2.get_output_for.assert_called_with(input_expr, kwarg=kwarg)
        assert (p.call_count == 0)
    @pytest.fixture
    def layer_from_shape(self):
        # A Layer built directly from a shape tuple has no input layer.
        from lasagne.layers.base import Layer
        return Layer((None, 20))
    def test_layer_from_shape_invalid_get_output(self, layer_from_shape, get_output):
        """A shape-only layer cannot produce output unless its input is
        supplied via the mapping."""
        layer = layer_from_shape
        with pytest.raises(ValueError):
            get_output(layer)
        with pytest.raises(ValueError):
            get_output(layer, [1, 2])
        with pytest.raises(ValueError):
            get_output(layer, {Mock(): [1, 2]})
    def test_layer_from_shape_valid_get_output(self, layer_from_shape, get_output):
        """Supplying the layer itself, or its (None) input layer, works."""
        layer = layer_from_shape
        inputs = {layer: theano.tensor.matrix()}
        assert (get_output(layer, inputs) is inputs[layer])
        # The missing input layer is keyed as None in the mapping.
        inputs = {None: theano.tensor.matrix()}
        layer.get_output_for = Mock()
        assert (get_output(layer, inputs) is layer.get_output_for.return_value)
        layer.get_output_for.assert_called_with(inputs[None])
|
class TestGetOutput_MergeLayer():
    """get_output() through a mocked two-branch graph ending in a
    MergeLayer, verifying list-of-inputs propagation and substitution."""
    @pytest.fixture
    def get_output(self):
        from lasagne.layers.helper import get_output
        return get_output
    @pytest.fixture
    def layers(self):
        """Two mocked chains l1[i] (input) -> l2[i], merged by l3."""
        from lasagne.layers.base import Layer, MergeLayer
        from lasagne.layers.input import InputLayer
        l1 = [Mock(InputLayer((None,)), output_shape=(None,)), Mock(InputLayer((None,)), output_shape=(None,))]
        l2 = [Mock(Layer(l1[0]), output_shape=(None,)), Mock(Layer(l1[1]), output_shape=(None,))]
        l2[0].input_layer = l1[0]
        l2[1].input_layer = l1[1]
        l3 = Mock(MergeLayer(l2))
        l3.input_layers = l2
        return (l1, l2, l3)
    def test_get_output_without_arguments(self, layers, get_output):
        """The merge layer receives both branch outputs as a list."""
        (l1, l2, l3) = layers
        output = get_output(l3)
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with([l2[0].get_output_for.return_value, l2[1].get_output_for.return_value])
        l2[0].get_output_for.assert_called_with(l1[0].input_var)
        l2[1].get_output_for.assert_called_with(l1[1].input_var)
    def test_get_output_with_single_argument_fails(self, layers, get_output):
        """A single expression is ambiguous for a multi-input network."""
        (l1, l2, l3) = layers
        (inputs, kwarg) = (theano.tensor.matrix(), object())
        with pytest.raises(ValueError):
            output = get_output(l3, inputs, kwarg=kwarg)
    def test_get_output_input_is_a_mapping(self, layers, get_output):
        """Mapping the merge layer itself short-circuits both branches."""
        (l1, l2, l3) = layers
        # PropertyMock on input_var lets us assert it was never even read.
        p = PropertyMock()
        type(l1[0]).input_var = p
        type(l1[1]).input_var = p
        inputs = {l3: theano.tensor.matrix()}
        assert (get_output(l3, inputs) is inputs[l3])
        assert (l3.get_output_for.call_count == 0)
        assert (l2[0].get_output_for.call_count == 0)
        assert (l2[1].get_output_for.call_count == 0)
        assert (p.call_count == 0)
    def test_get_output_input_is_a_mapping_no_key(self, layers, get_output):
        """An empty mapping behaves like no mapping at all."""
        (l1, l2, l3) = layers
        output = get_output(l3, {})
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with([l2[0].get_output_for.return_value, l2[1].get_output_for.return_value])
        l2[0].get_output_for.assert_called_with(l1[0].input_var)
        l2[1].get_output_for.assert_called_with(l1[1].input_var)
    def test_get_output_input_is_a_mapping_to_array(self, layers, get_output):
        """Mapping the merge layer to raw data wraps it as a constant."""
        (l1, l2, l3) = layers
        p = PropertyMock()
        type(l1[0]).input_var = p
        type(l1[1]).input_var = p
        inputs = {l3: [[1, 2, 3]]}
        output = get_output(l3, inputs)
        assert numpy.all((output.eval() == inputs[l3]))
        assert (l3.get_output_for.call_count == 0)
        assert (l2[0].get_output_for.call_count == 0)
        assert (l2[1].get_output_for.call_count == 0)
        assert (p.call_count == 0)
    def test_get_output_input_is_a_mapping_for_layer(self, layers, get_output):
        """Substituting one branch skips only that branch; the other still
        runs, with kwargs forwarded."""
        (l1, l2, l3) = layers
        p = PropertyMock()
        type(l1[0]).input_var = p
        (input_expr, kwarg) = (theano.tensor.matrix(), object())
        inputs = {l2[0]: input_expr}
        output = get_output(l3, inputs, kwarg=kwarg)
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with([input_expr, l2[1].get_output_for.return_value], kwarg=kwarg)
        l2[1].get_output_for.assert_called_with(l1[1].input_var, kwarg=kwarg)
        assert (l2[0].get_output_for.call_count == 0)
        assert (p.call_count == 0)
    def test_get_output_input_is_a_mapping_for_input_layer(self, layers, get_output):
        """Substituting one input layer's variable leaves both branch
        computations in place."""
        (l1, l2, l3) = layers
        p = PropertyMock()
        type(l1[0]).input_var = p
        (input_expr, kwarg) = (theano.tensor.matrix(), object())
        inputs = {l1[0]: input_expr}
        output = get_output(l3, inputs, kwarg=kwarg)
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with([l2[0].get_output_for.return_value, l2[1].get_output_for.return_value], kwarg=kwarg)
        l2[0].get_output_for.assert_called_with(input_expr, kwarg=kwarg)
        l2[1].get_output_for.assert_called_with(l1[1].input_var, kwarg=kwarg)
        assert (p.call_count == 0)
    @pytest.fixture
    def layer_from_shape(self):
        # First input given as a shape tuple only (no layer object).
        from lasagne.layers.input import InputLayer
        from lasagne.layers.base import MergeLayer
        return MergeLayer([(None, 20), Mock(InputLayer((None,)), output_shape=(None,))])
    def test_layer_from_shape_invalid_get_output(self, layer_from_shape, get_output):
        """Without a mapping entry for the shape-only input, output cannot
        be computed."""
        layer = layer_from_shape
        with pytest.raises(ValueError):
            get_output(layer)
        with pytest.raises(ValueError):
            get_output(layer, [1, 2])
        with pytest.raises(ValueError):
            get_output(layer, {layer.input_layers[1]: [1, 2]})
    def test_layer_from_shape_valid_get_output(self, layer_from_shape, get_output):
        """Supplying the layer itself, or keying the missing input as None,
        works."""
        layer = layer_from_shape
        inputs = {layer: theano.tensor.matrix()}
        assert (get_output(layer, inputs) is inputs[layer])
        inputs = {None: theano.tensor.matrix()}
        layer.get_output_for = Mock()
        assert (get_output(layer, inputs) is layer.get_output_for.return_value)
        layer.get_output_for.assert_called_with([inputs[None], layer.input_layers[1].input_var])
|
class TestGetOutputShape_InputLayer():
    """get_output_shape() behaviour on a lone InputLayer."""
    @pytest.fixture
    def get_output_shape(self):
        from lasagne.layers.helper import get_output_shape
        return get_output_shape
    @pytest.fixture
    def layer(self):
        from lasagne.layers.input import InputLayer
        return InputLayer((3, 2))
    def test_get_output_shape_without_arguments(self, layer, get_output_shape):
        # Defaults to the shape the layer was constructed with.
        assert get_output_shape(layer) == (3, 2)
    def test_get_output_shape_input_is_tuple(self, layer, get_output_shape):
        # An explicit shape tuple overrides the constructed shape.
        override = (4, 5, 6)
        assert get_output_shape(layer, override) == override
    def test_get_output_shape_input_is_a_mapping(self, layer, get_output_shape):
        # A {layer: shape} mapping substitutes the given shape.
        mapping = {layer: (4, 5, 6)}
        assert get_output_shape(layer, mapping) == mapping[layer]
|
class TestGetOutputShape_Layer():
    """get_output_shape() through a chain of mocked single-input layers,
    verifying shape propagation, substitution, and short-circuiting."""
    @pytest.fixture
    def get_output_shape(self):
        from lasagne.layers.helper import get_output_shape
        return get_output_shape
    @pytest.fixture
    def layers(self):
        """Mocked chain l1 (input) -> l2 -> l3; get_output_shape_for is a
        Mock on each, so calls and return values can be asserted."""
        from lasagne.layers.base import Layer
        from lasagne.layers.input import InputLayer
        l1 = Mock(InputLayer((None,)), output_shape=(None,))
        l2 = Mock(Layer(l1), output_shape=(None,))
        l2.input_layer = l1
        l3 = Mock(Layer(l2), output_shape=(None,))
        l3.input_layer = l2
        return (l1, l2, l3)
    def test_get_output_shape_without_arguments(self, layers, get_output_shape):
        """With no arguments the precomputed output_shape is used directly,
        without recomputing anything."""
        (l1, l2, l3) = layers
        output_shape = get_output_shape(l3)
        assert (output_shape is l3.output_shape)
        assert (l3.get_output_shape_for.call_count == 0)
        assert (l2.get_output_shape_for.call_count == 0)
    def test_get_output_shape_with_single_argument(self, layers, get_output_shape):
        """A single shape tuple is propagated through the whole chain."""
        (l1, l2, l3) = layers
        shp = (3, 4, 5)
        output_shape = get_output_shape(l3, shp)
        assert (output_shape is l3.get_output_shape_for.return_value)
        l3.get_output_shape_for.assert_called_with(l2.get_output_shape_for.return_value)
        l2.get_output_shape_for.assert_called_with(shp)
    def test_get_output_shape_input_is_a_mapping(self, layers, get_output_shape):
        """Mapping the output layer itself short-circuits everything."""
        (l1, l2, l3) = layers
        input_shapes = {l3: (4, 5, 6)}
        assert (get_output_shape(l3, input_shapes) is input_shapes[l3])
        assert (l3.get_output_shape_for.call_count == 0)
        assert (l2.get_output_shape_for.call_count == 0)
    def test_get_output_shape_input_is_a_mapping_no_key(self, layers, get_output_shape):
        """An empty mapping falls back to the precomputed output_shape."""
        (l1, l2, l3) = layers
        output_shape = get_output_shape(l3, {})
        assert (output_shape is l3.output_shape)
        assert (l3.get_output_shape_for.call_count == 0)
        assert (l2.get_output_shape_for.call_count == 0)
    def test_get_output_shape_input_is_a_mapping_for_layer(self, layers, get_output_shape):
        """Mapping an intermediate layer skips it and its ancestors."""
        (l1, l2, l3) = layers
        shp = (4, 5, 6)
        input_shapes = {l2: shp}
        output_shape = get_output_shape(l3, input_shapes)
        assert (output_shape is l3.get_output_shape_for.return_value)
        l3.get_output_shape_for.assert_called_with(shp)
        assert (l2.get_output_shape_for.call_count == 0)
    def test_get_output_shape_input_is_a_mapping_for_input_layer(self, layers, get_output_shape):
        """Mapping the input layer replaces its shape only; the rest of the
        chain still computes."""
        (l1, l2, l3) = layers
        shp = (4, 5, 6)
        input_shapes = {l1: shp}
        output_shape = get_output_shape(l3, input_shapes)
        assert (output_shape is l3.get_output_shape_for.return_value)
        l3.get_output_shape_for.assert_called_with(l2.get_output_shape_for.return_value)
        l2.get_output_shape_for.assert_called_with(shp)
    @pytest.fixture
    def layer_from_shape(self):
        # A Layer built directly from a shape tuple has no input layer.
        from lasagne.layers.base import Layer
        return Layer((None, 20))
    def test_layer_from_shape(self, layer_from_shape, get_output_shape):
        """A shape-only layer works when keyed directly or via None."""
        layer = layer_from_shape
        input_shapes = {layer: (4, 5, 6)}
        assert (get_output_shape(layer, input_shapes) is input_shapes[layer])
        # The missing input layer is keyed as None in the mapping.
        input_shapes = {None: (4, 5, 6)}
        layer.get_output_shape_for = Mock()
        assert (get_output_shape(layer, input_shapes) is layer.get_output_shape_for.return_value)
        layer.get_output_shape_for.assert_called_with(input_shapes[None])
|
class TestGetOutputShape_MergeLayer():
    """get_output_shape() through a mocked two-branch graph ending in a
    MergeLayer, verifying list-of-shapes propagation and substitution."""
    @pytest.fixture
    def get_output_shape(self):
        from lasagne.layers.helper import get_output_shape
        return get_output_shape
    @pytest.fixture
    def layers(self):
        """Two mocked chains l1[i] (input) -> l2[i], merged by l3."""
        from lasagne.layers.base import Layer, MergeLayer
        from lasagne.layers.input import InputLayer
        l1 = [Mock(InputLayer((None,)), output_shape=(None,)), Mock(InputLayer((None,)), output_shape=(None,))]
        l2 = [Mock(Layer(l1[0]), output_shape=(None,)), Mock(Layer(l1[1]), output_shape=(None,))]
        l2[0].input_layer = l1[0]
        l2[1].input_layer = l1[1]
        l3 = Mock(MergeLayer(l2))
        l3.input_layers = l2
        return (l1, l2, l3)
    def test_get_output_shape_without_arguments(self, layers, get_output_shape):
        """With no arguments the precomputed output_shape is used directly,
        without recomputing anything."""
        (l1, l2, l3) = layers
        output_shape = get_output_shape(l3)
        assert (output_shape is l3.output_shape)
        assert (l3.get_output_shape_for.call_count == 0)
        assert (l2[0].get_output_shape_for.call_count == 0)
        assert (l2[1].get_output_shape_for.call_count == 0)
    def test_get_output_shape_with_single_argument_fails(self, layers, get_output_shape):
        """A single shape tuple is ambiguous for a multi-input network."""
        (l1, l2, l3) = layers
        shp = (4, 5, 6)
        with pytest.raises(ValueError):
            output_shape = get_output_shape(l3, shp)
    def test_get_output_shape_input_is_a_mapping(self, layers, get_output_shape):
        """Mapping the merge layer itself short-circuits both branches."""
        (l1, l2, l3) = layers
        input_shapes = {l3: (4, 5, 6)}
        assert (get_output_shape(l3, input_shapes) is input_shapes[l3])
        assert (l3.get_output_shape_for.call_count == 0)
        assert (l2[0].get_output_shape_for.call_count == 0)
        assert (l2[1].get_output_shape_for.call_count == 0)
    def test_get_output_shape_input_is_a_mapping_no_key(self, layers, get_output_shape):
        """An empty mapping falls back to the precomputed output_shape."""
        (l1, l2, l3) = layers
        output_shape = get_output_shape(l3, {})
        assert (output_shape is l3.output_shape)
        assert (l3.get_output_shape_for.call_count == 0)
        assert (l2[0].get_output_shape_for.call_count == 0)
        assert (l2[1].get_output_shape_for.call_count == 0)
    def test_get_output_shape_input_is_a_mapping_for_layer(self, layers, get_output_shape):
        """Substituting one branch's shape skips only that branch; the other
        branch starts from its input layer's declared shape."""
        (l1, l2, l3) = layers
        shp = (4, 5, 6)
        input_shapes = {l2[0]: shp}
        output = get_output_shape(l3, input_shapes)
        assert (output is l3.get_output_shape_for.return_value)
        l3.get_output_shape_for.assert_called_with([shp, l2[1].get_output_shape_for.return_value])
        l2[1].get_output_shape_for.assert_called_with(l1[1].shape)
        assert (l2[0].get_output_shape_for.call_count == 0)
    def test_get_output_shape_input_is_a_mapping_for_input_layer(self, layers, get_output_shape):
        """Substituting one input layer's shape leaves both branch
        computations in place."""
        (l1, l2, l3) = layers
        shp = (4, 5, 6)
        input_shapes = {l1[0]: shp}
        output = get_output_shape(l3, input_shapes)
        assert (output is l3.get_output_shape_for.return_value)
        l3.get_output_shape_for.assert_called_with([l2[0].get_output_shape_for.return_value, l2[1].get_output_shape_for.return_value])
        l2[0].get_output_shape_for.assert_called_with(shp)
        l2[1].get_output_shape_for.assert_called_with(l1[1].shape)
    @pytest.fixture
    def layer_from_shape(self):
        # First input given as a shape tuple only (no layer object).
        from lasagne.layers.input import InputLayer
        from lasagne.layers.base import MergeLayer
        return MergeLayer([(None, 20), Mock(InputLayer((None,)), output_shape=(None,))])
    def test_layer_from_shape_valid_get_output_shape(self, layer_from_shape, get_output_shape):
        """Supplying the layer itself, or keying the missing input as None,
        works for shape inference."""
        layer = layer_from_shape
        input_shapes = {layer: (4, 5, 6)}
        assert (get_output_shape(layer, input_shapes) is input_shapes[layer])
        input_shapes = {None: (4, 5, 6)}
        layer.get_output_shape_for = Mock()
        assert (get_output_shape(layer, input_shapes) is layer.get_output_shape_for.return_value)
        layer.get_output_shape_for.assert_called_with([input_shapes[None], layer.input_layers[1].shape])
|
class TestGetAllParams():
    """get_all_params() aggregates per-layer parameters in graph order."""
    def test_get_all_params(self):
        from lasagne.layers import InputLayer, DenseLayer, get_all_params
        inp = InputLayer((10, 20))
        hidden = DenseLayer(inp, 30)
        out = DenseLayer(hidden, 40)
        # Unfiltered: every layer's parameters, input-to-output.
        assert get_all_params(out) == hidden.get_params() + out.get_params()
        # Tag filtering is delegated to each layer's get_params().
        for flag in (False, True):
            expected = (hidden.get_params(regularizable=flag) +
                        out.get_params(regularizable=flag))
            assert get_all_params(out, regularizable=flag) == expected
|
class TestCountParams():
    """count_params() totals parameter array sizes, honouring tag filters."""
    def test_get_all_params(self):
        from lasagne.layers import InputLayer, DenseLayer, count_params
        inp = InputLayer((10, 20))
        hidden = DenseLayer(inp, 30)
        out = DenseLayer(hidden, 40)
        n_weights = 20 * 30 + 30 * 40
        n_biases = 30 + 40
        # Weights are regularizable, biases are not.
        assert count_params(out, regularizable=True) == n_weights
        assert count_params(out, regularizable=False) == n_biases
        assert count_params(out) == n_weights + n_biases
|
class TestGetAllParamValues():
    """get_all_param_values() returns one array per shared parameter."""
    def test_get_all_param_values(self):
        from lasagne.layers import InputLayer, DenseLayer, get_all_param_values
        inp = InputLayer((10, 20))
        hidden = DenseLayer(inp, 30)
        out = DenseLayer(hidden, 40)
        # Two dense layers contribute W and b each: four arrays in total.
        values = get_all_param_values(out)
        assert len(values) == 4
|
class TestSetAllParamValues():
    """set_all_param_values() writes arrays into the network's shared
    variables and validates both count and shapes."""
    def test_set_all_param_values(self):
        from lasagne.layers import InputLayer, DenseLayer, set_all_param_values
        from lasagne.utils import floatX
        l1 = InputLayer((10, 20))
        l2 = DenseLayer(l1, 30)
        l3 = DenseLayer(l2, 40)
        w2 = floatX(numpy.random.normal(0, 1, (20, 30)))
        bias2 = floatX(numpy.random.normal(0, 1, (30,)))
        w3 = floatX(numpy.random.normal(0, 1, (30, 40)))
        bias3 = floatX(numpy.random.normal(0, 1, (40,)))
        set_all_param_values(l3, [w2, bias2, w3, bias3])
        # Every shared variable received exactly its matching array.
        for shared, expected in ((l2.W, w2), (l2.b, bias2),
                                 (l3.W, w3), (l3.b, bias3)):
            assert numpy.allclose(shared.get_value(), expected)
        # Too few values must be rejected.
        with pytest.raises(ValueError):
            set_all_param_values(l3, [w3, bias3, w2])
        # A value of the wrong shape must be rejected.
        with pytest.raises(ValueError):
            bad_w3 = floatX(numpy.random.normal(0, 1, (25, 40)))
            set_all_param_values(l3, [w2, bias2, bad_w3, bias3])
|
class TestInputLayer():
    """Construction and validation behaviour of InputLayer."""
    @pytest.fixture
    def layer(self):
        from lasagne.layers.input import InputLayer
        return InputLayer((3, 2))
    def test_input_var(self, layer):
        # A 2-tuple shape yields a 2D symbolic input variable.
        assert layer.input_var.ndim == 2
    def test_shape(self, layer):
        assert layer.shape == (3, 2)
    def test_input_var_name(self, layer):
        # Anonymous layers name their variable 'input'.
        assert layer.input_var.name == 'input'
    def test_named_layer_input_var_name(self):
        from lasagne.layers.input import InputLayer
        named = InputLayer((3, 2), name='foo')
        # Named layers prefix the variable with the layer name.
        assert named.input_var.name == 'foo.input'
    def test_get_params(self, layer):
        # Input layers carry no parameters.
        assert layer.get_params() == []
    def test_bad_shape_fails(self):
        from lasagne.layers.input import InputLayer
        # A 4D variable cannot back a 2D shape.
        four_dim = theano.tensor.tensor4()
        with pytest.raises(ValueError):
            InputLayer((3, 2), four_dim)
    def test_nonpositive_input_dims_raises_value_error(self):
        from lasagne.layers import InputLayer
        with pytest.raises(ValueError):
            InputLayer(shape=(None, -1, -1))
        with pytest.raises(ValueError):
            InputLayer(shape=(None, 0, 0))
        # Strictly positive dimensions are accepted.
        InputLayer(shape=(None, 1, 1))
|
class TestDropoutLayer():
    """Statistical and determinism tests for DropoutLayer (default p=0.5)."""
    @pytest.fixture(params=[(100, 100), (None, 100)])
    def input_layer(self, request):
        # Parametrized over a fixed and a partially-unknown batch size.
        from lasagne.layers.input import InputLayer
        return InputLayer(request.param)
    @pytest.fixture
    def layer(self, input_layer):
        from lasagne.layers.noise import DropoutLayer
        return DropoutLayer(input_layer)
    @pytest.fixture
    def layer_no_rescale(self, input_layer):
        from lasagne.layers.noise import DropoutLayer
        return DropoutLayer(input_layer, rescale=False)
    @pytest.fixture
    def layer_p_02(self, input_layer):
        from lasagne.layers.noise import DropoutLayer
        return DropoutLayer(input_layer, p=0.2)
    def test_get_output_for_non_deterministic(self, layer):
        """With p=0.5 and rescaling, survivors become 2.0 and the mean
        stays near 1 (statistical bounds on a 100x100 input)."""
        input = theano.shared(numpy.ones((100, 100)))
        result = layer.get_output_for(input)
        result_eval = result.eval()
        assert (0.9 < result_eval.mean() < 1.1)
        assert (numpy.unique(result_eval) == [0.0, 2.0]).all()
    def test_get_output_for_deterministic(self, layer):
        """deterministic=True disables dropout entirely."""
        input = theano.shared(numpy.ones((100, 100)))
        result = layer.get_output_for(input, deterministic=True)
        result_eval = result.eval()
        assert (result_eval == input.get_value()).all()
    def test_get_output_for_no_rescale(self, layer_no_rescale):
        """Without rescaling, survivors keep their value and the mean
        drops to roughly p=0.5."""
        input = theano.shared(numpy.ones((100, 100)))
        result = layer_no_rescale.get_output_for(input)
        result_eval = result.eval()
        assert (0.4 < result_eval.mean() < 0.6)
        assert (numpy.unique(result_eval) == [0.0, 1.0]).all()
    def test_get_output_for_p_02(self, layer_p_02):
        """With p=0.2, survivors are rescaled by 1/(1-p)=1.25."""
        input = theano.shared(numpy.ones((100, 100)))
        result = layer_p_02.get_output_for(input)
        result_eval = result.eval()
        assert (0.9 < result_eval.mean() < 1.1)
        assert (numpy.round(numpy.unique(result_eval), 2) == [0.0, 1.25]).all()
    def test_specified_rng(self, input_layer):
        """Seeding lasagne's global RNG makes the dropout mask reproducible."""
        from lasagne.layers.noise import DropoutLayer
        input = theano.shared(numpy.ones((100, 100)))
        seed = 123456789
        rng = get_rng()
        set_rng(RandomState(seed))
        result = DropoutLayer(input_layer).get_output_for(input)
        result_eval1 = result.eval()
        set_rng(RandomState(seed))
        result = DropoutLayer(input_layer).get_output_for(input)
        result_eval2 = result.eval()
        # Restore the global RNG so other tests are unaffected.
        set_rng(rng)
        assert numpy.allclose(result_eval1, result_eval2)
|
class TestGaussianNoiseLayer():
    """Statistical and determinism tests for GaussianNoiseLayer."""
    @pytest.fixture
    def layer(self):
        from lasagne.layers.noise import GaussianNoiseLayer
        return GaussianNoiseLayer(Mock(output_shape=(None,)))
    @pytest.fixture(params=[(100, 100), (None, 100)])
    def input_layer(self, request):
        # Parametrized over a fixed and a partially-unknown batch size.
        from lasagne.layers.input import InputLayer
        return InputLayer(request.param)
    def test_get_output_for_non_deterministic(self, layer):
        """Noise perturbs every element but preserves the mean (~1 for an
        all-ones input, by the default small sigma)."""
        input = theano.shared(numpy.ones((100, 100)))
        result = layer.get_output_for(input, deterministic=False)
        result_eval = result.eval()
        assert (result_eval != input.eval()).all()
        assert (result_eval.mean() != 1.0)
        assert (numpy.round(result_eval.mean()) == 1.0)
    def test_get_output_for_deterministic(self, layer):
        """deterministic=True disables the noise entirely."""
        input = theano.shared(numpy.ones((3, 3)))
        result = layer.get_output_for(input, deterministic=True)
        result_eval = result.eval()
        assert (result_eval == input.eval()).all()
    def test_specified_rng(self, input_layer):
        """Seeding lasagne's global RNG makes the noise reproducible."""
        from lasagne.layers.noise import GaussianNoiseLayer
        input = theano.shared(numpy.ones((100, 100)))
        seed = 123456789
        rng = get_rng()
        set_rng(RandomState(seed))
        result = GaussianNoiseLayer(input_layer).get_output_for(input)
        result_eval1 = result.eval()
        set_rng(RandomState(seed))
        result = GaussianNoiseLayer(input_layer).get_output_for(input)
        result_eval2 = result.eval()
        # Restore the global RNG so other tests are unaffected.
        set_rng(rng)
        assert numpy.allclose(result_eval1, result_eval2)
|
def _example_modules():
    """Return the module names (filename minus extension) of all example
    scripts in EXAMPLES_DIR.

    Uses the pattern '*.py' so only Python source files are collected; the
    previous pattern '*py' would also match any file whose name merely
    ends in the letters "py" (e.g. a file with no '.py' extension).
    """
    paths = glob(join(EXAMPLES_DIR, '*.py'))
    return [splitext(basename(path))[0] for path in paths]
|
@pytest.fixture
def example(request):
    """Make the examples directory importable for the duration of a test,
    removing it from sys.path again afterwards."""
    sys.path.insert(0, EXAMPLES_DIR)
    def _restore_path():
        sys.path.remove(EXAMPLES_DIR)
    request.addfinalizer(_restore_path)
|
@pytest.mark.slow
@pytest.mark.parametrize('module_name', _example_modules())
def test_example(example, module_name):
    """Smoke-test each example script by running its main() for one epoch.

    Examples whose optional dependencies or hardware are unavailable on
    this machine are skipped instead of failing.
    """
    try:
        main = getattr(import_module(module_name), 'main')
    except ImportError as e:
        skip_exceptions = ['requires a GPU', 'pylearn2', 'dnn not available']
        if any(text in str(e) for text in skip_exceptions):
            # pytest.skip expects a message string, not the exception object.
            pytest.skip(str(e))
        else:
            raise
    main(num_epochs=1)
|
def test_initializer_sample():
    """The abstract base Initializer cannot be sampled from directly."""
    from lasagne.init import Initializer
    base = Initializer()
    with pytest.raises(NotImplementedError):
        base.sample((100, 100))
|
def test_shape():
    """Every concrete initializer honours the requested sample shape."""
    from lasagne.init import Initializer
    shape = (12, 23)
    for klass in Initializer.__subclasses__():
        children = klass.__subclasses__()
        if children:
            # Classes with their own subclasses (e.g. Glorot) need extra
            # constructor arguments, so only their shortcut subclasses
            # are instantiated here.
            for child in children:
                assert child().sample(shape).shape == shape
        else:
            assert klass().sample(shape).shape == shape
|
def test_specified_rng():
    """Seeding lasagne's global RNG makes every initializer reproducible."""
    from lasagne.random import get_rng, set_rng
    from lasagne.init import (Normal, Uniform, GlorotNormal, GlorotUniform,
                              Sparse, Orthogonal)
    from numpy.random import RandomState
    from numpy import allclose
    seed = 123456789
    saved_rng = get_rng()
    for init_class in (Normal, Uniform, GlorotNormal, GlorotUniform,
                       Sparse, Orthogonal):
        draws = []
        for _ in range(2):
            set_rng(RandomState(seed))
            draws.append(init_class().sample((100, 100)))
        # Restore the global RNG so other tests are unaffected.
        set_rng(saved_rng)
        assert allclose(draws[0], draws[1]), \
            'random initialization was inconsistent for {}'.format(init_class.__name__)
|
def test_normal():
    """Normal() defaults to mean 0 and std 0.01 (checked statistically)."""
    from lasagne.init import Normal
    draw = Normal().sample((100, 200))
    assert -0.001 < draw.mean() < 0.001
    assert 0.009 < draw.std() < 0.011
|
def test_uniform_range_as_number():
    """A single number r means sampling uniformly from [-r, r]."""
    from lasagne.init import Uniform
    draw = Uniform(1.0).sample((300, 400))
    assert draw.shape == (300, 400)
    # Extremes of a large sample should approach both interval endpoints.
    assert -1.0 <= draw.min() < -0.9
    assert 0.9 < draw.max() <= 1.0
|
def test_uniform_range_as_range():
    """A (low, high) pair means sampling uniformly from that interval."""
    from lasagne.init import Uniform
    draw = Uniform((0.0, 1.0)).sample((300, 400))
    assert draw.shape == (300, 400)
    # Extremes of a large sample should approach both interval endpoints.
    assert 0.0 <= draw.min() < 0.1
    assert 0.9 < draw.max() <= 1.0
|
def test_uniform_mean_std():
    """Uniform(std=, mean=) produces samples with the requested moments."""
    from lasagne.init import Uniform
    draw = Uniform(std=1.0, mean=5.0).sample((300, 400))
    assert 4.9 < draw.mean() < 5.1
    assert 0.9 < draw.std() < 1.1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.