code stringlengths 17 6.64M |
|---|
def aggregate(loss, weights=None, mode='mean'):
    """Aggregates an element- or item-wise loss to a scalar loss.

    Parameters
    ----------
    loss : Theano tensor
        The loss expression to aggregate.
    weights : Theano tensor, optional
        Per-element or per-item weights, broadcastable to the shape of
        `loss`. If omitted, every element is weighted equally.
    mode : {'mean', 'sum', 'normalized_sum'}
        Whether to aggregate by averaging, by summing, or by summing and
        dividing by the total weight (the latter requires `weights`).

    Returns
    -------
    Theano scalar
        A scalar loss expression suitable for differentiation.

    Raises
    ------
    ValueError
        If `mode` is unknown, or is ``'normalized_sum'`` without `weights`.

    Notes
    -----
    Binary weights (only 0 and 1) can be used to mask out entries; masked
    entries must still hold valid values, since NaNs propagate through.
    """
    # Apply the weighting before any reduction takes place.
    weighted = loss if weights is None else loss * weights
    if mode == 'mean':
        return weighted.mean()
    if mode == 'sum':
        return weighted.sum()
    if mode == 'normalized_sum':
        if weights is None:
            raise ValueError("require weights for mode='normalized_sum'")
        return weighted.sum() / weights.sum()
    raise ValueError("mode must be 'mean', 'sum' or 'normalized_sum', "
                     "got %r" % mode)
|
def binary_hinge_loss(predictions, targets, binary=True, delta=1):
    """Computes the binary hinge loss between predictions and targets.

    .. math:: L_i = \\max(0, \\delta - t_i p_i)

    Parameters
    ----------
    predictions : Theano tensor
        Predictions in (0, 1), such as sigmoidal output of a neural network.
    targets : Theano tensor
        Targets in {0, 1} (or in {-1, 1} depending on `binary`), such as
        ground truth labels.
    binary : bool, default True
        ``True`` if targets are in {0, 1}, ``False`` if they are in {-1, 1}.
    delta : scalar, default 1
        The hinge loss margin.

    Returns
    -------
    Theano tensor
        An expression for the element-wise binary hinge loss.

    Notes
    -----
    An alternative to binary cross-entropy for binary classification.
    """
    # Map {0, 1} labels onto {-1, 1} so the hinge formula applies directly.
    signed = 2 * targets - 1 if binary else targets
    margin_violation = delta - predictions * signed
    return theano.tensor.nnet.relu(margin_violation)
|
def multiclass_hinge_loss(predictions, targets, delta=1):
    """Computes the multi-class hinge loss between predictions and targets.

    .. math:: L_i = \\max_{j \\not = p_i} (0, t_j - t_{p_i} + \\delta)

    Parameters
    ----------
    predictions : Theano 2D tensor
        Predictions in (0, 1), such as softmax output of a neural network,
        with data points in rows and class probabilities in columns.
    targets : Theano 2D tensor or 1D tensor
        Either a vector of int giving the correct class index per data point
        or a 2D one-hot encoding of the correct class in the same layout as
        predictions (non-binary targets in [0, 1] do not work!).
    delta : scalar, default 1
        The hinge loss margin.

    Returns
    -------
    Theano 1D tensor
        An expression for the item-wise multi-class hinge loss.

    Notes
    -----
    An alternative to categorical cross-entropy for multi-class problems.
    """
    num_cls = predictions.shape[1]
    # Accept integer class indices by expanding them to a one-hot encoding.
    if targets.ndim == predictions.ndim - 1:
        targets = theano.tensor.extra_ops.to_one_hot(targets, num_cls)
    elif targets.ndim != predictions.ndim:
        raise TypeError('rank mismatch between targets and predictions')
    # Score of the correct class per row ...
    correct_scores = predictions[targets.nonzero()]
    # ... versus the highest score among all the other classes.
    wrong_scores = predictions[(1 - targets).nonzero()]
    wrong_scores = theano.tensor.reshape(wrong_scores, (-1, num_cls - 1))
    best_wrong = theano.tensor.max(wrong_scores, axis=1)
    return theano.tensor.nnet.relu(best_wrong - correct_scores + delta)
|
def binary_accuracy(predictions, targets, threshold=0.5):
    """Computes the binary accuracy between predictions and targets.

    .. math:: L_i = \\mathbb{I}(t_i = \\mathbb{I}(p_i \\ge \\alpha))

    Parameters
    ----------
    predictions : Theano tensor
        Predictions in [0, 1], such as sigmoidal output of a neural network,
        giving the probability of the positive class.
    targets : Theano tensor
        Targets in {0, 1}, such as ground truth labels.
    threshold : scalar, default: 0.5
        The threshold at which a prediction counts as the positive class.

    Returns
    -------
    Theano tensor
        An expression for the element-wise binary accuracy in {0, 1}.

    Notes
    -----
    Do not use this with a gradient calculation; its gradient is zero
    everywhere. It is a convenience for validation and testing only.

    To obtain the average accuracy, call :func:`theano.tensor.mean()` on the
    result, passing ``dtype=theano.config.floatX`` to compute the mean on GPU.
    """
    # Threshold the probabilities into hard {0, 1} decisions, then compare.
    hard_decisions = theano.tensor.ge(predictions, threshold)
    return theano.tensor.eq(hard_decisions, targets)
|
def categorical_accuracy(predictions, targets, top_k=1):
    """Computes the categorical accuracy between predictions and targets.

    .. math:: L_i = \\mathbb{I}(t_i = \\operatorname{argmax}_c p_{i,c})

    Can be relaxed to allow matches among the top :math:`k` predictions:

    .. math::
        L_i = \\mathbb{I}(t_i \\in \\operatorname{argsort}_c (-p_{i,c})_{:k})

    Parameters
    ----------
    predictions : Theano 2D tensor
        Predictions in (0, 1), such as softmax output of a neural network,
        with data points in rows and class probabilities in columns.
    targets : Theano 2D tensor or 1D tensor
        Either a vector of int giving the correct class index per data point
        or a 2D one-hot encoding of the correct class in the same layout as
        predictions.
    top_k : int
        Regard a prediction as correct if the target class is among the
        `top_k` largest class probabilities.

    Returns
    -------
    Theano 1D tensor
        An expression for the item-wise categorical accuracy in {0, 1}.

    Notes
    -----
    This includes an argmax and is not differentiable; never use it with a
    gradient calculation. It is a convenience for validation and testing.

    To obtain the average accuracy, call :func:`theano.tensor.mean()` on the
    result, passing ``dtype=theano.config.floatX`` to compute the mean on GPU.
    """
    # Accept one-hot targets by collapsing them back to class indices.
    if targets.ndim == predictions.ndim:
        targets = theano.tensor.argmax(targets, axis=-1)
    elif targets.ndim != predictions.ndim - 1:
        raise TypeError('rank mismatch between targets and predictions')
    if top_k == 1:
        # Fast path: a single argmax suffices.
        best = theano.tensor.argmax(predictions, axis=-1)
        return theano.tensor.eq(best, targets)
    # General path: keep the last `top_k` entries of the ascending argsort.
    ranked = theano.tensor.argsort(predictions, axis=-1)
    keep_last_k = [slice(None)] * (ranked.ndim - 1) + [slice(-top_k, None)]
    ranked = ranked[keep_last_k]
    # Broadcast targets against the k candidates and check for any match.
    targets = theano.tensor.shape_padaxis(targets, axis=-1)
    return theano.tensor.any(theano.tensor.eq(ranked, targets), axis=-1)
|
def get_rng():
    """Get the package-level random number generator.

    Returns
    -------
    :class:`numpy.random.RandomState` instance
        The generator passed to the most recent call of :func:`set_rng`,
        or ``numpy.random`` if :func:`set_rng` has never been called.
    """
    return _rng
|
def set_rng(new_rng):
    """Set the package-level random number generator.

    Parameters
    ----------
    new_rng : ``numpy.random`` or a :class:`numpy.random.RandomState` instance
        The random number generator to use.
    """
    # Rebind the module-level generator that get_rng() hands out.
    global _rng
    _rng = new_rng
|
def l1(x):
    """Computes the L1 norm of a tensor.

    Parameters
    ----------
    x : Theano tensor

    Returns
    -------
    Theano scalar
        L1 norm (sum of absolute values of elements).
    """
    absolute_values = abs(x)
    return T.sum(absolute_values)
|
def l2(x):
    """Computes the squared L2 norm of a tensor.

    Parameters
    ----------
    x : Theano tensor

    Returns
    -------
    Theano scalar
        Squared L2 norm (sum of squared values of elements).
    """
    squared_values = x ** 2
    return T.sum(squared_values)
|
def apply_penalty(tensor_or_tensors, penalty, **kwargs):
    """Computes the total cost for applying a specified penalty
    to a tensor or group of tensors.

    Parameters
    ----------
    tensor_or_tensors : Theano tensor or list of tensors
    penalty : callable
    **kwargs
        Keyword arguments passed on to `penalty`.

    Returns
    -------
    Theano scalar
        A scalar expression for the total penalty cost.
    """
    # EAFP: treat the input as a collection first; if iterating (or summing)
    # it fails, fall back to penalizing it as a single tensor.
    try:
        return sum(penalty(tensor, **kwargs) for tensor in tensor_or_tensors)
    except (TypeError, ValueError):
        return penalty(tensor_or_tensors, **kwargs)
|
def regularize_layer_params(layer, penalty, tags=None, **kwargs):
    """Computes a regularization cost by applying a penalty to the parameters
    of a layer or group of layers.

    Parameters
    ----------
    layer : a :class:`Layer` instance or list of layers.
    penalty : callable
    tags : dict, optional
        Tag specifications which filter the parameters of the layer or layers.
        By default, only parameters with the `regularizable` tag are included.
    **kwargs
        Keyword arguments passed on to `penalty`.

    Returns
    -------
    Theano scalar
        A scalar expression for the cost.
    """
    # Use a None sentinel instead of a mutable default argument; also avoids
    # sharing one dict object across all calls.
    if tags is None:
        tags = {'regularizable': True}
    layers = [layer] if isinstance(layer, Layer) else layer
    all_params = []
    # Collect the filtered parameters of every layer (avoid shadowing the
    # `layer` argument with the loop variable).
    for lyr in layers:
        all_params += lyr.get_params(**tags)
    return apply_penalty(all_params, penalty, **kwargs)
|
def regularize_layer_params_weighted(layers, penalty, tags=None, **kwargs):
    """Computes a regularization cost by applying a penalty to the parameters
    of a layer or group of layers, weighted by a coefficient for each layer.

    Parameters
    ----------
    layers : dict
        A mapping from :class:`Layer` instances to coefficients.
    penalty : callable
    tags : dict, optional
        Tag specifications which filter the parameters of the layer or layers.
        By default, only parameters with the `regularizable` tag are included.
    **kwargs
        Keyword arguments passed on to `penalty`.

    Returns
    -------
    Theano scalar
        A scalar expression for the cost.
    """
    # Use a None sentinel instead of a mutable default argument.
    if tags is None:
        tags = {'regularizable': True}
    # Sum each layer's penalty scaled by its coefficient.
    return sum(coeff * apply_penalty(layer.get_params(**tags),
                                     penalty, **kwargs)
               for layer, coeff in layers.items())
|
def regularize_network_params(layer, penalty, tags=None, **kwargs):
    """Computes a regularization cost by applying a penalty to the parameters
    of all layers in a network.

    Parameters
    ----------
    layer : a :class:`Layer` instance.
        Parameters of this layer and all layers below it will be penalized.
    penalty : callable
    tags : dict, optional
        Tag specifications which filter the parameters of the layer or layers.
        By default, only parameters with the `regularizable` tag are included.
    **kwargs
        Keyword arguments passed on to `penalty`.

    Returns
    -------
    Theano scalar
        A scalar expression for the cost.
    """
    # Use a None sentinel instead of a mutable default argument.
    if tags is None:
        tags = {'regularizable': True}
    return apply_penalty(get_all_params(layer, **tags), penalty, **kwargs)
|
def pytest_addoption(parser):
    """Pytest hook: register the ``--runslow`` command-line flag.

    Tests marked ``slow`` are only run when this flag is given (see
    ``pytest_runtest_setup``).
    """
    parser.addoption('--runslow', action='store_true', help='run slow tests')
|
def pytest_runtest_setup(item):
    """Pytest hook: skip tests marked ``slow`` unless ``--runslow`` is set."""
    if 'slow' not in item.keywords:
        return
    if not item.config.getoption('--runslow'):
        pytest.skip('need --runslow option to run')
|
@pytest.fixture
def dummy_input_layer():
    """Return a Mock specced on a real InputLayer of shape (2, 3, 4),
    with the real layer's shape, input_var and output_shape copied over."""
    from lasagne.layers.input import InputLayer
    real_layer = InputLayer((2, 3, 4))
    fake_layer = Mock(real_layer)
    fake_layer.shape = real_layer.shape
    fake_layer.input_var = real_layer.input_var
    fake_layer.output_shape = real_layer.output_shape
    return fake_layer
|
class TestLayer():
    """Unit tests for lasagne.layers.base.Layer: shape handling, naming,
    parameter registration/tagging, and input validation."""
    # Layer fed by a mock incoming layer with a 1D output shape.
    @pytest.fixture
    def layer(self):
        from lasagne.layers.base import Layer
        return Layer(Mock(output_shape=(None,)))
    # Same, but with an explicit layer name (used for param name prefixing).
    @pytest.fixture
    def named_layer(self):
        from lasagne.layers.base import Layer
        return Layer(Mock(output_shape=(None,)), name='layer_name')
    def test_input_shape(self, layer):
        assert (layer.input_shape == layer.input_layer.output_shape)
    def test_get_output_shape_for(self, layer):
        # Base Layer is shape-preserving: the input shape passes through.
        shape = Mock()
        assert (layer.get_output_shape_for(shape) == shape)
    # Layer constructed from a plain shape tuple instead of another layer.
    @pytest.fixture
    def layer_from_shape(self):
        from lasagne.layers.base import Layer
        return Layer((None, 20))
    def test_layer_from_shape(self, layer_from_shape):
        layer = layer_from_shape
        assert (layer.input_layer is None)
        assert (layer.input_shape == (None, 20))
    def test_named_layer(self, named_layer):
        assert (named_layer.name == 'layer_name')
    def test_get_params(self, layer):
        # A fresh Layer has no registered parameters.
        assert (layer.get_params() == [])
    def test_get_params_tags(self, layer):
        # Register three params with different tag combinations and check
        # that get_params filters correctly on tag presence/absence.
        a_shape = (20, 50)
        a = numpy.random.normal(0, 1, a_shape)
        A = layer.add_param(a, a_shape, name='A', tag1=True, tag2=False)
        b_shape = (30, 20)
        b = numpy.random.normal(0, 1, b_shape)
        B = layer.add_param(b, b_shape, name='B', tag1=True, tag2=True)
        c_shape = (40, 10)
        c = numpy.random.normal(0, 1, c_shape)
        C = layer.add_param(c, c_shape, name='C', tag2=True)
        assert (layer.get_params() == [A, B, C])
        assert (layer.get_params(tag1=True) == [A, B])
        assert (layer.get_params(tag1=False) == [C])
        assert (layer.get_params(tag2=True) == [B, C])
        assert (layer.get_params(tag2=False) == [A])
        assert (layer.get_params(tag1=True, tag2=True) == [B])
    def test_get_params_expressions(self, layer):
        # Params may be Theano expressions; get_params returns the unique
        # underlying shared variables they depend on.
        (x, y, z) = (theano.shared(0, name=n) for n in 'xyz')
        W1 = layer.add_param(((x ** 2) + theano.tensor.log(y)), (), tag1=True)
        W2 = layer.add_param(theano.tensor.matrix(), (10, 10), tag1=True)
        W3 = layer.add_param(z.T, (), tag2=True)
        assert (list(layer.params.keys()) == [W1, W2, W3])
        assert (layer.get_params() == [x, y, z])
        assert (layer.get_params(tag1=True) == [x, y])
        assert (layer.get_params(tag2=True) == [z])
    def test_add_param_tags(self, layer):
        # Default tags are 'trainable' and 'regularizable'; passing
        # trainable=False removes that tag; extra keyword tags are recorded.
        a_shape = (20, 50)
        a = numpy.random.normal(0, 1, a_shape)
        A = layer.add_param(a, a_shape)
        assert (A in layer.params)
        assert ('trainable' in layer.params[A])
        assert ('regularizable' in layer.params[A])
        b_shape = (30, 20)
        b = numpy.random.normal(0, 1, b_shape)
        B = layer.add_param(b, b_shape, trainable=False)
        assert (B in layer.params)
        assert ('trainable' not in layer.params[B])
        assert ('regularizable' in layer.params[B])
        c_shape = (40, 10)
        c = numpy.random.normal(0, 1, c_shape)
        C = layer.add_param(c, c_shape, tag1=True)
        assert (C in layer.params)
        assert ('trainable' in layer.params[C])
        assert ('regularizable' in layer.params[C])
        assert ('tag1' in layer.params[C])
    def test_add_param_name(self, layer):
        a_shape = (20, 50)
        a = numpy.random.normal(0, 1, a_shape)
        A = layer.add_param(a, a_shape, name='A')
        assert (A.name == 'A')
    def test_add_param_named_layer_name(self, named_layer):
        # A named layer prefixes its name onto the parameter name.
        a_shape = (20, 50)
        a = numpy.random.normal(0, 1, a_shape)
        A = named_layer.add_param(a, a_shape, name='A')
        assert (A.name == 'layer_name.A')
    def test_get_output_for_notimplemented(self, layer):
        # The base class leaves get_output_for abstract.
        with pytest.raises(NotImplementedError):
            layer.get_output_for(Mock())
    def test_nonpositive_input_dims_raises_value_error(self, layer):
        # Negative or zero dimensions are rejected; positive ones accepted.
        from lasagne.layers.base import Layer
        neg_input_layer = Mock(output_shape=(None, (- 1), (- 1)))
        zero_input_layer = Mock(output_shape=(None, 0, 0))
        pos_input_layer = Mock(output_shape=(None, 1, 1))
        with pytest.raises(ValueError):
            Layer(neg_input_layer)
        with pytest.raises(ValueError):
            Layer(zero_input_layer)
        Layer(pos_input_layer)
|
class TestMergeLayer():
    """Unit tests for lasagne.layers.base.MergeLayer: multiple input layers,
    shape-tuple inputs, and the abstract output methods."""
    # MergeLayer fed by two mock incoming layers.
    @pytest.fixture
    def layer(self):
        from lasagne.layers.base import MergeLayer
        return MergeLayer([Mock(), Mock()])
    def test_input_shapes(self, layer):
        assert (layer.input_shapes == [l.output_shape for l in layer.input_layers])
    # Mixed construction: one plain shape tuple and one real input layer.
    @pytest.fixture
    def layer_from_shape(self):
        from lasagne.layers.input import InputLayer
        from lasagne.layers.base import MergeLayer
        return MergeLayer([(None, 20), Mock(InputLayer((None,)), output_shape=(None,))])
    def test_layer_from_shape(self, layer_from_shape):
        # A shape tuple yields input_layers entry None but keeps the shape.
        layer = layer_from_shape
        assert (layer.input_layers[0] is None)
        assert (layer.input_shapes[0] == (None, 20))
        assert (layer.input_layers[1] is not None)
        assert (layer.input_shapes[1] == layer.input_layers[1].output_shape)
    def test_get_params(self, layer):
        assert (layer.get_params() == [])
    def test_get_output_shape_for_notimplemented(self, layer):
        # Both output methods are abstract on the base MergeLayer.
        with pytest.raises(NotImplementedError):
            layer.get_output_shape_for(Mock())
    def test_get_output_for_notimplemented(self, layer):
        with pytest.raises(NotImplementedError):
            layer.get_output_for(Mock())
|
def test_embedding_2D_input():
    """EmbeddingLayer on a 2D integer input: output shape gains an embedding
    axis, and the output equals rows of W indexed by the input ids."""
    import numpy as np
    import theano
    import theano.tensor as T
    from lasagne.layers import EmbeddingLayer, InputLayer, helper
    batch_size, seq_len = 2, 3
    vocab_size, emb_size = 3, 5
    ids = T.imatrix()
    l_in = InputLayer((None, seq_len))
    # Deterministic embedding matrix so the lookup result is predictable.
    embeddings = np.arange(vocab_size * emb_size).reshape(
        (vocab_size, emb_size)).astype('float32')
    l_emb = EmbeddingLayer(l_in, input_size=vocab_size, output_size=emb_size,
                           W=embeddings)
    assert (helper.get_output_shape(l_emb, (batch_size, seq_len)) ==
            (batch_size, seq_len, emb_size))
    lookup = theano.function([ids], helper.get_output(l_emb, ids))
    ids_test = np.array([[0, 1, 2], [0, 0, 2]], dtype='int32')
    np.testing.assert_array_almost_equal(lookup(ids_test),
                                         embeddings[ids_test])
|
def test_embedding_1D_input():
    """EmbeddingLayer on a 1D integer input: each id maps to one row of W."""
    import numpy as np
    import theano
    import theano.tensor as T
    from lasagne.layers import EmbeddingLayer, InputLayer, helper
    batch_size = 2
    vocab_size, emb_size = 3, 10
    ids = T.ivector()
    l_in = InputLayer((None,))
    # Deterministic embedding matrix so the lookup result is predictable.
    embeddings = np.arange(vocab_size * emb_size).reshape(
        (vocab_size, emb_size)).astype('float32')
    l_emb = EmbeddingLayer(l_in, input_size=vocab_size, output_size=emb_size,
                           W=embeddings)
    assert (helper.get_output_shape(l_emb, (batch_size,)) ==
            (batch_size, emb_size))
    lookup = theano.function([ids], helper.get_output(l_emb, ids))
    ids_test = np.array([0, 1, 2], dtype='int32')
    np.testing.assert_array_almost_equal(lookup(ids_test),
                                         embeddings[ids_test])
|
class TestGetAllLayers():
    """Tests for get_all_layers: topological collection of layers for
    stacked, merged, split and bridged network graphs."""
    def test_stack(self):
        # A linear stack: querying any subset returns the prefix of the
        # stack up to the deepest queried layer, in order.
        from lasagne.layers import InputLayer, DenseLayer, get_all_layers
        from itertools import permutations
        l1 = InputLayer((10, 20))
        l2 = DenseLayer(l1, 30)
        l3 = DenseLayer(l2, 40)
        for count in (0, 1, 2, 3):
            for query in permutations([l1, l2, l3], count):
                if (l3 in query):
                    expected = [l1, l2, l3]
                elif (l2 in query):
                    expected = [l1, l2]
                elif (l1 in query):
                    expected = [l1]
                else:
                    expected = []
                assert (get_all_layers(query) == expected)
        # treat_as_input stops traversal below the given layer.
        assert (get_all_layers(l3, treat_as_input=[l2]) == [l2, l3])
    def test_merge(self):
        # Two branches joined by an elementwise sum: order depends on which
        # layers are queried first and on treat_as_input cut points.
        from lasagne.layers import InputLayer, DenseLayer, ElemwiseSumLayer, get_all_layers
        l1 = InputLayer((10, 20))
        l2 = DenseLayer(l1, 30)
        l3 = DenseLayer(l2, 40)
        l4 = InputLayer((10, 30))
        l5 = DenseLayer(l4, 40)
        l6 = ElemwiseSumLayer([l3, l5])
        assert (get_all_layers(l6) == [l1, l2, l3, l4, l5, l6])
        assert (get_all_layers([l4, l6]) == [l4, l1, l2, l3, l5, l6])
        assert (get_all_layers([l5, l6]) == [l4, l5, l1, l2, l3, l6])
        assert (get_all_layers([l4, l2, l5, l6]) == [l4, l1, l2, l5, l3, l6])
        assert (get_all_layers(l6, treat_as_input=[l2]) == [l2, l3, l4, l5, l6])
        assert (get_all_layers(l6, treat_as_input=[l3, l5]) == [l3, l5, l6])
        assert (get_all_layers([l6, l2], treat_as_input=[l6]) == [l6, l1, l2])
    def test_split(self):
        # One input feeding two independent heads (l3 via l2, and l4).
        from lasagne.layers import InputLayer, DenseLayer, get_all_layers
        l1 = InputLayer((10, 20))
        l2 = DenseLayer(l1, 30)
        l3 = DenseLayer(l2, 40)
        l4 = DenseLayer(l1, 50)
        assert (get_all_layers(l3) == [l1, l2, l3])
        assert (get_all_layers(l4) == [l1, l4])
        assert (get_all_layers([l3, l4]) == [l1, l2, l3, l4])
        assert (get_all_layers([l4, l3]) == [l1, l4, l2, l3])
        assert (get_all_layers(l3, treat_as_input=[l2]) == [l2, l3])
        assert (get_all_layers([l3, l4], treat_as_input=[l2]) == [l2, l3, l1, l4])
    def test_bridge(self):
        # A skip connection: l2 feeds both l3 and the merge l4.
        from lasagne.layers import InputLayer, DenseLayer, ElemwiseSumLayer, get_all_layers
        l1 = InputLayer((10, 20))
        l2 = DenseLayer(l1, 30)
        l3 = DenseLayer(l2, 30)
        l4 = ElemwiseSumLayer([l2, l3])
        l5 = DenseLayer(l4, 40)
        assert (get_all_layers(l5) == [l1, l2, l3, l4, l5])
        assert (get_all_layers(l5, treat_as_input=[l4]) == [l4, l5])
        assert (get_all_layers(l5, treat_as_input=[l3]) == [l1, l2, l3, l4, l5])
|
class TestGetOutput_InputLayer():
    """Tests for get_output applied directly to an InputLayer."""
    @pytest.fixture
    def get_output(self):
        from lasagne.layers.helper import get_output
        return get_output
    @pytest.fixture
    def layer(self):
        from lasagne.layers.input import InputLayer
        return InputLayer((3, 2))
    def test_get_output_without_arguments(self, layer, get_output):
        # With no inputs given, the layer's own input variable is returned.
        assert (get_output(layer) is layer.input_var)
    def test_get_output_input_is_variable(self, layer, get_output):
        # A supplied Theano variable passes through untouched.
        variable = theano.Variable('myvariable')
        assert (get_output(layer, variable) is variable)
    def test_get_output_input_is_array(self, layer, get_output):
        # A plain array is wrapped into a constant expression.
        inputs = [[1, 2, 3]]
        output = get_output(layer, inputs)
        assert numpy.all((output.eval() == inputs))
    def test_get_output_input_is_a_mapping(self, layer, get_output):
        # A {layer: expression} mapping substitutes the given expression.
        inputs = {layer: theano.tensor.matrix()}
        assert (get_output(layer, inputs) is inputs[layer])
|
class TestGetOutput_Layer():
    """Tests for get_output on a chain of single-input layers, using Mocks
    to verify exactly which get_output_for calls are made (and not made)."""
    @pytest.fixture
    def get_output(self):
        from lasagne.layers.helper import get_output
        return get_output
    # A mocked three-layer chain l1 (input) -> l2 -> l3.
    @pytest.fixture
    def layers(self):
        from lasagne.layers.base import Layer
        from lasagne.layers.input import InputLayer
        l1 = Mock(InputLayer((None,)), output_shape=(None,))
        l2 = Mock(Layer(l1), output_shape=(None,))
        l2.input_layer = l1
        l3 = Mock(Layer(l2), output_shape=(None,))
        l3.input_layer = l2
        return (l1, l2, l3)
    def test_get_output_without_arguments(self, layers, get_output):
        # The chain is evaluated from the input variable upward.
        (l1, l2, l3) = layers
        output = get_output(l3)
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with(l2.get_output_for.return_value)
        l2.get_output_for.assert_called_with(l1.input_var)
    def test_get_output_with_single_argument(self, layers, get_output):
        # A single input expression replaces the input layer's variable;
        # extra kwargs are forwarded to every get_output_for call.
        (l1, l2, l3) = layers
        (inputs, kwarg) = (theano.tensor.matrix(), object())
        output = get_output(l3, inputs, kwarg=kwarg)
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with(l2.get_output_for.return_value, kwarg=kwarg)
        l2.get_output_for.assert_called_with(inputs, kwarg=kwarg)
    def test_get_output_input_is_a_mapping(self, layers, get_output):
        # Mapping for the top layer short-circuits all computation below.
        (l1, l2, l3) = layers
        p = PropertyMock()
        type(l1).input_var = p
        inputs = {l3: theano.tensor.matrix()}
        assert (get_output(l3, inputs) is inputs[l3])
        assert (l3.get_output_for.call_count == 0)
        assert (l2.get_output_for.call_count == 0)
        assert (p.call_count == 0)
    def test_get_output_input_is_a_mapping_no_key(self, layers, get_output):
        # An empty mapping behaves like no inputs at all.
        (l1, l2, l3) = layers
        output = get_output(l3, {})
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with(l2.get_output_for.return_value)
        l2.get_output_for.assert_called_with(l1.input_var)
    def test_get_output_input_is_a_mapping_to_array(self, layers, get_output):
        # Arrays in the mapping are wrapped as constants; lower layers and
        # the input variable are never touched.
        (l1, l2, l3) = layers
        p = PropertyMock()
        type(l1).input_var = p
        inputs = {l3: [[1, 2, 3]]}
        output = get_output(l3, inputs)
        assert numpy.all((output.eval() == inputs[l3]))
        assert (l3.get_output_for.call_count == 0)
        assert (l2.get_output_for.call_count == 0)
        assert (p.call_count == 0)
    def test_get_output_input_is_a_mapping_for_layer(self, layers, get_output):
        # A mapping for an intermediate layer cuts off everything below it.
        (l1, l2, l3) = layers
        p = PropertyMock()
        type(l1).input_var = p
        (input_expr, kwarg) = (theano.tensor.matrix(), object())
        inputs = {l2: input_expr}
        output = get_output(l3, inputs, kwarg=kwarg)
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with(input_expr, kwarg=kwarg)
        assert (l2.get_output_for.call_count == 0)
        assert (p.call_count == 0)
    def test_get_output_input_is_a_mapping_for_input_layer(self, layers, get_output):
        # A mapping for the input layer substitutes its variable only.
        (l1, l2, l3) = layers
        p = PropertyMock()
        type(l1).input_var = p
        (input_expr, kwarg) = (theano.tensor.matrix(), object())
        inputs = {l1: input_expr}
        output = get_output(l3, inputs, kwarg=kwarg)
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with(l2.get_output_for.return_value, kwarg=kwarg)
        l2.get_output_for.assert_called_with(input_expr, kwarg=kwarg)
        assert (p.call_count == 0)
    # Layer constructed from a bare shape tuple (no input layer attached).
    @pytest.fixture
    def layer_from_shape(self):
        from lasagne.layers.base import Layer
        return Layer((None, 20))
    def test_layer_from_shape_invalid_get_output(self, layer_from_shape, get_output):
        # Without a mapping entry for the shape-only layer, there is no
        # expression to feed it, so get_output must fail.
        layer = layer_from_shape
        with pytest.raises(ValueError):
            get_output(layer)
        with pytest.raises(ValueError):
            get_output(layer, [1, 2])
        with pytest.raises(ValueError):
            get_output(layer, {Mock(): [1, 2]})
    def test_layer_from_shape_valid_get_output(self, layer_from_shape, get_output):
        # Providing the expression via the mapping (keyed by the layer or
        # by None for the free input) makes the call valid.
        layer = layer_from_shape
        inputs = {layer: theano.tensor.matrix()}
        assert (get_output(layer, inputs) is inputs[layer])
        inputs = {None: theano.tensor.matrix()}
        layer.get_output_for = Mock()
        assert (get_output(layer, inputs) is layer.get_output_for.return_value)
        layer.get_output_for.assert_called_with(inputs[None])
|
class TestGetOutput_MergeLayer():
    """Tests for get_output on a MergeLayer with two mocked input branches,
    verifying call propagation and short-circuiting via input mappings."""
    @pytest.fixture
    def get_output(self):
        from lasagne.layers.helper import get_output
        return get_output
    # Two parallel branches l1[i] (input) -> l2[i], merged into l3.
    @pytest.fixture
    def layers(self):
        from lasagne.layers.base import Layer, MergeLayer
        from lasagne.layers.input import InputLayer
        l1 = [Mock(InputLayer((None,)), output_shape=(None,)), Mock(InputLayer((None,)), output_shape=(None,))]
        l2 = [Mock(Layer(l1[0]), output_shape=(None,)), Mock(Layer(l1[1]), output_shape=(None,))]
        l2[0].input_layer = l1[0]
        l2[1].input_layer = l1[1]
        l3 = Mock(MergeLayer(l2))
        l3.input_layers = l2
        return (l1, l2, l3)
    def test_get_output_without_arguments(self, layers, get_output):
        # Both branches are evaluated and handed to the merge layer as a list.
        (l1, l2, l3) = layers
        output = get_output(l3)
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with([l2[0].get_output_for.return_value, l2[1].get_output_for.return_value])
        l2[0].get_output_for.assert_called_with(l1[0].input_var)
        l2[1].get_output_for.assert_called_with(l1[1].input_var)
    def test_get_output_with_single_argument_fails(self, layers, get_output):
        # A single expression is ambiguous for two input layers.
        (l1, l2, l3) = layers
        (inputs, kwarg) = (theano.tensor.matrix(), object())
        with pytest.raises(ValueError):
            output = get_output(l3, inputs, kwarg=kwarg)
    def test_get_output_input_is_a_mapping(self, layers, get_output):
        # Mapping for the merge layer short-circuits both branches.
        (l1, l2, l3) = layers
        p = PropertyMock()
        type(l1[0]).input_var = p
        type(l1[1]).input_var = p
        inputs = {l3: theano.tensor.matrix()}
        assert (get_output(l3, inputs) is inputs[l3])
        assert (l3.get_output_for.call_count == 0)
        assert (l2[0].get_output_for.call_count == 0)
        assert (l2[1].get_output_for.call_count == 0)
        assert (p.call_count == 0)
    def test_get_output_input_is_a_mapping_no_key(self, layers, get_output):
        # An empty mapping behaves like no inputs at all.
        (l1, l2, l3) = layers
        output = get_output(l3, {})
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with([l2[0].get_output_for.return_value, l2[1].get_output_for.return_value])
        l2[0].get_output_for.assert_called_with(l1[0].input_var)
        l2[1].get_output_for.assert_called_with(l1[1].input_var)
    def test_get_output_input_is_a_mapping_to_array(self, layers, get_output):
        # Arrays in the mapping are wrapped as constants; neither branch
        # nor any input variable is evaluated.
        (l1, l2, l3) = layers
        p = PropertyMock()
        type(l1[0]).input_var = p
        type(l1[1]).input_var = p
        inputs = {l3: [[1, 2, 3]]}
        output = get_output(l3, inputs)
        assert numpy.all((output.eval() == inputs[l3]))
        assert (l3.get_output_for.call_count == 0)
        assert (l2[0].get_output_for.call_count == 0)
        assert (l2[1].get_output_for.call_count == 0)
        assert (p.call_count == 0)
    def test_get_output_input_is_a_mapping_for_layer(self, layers, get_output):
        # Mapping for one branch replaces it; the other is still evaluated.
        (l1, l2, l3) = layers
        p = PropertyMock()
        type(l1[0]).input_var = p
        (input_expr, kwarg) = (theano.tensor.matrix(), object())
        inputs = {l2[0]: input_expr}
        output = get_output(l3, inputs, kwarg=kwarg)
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with([input_expr, l2[1].get_output_for.return_value], kwarg=kwarg)
        l2[1].get_output_for.assert_called_with(l1[1].input_var, kwarg=kwarg)
        assert (l2[0].get_output_for.call_count == 0)
        assert (p.call_count == 0)
    def test_get_output_input_is_a_mapping_for_input_layer(self, layers, get_output):
        # Mapping for one input layer substitutes its variable; both
        # branches are still evaluated.
        (l1, l2, l3) = layers
        p = PropertyMock()
        type(l1[0]).input_var = p
        (input_expr, kwarg) = (theano.tensor.matrix(), object())
        inputs = {l1[0]: input_expr}
        output = get_output(l3, inputs, kwarg=kwarg)
        assert (output is l3.get_output_for.return_value)
        l3.get_output_for.assert_called_with([l2[0].get_output_for.return_value, l2[1].get_output_for.return_value], kwarg=kwarg)
        l2[0].get_output_for.assert_called_with(input_expr, kwarg=kwarg)
        l2[1].get_output_for.assert_called_with(l1[1].input_var, kwarg=kwarg)
        assert (p.call_count == 0)
    # MergeLayer with one shape-tuple input and one real input layer.
    @pytest.fixture
    def layer_from_shape(self):
        from lasagne.layers.input import InputLayer
        from lasagne.layers.base import MergeLayer
        return MergeLayer([(None, 20), Mock(InputLayer((None,)), output_shape=(None,))])
    def test_layer_from_shape_invalid_get_output(self, layer_from_shape, get_output):
        # The shape-only input has no expression, so these calls must fail.
        layer = layer_from_shape
        with pytest.raises(ValueError):
            get_output(layer)
        with pytest.raises(ValueError):
            get_output(layer, [1, 2])
        with pytest.raises(ValueError):
            get_output(layer, {layer.input_layers[1]: [1, 2]})
    def test_layer_from_shape_valid_get_output(self, layer_from_shape, get_output):
        # Supplying the missing expression via the mapping (keyed by the
        # layer itself or by None) makes the call valid.
        layer = layer_from_shape
        inputs = {layer: theano.tensor.matrix()}
        assert (get_output(layer, inputs) is inputs[layer])
        inputs = {None: theano.tensor.matrix()}
        layer.get_output_for = Mock()
        assert (get_output(layer, inputs) is layer.get_output_for.return_value)
        layer.get_output_for.assert_called_with([inputs[None], layer.input_layers[1].input_var])
|
class TestGetOutputShape_InputLayer():
    """Tests for get_output_shape applied directly to an InputLayer."""
    @pytest.fixture
    def get_output_shape(self):
        from lasagne.layers.helper import get_output_shape
        return get_output_shape
    @pytest.fixture
    def layer(self):
        from lasagne.layers.input import InputLayer
        return InputLayer((3, 2))
    def test_get_output_shape_without_arguments(self, layer, get_output_shape):
        # Defaults to the layer's declared shape.
        assert (get_output_shape(layer) == (3, 2))
    def test_get_output_shape_input_is_tuple(self, layer, get_output_shape):
        # An explicit shape tuple passes straight through.
        shp = (4, 5, 6)
        assert (get_output_shape(layer, shp) == shp)
    def test_get_output_shape_input_is_a_mapping(self, layer, get_output_shape):
        # A {layer: shape} mapping substitutes the given shape.
        input_shapes = {layer: (4, 5, 6)}
        assert (get_output_shape(layer, input_shapes) == input_shapes[layer])
|
class TestGetOutputShape_Layer():
@pytest.fixture
def get_output_shape(self):
from lasagne.layers.helper import get_output_shape
return get_output_shape
@pytest.fixture
def layers(self):
from lasagne.layers.base import Layer
from lasagne.layers.input import InputLayer
l1 = Mock(InputLayer((None,)), output_shape=(None,))
l2 = Mock(Layer(l1), output_shape=(None,))
l2.input_layer = l1
l3 = Mock(Layer(l2), output_shape=(None,))
l3.input_layer = l2
return (l1, l2, l3)
def test_get_output_shape_without_arguments(self, layers, get_output_shape):
(l1, l2, l3) = layers
output_shape = get_output_shape(l3)
assert (output_shape is l3.output_shape)
assert (l3.get_output_shape_for.call_count == 0)
assert (l2.get_output_shape_for.call_count == 0)
def test_get_output_shape_with_single_argument(self, layers, get_output_shape):
(l1, l2, l3) = layers
shp = (3, 4, 5)
output_shape = get_output_shape(l3, shp)
assert (output_shape is l3.get_output_shape_for.return_value)
l3.get_output_shape_for.assert_called_with(l2.get_output_shape_for.return_value)
l2.get_output_shape_for.assert_called_with(shp)
def test_get_output_shape_input_is_a_mapping(self, layers, get_output_shape):
(l1, l2, l3) = layers
input_shapes = {l3: (4, 5, 6)}
assert (get_output_shape(l3, input_shapes) is input_shapes[l3])
assert (l3.get_output_shape_for.call_count == 0)
assert (l2.get_output_shape_for.call_count == 0)
def test_get_output_shape_input_is_a_mapping_no_key(self, layers, get_output_shape):
(l1, l2, l3) = layers
output_shape = get_output_shape(l3, {})
assert (output_shape is l3.output_shape)
assert (l3.get_output_shape_for.call_count == 0)
assert (l2.get_output_shape_for.call_count == 0)
def test_get_output_shape_input_is_a_mapping_for_layer(self, layers, get_output_shape):
(l1, l2, l3) = layers
shp = (4, 5, 6)
input_shapes = {l2: shp}
output_shape = get_output_shape(l3, input_shapes)
assert (output_shape is l3.get_output_shape_for.return_value)
l3.get_output_shape_for.assert_called_with(shp)
assert (l2.get_output_shape_for.call_count == 0)
def test_get_output_shape_input_is_a_mapping_for_input_layer(self, layers, get_output_shape):
(l1, l2, l3) = layers
shp = (4, 5, 6)
input_shapes = {l1: shp}
output_shape = get_output_shape(l3, input_shapes)
assert (output_shape is l3.get_output_shape_for.return_value)
l3.get_output_shape_for.assert_called_with(l2.get_output_shape_for.return_value)
l2.get_output_shape_for.assert_called_with(shp)
    @pytest.fixture
    def layer_from_shape(self):
        # A Layer constructed directly from a shape tuple instead of an
        # incoming layer instance.
        from lasagne.layers.base import Layer
        return Layer((None, 20))
    def test_layer_from_shape(self, layer_from_shape, get_output_shape):
        """For a shape-constructed layer, the mapping may key either on the
        layer itself or on None (its placeholder input)."""
        layer = layer_from_shape
        input_shapes = {layer: (4, 5, 6)}
        assert (get_output_shape(layer, input_shapes) is input_shapes[layer])
        input_shapes = {None: (4, 5, 6)}
        layer.get_output_shape_for = Mock()
        assert (get_output_shape(layer, input_shapes) is layer.get_output_shape_for.return_value)
        layer.get_output_shape_for.assert_called_with(input_shapes[None])
|
class TestGetOutputShape_MergeLayer():
    """Tests for get_output_shape() on a MergeLayer with two input branches."""
    @pytest.fixture
    def get_output_shape(self):
        from lasagne.layers.helper import get_output_shape
        return get_output_shape
    @pytest.fixture
    def layers(self):
        # Two parallel branches (input -> layer) merged by a MergeLayer:
        # l1[i] -> l2[i] -> l3
        from lasagne.layers.base import Layer, MergeLayer
        from lasagne.layers.input import InputLayer
        l1 = [Mock(InputLayer((None,)), output_shape=(None,)), Mock(InputLayer((None,)), output_shape=(None,))]
        l2 = [Mock(Layer(l1[0]), output_shape=(None,)), Mock(Layer(l1[1]), output_shape=(None,))]
        l2[0].input_layer = l1[0]
        l2[1].input_layer = l1[1]
        l3 = Mock(MergeLayer(l2))
        l3.input_layers = l2
        return (l1, l2, l3)
    def test_get_output_shape_without_arguments(self, layers, get_output_shape):
        """Without shapes, the cached output_shape is returned and nothing
        is recomputed."""
        (l1, l2, l3) = layers
        output_shape = get_output_shape(l3)
        assert (output_shape is l3.output_shape)
        assert (l3.get_output_shape_for.call_count == 0)
        assert (l2[0].get_output_shape_for.call_count == 0)
        assert (l2[1].get_output_shape_for.call_count == 0)
    def test_get_output_shape_with_single_argument_fails(self, layers, get_output_shape):
        """A single shape tuple is ambiguous for a multi-input layer."""
        (l1, l2, l3) = layers
        shp = (4, 5, 6)
        with pytest.raises(ValueError):
            output_shape = get_output_shape(l3, shp)
    def test_get_output_shape_input_is_a_mapping(self, layers, get_output_shape):
        """A mapping entry for the merge layer itself short-circuits all
        computation."""
        (l1, l2, l3) = layers
        input_shapes = {l3: (4, 5, 6)}
        assert (get_output_shape(l3, input_shapes) is input_shapes[l3])
        assert (l3.get_output_shape_for.call_count == 0)
        assert (l2[0].get_output_shape_for.call_count == 0)
        assert (l2[1].get_output_shape_for.call_count == 0)
    def test_get_output_shape_input_is_a_mapping_no_key(self, layers, get_output_shape):
        """An empty mapping behaves like passing no shapes at all."""
        (l1, l2, l3) = layers
        output_shape = get_output_shape(l3, {})
        assert (output_shape is l3.output_shape)
        assert (l3.get_output_shape_for.call_count == 0)
        assert (l2[0].get_output_shape_for.call_count == 0)
        assert (l2[1].get_output_shape_for.call_count == 0)
    def test_get_output_shape_input_is_a_mapping_for_layer(self, layers, get_output_shape):
        """Overriding one branch's layer shape: that branch is not
        recomputed, the other one is."""
        (l1, l2, l3) = layers
        shp = (4, 5, 6)
        input_shapes = {l2[0]: shp}
        output = get_output_shape(l3, input_shapes)
        assert (output is l3.get_output_shape_for.return_value)
        l3.get_output_shape_for.assert_called_with([shp, l2[1].get_output_shape_for.return_value])
        l2[1].get_output_shape_for.assert_called_with(l1[1].shape)
        assert (l2[0].get_output_shape_for.call_count == 0)
    def test_get_output_shape_input_is_a_mapping_for_input_layer(self, layers, get_output_shape):
        """Overriding one input layer's shape: both branches are computed,
        one seeded with the override."""
        (l1, l2, l3) = layers
        shp = (4, 5, 6)
        input_shapes = {l1[0]: shp}
        output = get_output_shape(l3, input_shapes)
        assert (output is l3.get_output_shape_for.return_value)
        l3.get_output_shape_for.assert_called_with([l2[0].get_output_shape_for.return_value, l2[1].get_output_shape_for.return_value])
        l2[0].get_output_shape_for.assert_called_with(shp)
        l2[1].get_output_shape_for.assert_called_with(l1[1].shape)
    @pytest.fixture
    def layer_from_shape(self):
        # A MergeLayer whose first input is given only as a shape tuple.
        from lasagne.layers.input import InputLayer
        from lasagne.layers.base import MergeLayer
        return MergeLayer([(None, 20), Mock(InputLayer((None,)), output_shape=(None,))])
    def test_layer_from_shape_valid_get_output_shape(self, layer_from_shape, get_output_shape):
        """A shape-only input slot is addressed by the key None in the
        shapes mapping."""
        layer = layer_from_shape
        input_shapes = {layer: (4, 5, 6)}
        assert (get_output_shape(layer, input_shapes) is input_shapes[layer])
        input_shapes = {None: (4, 5, 6)}
        layer.get_output_shape_for = Mock()
        assert (get_output_shape(layer, input_shapes) is layer.get_output_shape_for.return_value)
        layer.get_output_shape_for.assert_called_with([input_shapes[None], layer.input_layers[1].shape])
|
class TestGetAllParams():
    """Tests for lasagne.layers.get_all_params."""
    def test_get_all_params(self):
        """get_all_params must return the concatenated parameters of every
        layer below the given one, honoring tag filters."""
        from lasagne.layers import InputLayer, DenseLayer, get_all_params
        net_in = InputLayer((10, 20))
        hidden = DenseLayer(net_in, 30)
        net_out = DenseLayer(hidden, 40)
        # Same check for no filter and for both regularizable settings.
        for tags in ({}, {'regularizable': False}, {'regularizable': True}):
            expected = hidden.get_params(**tags) + net_out.get_params(**tags)
            assert get_all_params(net_out, **tags) == expected
|
class TestCountParams():
    """Tests for lasagne.layers.count_params."""
    def test_get_all_params(self):
        """count_params must tally weights and biases of all layers below,
        honoring the regularizable tag filter."""
        from lasagne.layers import InputLayer, DenseLayer, count_params
        net_in = InputLayer((10, 20))
        hidden = DenseLayer(net_in, 30)
        net_out = DenseLayer(hidden, 40)
        # Expected totals for a 20->30->40 dense stack.
        n_weights = 20 * 30 + 30 * 40
        n_biases = 30 + 40
        assert count_params(net_out, regularizable=True) == n_weights
        assert count_params(net_out, regularizable=False) == n_biases
        assert count_params(net_out) == n_weights + n_biases
|
class TestGetAllParamValues():
    """Tests for lasagne.layers.get_all_param_values."""
    def test_get_all_param_values(self):
        """Two dense layers contribute two parameter arrays each (W and b)."""
        from lasagne.layers import InputLayer, DenseLayer, get_all_param_values
        network = InputLayer((10, 20))
        network = DenseLayer(network, 30)
        network = DenseLayer(network, 40)
        assert len(get_all_param_values(network)) == 4
|
class TestSetAllParamValues():
    """Tests for lasagne.layers.set_all_param_values."""
    def test_set_all_param_values(self):
        """Values are written back in get_all_params order; a wrong count
        or a wrong array shape raises ValueError."""
        from lasagne.layers import InputLayer, DenseLayer, set_all_param_values
        from lasagne.utils import floatX
        l1 = InputLayer((10, 20))
        l2 = DenseLayer(l1, 30)
        l3 = DenseLayer(l2, 40)
        a2 = floatX(numpy.random.normal(0, 1, (20, 30)))
        b2 = floatX(numpy.random.normal(0, 1, (30,)))
        a3 = floatX(numpy.random.normal(0, 1, (30, 40)))
        b3 = floatX(numpy.random.normal(0, 1, (40,)))
        set_all_param_values(l3, [a2, b2, a3, b3])
        assert numpy.allclose(l3.W.get_value(), a3)
        assert numpy.allclose(l3.b.get_value(), b3)
        assert numpy.allclose(l2.W.get_value(), a2)
        assert numpy.allclose(l2.b.get_value(), b2)
        # Too few values supplied:
        with pytest.raises(ValueError):
            set_all_param_values(l3, [a3, b3, a2])
        # Value with a mismatched shape:
        with pytest.raises(ValueError):
            a3_bad = floatX(numpy.random.normal(0, 1, (25, 40)))
            set_all_param_values(l3, [a2, b2, a3_bad, b3])
|
class TestInputLayer():
    """Tests for lasagne.layers.input.InputLayer."""
    @pytest.fixture
    def layer(self):
        from lasagne.layers.input import InputLayer
        return InputLayer((3, 2))
    def test_input_var(self, layer):
        """The auto-created input variable matches the shape's ndim."""
        assert (layer.input_var.ndim == 2)
    def test_shape(self, layer):
        assert (layer.shape == (3, 2))
    def test_input_var_name(self, layer):
        """An unnamed layer names its variable simply 'input'."""
        assert (layer.input_var.name == 'input')
    def test_named_layer_input_var_name(self):
        """A named layer prefixes the variable name with the layer name."""
        from lasagne.layers.input import InputLayer
        layer = InputLayer((3, 2), name='foo')
        assert (layer.input_var.name == 'foo.input')
    def test_get_params(self, layer):
        """An input layer owns no trainable parameters."""
        assert (layer.get_params() == [])
    def test_bad_shape_fails(self):
        """A supplied input_var whose ndim disagrees with the shape raises."""
        from lasagne.layers.input import InputLayer
        input_var = theano.tensor.tensor4()
        with pytest.raises(ValueError):
            InputLayer((3, 2), input_var)
    def test_nonpositive_input_dims_raises_value_error(self):
        """Negative or zero dimensions are rejected; None and positive are
        accepted."""
        from lasagne.layers import InputLayer
        with pytest.raises(ValueError):
            InputLayer(shape=(None, (- 1), (- 1)))
        with pytest.raises(ValueError):
            InputLayer(shape=(None, 0, 0))
        InputLayer(shape=(None, 1, 1))
|
class TestDropoutLayer():
    """Tests for lasagne.layers.noise.DropoutLayer."""
    @pytest.fixture(params=[(100, 100), (None, 100)])
    def input_layer(self, request):
        # Exercised both with a fully specified and a batch-free shape.
        from lasagne.layers.input import InputLayer
        return InputLayer(request.param)
    @pytest.fixture
    def layer(self, input_layer):
        from lasagne.layers.noise import DropoutLayer
        return DropoutLayer(input_layer)
    @pytest.fixture
    def layer_no_rescale(self, input_layer):
        from lasagne.layers.noise import DropoutLayer
        return DropoutLayer(input_layer, rescale=False)
    @pytest.fixture
    def layer_p_02(self, input_layer):
        from lasagne.layers.noise import DropoutLayer
        return DropoutLayer(input_layer, p=0.2)
    def test_get_output_for_non_deterministic(self, layer):
        """Default dropout: surviving units are scaled to 2.0 (values are
        only 0.0 or 2.0) and the mean stays near 1."""
        input = theano.shared(numpy.ones((100, 100)))
        result = layer.get_output_for(input)
        result_eval = result.eval()
        assert (0.9 < result_eval.mean() < 1.1)
        assert (numpy.unique(result_eval) == [0.0, 2.0]).all()
    def test_get_output_for_deterministic(self, layer):
        """deterministic=True disables dropout entirely."""
        input = theano.shared(numpy.ones((100, 100)))
        result = layer.get_output_for(input, deterministic=True)
        result_eval = result.eval()
        assert (result_eval == input.get_value()).all()
    def test_get_output_for_no_rescale(self, layer_no_rescale):
        """rescale=False keeps surviving units at their original value."""
        input = theano.shared(numpy.ones((100, 100)))
        result = layer_no_rescale.get_output_for(input)
        result_eval = result.eval()
        assert (0.4 < result_eval.mean() < 0.6)
        assert (numpy.unique(result_eval) == [0.0, 1.0]).all()
    def test_get_output_for_p_02(self, layer_p_02):
        """p=0.2 rescales survivors to 1.25 and keeps the mean near 1."""
        input = theano.shared(numpy.ones((100, 100)))
        result = layer_p_02.get_output_for(input)
        result_eval = result.eval()
        assert (0.9 < result_eval.mean() < 1.1)
        assert (numpy.round(numpy.unique(result_eval), 2) == [0.0, 1.25]).all()
    def test_specified_rng(self, input_layer):
        """Seeding lasagne's global RNG makes dropout reproducible."""
        from lasagne.layers.noise import DropoutLayer
        input = theano.shared(numpy.ones((100, 100)))
        seed = 123456789
        rng = get_rng()
        set_rng(RandomState(seed))
        result = DropoutLayer(input_layer).get_output_for(input)
        result_eval1 = result.eval()
        set_rng(RandomState(seed))
        result = DropoutLayer(input_layer).get_output_for(input)
        result_eval2 = result.eval()
        set_rng(rng)  # restore the global RNG for other tests
        assert numpy.allclose(result_eval1, result_eval2)
|
class TestGaussianNoiseLayer():
    """Tests for lasagne.layers.noise.GaussianNoiseLayer."""
    @pytest.fixture
    def layer(self):
        from lasagne.layers.noise import GaussianNoiseLayer
        return GaussianNoiseLayer(Mock(output_shape=(None,)))
    @pytest.fixture(params=[(100, 100), (None, 100)])
    def input_layer(self, request):
        from lasagne.layers.input import InputLayer
        return InputLayer(request.param)
    def test_get_output_for_non_deterministic(self, layer):
        """Noise perturbs every element but leaves the mean near the
        input's value of 1."""
        input = theano.shared(numpy.ones((100, 100)))
        result = layer.get_output_for(input, deterministic=False)
        result_eval = result.eval()
        assert (result_eval != input.eval()).all()
        assert (result_eval.mean() != 1.0)
        assert (numpy.round(result_eval.mean()) == 1.0)
    def test_get_output_for_deterministic(self, layer):
        """deterministic=True passes the input through untouched."""
        input = theano.shared(numpy.ones((3, 3)))
        result = layer.get_output_for(input, deterministic=True)
        result_eval = result.eval()
        assert (result_eval == input.eval()).all()
    def test_specified_rng(self, input_layer):
        """Seeding lasagne's global RNG makes the noise reproducible."""
        from lasagne.layers.noise import GaussianNoiseLayer
        input = theano.shared(numpy.ones((100, 100)))
        seed = 123456789
        rng = get_rng()
        set_rng(RandomState(seed))
        result = GaussianNoiseLayer(input_layer).get_output_for(input)
        result_eval1 = result.eval()
        set_rng(RandomState(seed))
        result = GaussianNoiseLayer(input_layer).get_output_for(input)
        result_eval2 = result.eval()
        set_rng(rng)  # restore the global RNG for other tests
        assert numpy.allclose(result_eval1, result_eval2)
|
def _example_modules():
    """Return the module names (file stems) of all example scripts.

    Uses the pattern '*.py' so only Python source files are collected;
    the previous pattern '*py' would also match any file whose name
    merely ends in the letters "py".
    """
    paths = glob(join(EXAMPLES_DIR, '*.py'))
    return [splitext(basename(path))[0] for path in paths]
|
@pytest.fixture
def example(request):
    # Make the example scripts importable for the duration of a test,
    # removing the path entry again when the test finishes.
    sys.path.insert(0, EXAMPLES_DIR)
    request.addfinalizer((lambda : sys.path.remove(EXAMPLES_DIR)))
|
@pytest.mark.slow
@pytest.mark.parametrize('module_name', _example_modules())
def test_example(example, module_name):
    """Smoke-test each example script by running its main() for one epoch.

    Examples whose imports fail for a known environmental reason (no GPU,
    missing pylearn2, cuDNN unavailable) are skipped instead of failed.
    """
    try:
        main = getattr(import_module(module_name), 'main')
    except ImportError as e:
        skip_exceptions = ['requires a GPU', 'pylearn2', 'dnn not available']
        if any((text in str(e)) for text in skip_exceptions):
            # pytest.skip expects a string message; passing the exception
            # object itself is rejected by modern pytest versions.
            pytest.skip(str(e))
        else:
            raise
    main(num_epochs=1)
|
def test_initializer_sample():
    """The abstract Initializer base class must not implement sample()."""
    from lasagne.init import Initializer
    with pytest.raises(NotImplementedError):
        Initializer().sample((100, 100))
|
def test_shape():
    """Every concrete initializer returns a sample of the requested shape.

    NOTE(review): only one extra level of subclassing is walked — deeper
    subclass hierarchies would not be covered by this loop.
    """
    from lasagne.init import Initializer
    for klass in Initializer.__subclasses__():
        if len(klass.__subclasses__()):
            for sub_klass in klass.__subclasses__():
                assert (sub_klass().sample((12, 23)).shape == (12, 23))
        else:
            assert (klass().sample((12, 23)).shape == (12, 23))
|
def test_specified_rng():
    """Seeding lasagne's global RNG makes every initializer reproducible."""
    from lasagne.random import get_rng, set_rng
    from lasagne.init import Normal, Uniform, GlorotNormal, GlorotUniform, Sparse, Orthogonal
    from numpy.random import RandomState
    from numpy import allclose
    seed = 123456789
    rng = get_rng()  # remember the original RNG so it can be restored
    for init_class in [Normal, Uniform, GlorotNormal, GlorotUniform, Sparse, Orthogonal]:
        set_rng(RandomState(seed))
        sample1 = init_class().sample((100, 100))
        set_rng(RandomState(seed))
        sample2 = init_class().sample((100, 100))
        set_rng(rng)
        assert allclose(sample1, sample2), 'random initialization was inconsistent for {}'.format(init_class.__name__)
|
def test_normal():
    """Normal() draws approximately zero-mean samples with std 0.01."""
    from lasagne.init import Normal
    sample = Normal().sample((100, 200))
    assert ((- 0.001) < sample.mean() < 0.001)
    assert (0.009 < sample.std() < 0.011)
|
def test_uniform_range_as_number():
    """A scalar range r means uniform samples in [-r, r]."""
    from lasagne.init import Uniform
    sample = Uniform(1.0).sample((300, 400))
    assert (sample.shape == (300, 400))
    assert ((- 1.0) <= sample.min() < (- 0.9))
    assert (0.9 < sample.max() <= 1.0)
|
def test_uniform_range_as_range():
    """A (low, high) tuple means uniform samples in [low, high]."""
    from lasagne.init import Uniform
    sample = Uniform((0.0, 1.0)).sample((300, 400))
    assert (sample.shape == (300, 400))
    assert (0.0 <= sample.min() < 0.1)
    assert (0.9 < sample.max() <= 1.0)
|
def test_uniform_mean_std():
    """Uniform can alternatively be parameterized by mean and std."""
    from lasagne.init import Uniform
    sample = Uniform(std=1.0, mean=5.0).sample((300, 400))
    assert (4.9 < sample.mean() < 5.1)
    assert (0.9 < sample.std() < 1.1)
|
def test_glorot_normal():
    """GlorotNormal on a square 100x100 matrix gives std near 0.1."""
    from lasagne.init import GlorotNormal
    sample = GlorotNormal().sample((100, 100))
    assert ((- 0.01) < sample.mean() < 0.01)
    assert (0.09 < sample.std() < 0.11)
|
def test_glorot_1d_not_supported():
    """Glorot initialization needs at least fan-in and fan-out dims."""
    from lasagne.init import GlorotNormal
    with pytest.raises(RuntimeError):
        GlorotNormal().sample((100,))
|
def test_glorot_normal_receptive_field():
    """Trailing dims are treated as a receptive field in the fan counts."""
    from lasagne.init import GlorotNormal
    sample = GlorotNormal().sample((50, 50, 2))
    assert ((- 0.01) < sample.mean() < 0.01)
    assert (0.09 < sample.std() < 0.11)
|
def test_glorot_normal_gain():
    """A numeric gain scales the std linearly; gain='relu' applies the
    sqrt(2) correction."""
    from lasagne.init import GlorotNormal
    sample = GlorotNormal(gain=10.0).sample((100, 100))
    assert ((- 0.1) < sample.mean() < 0.1)
    assert (0.9 < sample.std() < 1.1)
    sample = GlorotNormal(gain='relu').sample((100, 100))
    assert ((- 0.01) < sample.mean() < 0.01)
    assert (0.132 < sample.std() < 0.152)
|
def test_glorot_normal_c01b():
    """c01b=True handles the channel-first-last (c01b) 4D weight layout."""
    from lasagne.init import GlorotNormal
    sample = GlorotNormal(c01b=True).sample((25, 2, 2, 25))
    assert ((- 0.01) < sample.mean() < 0.01)
    assert (0.09 < sample.std() < 0.11)
|
def test_glorot_normal_c01b_4d_only():
    """The c01b layout is only defined for 4D shapes."""
    from lasagne.init import GlorotNormal
    with pytest.raises(RuntimeError):
        GlorotNormal(c01b=True).sample((100,))
    with pytest.raises(RuntimeError):
        GlorotNormal(c01b=True).sample((100, 100))
    with pytest.raises(RuntimeError):
        GlorotNormal(c01b=True).sample((100, 100, 100))
|
def test_glorot_uniform():
    """GlorotUniform samples fill the expected [-0.1, 0.1] range."""
    from lasagne.init import GlorotUniform
    sample = GlorotUniform().sample((150, 450))
    assert ((- 0.1) <= sample.min() < (- 0.09))
    assert (0.09 < sample.max() <= 0.1)
|
def test_glorot_uniform_receptive_field():
    """Trailing dims are treated as a receptive field in the fan counts."""
    from lasagne.init import GlorotUniform
    sample = GlorotUniform().sample((150, 150, 2))
    assert ((- 0.1) <= sample.min() < (- 0.09))
    assert (0.09 < sample.max() <= 0.1)
|
def test_glorot_uniform_gain():
    """A numeric gain scales the range; gain='relu' widens the std by
    sqrt(2)."""
    from lasagne.init import GlorotUniform
    sample = GlorotUniform(gain=10.0).sample((150, 450))
    assert ((- 1.0) <= sample.min() < (- 0.9))
    assert (0.9 < sample.max() <= 1.0)
    sample = GlorotUniform(gain='relu').sample((100, 100))
    assert ((- 0.01) < sample.mean() < 0.01)
    assert (0.132 < sample.std() < 0.152)
|
def test_glorot_uniform_c01b():
    """c01b=True handles the channel-first-last (c01b) 4D weight layout."""
    from lasagne.init import GlorotUniform
    sample = GlorotUniform(c01b=True).sample((75, 2, 2, 75))
    assert ((- 0.1) <= sample.min() < (- 0.09))
    assert (0.09 < sample.max() <= 0.1)
|
def test_glorot_uniform_c01b_4d_only():
    """The c01b layout is only defined for 4D shapes."""
    from lasagne.init import GlorotUniform
    with pytest.raises(RuntimeError):
        GlorotUniform(c01b=True).sample((100,))
    with pytest.raises(RuntimeError):
        GlorotUniform(c01b=True).sample((100, 100))
    with pytest.raises(RuntimeError):
        GlorotUniform(c01b=True).sample((100, 100, 100))
|
def test_he_normal():
    """HeNormal on a square 100x100 matrix gives std near 0.1."""
    from lasagne.init import HeNormal
    sample = HeNormal().sample((100, 100))
    assert ((- 0.01) < sample.mean() < 0.01)
    assert (0.09 < sample.std() < 0.11)
|
def test_he_1d_not_supported():
    """He initialization needs at least fan-in and fan-out dims."""
    from lasagne.init import HeNormal
    with pytest.raises(RuntimeError):
        HeNormal().sample((100,))
|
def test_he_normal_receptive_field():
    """Trailing dims are treated as a receptive field in the fan counts."""
    from lasagne.init import HeNormal
    sample = HeNormal().sample((50, 50, 2))
    assert ((- 0.01) < sample.mean() < 0.01)
    assert (0.09 < sample.std() < 0.11)
|
def test_he_normal_gain():
    """A numeric gain scales the std; gain='relu' applies the sqrt(2)
    correction."""
    from lasagne.init import HeNormal
    sample = HeNormal(gain=10.0).sample((100, 100))
    assert ((- 0.1) < sample.mean() < 0.1)
    assert (0.9 < sample.std() < 1.1)
    sample = HeNormal(gain='relu').sample((200, 50))
    assert ((- 0.1) < sample.mean() < 0.1)
    assert (0.07 < sample.std() < 0.12)
|
def test_he_normal_c01b():
    """c01b=True handles the channel-first-last (c01b) 4D weight layout."""
    from lasagne.init import HeNormal
    sample = HeNormal(c01b=True).sample((25, 2, 2, 25))
    assert ((- 0.01) < sample.mean() < 0.01)
    assert (0.09 < sample.std() < 0.11)
|
def test_he_normal_c01b_4d_only():
    """The c01b layout is only defined for 4D shapes."""
    from lasagne.init import HeNormal
    with pytest.raises(RuntimeError):
        HeNormal(c01b=True).sample((100,))
    with pytest.raises(RuntimeError):
        HeNormal(c01b=True).sample((100, 100))
    with pytest.raises(RuntimeError):
        HeNormal(c01b=True).sample((100, 100, 100))
|
def test_he_uniform():
    """HeUniform samples fill the expected [-0.1, 0.1] range."""
    from lasagne.init import HeUniform
    sample = HeUniform().sample((300, 200))
    assert ((- 0.1) <= sample.min() < (- 0.09))
    assert (0.09 < sample.max() <= 0.1)
|
def test_he_uniform_receptive_field():
    """Trailing dims are treated as a receptive field in the fan counts."""
    from lasagne.init import HeUniform
    sample = HeUniform().sample((150, 150, 2))
    assert ((- 0.1) <= sample.min() < (- 0.09))
    assert (0.09 < sample.max() <= 0.1)
|
def test_he_uniform_gain():
    """A numeric gain scales the range; gain='relu' widens the std."""
    from lasagne.init import HeUniform
    sample = HeUniform(gain=10.0).sample((300, 200))
    assert ((- 1.0) <= sample.min() < (- 0.9))
    assert (0.9 < sample.max() <= 1.0)
    sample = HeUniform(gain='relu').sample((100, 100))
    assert ((- 0.1) < sample.mean() < 0.1)
    assert (0.1 < sample.std() < 0.2)
|
def test_he_uniform_c01b():
    """c01b=True handles the channel-first-last (c01b) 4D weight layout."""
    from lasagne.init import HeUniform
    sample = HeUniform(c01b=True).sample((75, 2, 2, 75))
    assert ((- 0.1) <= sample.min() < (- 0.09))
    assert (0.09 < sample.max() <= 0.1)
|
def test_he_uniform_c01b_4d_only():
    """The c01b layout is only defined for 4D shapes."""
    from lasagne.init import HeUniform
    with pytest.raises(RuntimeError):
        HeUniform(c01b=True).sample((100,))
    with pytest.raises(RuntimeError):
        HeUniform(c01b=True).sample((100, 100))
    with pytest.raises(RuntimeError):
        HeUniform(c01b=True).sample((100, 100, 100))
|
def test_constant():
    """Constant(v) fills the whole array with v."""
    from lasagne.init import Constant
    sample = Constant(1.0).sample((10, 20))
    assert (sample == 1.0).all()
|
def test_sparse():
    """Sparse(sparsity=s) leaves exactly a fraction s of entries nonzero."""
    from lasagne.init import Sparse
    sample = Sparse(sparsity=0.1).sample((10, 20))
    assert ((sample != 0.0).sum() == ((10 * 20) * 0.1))
|
def test_sparse_1d_not_supported():
    """Sparse initialization requires a 2D (or higher) shape."""
    from lasagne.init import Sparse
    with pytest.raises(RuntimeError):
        Sparse().sample((100,))
|
def test_orthogonal():
    """Orthogonal samples have orthonormal rows (wide) or columns (tall)."""
    import numpy as np
    from lasagne.init import Orthogonal
    sample = Orthogonal().sample((100, 200))
    assert np.allclose(np.dot(sample, sample.T), np.eye(100), atol=1e-06)
    sample = Orthogonal().sample((200, 100))
    assert np.allclose(np.dot(sample.T, sample), np.eye(100), atol=1e-06)
|
def test_orthogonal_gain():
    """The gain multiplies the sample, so W W^T == gain^2 * I; the string
    'relu' is shorthand for gain sqrt(2)."""
    import numpy as np
    from lasagne.init import Orthogonal
    gain = 2
    sample = Orthogonal(gain).sample((100, 200))
    assert np.allclose(np.dot(sample, sample.T), ((gain * gain) * np.eye(100)), atol=1e-06)
    gain = np.sqrt(2)
    sample = Orthogonal('relu').sample((100, 200))
    assert np.allclose(np.dot(sample, sample.T), ((gain * gain) * np.eye(100)), atol=1e-06)
|
def test_orthogonal_multi():
    """For >2D shapes, orthogonality holds after flattening trailing dims."""
    import numpy as np
    from lasagne.init import Orthogonal
    sample = Orthogonal().sample((100, 50, 80))
    sample = sample.reshape(100, (50 * 80))
    assert np.allclose(np.dot(sample, sample.T), np.eye(100), atol=1e-06)
|
def test_orthogonal_1d_not_supported():
    """Orthogonal initialization requires a 2D (or higher) shape."""
    from lasagne.init import Orthogonal
    with pytest.raises(RuntimeError):
        Orthogonal().sample((100,))
|
class TestNonlinearities(object):
    """Checks lasagne.nonlinearities against NumPy reference versions.

    Each method below is the NumPy reference implementation for the
    lasagne nonlinearity of the same name (or a parameterized variant).
    """
    def linear(self, x):
        return x
    def rectify(self, x):
        return (x * (x > 0))
    def leaky_rectify(self, x):
        return ((x * (x > 0)) + ((0.01 * x) * (x < 0)))
    def leaky_rectify_0(self, x):
        # LeakyRectify with leakiness=0 degenerates to plain rectify.
        return self.rectify(x)
    def elu(self, x, alpha=1):
        return np.where((x > 0), x, (alpha * (np.exp(x) - 1)))
    def softplus(self, x):
        return np.log1p(np.exp(x))
    def sigmoid(self, x):
        return (1 / (1 + np.exp((- x))))
    def tanh(self, x):
        return np.tanh(x)
    def scaled_tanh(self, x):
        # ScaledTanH with default scales is plain tanh.
        return np.tanh(x)
    def scaled_tanh_p(self, x):
        # ScaledTanH(scale_in=0.5, scale_out=2.27).
        return (2.27 * np.tanh((0.5 * x)))
    def softmax(self, x):
        # Row-wise softmax (last axis), via the transpose broadcasting trick.
        return (np.exp(x).T / np.exp(x).sum((- 1))).T
    @pytest.mark.parametrize('nonlinearity', ['linear', 'rectify', 'leaky_rectify', 'elu', 'sigmoid', 'tanh', 'scaled_tanh', 'softmax', 'leaky_rectify_0', 'scaled_tanh_p', 'softplus'])
    def test_nonlinearity(self, nonlinearity):
        """Evaluate the theano nonlinearity on random data and compare it
        with the NumPy reference implementation above."""
        import lasagne.nonlinearities
        if (nonlinearity == 'leaky_rectify_0'):
            from lasagne.nonlinearities import LeakyRectify
            theano_nonlinearity = LeakyRectify(leakiness=0)
        elif (nonlinearity == 'scaled_tanh'):
            from lasagne.nonlinearities import ScaledTanH
            theano_nonlinearity = ScaledTanH()
        elif (nonlinearity == 'scaled_tanh_p'):
            from lasagne.nonlinearities import ScaledTanH
            theano_nonlinearity = ScaledTanH(scale_in=0.5, scale_out=2.27)
        else:
            theano_nonlinearity = getattr(lasagne.nonlinearities, nonlinearity)
        np_nonlinearity = getattr(self, nonlinearity)
        X = T.matrix()
        X0 = lasagne.utils.floatX(np.random.uniform((- 3), 3, (10, 10)))
        theano_result = theano_nonlinearity(X).eval({X: X0})
        np_result = np_nonlinearity(X0)
        assert np.allclose(theano_result, np_result)
|
class TestRegularizationPenalties(object):
    """Checks lasagne.regularization penalties against NumPy references."""
    def l1(self, x):
        return np.abs(x).sum()
    def l2(self, x):
        # Note: lasagne's l2 is the sum of squares, without a square root.
        return (x ** 2).sum()
    @pytest.mark.parametrize('penalty', ['l1', 'l2'])
    def test_penalty(self, penalty):
        """Evaluate the theano penalty on random data and compare with the
        NumPy reference of the same name."""
        np_penalty = getattr(self, penalty)
        theano_penalty = getattr(lasagne.regularization, penalty)
        X = T.matrix()
        X0 = lasagne.utils.floatX(np.random.uniform((- 3), 3, (10, 10)))
        theano_result = theano_penalty(X).eval({X: X0})
        np_result = np_penalty(X0)
        assert np.allclose(theano_result, np_result)
|
class TestRegularizationHelpers(object):
    """Tests for the penalty-application helpers in lasagne.regularization."""
    @pytest.fixture
    def layers(self):
        l_1 = lasagne.layers.InputLayer((10,))
        l_2 = lasagne.layers.DenseLayer(l_1, num_units=20)
        l_3 = lasagne.layers.DenseLayer(l_2, num_units=30)
        return (l_1, l_2, l_3)
    def test_apply_penalty(self):
        """apply_penalty: 0 for no tensors, the penalty itself for one
        tensor, and the sum of penalties for a list."""
        from lasagne.regularization import apply_penalty, l2
        A = T.vector()
        B = T.matrix()
        assert (apply_penalty([], l2) == 0)
        assert equal_computations([apply_penalty(A, l2)], [l2(A)])
        assert equal_computations([apply_penalty([A, B], l2)], [sum([l2(A), l2(B)])])
    def test_regularize_layer_params_single_layer(self, layers):
        """Only the layer's regularizable params (W, not b) are penalized."""
        from lasagne.regularization import regularize_layer_params
        (l_1, l_2, l_3) = layers
        penalty = Mock(return_value=0)
        loss = regularize_layer_params(l_2, penalty)
        assert (penalty.call_count == 1)
        penalty.assert_any_call(l_2.W)
    def test_regularize_layer_params_multiple_layers(self, layers):
        """A list of layers penalizes each layer's W once (input layer has
        no params)."""
        from lasagne.regularization import regularize_layer_params
        (l_1, l_2, l_3) = layers
        penalty = Mock(return_value=0)
        loss = regularize_layer_params([l_1, l_2, l_3], penalty)
        assert (penalty.call_count == 2)
        penalty.assert_any_call(l_2.W)
        penalty.assert_any_call(l_3.W)
    def test_regularize_network_params(self, layers):
        """regularize_network_params walks the whole network below l_3."""
        from lasagne.regularization import regularize_network_params
        (l_1, l_2, l_3) = layers
        penalty = Mock(return_value=0)
        loss = regularize_network_params(l_3, penalty)
        assert (penalty.call_count == 2)
        penalty.assert_any_call(l_2.W)
        penalty.assert_any_call(l_3.W)
    def test_regularize_layer_params_weighted(self, layers):
        """Per-layer coefficients weight each layer's penalty in the sum."""
        from lasagne.regularization import regularize_layer_params_weighted
        from lasagne.regularization import apply_penalty, l2
        (l_1, l_2, l_3) = layers
        layers = OrderedDict()
        layers[l_2] = 0.1
        layers[l_3] = 0.5
        loss = regularize_layer_params_weighted(layers, lasagne.regularization.l2)
        assert equal_computations([loss], [sum([(0.1 * apply_penalty([l_2.W], l2)), (0.5 * apply_penalty([l_3.W], l2))])])
|
class TestUpdateFunctions(object):
    """Compare lasagne.updates trajectories against precomputed references.

    torch_values holds, per method, the parameter values after 10 update
    steps — presumably generated with the Torch implementations of the
    same optimizers (TODO confirm provenance).
    """
    torch_values = {'sgd': [0.81707280688755, 0.6648326359915, 0.5386151140949], 'momentum': [0.6848486952183, 0.44803321781003, 0.27431190123502], 'nesterov_momentum': [0.67466543592725, 0.44108468114241, 0.2769002108997], 'adagrad': [0.55373120047759, 0.55373120041518, 0.55373120039438], 'rmsprop': [0.83205403985348, 0.83205322744821, 0.83205295664444], 'adadelta': [0.95453237704725, 0.9545237471374, 0.95452214847397], 'adam': [0.90034972009036, 0.90034967993061, 0.90034966654402], 'adamax': [0.90211749000754, 0.90211748762402, 0.90211748682951]}
    def f(self, X):
        # Simple convex objective: weighted sum of squares.
        return ([0.1, 0.2, 0.3] * (X ** 2)).sum()
    @pytest.mark.parametrize('method, kwargs', [['sgd', {'learning_rate': 0.1}], ['momentum', {'learning_rate': 0.1, 'momentum': 0.5}], ['nesterov_momentum', {'learning_rate': 0.1, 'momentum': 0.5}], ['adagrad', {'learning_rate': 0.1}], ['rmsprop', {'learning_rate': 0.01}], ['adadelta', {}], ['adam', {'learning_rate': 0.01}], ['adamax', {'learning_rate': 0.01}]])
    def test_updates(self, method, kwargs):
        """Run 10 steps on two identical parameters; both must follow the
        same trajectory and match the reference values."""
        A = theano.shared(lasagne.utils.floatX([1, 1, 1]))
        B = theano.shared(lasagne.utils.floatX([1, 1, 1]))
        update_func = getattr(lasagne.updates, method)
        updates = update_func((self.f(A) + self.f(B)), [A, B], **kwargs)
        do_update = theano.function([], [], updates=updates)
        for _ in range(10):
            do_update()
        assert np.allclose(A.get_value(), B.get_value())
        assert np.allclose(A.get_value(), self.torch_values[method])
|
def test_get_or_compute_grads():
    """A ready-made gradient list is passed through unchanged; a length
    mismatch or a non-shared parameter raises ValueError."""
    from lasagne.updates import get_or_compute_grads
    A = theano.shared(1)
    B = theano.shared(1)
    loss = (A + B)
    grads = get_or_compute_grads(loss, [A, B])
    assert (get_or_compute_grads(grads, [A, B]) is grads)
    with pytest.raises(ValueError):
        get_or_compute_grads(grads, [A])
    C = T.scalar()
    with pytest.raises(ValueError):
        get_or_compute_grads((A + C), [A, C])
|
@pytest.mark.parametrize('ndim', [2, 3])
def test_norm_constraint(ndim):
    """After one constrained update, no norm exceeds max_norm (within
    PCT_TOLERANCE) and the dtype is preserved."""
    import numpy as np
    import theano
    from lasagne.updates import norm_constraint
    from lasagne.utils import compute_norms
    max_norm = 0.01
    param = theano.shared(np.random.randn(*((25,) * ndim)).astype(theano.config.floatX))
    update = norm_constraint(param, max_norm)
    apply_update = theano.function([], [], updates=[(param, update)])
    apply_update()
    assert (param.dtype == update.dtype)
    assert (np.max(compute_norms(param.get_value())) <= (max_norm * (1 + PCT_TOLERANCE)))
|
def test_norm_constraint_norm_axes():
    """Explicit norm_axes constrain the norms computed over those axes."""
    import numpy as np
    import theano
    from lasagne.updates import norm_constraint
    from lasagne.utils import compute_norms
    max_norm = 0.01
    norm_axes = (0, 2)
    param = theano.shared(np.random.randn(10, 20, 30, 40).astype(theano.config.floatX))
    update = norm_constraint(param, max_norm, norm_axes=norm_axes)
    apply_update = theano.function([], [], updates=[(param, update)])
    apply_update()
    assert (param.dtype == update.dtype)
    assert (np.max(compute_norms(param.get_value(), norm_axes=norm_axes)) <= (max_norm * (1 + PCT_TOLERANCE)))
|
def test_norm_constraint_dim6_raises():
    """Tensors with more than 5 dimensions are rejected."""
    import numpy as np
    import theano
    from lasagne.updates import norm_constraint
    max_norm = 0.01
    param = theano.shared(np.random.randn(1, 2, 3, 4, 5, 6).astype(theano.config.floatX))
    with pytest.raises(ValueError) as excinfo:
        norm_constraint(param, max_norm)
    assert ('Unsupported tensor dimensionality' in str(excinfo.value))
|
def test_total_norm_constraint():
    """total_norm_constraint rescales the joint norm of several tensors to
    the threshold; return_norm additionally yields the original norm."""
    import numpy as np
    import theano
    import theano.tensor as T
    from lasagne.updates import total_norm_constraint
    x1 = T.scalar()
    x2 = T.matrix()
    threshold = 5.0
    tensors1 = total_norm_constraint([x1, x2], threshold, return_norm=False)
    (tensors2, norm) = total_norm_constraint([x1, x2], threshold, return_norm=True)
    f1 = theano.function([x1, x2], [tensors1[0], tensors1[1]])
    f2 = theano.function([x1, x2], [tensors2[0], tensors2[1], norm])
    # Ten values 0..9: the last is the scalar, the first nine the matrix.
    x_test = np.arange((1 + 9), dtype='float32')
    x1_test = x_test[(- 1)]
    x2_test = x_test[:9].reshape((3, 3))
    (x1_out1, x2_out1) = f1(x1_test, x2_test)
    (x1_out2, x2_out2, norm) = f2(x1_test, x2_test)
    # Both call forms must produce identical clipped tensors.
    np.testing.assert_array_almost_equal(x1_out1, x1_out2)
    np.testing.assert_array_almost_equal(x2_out1, x2_out2)
    x_out = ([float(x1_out1)] + list(x2_out1.flatten()))
    np.testing.assert_array_almost_equal(np.linalg.norm(x_test), norm)
    np.testing.assert_array_almost_equal(np.linalg.norm(x_out), threshold)
|
def test_shared_empty():
    """shared_empty(dim) creates a zero-filled shared variable of that rank."""
    from lasagne.utils import shared_empty
    X = shared_empty(3)
    assert (np.zeros((1, 1, 1)) == X.eval()).all()
|
def test_as_theano_expression_fails():
    """Objects that cannot be converted to a theano expression raise."""
    from lasagne.utils import as_theano_expression
    with pytest.raises(TypeError):
        as_theano_expression({})
|
def test_collect_shared_vars():
    """collect_shared_vars finds unique shared variables in expressions
    and lists, preserving order and ignoring non-shared inputs."""
    from lasagne.utils import collect_shared_vars as collect
    (x, y, z) = (theano.shared(0, name=n) for n in 'xyz')
    assert (collect([x, y, z]) == [x, y, z])
    assert (collect([x, y, x, y, y, z]) == [x, y, z])  # duplicates dropped
    assert (collect(((x + y) + z)) == [x, y, z])
    assert (collect((x + (y + z))) == [x, y, z])
    assert (collect([(x ** 2), ((y * z) * np.ones(10)), (x + T.matrix())]) == [x, y, z])
    assert (collect([(T.matrix() + T.matrix()), T.log(T.matrix())]) == [])
|
def test_one_hot():
    """one_hot matches the NumPy scatter-based one-hot construction."""
    from lasagne.utils import one_hot
    a = np.random.randint(0, 10, 20)
    b = np.zeros((a.size, (a.max() + 1)))
    b[(np.arange(a.size), a)] = 1
    result = one_hot(a).eval()
    assert (result == b).all()
|
def test_as_tuple_fails():
    """as_tuple rejects a wrong element count and a wrong element type."""
    from lasagne.utils import as_tuple
    with pytest.raises(ValueError):
        as_tuple([1, 2, 3], 4)
    with pytest.raises(TypeError):
        as_tuple('asdf', 4, int)
|
def test_compute_norms():
    """Default compute_norms yields one norm per first-axis slice."""
    from lasagne.utils import compute_norms
    array = np.random.randn(10, 20, 30, 40).astype(theano.config.floatX)
    norms = compute_norms(array)
    assert (array.dtype == norms.dtype)
    assert (norms.shape[0] == array.shape[0])
|
def test_compute_norms_axes():
    """Explicit norm_axes collapse those axes, leaving the others."""
    from lasagne.utils import compute_norms
    array = np.random.randn(10, 20, 30, 40).astype(theano.config.floatX)
    norms = compute_norms(array, norm_axes=(0, 2))
    assert (array.dtype == norms.dtype)
    assert (norms.shape == (array.shape[1], array.shape[3]))
|
def test_compute_norms_ndim6_raises():
    """Arrays with more than 5 dimensions are rejected."""
    from lasagne.utils import compute_norms
    array = np.random.randn(1, 2, 3, 4, 5, 6).astype(theano.config.floatX)
    with pytest.raises(ValueError) as excinfo:
        compute_norms(array)
    assert ('Unsupported tensor dimensionality' in str(excinfo.value))
|
def test_create_param_bad_callable_raises():
    """A callable spec must return an array of the requested shape."""
    from lasagne.utils import create_param
    with pytest.raises(RuntimeError):
        create_param((lambda x: {}), (1, 2, 3))  # not an array at all
    with pytest.raises(RuntimeError):
        create_param((lambda x: np.array(1)), (1, 2, 3))  # wrong shape
|
def test_create_param_bad_spec_raises():
    """A spec that is neither array, shared variable nor callable raises."""
    from lasagne.utils import create_param
    with pytest.raises(RuntimeError):
        create_param({}, (1, 2, 3))
|
def test_create_param_accepts_iterable_shape():
    """The shape may be any iterable, not just a tuple."""
    from lasagne.utils import create_param
    factory = np.empty
    create_param(factory, [2, 3])
    create_param(factory, (x for x in [2, 3]))  # generator works too
|
def test_create_param_numpy_bad_shape_raises_error():
    """A numpy array spec whose shape disagrees with the request raises."""
    from lasagne.utils import create_param
    param = np.array([[1, 2, 3], [4, 5, 6]])
    with pytest.raises(RuntimeError):
        create_param(param, (3, 2))
|
def test_create_param_numpy_returns_shared():
    """A numpy array spec is wrapped in a theano shared variable that
    holds the original values.

    (The value assertion was previously duplicated verbatim; the
    redundant copy has been removed.)
    """
    from lasagne.utils import create_param
    param = np.array([[1, 2, 3], [4, 5, 6]])
    result = create_param(param, (2, 3))
    assert (result.get_value() == param).all()
    # Same concrete type as what theano.shared itself would produce.
    assert isinstance(result, type(theano.shared(param)))
|
def test_create_param_shared_returns_same():
    """An existing shared variable is returned unchanged (same object)."""
    from lasagne.utils import create_param
    param = theano.shared(np.array([[1, 2, 3], [4, 5, 6]]))
    result = create_param(param, (2, 3))
    assert (result is param)
|
def test_create_param_shared_bad_ndim_raises_error():
    """A shared variable with the wrong number of dimensions raises."""
    from lasagne.utils import create_param
    param = theano.shared(np.array([[1, 2, 3], [4, 5, 6]]))
    with pytest.raises(RuntimeError):
        create_param(param, (2, 3, 4))
|
def test_create_param_callable_returns_return_value():
    """A callable spec is called with the shape and its result is wrapped."""
    from lasagne.utils import create_param
    array = np.array([[1, 2, 3], [4, 5, 6]])
    factory = Mock()
    factory.return_value = array
    result = create_param(factory, (2, 3))
    assert (result.get_value() == array).all()
    factory.assert_called_with((2, 3))
|
def test_nonpositive_dims_raises_value_error():
    """Negative or zero dimensions are rejected; positive ones accepted."""
    from lasagne.utils import create_param
    neg_shape = ((- 1), (- 1))
    zero_shape = (0, 0)
    pos_shape = (1, 1)
    spec = np.empty
    with pytest.raises(ValueError):
        create_param(spec, neg_shape)
    with pytest.raises(ValueError):
        create_param(spec, zero_shape)
    create_param(spec, pos_shape)
|
def test_unroll_scan():
    """unroll_scan mimics theano.scan for single- and multi-output steps.

    First case: repeated multiplication (powers of a).
    Second case: two outputs updated in lockstep (multiply and divide).
    """
    from lasagne.utils import unroll_scan
    k = 2
    a = T.scalar('a')
    result = unroll_scan(fn=(lambda step, prior_result, a: (prior_result * a)), sequences=T.arange(k), outputs_info=[1.0], non_sequences=[a], n_steps=k)
    final_result = result[(- 1)]
    power = theano.function(inputs=[a], outputs=final_result)
    assert np.all((power(10) == [10, 100]))
    b = T.scalar('b')
    def mul_div(step, previous_mul, previous_div, mul, div):
        # One output multiplies by `mul`, the other divides by `div`.
        return ((previous_mul * mul), (previous_div / div))
    result = unroll_scan(fn=mul_div, sequences=T.arange(k), outputs_info=[1.0, 1.0], non_sequences=[a, b], n_steps=k)
    power = theano.function(inputs=[a, b], outputs=result)
    assert np.allclose(power(10, 10), [[10, 100], [0.1, 0.01]])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.