code stringlengths 17 6.64M |
|---|
def test_glorot_normal():
    """Glorot-normal samples are roughly zero-mean with std near 0.1."""
    from lasagne.init import GlorotNormal
    w = GlorotNormal().sample((100, 100))
    assert -0.01 < w.mean() < 0.01
    assert 0.09 < w.std() < 0.11
|
def test_glorot_1d_not_supported():
    """Glorot init cannot derive fan-in/fan-out from a 1d shape."""
    from lasagne.init import GlorotNormal
    init = GlorotNormal()
    with pytest.raises(RuntimeError):
        init.sample((100,))
|
def test_glorot_normal_receptive_field():
    """A trailing receptive-field axis keeps the same sample statistics."""
    from lasagne.init import GlorotNormal
    w = GlorotNormal().sample((50, 50, 2))
    assert -0.01 < w.mean() < 0.01
    assert 0.09 < w.std() < 0.11
|
def test_glorot_normal_gain():
    """A numeric gain scales the std linearly; gain='relu' maps to sqrt(2)."""
    from lasagne.init import GlorotNormal
    w = GlorotNormal(gain=10.0).sample((100, 100))
    assert -0.1 < w.mean() < 0.1
    assert 0.9 < w.std() < 1.1
    w = GlorotNormal(gain='relu').sample((100, 100))
    assert -0.01 < w.mean() < 0.01
    assert 0.132 < w.std() < 0.152
|
def test_glorot_normal_c01b():
    """c01b layout: stats match the standard layout on the equivalent shape."""
    from lasagne.init import GlorotNormal
    w = GlorotNormal(c01b=True).sample((25, 2, 2, 25))
    assert -0.01 < w.mean() < 0.01
    assert 0.09 < w.std() < 0.11
|
def test_glorot_normal_c01b_4d_only():
    """c01b mode rejects every non-4d shape."""
    from lasagne.init import GlorotNormal
    init = GlorotNormal(c01b=True)
    for bad_shape in [(100,), (100, 100), (100, 100, 100)]:
        with pytest.raises(RuntimeError):
            init.sample(bad_shape)
|
def test_glorot_uniform():
    """Uniform variant: extremes of a large sample approach the +/-0.1 bounds."""
    from lasagne.init import GlorotUniform
    w = GlorotUniform().sample((150, 450))
    assert -0.1 <= w.min() < -0.09
    assert 0.09 < w.max() <= 0.1
|
def test_glorot_uniform_receptive_field():
    """A trailing receptive-field axis keeps the uniform bounds unchanged."""
    from lasagne.init import GlorotUniform
    w = GlorotUniform().sample((150, 150, 2))
    assert -0.1 <= w.min() < -0.09
    assert 0.09 < w.max() <= 0.1
|
def test_glorot_uniform_gain():
    """gain=10 widens the uniform bounds tenfold; gain='relu' means sqrt(2)."""
    from lasagne.init import GlorotUniform
    w = GlorotUniform(gain=10.0).sample((150, 450))
    assert -1.0 <= w.min() < -0.9
    assert 0.9 < w.max() <= 1.0
    w = GlorotUniform(gain='relu').sample((100, 100))
    assert -0.01 < w.mean() < 0.01
    assert 0.132 < w.std() < 0.152
|
def test_glorot_uniform_c01b():
    """c01b layout keeps the uniform +/-0.1 bounds on the equivalent shape."""
    from lasagne.init import GlorotUniform
    w = GlorotUniform(c01b=True).sample((75, 2, 2, 75))
    assert -0.1 <= w.min() < -0.09
    assert 0.09 < w.max() <= 0.1
|
def test_glorot_uniform_c01b_4d_only():
    """c01b mode rejects every non-4d shape."""
    from lasagne.init import GlorotUniform
    init = GlorotUniform(c01b=True)
    for bad_shape in [(100,), (100, 100), (100, 100, 100)]:
        with pytest.raises(RuntimeError):
            init.sample(bad_shape)
|
def test_he_normal():
    """He-normal samples: zero mean, std near 0.1 for fan_in=100."""
    from lasagne.init import HeNormal
    w = HeNormal().sample((100, 100))
    assert -0.01 < w.mean() < 0.01
    assert 0.09 < w.std() < 0.11
|
def test_he_1d_not_supported():
    """He init needs at least two dimensions to compute fan_in."""
    from lasagne.init import HeNormal
    init = HeNormal()
    with pytest.raises(RuntimeError):
        init.sample((100,))
|
def test_he_normal_receptive_field():
    """A trailing receptive-field axis keeps the same sample statistics."""
    from lasagne.init import HeNormal
    w = HeNormal().sample((50, 50, 2))
    assert -0.01 < w.mean() < 0.01
    assert 0.09 < w.std() < 0.11
|
def test_he_normal_gain():
    """A numeric gain multiplies the std; gain='relu' selects sqrt(2)."""
    from lasagne.init import HeNormal
    w = HeNormal(gain=10.0).sample((100, 100))
    assert -0.1 < w.mean() < 0.1
    assert 0.9 < w.std() < 1.1
    w = HeNormal(gain='relu').sample((200, 50))
    assert -0.1 < w.mean() < 0.1
    assert 0.07 < w.std() < 0.12
|
def test_he_normal_c01b():
    """c01b layout: stats match the standard layout on the equivalent shape."""
    from lasagne.init import HeNormal
    w = HeNormal(c01b=True).sample((25, 2, 2, 25))
    assert -0.01 < w.mean() < 0.01
    assert 0.09 < w.std() < 0.11
|
def test_he_normal_c01b_4d_only():
    """c01b mode rejects every non-4d shape."""
    from lasagne.init import HeNormal
    init = HeNormal(c01b=True)
    for bad_shape in [(100,), (100, 100), (100, 100, 100)]:
        with pytest.raises(RuntimeError):
            init.sample(bad_shape)
|
def test_he_uniform():
    """Uniform He init: sample extremes approach the +/-0.1 bounds."""
    from lasagne.init import HeUniform
    w = HeUniform().sample((300, 200))
    assert -0.1 <= w.min() < -0.09
    assert 0.09 < w.max() <= 0.1
|
def test_he_uniform_receptive_field():
    """A trailing receptive-field axis keeps the uniform bounds unchanged."""
    from lasagne.init import HeUniform
    w = HeUniform().sample((150, 150, 2))
    assert -0.1 <= w.min() < -0.09
    assert 0.09 < w.max() <= 0.1
|
def test_he_uniform_gain():
    """gain=10 widens the bounds tenfold; gain='relu' widens std accordingly."""
    from lasagne.init import HeUniform
    w = HeUniform(gain=10.0).sample((300, 200))
    assert -1.0 <= w.min() < -0.9
    assert 0.9 < w.max() <= 1.0
    w = HeUniform(gain='relu').sample((100, 100))
    assert -0.1 < w.mean() < 0.1
    assert 0.1 < w.std() < 0.2
|
def test_he_uniform_c01b():
    """c01b layout keeps the uniform +/-0.1 bounds on the equivalent shape."""
    from lasagne.init import HeUniform
    w = HeUniform(c01b=True).sample((75, 2, 2, 75))
    assert -0.1 <= w.min() < -0.09
    assert 0.09 < w.max() <= 0.1
|
def test_he_uniform_c01b_4d_only():
    """c01b mode rejects every non-4d shape."""
    from lasagne.init import HeUniform
    init = HeUniform(c01b=True)
    for bad_shape in [(100,), (100, 100), (100, 100, 100)]:
        with pytest.raises(RuntimeError):
            init.sample(bad_shape)
|
def test_constant():
    """Constant init fills the whole array with the given value."""
    from lasagne.init import Constant
    w = Constant(1.0).sample((10, 20))
    assert (w == 1.0).all()
|
def test_sparse():
    """Exactly a `sparsity` fraction of all entries should be nonzero."""
    from lasagne.init import Sparse
    w = Sparse(sparsity=0.1).sample((10, 20))
    nonzero_count = (w != 0.0).sum()
    assert nonzero_count == 10 * 20 * 0.1
|
def test_sparse_1d_not_supported():
    """Sparse init is defined for matrices only, not vectors."""
    from lasagne.init import Sparse
    init = Sparse()
    with pytest.raises(RuntimeError):
        init.sample((100,))
|
def test_orthogonal():
    """The shorter side of the sample forms an orthonormal system."""
    import numpy as np
    from lasagne.init import Orthogonal
    wide = Orthogonal().sample((100, 200))
    assert np.allclose(wide.dot(wide.T), np.eye(100), atol=1e-06)
    tall = Orthogonal().sample((200, 100))
    assert np.allclose(tall.T.dot(tall), np.eye(100), atol=1e-06)
|
def test_orthogonal_gain():
    """A gain g scales the Gram matrix by g**2; 'relu' implies g=sqrt(2)."""
    import numpy as np
    from lasagne.init import Orthogonal
    g = 2
    w = Orthogonal(g).sample((100, 200))
    assert np.allclose(w.dot(w.T), g * g * np.eye(100), atol=1e-06)
    g = np.sqrt(2)
    w = Orthogonal('relu').sample((100, 200))
    assert np.allclose(w.dot(w.T), g * g * np.eye(100), atol=1e-06)
|
def test_orthogonal_multi():
    """For >2d shapes, trailing axes are flattened before orthogonalization."""
    import numpy as np
    from lasagne.init import Orthogonal
    w = Orthogonal().sample((100, 50, 80)).reshape(100, 50 * 80)
    assert np.allclose(w.dot(w.T), np.eye(100), atol=1e-06)
|
def test_orthogonal_1d_not_supported():
    """Orthogonality is undefined for a single vector shape."""
    from lasagne.init import Orthogonal
    init = Orthogonal()
    with pytest.raises(RuntimeError):
        init.sample((100,))
|
class TestNonlinearities(object):
    """Checks each lasagne nonlinearity against a plain-numpy reference
    implementation on random input, using np.allclose."""
    # --- numpy reference implementations, one per lasagne nonlinearity ---
    def linear(self, x):
        return x
    def rectify(self, x):
        return (x * (x > 0))
    def leaky_rectify(self, x):
        return ((x * (x > 0)) + ((0.01 * x) * (x < 0)))
    def leaky_rectify_0(self, x):
        # LeakyRectify with leakiness=0 degenerates to plain rectify
        return self.rectify(x)
    def elu(self, x, alpha=1):
        return np.where((x > 0), x, (alpha * (np.exp(x) - 1)))
    def softplus(self, x):
        return np.log1p(np.exp(x))
    def sigmoid(self, x):
        return (1 / (1 + np.exp((- x))))
    def tanh(self, x):
        return np.tanh(x)
    def scaled_tanh(self, x):
        # default ScaledTanH has unit in/out scales, i.e. a plain tanh
        return np.tanh(x)
    def scaled_tanh_p(self, x):
        # reference for ScaledTanH(scale_in=0.5, scale_out=2.27)
        return (2.27 * np.tanh((0.5 * x)))
    def softmax(self, x):
        # row-wise softmax; the transpose trick broadcasts the per-row sum
        return (np.exp(x).T / np.exp(x).sum((- 1))).T
    @pytest.mark.parametrize('nonlinearity', ['linear', 'rectify', 'leaky_rectify', 'elu', 'sigmoid', 'tanh', 'scaled_tanh', 'softmax', 'leaky_rectify_0', 'scaled_tanh_p', 'softplus'])
    def test_nonlinearity(self, nonlinearity):
        # Resolve the theano-side nonlinearity: a few names need explicit
        # construction with non-default parameters, the rest are plain
        # attributes of lasagne.nonlinearities.
        import lasagne.nonlinearities
        if (nonlinearity == 'leaky_rectify_0'):
            from lasagne.nonlinearities import LeakyRectify
            theano_nonlinearity = LeakyRectify(leakiness=0)
        elif (nonlinearity == 'scaled_tanh'):
            from lasagne.nonlinearities import ScaledTanH
            theano_nonlinearity = ScaledTanH()
        elif (nonlinearity == 'scaled_tanh_p'):
            from lasagne.nonlinearities import ScaledTanH
            theano_nonlinearity = ScaledTanH(scale_in=0.5, scale_out=2.27)
        else:
            theano_nonlinearity = getattr(lasagne.nonlinearities, nonlinearity)
        np_nonlinearity = getattr(self, nonlinearity)
        # evaluate both sides on the same random 10x10 input
        X = T.matrix()
        X0 = lasagne.utils.floatX(np.random.uniform((- 3), 3, (10, 10)))
        theano_result = theano_nonlinearity(X).eval({X: X0})
        np_result = np_nonlinearity(X0)
        assert np.allclose(theano_result, np_result)
|
class TestRegularizationPenalties(object):
    """Checks lasagne's l1/l2 penalties against numpy references."""
    def l1(self, x):
        """Sum of absolute values."""
        return np.abs(x).sum()
    def l2(self, x):
        """Sum of squares (no square root, no 1/2 factor)."""
        return (x ** 2).sum()
    @pytest.mark.parametrize('penalty', ['l1', 'l2'])
    def test_penalty(self, penalty):
        reference = getattr(self, penalty)
        candidate = getattr(lasagne.regularization, penalty)
        X = T.matrix()
        X0 = lasagne.utils.floatX(np.random.uniform(-3, 3, (10, 10)))
        theano_result = candidate(X).eval({X: X0})
        assert np.allclose(theano_result, reference(X0))
|
class TestRegularizationHelpers(object):
    """Tests for apply_penalty and the regularize_* helpers."""
    @pytest.fixture
    def layers(self):
        # small 3-layer chain: input(10) -> dense(20) -> dense(30)
        l_1 = lasagne.layers.InputLayer((10,))
        l_2 = lasagne.layers.DenseLayer(l_1, num_units=20)
        l_3 = lasagne.layers.DenseLayer(l_2, num_units=30)
        return (l_1, l_2, l_3)
    def test_apply_penalty(self):
        from lasagne.regularization import apply_penalty, l2
        A = T.vector()
        B = T.matrix()
        # an empty tensor list short-circuits to 0
        assert (apply_penalty([], l2) == 0)
        # a single tensor may be passed without wrapping it in a list
        assert equal_computations([apply_penalty(A, l2)], [l2(A)])
        # multiple tensors: penalties are summed
        assert equal_computations([apply_penalty([A, B], l2)], [sum([l2(A), l2(B)])])
    def test_regularize_layer_params_single_layer(self, layers):
        from lasagne.regularization import regularize_layer_params
        (l_1, l_2, l_3) = layers
        penalty = Mock(return_value=0)
        loss = regularize_layer_params(l_2, penalty)
        # exactly one call, for l_2's weight matrix only
        assert (penalty.call_count == 1)
        penalty.assert_any_call(l_2.W)
    def test_regularize_layer_params_multiple_layers(self, layers):
        from lasagne.regularization import regularize_layer_params
        (l_1, l_2, l_3) = layers
        penalty = Mock(return_value=0)
        loss = regularize_layer_params([l_1, l_2, l_3], penalty)
        # two calls for three layers -- presumably the InputLayer contributes
        # no regularizable parameters; verify against lasagne docs if editing
        assert (penalty.call_count == 2)
        penalty.assert_any_call(l_2.W)
        penalty.assert_any_call(l_3.W)
    def test_regularize_network_params(self, layers):
        from lasagne.regularization import regularize_network_params
        (l_1, l_2, l_3) = layers
        penalty = Mock(return_value=0)
        # walking back from the output layer collects the same two weights
        loss = regularize_network_params(l_3, penalty)
        assert (penalty.call_count == 2)
        penalty.assert_any_call(l_2.W)
        penalty.assert_any_call(l_3.W)
    def test_regularize_layer_params_weighted(self, layers):
        from lasagne.regularization import regularize_layer_params_weighted
        from lasagne.regularization import apply_penalty, l2
        (l_1, l_2, l_3) = layers
        # map each layer to its penalty coefficient
        layers = OrderedDict()
        layers[l_2] = 0.1
        layers[l_3] = 0.5
        loss = regularize_layer_params_weighted(layers, lasagne.regularization.l2)
        # the weighted loss is the coefficient-scaled sum of per-layer penalties
        assert equal_computations([loss], [sum([(0.1 * apply_penalty([l_2.W], l2)), (0.5 * apply_penalty([l_3.W], l2))])])
|
class TestUpdateFunctions(object):
    """Runs each lasagne update rule for 10 steps on a fixed quadratic loss
    and compares the resulting parameters against precomputed reference
    values (named 'torch_values' -- presumably generated with the Torch
    implementations of the same rules; confirm provenance before editing)."""
    # expected value of parameter A after 10 updates, keyed by method name
    torch_values = {'sgd': [0.81707280688755, 0.6648326359915, 0.5386151140949], 'momentum': [0.6848486952183, 0.44803321781003, 0.27431190123502], 'nesterov_momentum': [0.67466543592725, 0.44108468114241, 0.2769002108997], 'adagrad': [0.55373120047759, 0.55373120041518, 0.55373120039438], 'rmsprop': [0.83205403985348, 0.83205322744821, 0.83205295664444], 'adadelta': [0.95453237704725, 0.9545237471374, 0.95452214847397], 'adam': [0.90034972009036, 0.90034967993061, 0.90034966654402], 'adamax': [0.90211749000754, 0.90211748762402, 0.90211748682951]}
    def f(self, X):
        # simple per-element weighted quadratic loss
        return ([0.1, 0.2, 0.3] * (X ** 2)).sum()
    @pytest.mark.parametrize('method, kwargs', [['sgd', {'learning_rate': 0.1}], ['momentum', {'learning_rate': 0.1, 'momentum': 0.5}], ['nesterov_momentum', {'learning_rate': 0.1, 'momentum': 0.5}], ['adagrad', {'learning_rate': 0.1}], ['rmsprop', {'learning_rate': 0.01}], ['adadelta', {}], ['adam', {'learning_rate': 0.01}], ['adamax', {'learning_rate': 0.01}]])
    def test_updates(self, method, kwargs):
        # two identical, independent parameter vectors must evolve identically
        A = theano.shared(lasagne.utils.floatX([1, 1, 1]))
        B = theano.shared(lasagne.utils.floatX([1, 1, 1]))
        update_func = getattr(lasagne.updates, method)
        updates = update_func((self.f(A) + self.f(B)), [A, B], **kwargs)
        do_update = theano.function([], [], updates=updates)
        for _ in range(10):
            do_update()
        assert np.allclose(A.get_value(), B.get_value())
        assert np.allclose(A.get_value(), self.torch_values[method])
|
def test_get_or_compute_grads():
    """Gradient lists pass through as-is; mismatched lengths and
    non-shared params raise ValueError."""
    from lasagne.updates import get_or_compute_grads
    A = theano.shared(1)
    B = theano.shared(1)
    grads = get_or_compute_grads(A + B, [A, B])
    # an explicit gradient list is returned unchanged (same object)
    assert get_or_compute_grads(grads, [A, B]) is grads
    # length mismatch between grads and params
    with pytest.raises(ValueError):
        get_or_compute_grads(grads, [A])
    # a non-shared variable among the params is rejected
    C = T.scalar()
    with pytest.raises(ValueError):
        get_or_compute_grads(A + C, [A, C])
|
@pytest.mark.parametrize('ndim', [2, 3])
def test_norm_constraint(ndim):
    """After one constrained update, all norms stay within max_norm
    (up to the global percentage tolerance)."""
    import numpy as np
    import theano
    from lasagne.updates import norm_constraint
    from lasagne.utils import compute_norms
    max_norm = 0.01
    shape = (25,) * ndim
    param = theano.shared(np.random.randn(*shape).astype(theano.config.floatX))
    update = norm_constraint(param, max_norm)
    theano.function([], [], updates=[(param, update)])()
    # the constraint must not change the dtype
    assert param.dtype == update.dtype
    assert np.max(compute_norms(param.get_value())) <= max_norm * (1 + PCT_TOLERANCE)
|
def test_norm_constraint_norm_axes():
    """Custom norm_axes: the constraint and the check use the same axes."""
    import numpy as np
    import theano
    from lasagne.updates import norm_constraint
    from lasagne.utils import compute_norms
    max_norm = 0.01
    norm_axes = (0, 2)
    param = theano.shared(np.random.randn(10, 20, 30, 40).astype(theano.config.floatX))
    update = norm_constraint(param, max_norm, norm_axes=norm_axes)
    theano.function([], [], updates=[(param, update)])()
    assert param.dtype == update.dtype
    norms = compute_norms(param.get_value(), norm_axes=norm_axes)
    assert np.max(norms) <= max_norm * (1 + PCT_TOLERANCE)
|
def test_norm_constraint_dim6_raises():
    """Tensors with six dimensions are not supported by norm_constraint."""
    import numpy as np
    import theano
    from lasagne.updates import norm_constraint
    max_norm = 0.01
    param = theano.shared(np.random.randn(1, 2, 3, 4, 5, 6).astype(theano.config.floatX))
    with pytest.raises(ValueError) as excinfo:
        norm_constraint(param, max_norm)
    assert 'Unsupported tensor dimensionality' in str(excinfo.value)
|
def test_total_norm_constraint():
    """total_norm_constraint must produce identical tensors with and
    without return_norm, and rescale the joint vector to the threshold."""
    import numpy as np
    import theano
    import theano.tensor as T
    from lasagne.updates import total_norm_constraint
    x1 = T.scalar()
    x2 = T.matrix()
    threshold = 5.0
    # build the constrained graph both ways
    tensors1 = total_norm_constraint([x1, x2], threshold, return_norm=False)
    (tensors2, norm) = total_norm_constraint([x1, x2], threshold, return_norm=True)
    f1 = theano.function([x1, x2], [tensors1[0], tensors1[1]])
    f2 = theano.function([x1, x2], [tensors2[0], tensors2[1], norm])
    # split 0..9 into a scalar (the last element) and a 3x3 matrix
    x_test = np.arange((1 + 9), dtype='float32')
    x1_test = x_test[(- 1)]
    x2_test = x_test[:9].reshape((3, 3))
    (x1_out1, x2_out1) = f1(x1_test, x2_test)
    (x1_out2, x2_out2, norm) = f2(x1_test, x2_test)
    # both call styles must return identical scaled tensors
    np.testing.assert_array_almost_equal(x1_out1, x1_out2)
    np.testing.assert_array_almost_equal(x2_out1, x2_out2)
    # flatten the outputs back into one joint vector
    x_out = ([float(x1_out1)] + list(x2_out1.flatten()))
    # the reported norm is the pre-scaling norm of all inputs combined;
    # the rescaled output's norm lands exactly on the threshold
    np.testing.assert_array_almost_equal(np.linalg.norm(x_test), norm)
    np.testing.assert_array_almost_equal(np.linalg.norm(x_out), threshold)
|
def test_shared_empty():
    """shared_empty(3) yields a zero-initialized 3d shared variable."""
    from lasagne.utils import shared_empty
    X = shared_empty(3)
    assert (X.eval() == np.zeros((1, 1, 1))).all()
|
def test_as_theano_expression_fails():
    """Objects theano cannot convert (e.g. a dict) raise TypeError."""
    from lasagne.utils import as_theano_expression
    bad_input = {}
    with pytest.raises(TypeError):
        as_theano_expression(bad_input)
|
def test_collect_shared_vars():
    """collect_shared_vars finds shared variables inside expressions,
    deduplicates them, and preserves first-seen order."""
    from lasagne.utils import collect_shared_vars as collect
    (x, y, z) = (theano.shared(0, name=n) for n in 'xyz')
    # lists pass through with order kept and duplicates dropped
    assert (collect([x, y, z]) == [x, y, z])
    assert (collect([x, y, x, y, y, z]) == [x, y, z])
    # shared vars are found regardless of expression nesting
    assert (collect(((x + y) + z)) == [x, y, z])
    assert (collect((x + (y + z))) == [x, y, z])
    # constants and plain (non-shared) tensors are ignored
    assert (collect([(x ** 2), ((y * z) * np.ones(10)), (x + T.matrix())]) == [x, y, z])
    assert (collect([(T.matrix() + T.matrix()), T.log(T.matrix())]) == [])
|
def test_one_hot():
    """one_hot matches a manually-built indicator matrix."""
    from lasagne.utils import one_hot
    labels = np.random.randint(0, 10, 20)
    expected = np.zeros((labels.size, labels.max() + 1))
    expected[np.arange(labels.size), labels] = 1
    assert (one_hot(labels).eval() == expected).all()
|
def test_as_tuple_fails():
    """as_tuple rejects wrong lengths (ValueError) and wrong element
    types (TypeError)."""
    from lasagne.utils import as_tuple
    with pytest.raises(ValueError):
        as_tuple([1, 2, 3], 4)
    with pytest.raises(TypeError):
        as_tuple('asdf', 4, int)
|
def test_compute_norms():
    """Default norms keep the first axis and preserve the dtype."""
    from lasagne.utils import compute_norms
    data = np.random.randn(10, 20, 30, 40).astype(theano.config.floatX)
    norms = compute_norms(data)
    assert norms.dtype == data.dtype
    assert norms.shape[0] == data.shape[0]
|
def test_compute_norms_axes():
    """With norm_axes=(0, 2), the remaining axes (1, 3) shape the result."""
    from lasagne.utils import compute_norms
    data = np.random.randn(10, 20, 30, 40).astype(theano.config.floatX)
    norms = compute_norms(data, norm_axes=(0, 2))
    assert norms.dtype == data.dtype
    assert norms.shape == (data.shape[1], data.shape[3])
|
def test_compute_norms_ndim6_raises():
    """Six-dimensional arrays are not supported by compute_norms."""
    from lasagne.utils import compute_norms
    data = np.random.randn(1, 2, 3, 4, 5, 6).astype(theano.config.floatX)
    with pytest.raises(ValueError) as excinfo:
        compute_norms(data)
    assert 'Unsupported tensor dimensionality' in str(excinfo.value)
|
def test_create_param_bad_callable_raises():
    """Callables returning non-arrays or wrong-shaped arrays are rejected."""
    from lasagne.utils import create_param
    for bad_factory in (lambda x: {}, lambda x: np.array(1)):
        with pytest.raises(RuntimeError):
            create_param(bad_factory, (1, 2, 3))
|
def test_create_param_bad_spec_raises():
    """A spec that is neither array, shared variable, nor callable raises."""
    from lasagne.utils import create_param
    bad_spec = {}
    with pytest.raises(RuntimeError):
        create_param(bad_spec, (1, 2, 3))
|
def test_create_param_accepts_iterable_shape():
    """Shapes may be given as any iterable (list, generator), not just tuples."""
    from lasagne.utils import create_param
    create_param(np.empty, [2, 3])
    create_param(np.empty, (x for x in [2, 3]))
|
def test_create_param_numpy_bad_shape_raises_error():
    """An array whose shape disagrees with the requested shape is rejected."""
    from lasagne.utils import create_param
    values = np.array([[1, 2, 3], [4, 5, 6]])
    with pytest.raises(RuntimeError):
        create_param(values, (3, 2))
|
def test_create_param_numpy_returns_shared():
    """create_param wraps a numpy array in a theano shared variable
    that holds the same data."""
    from lasagne.utils import create_param
    param = np.array([[1, 2, 3], [4, 5, 6]])
    result = create_param(param, (2, 3))
    # must be a shared variable of the same kind theano.shared produces
    assert isinstance(result, type(theano.shared(param)))
    # carrying the original values (the original test asserted this twice,
    # before and after the isinstance check; once is sufficient)
    assert (result.get_value() == param).all()
|
def test_create_param_shared_returns_same():
    """An existing shared variable is passed through without copying."""
    from lasagne.utils import create_param
    shared_var = theano.shared(np.array([[1, 2, 3], [4, 5, 6]]))
    assert create_param(shared_var, (2, 3)) is shared_var
|
def test_create_param_shared_bad_ndim_raises_error():
    """A shared variable with the wrong number of dimensions is rejected."""
    from lasagne.utils import create_param
    shared_var = theano.shared(np.array([[1, 2, 3], [4, 5, 6]]))
    with pytest.raises(RuntimeError):
        create_param(shared_var, (2, 3, 4))
|
def test_create_param_callable_returns_return_value():
    """A callable spec is invoked with the shape, and its return value
    becomes the parameter's contents."""
    from lasagne.utils import create_param
    values = np.array([[1, 2, 3], [4, 5, 6]])
    factory = Mock(return_value=values)
    result = create_param(factory, (2, 3))
    factory.assert_called_with((2, 3))
    assert (result.get_value() == values).all()
|
def test_nonpositive_dims_raises_value_error():
    """Zero or negative dimensions are invalid; positive ones are accepted."""
    from lasagne.utils import create_param
    spec = np.empty
    for bad_shape in ((-1, -1), (0, 0)):
        with pytest.raises(ValueError):
            create_param(spec, bad_shape)
    # a strictly positive shape must not raise
    create_param(spec, (1, 1))
|
def test_unroll_scan():
    """unroll_scan should mimic theano.scan for single and multiple outputs."""
    from lasagne.utils import unroll_scan
    k = 2
    a = T.scalar('a')
    # single output: iterated multiplication yields successive powers of a
    result = unroll_scan(fn=(lambda step, prior_result, a: (prior_result * a)), sequences=T.arange(k), outputs_info=[1.0], non_sequences=[a], n_steps=k)
    final_result = result[(- 1)]
    power = theano.function(inputs=[a], outputs=final_result)
    # after k=2 steps: [a, a^2]
    assert np.all((power(10) == [10, 100]))
    b = T.scalar('b')
    def mul_div(step, previous_mul, previous_div, mul, div):
        # two parallel recurrences: one multiplies by a, one divides by b
        return ((previous_mul * mul), (previous_div / div))
    result = unroll_scan(fn=mul_div, sequences=T.arange(k), outputs_info=[1.0, 1.0], non_sequences=[a, b], n_steps=k)
    power = theano.function(inputs=[a, b], outputs=result)
    # first output collects powers, second collects reciprocals
    assert np.allclose(power(10, 10), [[10, 100], [0.1, 0.01]])
|
def pad(x, width, val=0, batch_ndim=1):
    """
    Pad a tensor with a constant value.

    Parameters
    ----------
    x : tensor

    width : int, iterable of int, or iterable of tuple
        Padding width. If an int, pads each axis symmetrically with the same
        amount in the beginning and end. If an iterable of int, defines the
        symmetric padding width separately for each axis. If an iterable of
        tuples of two ints, defines a seperate padding width for each
        beginning and end of each axis.

    val : float
        The constant value used for padding

    batch_ndim : integer
        Dimensions before the value will not be padded.
    """
    in_shape = x.shape
    # normalize `width` to one entry per padded (non-batch) axis
    if isinstance(width, int):
        widths = [width] * (x.ndim - batch_ndim)
    else:
        widths = width
    out_shape = list(in_shape)
    indices = [slice(None)] * x.ndim
    for axis, w in enumerate(widths):
        # each entry is either a (left, right) pair or a single symmetric width
        try:
            left, right = w
        except TypeError:
            left = right = w
        dim = axis + batch_ndim
        out_shape[dim] += left + right
        # where the original tensor lands inside the padded output
        indices[dim] = slice(left, left + in_shape[dim])
    # T.zeros is cheaper than multiplying by zero, so special-case val == 0
    out = T.ones(out_shape) * val if val else T.zeros(out_shape)
    return T.set_subtensor(out[tuple(indices)], x)
|
def get_or_compute_grads(loss_or_grads, params):
    """Helper function returning a list of gradients.

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions
    params : list of shared variables
        The variables to return the gradients for

    Returns
    -------
    list of expressions
        If `loss_or_grads` is a list, it is assumed to be a list of
        gradients and returned as is, unless it does not match the length
        of `params`, in which case a `ValueError` is raised.
        Otherwise, `loss_or_grads` is assumed to be a cost expression and
        the function returns `theano.grad(loss_or_grads, params)`.

    Raises
    ------
    ValueError
        If `loss_or_grads` is a list of a different length than `params`,
        or if any element of `params` is not a shared variable (while we
        could still compute its gradient, we can never update it and want
        to fail early).
    """
    # reject non-shared params up front: they could never be updated anyway
    for p in params:
        if not isinstance(p, theano.compile.SharedVariable):
            raise ValueError('params must contain shared variables only. If it contains arbitrary parameter expressions, then lasagne.utils.collect_shared_vars() may help you.')
    if not isinstance(loss_or_grads, list):
        # a scalar loss: differentiate it w.r.t. all params
        return theano.grad(loss_or_grads, params)
    if len(loss_or_grads) != len(params):
        raise ValueError(('Got %d gradient expressions for %d parameters' % (len(loss_or_grads), len(params))))
    return loss_or_grads
|
def sgd(loss_or_grads, params, learning_rate):
    """Stochastic Gradient Descent (SGD) updates

    Generates update expressions of the form:

    * ``param := param - learning_rate * gradient``

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions
    params : list of shared variables
        The variables to generate update expressions for
    learning_rate : float or symbolic scalar
        The learning rate controlling the size of update steps

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter to its update expression
    """
    grads = get_or_compute_grads(loss_or_grads, params)
    # one plain gradient step per parameter, in params order
    return OrderedDict(
        (param, param - learning_rate * grad)
        for param, grad in zip(params, grads)
    )
|
def apply_momentum(updates, params=None, momentum=0.9):
    """Returns a modified update dictionary including momentum

    Generates update expressions of the form:

    * ``velocity := momentum * velocity + updates[param] - param``
    * ``param := param + velocity``

    Parameters
    ----------
    updates : OrderedDict
        A dictionary mapping parameters to update expressions
    params : iterable of shared variables, optional
        The variables to apply momentum to. If omitted, will apply
        momentum to all `updates.keys()`.
    momentum : float or symbolic scalar, optional
        The amount of momentum to apply. Higher momentum results in
        smoothing over more update steps. Defaults to 0.9.

    Returns
    -------
    OrderedDict
        A copy of `updates` with momentum updates for all `params`.

    Notes
    -----
    Higher momentum also results in larger update steps. To counter that,
    you can optionally scale your learning rate by `1 - momentum`.

    See Also
    --------
    momentum : Shortcut applying momentum to SGD updates
    """
    # take the key view BEFORE copying, so we never iterate the dict we mutate
    if params is None:
        params = updates.keys()
    updates = OrderedDict(updates)
    for param in params:
        # velocity accumulator matching the parameter's shape/dtype/broadcast
        value = param.get_value(borrow=True)
        velocity = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
        new_value = momentum * velocity + updates[param]
        updates[velocity] = new_value - param
        updates[param] = new_value
    return updates
|
def momentum(loss_or_grads, params, learning_rate, momentum=0.9):
    """Stochastic Gradient Descent (SGD) updates with momentum

    Generates update expressions of the form:

    * ``velocity := momentum * velocity - learning_rate * gradient``
    * ``param := param + velocity``

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions
    params : list of shared variables
        The variables to generate update expressions for
    learning_rate : float or symbolic scalar
        The learning rate controlling the size of update steps
    momentum : float or symbolic scalar, optional
        The amount of momentum to apply. Higher momentum results in
        smoothing over more update steps. Defaults to 0.9.

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter to its update expression

    Notes
    -----
    Higher momentum also results in larger update steps. To counter that,
    you can optionally scale your learning rate by `1 - momentum`.

    See Also
    --------
    apply_momentum : Generic function applying momentum to updates
    nesterov_momentum : Nesterov's variant of SGD with momentum
    """
    # plain SGD steps, then layer the momentum accumulators on top
    sgd_updates = sgd(loss_or_grads, params, learning_rate)
    return apply_momentum(sgd_updates, momentum=momentum)
|
def apply_nesterov_momentum(updates, params=None, momentum=0.9):
    """Returns a modified update dictionary including Nesterov momentum

    Generates update expressions of the form:

    * ``velocity := momentum * velocity + updates[param] - param``
    * ``param := param + momentum * velocity + updates[param] - param``

    Parameters
    ----------
    updates : OrderedDict
        A dictionary mapping parameters to update expressions
    params : iterable of shared variables, optional
        The variables to apply momentum to. If omitted, will apply
        momentum to all `updates.keys()`.
    momentum : float or symbolic scalar, optional
        The amount of momentum to apply. Higher momentum results in
        smoothing over more update steps. Defaults to 0.9.

    Returns
    -------
    OrderedDict
        A copy of `updates` with momentum updates for all `params`.

    Notes
    -----
    Higher momentum also results in larger update steps. To counter that,
    you can optionally scale your learning rate by `1 - momentum`.

    The classic formulation of Nesterov momentum (or Nesterov accelerated
    gradient) requires the gradient to be evaluated at the predicted next
    position in parameter space. Here, we use the formulation described at
    https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617,
    which allows the gradient to be evaluated at the current parameters.

    See Also
    --------
    nesterov_momentum : Shortcut applying Nesterov momentum to SGD updates
    """
    # take the key view BEFORE copying, so we never iterate the dict we mutate
    if params is None:
        params = updates.keys()
    updates = OrderedDict(updates)
    for param in params:
        # velocity accumulator matching the parameter's shape/dtype/broadcast
        value = param.get_value(borrow=True)
        velocity = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
        step = momentum * velocity + updates[param] - param
        updates[velocity] = step
        # "look ahead" by applying the momentum-scaled step on top of the update
        updates[param] = momentum * step + updates[param]
    return updates
|
def nesterov_momentum(loss_or_grads, params, learning_rate, momentum=0.9):
    """Stochastic Gradient Descent (SGD) updates with Nesterov momentum

    Generates update expressions of the form:

    * ``velocity := momentum * velocity - learning_rate * gradient``
    * ``param := param + momentum * velocity - learning_rate * gradient``

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions
    params : list of shared variables
        The variables to generate update expressions for
    learning_rate : float or symbolic scalar
        The learning rate controlling the size of update steps
    momentum : float or symbolic scalar, optional
        The amount of momentum to apply. Higher momentum results in
        smoothing over more update steps. Defaults to 0.9.

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter to its update expression

    Notes
    -----
    Higher momentum also results in larger update steps. To counter that,
    you can optionally scale your learning rate by `1 - momentum`.

    The classic formulation of Nesterov momentum (or Nesterov accelerated
    gradient) requires the gradient to be evaluated at the predicted next
    position in parameter space. Here, we use the formulation described at
    https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617,
    which allows the gradient to be evaluated at the current parameters.

    See Also
    --------
    apply_nesterov_momentum : Function applying momentum to updates
    """
    # plain SGD steps, then layer the Nesterov accumulators on top
    sgd_updates = sgd(loss_or_grads, params, learning_rate)
    return apply_nesterov_momentum(sgd_updates, momentum=momentum)
|
def adagrad(loss_or_grads, params, learning_rate=1.0, epsilon=1e-06):
    """Adagrad updates

    Scale learning rates by dividing with the square root of accumulated
    squared gradients. See [1]_ for further description.

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions
    params : list of shared variables
        The variables to generate update expressions for
    learning_rate : float or symbolic scalar
        The learning rate controlling the size of update steps
    epsilon : float or symbolic scalar
        Small value added for numerical stability

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter to its update expression

    Notes
    -----
    Using step size eta Adagrad calculates the learning rate for feature i
    at time step t as

    .. math:: \\eta_{t,i} = \\frac{\\eta}
       {\\sqrt{\\sum^t_{t^\\prime} g^2_{t^\\prime,i}+\\epsilon}} g_{t,i}

    so the learning rate is monotonically decreasing.
    Epsilon is not included in the typical formula, see [2]_.

    References
    ----------
    .. [1] Duchi, J., Hazan, E., & Singer, Y. (2011):
           Adaptive subgradient methods for online learning and stochastic
           optimization. JMLR, 12:2121-2159.

    .. [2] Chris Dyer:
           Notes on AdaGrad. http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf
    """
    grads = get_or_compute_grads(loss_or_grads, params)
    updates = OrderedDict()
    for param, grad in zip(params, grads):
        # per-parameter accumulator of squared gradients, starting at zero
        value = param.get_value(borrow=True)
        accumulator = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
        accumulator_new = accumulator + grad ** 2
        updates[accumulator] = accumulator_new
        # step size shrinks as the accumulated squared gradient grows
        updates[param] = param - learning_rate * grad / T.sqrt(accumulator_new + epsilon)
    return updates
|
def rmsprop(loss_or_grads, params, learning_rate=1.0, rho=0.9, epsilon=1e-06):
    """RMSProp updates (Tieleman & Hinton, 2012).

    Scales learning rates by dividing with the square root of a moving
    average of squared gradients, decayed with factor `rho`. A `rho` close
    to 1 decays the moving average slowly; close to 0, quickly.

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions.
    params : list of shared variables
        The variables to generate update expressions for.
    learning_rate : float or symbolic scalar
        The learning rate controlling the size of update steps.
    rho : float or symbolic scalar
        Gradient moving average decay factor.
    epsilon : float or symbolic scalar
        Small value added for numerical stability.

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter (and its moving-average
        accumulator) to its update expression.
    """
    gradients = get_or_compute_grads(loss_or_grads, params)
    updates = OrderedDict()
    for p, g in zip(params, gradients):
        p_value = p.get_value(borrow=True)
        # Exponential moving average of squared gradients.
        mean_square = theano.shared(np.zeros(p_value.shape, dtype=p_value.dtype),
                                    broadcastable=p.broadcastable)
        mean_square_new = rho * mean_square + (1 - rho) * g ** 2
        updates[mean_square] = mean_square_new
        updates[p] = p - learning_rate * g / T.sqrt(mean_square_new + epsilon)
    return updates
|
def adadelta(loss_or_grads, params, learning_rate=1.0, rho=0.95, epsilon=1e-06):
    """Adadelta updates (Zeiler, 2012).

    Scales learning rates by the ratio of accumulated step sizes to
    accumulated gradients, both maintained as exponential moving averages
    with decay factor `rho`. The paper suggests rho=0.95, epsilon=1e-6 and
    no extra learning rate (i.e. learning_rate=1.0); `epsilon` also keeps
    the very first update nonzero.

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions.
    params : list of shared variables
        The variables to generate update expressions for.
    learning_rate : float or symbolic scalar
        The learning rate controlling the size of update steps.
    rho : float or symbolic scalar
        Squared gradient moving average decay factor.
    epsilon : float or symbolic scalar
        Small value added for numerical stability.

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter (and its two accumulators) to
        its update expression.
    """
    gradients = get_or_compute_grads(loss_or_grads, params)
    updates = OrderedDict()
    for p, g in zip(params, gradients):
        p_value = p.get_value(borrow=True)
        # Moving average of squared gradients.
        grad_accum = theano.shared(np.zeros(p_value.shape, dtype=p_value.dtype),
                                   broadcastable=p.broadcastable)
        # Moving average of squared update steps.
        step_accum = theano.shared(np.zeros(p_value.shape, dtype=p_value.dtype),
                                   broadcastable=p.broadcastable)
        grad_accum_new = rho * grad_accum + (1 - rho) * g ** 2
        updates[grad_accum] = grad_accum_new
        step = g * T.sqrt(step_accum + epsilon) / T.sqrt(grad_accum_new + epsilon)
        updates[p] = p - learning_rate * step
        updates[step_accum] = rho * step_accum + (1 - rho) * step ** 2
    return updates
|
def adam(loss_or_grads, params, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08):
    """Adam updates (Kingma & Ba, 2014).

    Maintains exponential moving averages of the gradient (first moment)
    and of its square (second moment), with bias correction folded into a
    single step-size factor.

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions.
    params : list of shared variables
        The variables to generate update expressions for.
    learning_rate : float
        Learning rate.
    beta1 : float
        Exponential decay rate for the first moment estimates.
    beta2 : float
        Exponential decay rate for the second moment estimates.
    epsilon : float
        Constant for numerical stability.

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter (and each internal state
        variable, including the step counter) to its update expression.
    """
    gradients = get_or_compute_grads(loss_or_grads, params)
    step_count = theano.shared(utils.floatX(0.0))
    updates = OrderedDict()
    new_count = step_count + 1
    # Bias-corrected step size; shared across all parameters.
    alpha = (learning_rate * T.sqrt(1 - beta2 ** new_count)
             / (1 - beta1 ** new_count))
    for p, g in zip(params, gradients):
        p_value = p.get_value(borrow=True)
        moment1 = theano.shared(np.zeros(p_value.shape, dtype=p_value.dtype),
                                broadcastable=p.broadcastable)
        moment2 = theano.shared(np.zeros(p_value.shape, dtype=p_value.dtype),
                                broadcastable=p.broadcastable)
        moment1_new = beta1 * moment1 + (1 - beta1) * g
        moment2_new = beta2 * moment2 + (1 - beta2) * g ** 2
        updates[moment1] = moment1_new
        updates[moment2] = moment2_new
        updates[p] = p - alpha * moment1_new / (T.sqrt(moment2_new) + epsilon)
    updates[step_count] = new_count
    return updates
|
def adamax(loss_or_grads, params, learning_rate=0.002, beta1=0.9, beta2=0.999, epsilon=1e-08):
    """Adamax updates (Kingma & Ba, 2014) — the infinity-norm variant of Adam.

    Tracks a first-moment moving average and a decayed infinity norm of the
    gradients; only the first moment requires bias correction.

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions.
    params : list of shared variables
        The variables to generate update expressions for.
    learning_rate : float
        Learning rate.
    beta1 : float
        Exponential decay rate for the first moment estimates.
    beta2 : float
        Exponential decay rate for the weighted infinity norm estimates.
    epsilon : float
        Constant for numerical stability.

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter (and each internal state
        variable, including the step counter) to its update expression.
    """
    gradients = get_or_compute_grads(loss_or_grads, params)
    step_count = theano.shared(utils.floatX(0.0))
    updates = OrderedDict()
    new_count = step_count + 1
    # Only the first moment needs bias correction.
    alpha = learning_rate / (1 - beta1 ** new_count)
    for p, g in zip(params, gradients):
        p_value = p.get_value(borrow=True)
        moment = theano.shared(np.zeros(p_value.shape, dtype=p_value.dtype),
                               broadcastable=p.broadcastable)
        inf_norm = theano.shared(np.zeros(p_value.shape, dtype=p_value.dtype),
                                 broadcastable=p.broadcastable)
        moment_new = beta1 * moment + (1 - beta1) * g
        inf_norm_new = T.maximum(beta2 * inf_norm, abs(g))
        updates[moment] = moment_new
        updates[inf_norm] = inf_norm_new
        updates[p] = p - alpha * moment_new / (inf_norm_new + epsilon)
    updates[step_count] = new_count
    return updates
|
def norm_constraint(tensor_var, max_norm, norm_axes=None, epsilon=1e-07):
    """Max weight norm constraints and gradient clipping.

    Rescales `tensor_var` so that the norms computed over `norm_axes` stay
    below `max_norm`; vectors already within the allowed range are left
    (almost) unchanged.

    Parameters
    ----------
    tensor_var : TensorVariable
        Theano expression for update, gradient, or other quantity.
    max_norm : scalar
        Maximum allowed value of any norm in `tensor_var`.
    norm_axes : sequence (list or tuple), optional
        The axes over which to compute the norm. When not given, defaults
        to ``(0,)`` for a 2D tensor (dense layers) and to all trailing axes
        beyond axis 0 for 3D/4D/5D tensors (convolutional layers).
    epsilon : scalar, optional
        Value used to prevent numerical instability when dividing by very
        small or zero norms.

    Returns
    -------
    TensorVariable
        Input `tensor_var` with rescaling applied to weight vectors that
        violate the specified constraint.

    Raises
    ------
    ValueError
        If `tensor_var` is not 2D-5D and `norm_axes` was not specified.
    """
    ndim = tensor_var.ndim
    if norm_axes is not None:
        sum_over = tuple(norm_axes)
    elif ndim == 2:  # dense layer weights: norm per output unit
        sum_over = (0,)
    elif ndim in [3, 4, 5]:  # conv weights: norm per filter (axis 0)
        sum_over = tuple(range(1, ndim))
    else:
        # BUGFIX: the original message had no space after the period
        # ("...dimensionality {}.Must specify...").
        raise ValueError('Unsupported tensor dimensionality {}. '
                         'Must specify `norm_axes`'.format(ndim))
    dtype = np.dtype(theano.config.floatX).type
    norms = T.sqrt(T.sum(T.sqr(tensor_var), axis=sum_over, keepdims=True))
    # Norms above max_norm are clipped to max_norm; the ratio rescales only
    # the violating vectors (epsilon guards against division by ~0).
    target_norms = T.clip(norms, 0, dtype(max_norm))
    return tensor_var * (target_norms / (dtype(epsilon) + norms))
|
def total_norm_constraint(tensor_vars, max_norm, epsilon=1e-07, return_norm=False):
    """Rescale a list of tensors based on their combined norm.

    If the combined norm of `tensor_vars` exceeds `max_norm`, every tensor
    is scaled down so that the combined norm equals the threshold. Often
    used for gradient clipping when training recurrent networks.

    Parameters
    ----------
    tensor_vars : list of TensorVariables
        Tensors to be rescaled.
    max_norm : float
        Threshold value for the total norm.
    epsilon : scalar, optional
        Value used to prevent numerical instability when dividing by very
        small or zero norms.
    return_norm : bool
        If true, the total norm (prior to rescaling) is also returned.

    Returns
    -------
    list of TensorVariables, or (list, scalar) when ``return_norm=True``.
    """
    squared_sums = [T.sum(v ** 2) for v in tensor_vars]
    norm = T.sqrt(sum(squared_sums))
    cast = np.dtype(theano.config.floatX).type
    # A shared multiplier <= 1 that leaves tensors untouched when the norm
    # is within bounds (up to the epsilon guard).
    target_norm = T.clip(norm, 0, cast(max_norm))
    multiplier = target_norm / (cast(epsilon) + norm)
    scaled = [v * multiplier for v in tensor_vars]
    return (scaled, norm) if return_norm else scaled
|
def floatX(arr):
    """Convert `arr` to a numpy array of dtype ``theano.config.floatX``.

    Parameters
    ----------
    arr : array_like
        The data to be converted.

    Returns
    -------
    numpy ndarray
        `arr` as an ndarray of the configured floatX dtype; returned as-is
        when it already is one (``np.asarray`` avoids a copy in that case).
    """
    target_dtype = theano.config.floatX
    return np.asarray(arr, dtype=target_dtype)
|
def shared_empty(dim=2, dtype=None):
    """Create an "empty" Theano shared variable with `dim` dimensions.

    Parameters
    ----------
    dim : int, optional
        Number of dimensions for the variable (default 2).
    dtype : a numpy data-type, optional
        Desired dtype; defaults to Theano's ``floatX``.

    Returns
    -------
    Theano shared variable
        A shared variable holding an all-zero array of shape ``(1,) * dim``.
    """
    dtype = theano.config.floatX if dtype is None else dtype
    return theano.shared(np.zeros((1,) * dim, dtype=dtype))
|
def as_theano_expression(input):
    """Wrap `input` as a Theano expression.

    Transparently handles numpy arrays and Python scalars by wrapping them
    as Theano constants; existing Theano expressions pass through.

    Parameters
    ----------
    input : number, numpy array or Theano expression
        Value to be converted.

    Returns
    -------
    Theano expression
        `input` itself if already a Theano variable, otherwise a Theano
        constant wrapping it.

    Raises
    ------
    TypeError
        If `input` cannot be wrapped as a Theano constant.
    """
    if isinstance(input, theano.gof.Variable):
        return input
    try:
        return theano.tensor.constant(input)
    except Exception as e:
        raise TypeError('Input of type %s is not a Theano expression and cannot be wrapped as a Theano constant (original exception: %s)' % (type(input), e))
|
def collect_shared_vars(expressions):
    """Return all shared variables the given expression(s) depend on.

    Parameters
    ----------
    expressions : Theano expression or iterable of Theano expressions
        The expressions to collect shared variables from.

    Returns
    -------
    list of Theano shared variables
        All shared variables found by a graph traversal of the (reversed)
        expression list, in fixed order; expressions that are themselves
        shared variables are included.
    """
    if isinstance(expressions, theano.Variable):
        expressions = [expressions]
    graph_inputs = theano.gof.graph.inputs(reversed(expressions))
    return [var for var in graph_inputs
            if isinstance(var, theano.compile.SharedVariable)]
|
def one_hot(x, m=None):
    """One-hot representation of an integer vector.

    Parameters
    ----------
    x : integer vector
        Values in ``[0, m)`` to convert to a one-hot representation.
    m : int, optional
        Number of columns; must exceed ``max(x)``. Defaults to
        ``max(x) + 1`` (computed symbolically).

    Returns
    -------
    Theano tensor variable
        Matrix of shape ``(len(x), m)`` with a 1 at ``[i, x[i]]``.
    """
    if m is None:
        m = T.cast(T.max(x) + 1, 'int32')
    # Indexing an identity matrix by x selects the one-hot rows.
    identity = T.eye(m)
    return identity[T.cast(x, 'int32')]
|
def unique(l):
    """Return the elements of `l` without duplicates, keeping first-seen order.

    Parameters
    ----------
    l : iterable
        Input iterable to filter of duplicates.

    Returns
    -------
    list
        Elements of `l` with later duplicates dropped, original order kept.
    """
    seen = set()
    result = []
    for item in l:
        if item in seen:
            continue
        seen.add(item)
        result.append(item)
    return result
|
def as_tuple(x, N, t=None):
    """Coerce a value to a tuple of length `N` (and optionally of type `t`).

    Parameters
    ----------
    x : value or iterable
    N : integer
        Length of the desired tuple.
    t : type, optional
        Required type for all elements.

    Returns
    -------
    tuple
        ``tuple(x)`` if `x` is iterable, ``(x,) * N`` otherwise.

    Raises
    ------
    TypeError
        If `t` is given and `x` or any of its elements do not match it.
    ValueError
        If `x` is iterable but does not have exactly `N` elements.
    """
    try:
        result = tuple(x)
    except TypeError:
        # Not iterable: repeat the scalar N times.
        result = (x,) * N
    if t is not None and not all(isinstance(v, t) for v in result):
        raise TypeError('expected a single value or an iterable of {0}, got {1} instead'.format(t.__name__, x))
    if len(result) != N:
        raise ValueError('expected a single value or an iterable with length {0}, got {1} instead'.format(N, x))
    return result
|
def compute_norms(array, norm_axes=None):
    """Compute incoming weight vector norms.

    Parameters
    ----------
    array : ndarray
        Weight array.
    norm_axes : sequence (list or tuple), optional
        The axes over which to compute the norm. When not given, defaults
        to ``(0,)`` for a 2D array (dense layers) and to all trailing axes
        beyond axis 0 for 3D/4D/5D arrays (convolutional layers).

    Returns
    -------
    norms : ndarray
        Array of incoming weight vector norms.

    Raises
    ------
    ValueError
        If `array` is not 2D-5D and `norm_axes` was not specified.

    Examples
    --------
    >>> array = np.random.randn(100, 200)
    >>> compute_norms(array).shape
    (200,)
    >>> compute_norms(array, norm_axes=(1,)).shape
    (100,)
    """
    ndim = array.ndim
    if norm_axes is not None:
        sum_over = tuple(norm_axes)
    elif ndim == 2:  # dense layer weights: norm per output unit
        sum_over = (0,)
    elif ndim in [3, 4, 5]:  # conv weights: norm per filter (axis 0)
        sum_over = tuple(range(1, ndim))
    else:
        # BUGFIX: the original message had no space after the period
        # ("...dimensionality {}.Must specify..."); also use the local
        # `ndim` consistently instead of re-reading array.ndim.
        raise ValueError('Unsupported tensor dimensionality {}. '
                         'Must specify `norm_axes`'.format(ndim))
    return np.sqrt(np.sum(array ** 2, axis=sum_over))
|
def create_param(spec, shape, name=None):
    """Create (or pass through) a Theano shared variable for layer parameters.

    Parameters
    ----------
    spec : numpy array, Theano expression, or callable
        Either the initial parameter values (numpy array), an existing
        Theano expression/shared variable, or a callable mapping the
        desired shape to a numpy array of initial values.
    shape : iterable of int
        Desired shape of the parameter array.
    name : string, optional
        Name for a newly created shared variable; ignored when `spec` is
        already a Theano expression.

    Returns
    -------
    Theano shared variable or Theano expression
        A shared variable initialised from `spec`, or `spec` itself when it
        already is a Theano expression.

    Raises
    ------
    ValueError
        If `shape` contains a non-positive dimension.
    RuntimeError
        If `spec` has the wrong shape/ndim, if a callable returns an
        unusable value, or if `spec` is of an unsupported kind.
    """
    shape = tuple(shape)
    if any(d <= 0 for d in shape):
        raise ValueError('Cannot create param with a non-positive shape dimension. Tried to create param with shape=%r, name=%r' % (shape, name))
    if isinstance(spec, theano.Variable):
        # Existing expressions pass through; only sanity-check the ndim.
        if spec.ndim != len(shape):
            raise RuntimeError('parameter variable has %d dimensions, should be %d' % (spec.ndim, len(shape)))
        return spec
    if isinstance(spec, np.ndarray):
        if spec.shape != shape:
            raise RuntimeError('parameter array has shape %s, should be %s' % (spec.shape, shape))
        return theano.shared(spec, name=name)
    if callable(spec):
        arr = spec(shape)
        try:
            arr = floatX(arr)
        except Exception:
            raise RuntimeError('cannot initialize parameters: the provided callable did not return an array-like value')
        if arr.shape != shape:
            raise RuntimeError('cannot initialize parameters: the provided callable did not return a value with the correct shape')
        return theano.shared(arr, name=name)
    raise RuntimeError("cannot initialize parameters: 'spec' is not a numpy array, a Theano expression, or a callable")
|
def unroll_scan(fn, sequences, outputs_info, non_sequences, n_steps, go_backwards=False):
    """Unroll a for loop explicitly; drop-in for (a subset of) theano.scan.

    The parameter names mirror theano.scan; ``truncate_gradient`` is not
    supported.

    Parameters
    ----------
    fn : function
        Step function; receives the current sequence slices, the previous
        outputs, and the non-sequences.
    sequences : TensorVariable or list of TensorVariables
        Sequence data, iterated over the first dimension.
    outputs_info : list of TensorVariables
        Initial values for each recurrent output.
    non_sequences : list of TensorVariables
        Variables passed unchanged to every step.
    n_steps : int
        Number of steps to unroll.
    go_backwards : bool
        If true, iterate from the last time step to the first.

    Returns
    -------
    list of TensorVariables
        One stacked tensor per recurrent output, giving its value at each
        time step.
    """
    if not isinstance(sequences, (list, tuple)):
        sequences = [sequences]
    steps = list(range(n_steps))
    if go_backwards:
        steps.reverse()
    per_step_outputs = []
    prev = outputs_info
    for idx in steps:
        args = [seq[idx] for seq in sequences] + prev + non_sequences
        result = fn(*args)
        # Normalise the step result to a list of outputs.
        if isinstance(result, T.TensorVariable):
            result = [result]
        if isinstance(result, tuple):
            result = list(result)
        per_step_outputs.append(result)
        prev = result
    # Stack each output across time steps, scan-style.
    return [T.stack(*[step_out[k] for step_out in per_step_outputs])
            for k in range(len(per_step_outputs[0]))]
|
def path2gt(file_path, dataset):
    """Map an audio file path to its integer ground-truth label for `dataset`.

    Dispatches to the dataset-specific helper; drops into ipdb for unknown
    dataset names (preserving the original debugging behaviour).
    """
    # Lambdas keep name resolution lazy, exactly like the original
    # if/elif chain: a helper is only looked up when its branch is taken.
    handlers = {
        'GTZAN': lambda: gtzan_path2gt(file_path),
        'Ballroom': lambda: ballroom_path2gt(file_path),
        'ExtendedBallroom': lambda: extended_ballroom_path2gt(file_path),
        'UrbanSound8K': lambda: urban_sound_path2gt(file_path),
    }
    chosen = handlers.get(dataset)
    if chosen is not None:
        return chosen()
    import ipdb
    ipdb.set_trace()
|
def gtzan_path2gt(file_path):
    """Return the GTZAN genre label (0-9) parsed from `file_path`.

    File names look like ``.../genre.NNNNN.ext``; the genre tag is the part
    between the last '/' and the first '.' of the trailing ``.NNNNN.ext``.
    Prints the tag; on an unknown tag, prints a warning and drops into ipdb
    (preserving the original debugging behaviour).
    """
    genre_ids = {
        'blues': 0, 'classical': 1, 'country': 2, 'disco': 3, 'hiphop': 4,
        'jazz': 5, 'metal': 6, 'pop': 7, 'reggae': 8, 'rock': 9,
    }
    tag = file_path[file_path.rfind('/') + 1:file_path.rfind('.', 0, -4)]
    print(tag)
    if tag in genre_ids:
        return genre_ids[tag]
    print('Warning: did not find the corresponding ground truth (' + str(tag) + ').')
    import ipdb
    ipdb.set_trace()
|
def ballroom_path2gt(file_path):
    """Return the Ballroom rhythm class (0-7) from `file_path`.

    The class tag is the name of the file's parent directory. Prints the
    tag; on an unknown tag, prints a warning and drops into ipdb
    (preserving the original debugging behaviour).
    """
    class_ids = {
        'ChaChaCha': 0, 'Jive': 1, 'Quickstep': 2, 'Rumba': 3,
        'Samba': 4, 'Tango': 5, 'VienneseWaltz': 6, 'Waltz': 7,
    }
    parent_dir = file_path[:file_path.rfind('/')]
    tag = parent_dir[parent_dir.rfind('/') + 1:]
    print(tag)
    if tag in class_ids:
        return class_ids[tag]
    print('Warning: did not find the corresponding ground truth (' + str(tag) + ').')
    import ipdb
    ipdb.set_trace()
|
def extended_ballroom_path2gt(file_path):
    """Return the Extended Ballroom rhythm class (0-12) from `file_path`.

    The class tag is the name of the file's parent directory. Prints the
    tag; on an unknown tag, prints a warning and drops into ipdb
    (preserving the original debugging behaviour).
    """
    class_ids = {
        'Chacha': 0, 'Foxtrot': 1, 'Jive': 2, 'Pasodoble': 3,
        'Quickstep': 4, 'Rumba': 5, 'Salsa': 6, 'Samba': 7,
        'Slowwaltz': 8, 'Tango': 9, 'Viennesewaltz': 10, 'Waltz': 11,
        'Wcswing': 12,
    }
    parent_dir = file_path[:file_path.rfind('/')]
    tag = parent_dir[parent_dir.rfind('/') + 1:]
    print(tag)
    if tag in class_ids:
        return class_ids[tag]
    print('Warning: did not find the corresponding ground truth (' + str(tag) + ').')
    import ipdb
    ipdb.set_trace()
|
def urban_sound_path2gt(file_path, metadata_csv='/datasets/MTG/users/jpons/urban_sounds/UrbanSound8K/metadata/UrbanSound8K.csv'):
    """Return the UrbanSound8K classID for `file_path`.

    Looks up the file's base name in the dataset metadata CSV
    (``slice_file_name`` -> ``classID``).

    Parameters
    ----------
    file_path : str
        Path to an UrbanSound8K audio slice.
    metadata_csv : str, optional
        Path to the UrbanSound8K metadata CSV. Defaults to the previously
        hard-coded location, so existing callers are unaffected; the new
        parameter makes the function usable outside that machine.

    Returns
    -------
    int
        The classID of the first metadata row matching the file name.
    """
    tag = file_path[file_path.rfind('/') + 1:]
    print(tag)
    df = pd.read_csv(metadata_csv)
    # BUGFIX: int() on a single-element Series is deprecated in modern
    # pandas; select the matching value explicitly instead.
    return int(df.loc[df.slice_file_name == tag, 'classID'].iloc[0])
|
def build(config, x_in):
    """Build the CNN front-end named by ``config['CNN']['architecture']``.

    Returns whatever the selected builder returns; like the original
    if/elif chain, returns None for an unknown architecture name.
    """
    # Lambdas keep name resolution lazy, exactly like the original chain:
    # a builder function is only looked up when its branch is selected.
    builders = {
        'cnn_small_filters': lambda: cnn_small_filters(config, x_in),
        'cnn_single': lambda: cnn_single(config, x_in),
        'cnn_music': lambda: cnn_music(config, x_in),
        'sample_level': lambda: sample_level(config, x_in),
        'frame_level': lambda: frame_level(config, x_in),
        'frame_level_many': lambda: frame_level_many(config, x_in),
        'cnn_audio': lambda: cnn_audio(config, x_in),
    }
    chosen = builders.get(config['CNN']['architecture'])
    if chosen is not None:
        return chosen()
|
def cnn_small_filters(config, x_in):
    """Five-layer 3x3-filter CNN front-end over a mel-spectrogram input.

    Reshapes `x_in` to (batch, n_frames, n_mels, 1), applies five identical
    ELU conv layers (each followed by max-pooling with a fixed, per-layer
    pool shape), prints each pooled shape, and returns the five pooled
    feature maps.
    """
    # Fixed per-layer pool shapes (pool_size == strides), as in the
    # original hand-unrolled architecture.
    pool_shapes = [[4, 2], [4, 3], [5, 2], [4, 2], [4, 4]]
    with tf.name_scope('cnn_small_filters'):
        print('[SMALL FILTERS] Input: ' + str(x_in.get_shape))
        net = tf.reshape(x_in, [-1, config['CNN']['n_frames'], config['CNN']['n_mels'], 1])
        pools = []
        for layer_idx, pool_shape in enumerate(pool_shapes, start=1):
            conv = tf.layers.conv2d(
                inputs=net,
                filters=config['CNN']['num_filters'],
                kernel_size=[3, 3],
                padding='same',
                activation=tf.nn.elu,
                name='%dCNN' % layer_idx,
                kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
            net = tf.layers.max_pooling2d(inputs=conv, pool_size=pool_shape, strides=pool_shape)
            pools.append(net)
        for pool in pools:
            print(pool.get_shape)
        return pools
|
def cnn_single(config, x_in):
    """Single conv + max-pool front-end over a mel-spectrogram input.

    Reshapes `x_in` to (batch, n_frames, n_mels, 1), applies one ReLU conv
    layer with configurable filter shape followed by one max-pooling layer,
    prints both shapes, and returns [conv1, pool1].
    """
    with tf.name_scope('cnn_single'):
        print('[CNN SINGLE] Input: ' + str(x_in.get_shape))
        spectrogram = tf.reshape(x_in, [-1, config['CNN']['n_frames'], config['CNN']['n_mels'], 1])
        conv1 = tf.layers.conv2d(
            inputs=spectrogram,
            filters=config['CNN']['num_filters'],
            kernel_size=config['CNN']['filter_shape'],
            padding='valid',
            activation=tf.nn.relu,
            name='1CNN',
            kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool1 = tf.layers.max_pooling2d(
            inputs=conv1,
            pool_size=config['CNN']['pool_shape'],
            strides=config['CNN']['pool_shape'])
        print(conv1.get_shape)
        print(pool1.get_shape)
        return [conv1, pool1]
|
def cnn_music(config, x_in):
    """Musically-motivated CNN front-end with timbral and temporal branches.

    The timbral branch runs six 2D convs with different time x frequency
    receptive fields over the (optionally padded) spectrogram, each
    max-pooled over the full frequency axis. The temporal branch averages
    over frequency and runs four 1D convs with different kernel sizes.
    Returns [timbral, temporal], the two concatenated feature maps.

    Raises
    ------
    ValueError
        If config['CNN']['num_filters'] is not one of 4, 8, 16, 32, 64,
        128, 256.
    """
    num_filters = config['CNN']['num_filters']
    if num_filters not in (4, 8, 16, 32, 64, 128, 256):
        # BUGFIX: the original if/elif chain left `remove` undefined for any
        # other value, crashing later with a NameError; fail fast instead.
        raise ValueError("cnn_music: unsupported config['CNN']['num_filters']: %r" % (num_filters,))
    # The original chain mapped 256->64, 128->32, ..., 4->1, i.e. exactly
    # num_filters // 4.
    remove = num_filters // 4
    with tf.name_scope('cnn_music'):
        print('[MUSIC] Input: ' + str(x_in.get_shape))
        input_layer = tf.reshape(x_in, [(- 1), config['CNN']['n_frames'], config['CNN']['n_mels'], 1])
        # Zero-pad the time axis so the 7-tap and 3-tap convs keep length.
        input_pad_7 = tf.pad(input_layer, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
        input_pad_3 = tf.pad(input_layer, [[0, 0], [1, 1], [0, 0], [0, 0]], 'CONSTANT')
        # Timbral branch: six convs (time taps 7/3/1 x freq spans 90%/40%),
        # each max-pooled over the whole remaining frequency axis.
        conv1 = tf.layers.conv2d(inputs=input_pad_7, filters=num_filters, kernel_size=[7, int((0.9 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[1, conv1.shape[2]], strides=[1, conv1.shape[2]])
        p1 = tf.squeeze(pool1, [2])
        conv2 = tf.layers.conv2d(inputs=input_pad_3, filters=(num_filters * 2), kernel_size=[3, int((0.9 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[1, conv2.shape[2]], strides=[1, conv2.shape[2]])
        p2 = tf.squeeze(pool2, [2])
        conv3 = tf.layers.conv2d(inputs=input_layer, filters=(num_filters * 4), kernel_size=[1, int((0.9 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[1, conv3.shape[2]], strides=[1, conv3.shape[2]])
        p3 = tf.squeeze(pool3, [2])
        conv4 = tf.layers.conv2d(inputs=input_pad_7, filters=num_filters, kernel_size=[7, int((0.4 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[1, conv4.shape[2]], strides=[1, conv4.shape[2]])
        p4 = tf.squeeze(pool4, [2])
        conv5 = tf.layers.conv2d(inputs=input_pad_3, filters=(num_filters * 2), kernel_size=[3, int((0.4 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool5 = tf.layers.max_pooling2d(inputs=conv5, pool_size=[1, conv5.shape[2]], strides=[1, conv5.shape[2]])
        p5 = tf.squeeze(pool5, [2])
        conv6 = tf.layers.conv2d(inputs=input_layer, filters=(num_filters * 4), kernel_size=[1, int((0.4 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool6 = tf.layers.max_pooling2d(inputs=conv6, pool_size=[1, conv6.shape[2]], strides=[1, conv6.shape[2]])
        p6 = tf.squeeze(pool6, [2])
        # Temporal branch: average over frequency, then 1D convs with
        # progressively shorter kernels and more filters (minus `remove`).
        pool7 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, config['CNN']['n_mels']], strides=[1, config['CNN']['n_mels']])
        pool7_rs = tf.squeeze(pool7, [3])
        conv7 = tf.layers.conv1d(inputs=pool7_rs, filters=(num_filters - remove), kernel_size=165, padding='same', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool8 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, config['CNN']['n_mels']], strides=[1, config['CNN']['n_mels']])
        pool8_rs = tf.squeeze(pool8, [3])
        conv8 = tf.layers.conv1d(inputs=pool8_rs, filters=((num_filters * 2) - remove), kernel_size=128, padding='same', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool9 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, config['CNN']['n_mels']], strides=[1, config['CNN']['n_mels']])
        pool9_rs = tf.squeeze(pool9, [3])
        conv9 = tf.layers.conv1d(inputs=pool9_rs, filters=((num_filters * 4) - remove), kernel_size=64, padding='same', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool10 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, config['CNN']['n_mels']], strides=[1, config['CNN']['n_mels']])
        pool10_rs = tf.squeeze(pool10, [3])
        conv10 = tf.layers.conv1d(inputs=pool10_rs, filters=((num_filters * 8) - remove), kernel_size=32, padding='same', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        timbral = tf.concat([p1, p2, p3, p4, p5, p6], 2)
        temporal = tf.concat([conv7, conv8, conv9, conv10], 2)
        print(timbral.get_shape)
        print(temporal.get_shape)
        return [timbral, temporal]
|
def backend(route_out, config):
    """Back-end of the network: two residual temporal-convolution blocks.

    - 'route_out': output of the front-end (the input of this function).
    - 'config': dict of configurable parameters, e.g. the number of
      filters in config['CNN']['num_filters'].

    Returns the three intermediate feature maps
    [first conv (transposed), residual block 1, residual block 2].
    """
    he_init = tf.contrib.layers.variance_scaling_initializer()
    n_filters = config['CNN']['num_filters']
    # First temporal conv spans the whole feature axis; the transpose moves
    # the filter axis into the place of the collapsed feature axis so the
    # next conv can again span it.
    first = tf.layers.conv2d(inputs=route_out, filters=n_filters,
                             kernel_size=[7, route_out.shape[2]],
                             padding='valid', activation=tf.nn.relu,
                             name='1cnnOut', kernel_initializer=he_init)
    first_t = tf.transpose(first, [0, 1, 3, 2])
    # Residual block 1: pad 3 frames on each side so the 7-wide 'valid'
    # conv keeps the temporal length, then add the skip connection.
    padded_1 = tf.pad(first_t, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
    second = tf.layers.conv2d(inputs=padded_1, filters=n_filters,
                              kernel_size=[7, padded_1.shape[2]],
                              padding='valid', activation=tf.nn.relu,
                              name='2cnnOut', kernel_initializer=he_init)
    second_t = tf.transpose(second, [0, 1, 3, 2])
    res_block_1 = tf.add(second_t, first_t)
    # Halve the temporal resolution before the second residual block.
    pooled = tf.layers.max_pooling2d(inputs=res_block_1, pool_size=[2, 1],
                                     strides=[2, 1], name='poolOut')
    padded_2 = tf.pad(pooled, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
    third = tf.layers.conv2d(inputs=padded_2, filters=n_filters,
                             kernel_size=[7, padded_2.shape[2]],
                             padding='valid', activation=tf.nn.relu,
                             name='3cnnOut', kernel_initializer=he_init)
    third_t = tf.transpose(third, [0, 1, 3, 2])
    res_block_2 = tf.add(third_t, pooled)
    return [first_t, res_block_1, res_block_2]
|
def sample_level(config, x_in):
    """Front-end proposed by Lee et al. 2017.

    Lee, et al. "Sample-level Deep Convolutional Neural Networks for
    Music Auto-tagging Using Raw Waveforms."
    arXiv preprint arXiv:1703.01789 (2017).

    A strided 1-D conv followed by six identical conv/max-pool stages,
    all with 3-sample kernels.

    - 'config': dict with config['CNN']['num_filters'].
    - 'x_in': input placeholder with the raw waveform.

    Returns [conv0, pool_1, pool_2, pool_3, pool_4, pool_5, pool_6].
    """
    he_init = tf.contrib.layers.variance_scaling_initializer()
    n_filters = config['CNN']['num_filters']
    # Strided "frame" conv: reduces the sample axis by a factor of 3.
    conv0 = tf.layers.conv1d(inputs=x_in, filters=n_filters, kernel_size=3,
                             strides=3, padding='valid',
                             activation=tf.nn.relu,
                             kernel_initializer=he_init)
    outputs = [conv0]
    layer = conv0
    # Six identical conv -> max-pool stages, each shrinking time by 3x
    # (same layer-creation order as the original hand-unrolled version).
    for _ in range(6):
        conv = tf.layers.conv1d(inputs=layer, filters=n_filters,
                                kernel_size=3, strides=1, padding='same',
                                activation=tf.nn.relu,
                                kernel_initializer=he_init)
        layer = tf.layers.max_pooling1d(conv, pool_size=3, strides=3)
        outputs.append(layer)
    # Debug trace of the pooled shapes. Note: get_shape must be *called*;
    # the original printed the bound method object instead of the shape.
    for pooled in outputs[1:]:
        print(pooled.get_shape())
    return outputs
|
def frame_level(config, x_in):
    """Frame-level front-end: one large strided 1-D conv over the raw
    waveform, followed by the shared back-end.

    - 'config': dict with config['CNN']['num_filters'].
    - 'x_in': input placeholder with the raw waveform.

    Returns [conv1, end_c1, end_cr2, end_cr3]: the front-end output plus
    the three back-end feature maps.
    """
    conv1 = tf.layers.conv1d(inputs=x_in,
                             filters=config['CNN']['num_filters'],
                             kernel_size=512, strides=32, padding='valid',
                             activation=tf.nn.relu,
                             kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    # The back-end expects a 4-D tensor: add a singleton channel axis.
    front_end_out = tf.expand_dims(conv1, 3)
    [end_c1, end_cr2, end_cr3] = backend(front_end_out, config)
    # Debug trace. Note: get_shape must be *called*; the original printed
    # the bound method object instead of the shape.
    print(conv1.get_shape())
    print(end_c1.get_shape())
    print(end_cr2.get_shape())
    print(end_cr3.get_shape())
    return [conv1, end_c1, end_cr2, end_cr3]
|
def frame_level_many(config, x_in):
    """Multi-resolution frame-level front-end.

    Five parallel strided 1-D convs with decreasing kernel sizes
    (512, 256, 128, 64, 32) are concatenated along the filter axis and
    fed to the shared back-end.

    - 'config': dict with config['CNN']['num_filters'].
    - 'x_in': input placeholder with the raw waveform.

    Returns the five conv outputs followed by the three back-end maps.
    """
    he_init = tf.contrib.layers.variance_scaling_initializer()
    n_filters = config['CNN']['num_filters']
    # Same layer-creation order as the original hand-unrolled version.
    convs = []
    for kernel_size in (512, 256, 128, 64, 32):
        convs.append(tf.layers.conv1d(inputs=x_in, filters=n_filters,
                                      kernel_size=kernel_size, strides=32,
                                      padding='same', activation=tf.nn.relu,
                                      kernel_initializer=he_init))
    many = tf.concat(convs, 2)
    # The back-end expects a 4-D tensor: add a singleton channel axis.
    front_end_out = tf.expand_dims(many, 3)
    [end_c1, end_cr2, end_cr3] = backend(front_end_out, config)
    # Debug trace. Note: get_shape must be *called*; the original printed
    # the bound method object instead of the shape.
    print(x_in.get_shape())
    for conv in convs:
        print(conv.get_shape())
    print(end_c1.get_shape())
    print(end_cr2.get_shape())
    print(end_cr3.get_shape())
    return convs + [end_c1, end_cr2, end_cr3]
|
def cnn_audio(config, x_in):
    """Musically motivated audio CNN with timbral and temporal branches.

    - 'config': dict with config['CNN']['num_filters'],
      config['CNN']['n_frames'] and config['CNN']['n_mels'].
    - 'x_in': input placeholder with the (flattened) log-mel spectrogram.

    Returns [timbral, temporal]: the concatenated timbral (2-D conv)
    features and the concatenated temporal (1-D conv) features.
    """
    num_filters = config['CNN']['num_filters']
    n_mels = config['CNN']['n_mels']
    # Number of filters subtracted from each temporal branch. The original
    # lookup table (256->64, 128->32, ..., 4->1) is exactly num_filters/4;
    # the closed form also covers values outside that table, for which the
    # old if/elif chain left 'remove' undefined and crashed with NameError.
    remove = num_filters // 4
    with tf.name_scope('cnn_audio'):
        print('[AUDIO!] Input: ' + str(x_in.get_shape()))
        input_layer = tf.reshape(
            x_in, [-1, config['CNN']['n_frames'], n_mels, 1])
        # Pre-padded views so the 7- and 3-frame 'valid' kernels keep the
        # temporal length; the 1-frame kernel needs no padding.
        padded = {
            7: tf.pad(input_layer, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT'),
            3: tf.pad(input_layer, [[0, 0], [1, 1], [0, 0], [0, 0]], 'CONSTANT'),
            1: input_layer,
        }
        he_init = tf.contrib.layers.variance_scaling_initializer()
        # Timbral branches: 2-D convs spanning 90% then 40% of the mel
        # axis, max-pooled over the remaining frequency dimension (same
        # layer-creation order as the original hand-unrolled version).
        timbral_feats = []
        for frac in (0.9, 0.4):
            for frames, mult in ((7, 1), (3, 2), (1, 4)):
                conv = tf.layers.conv2d(
                    inputs=padded[frames], filters=num_filters * mult,
                    kernel_size=[frames, int(frac * n_mels)],
                    padding='valid', activation=tf.nn.relu,
                    kernel_initializer=he_init)
                pool = tf.layers.max_pooling2d(
                    inputs=conv, pool_size=[1, conv.shape[2]],
                    strides=[1, conv.shape[2]])
                timbral_feats.append(tf.squeeze(pool, [2]))
        # Temporal branches: average the energy over the mel axis, then
        # 1-D convs of decreasing length capture temporal patterns.
        temporal_feats = []
        for mult, kernel_size in ((1, 64), (2, 32), (4, 16), (8, 8)):
            avg = tf.layers.average_pooling2d(
                inputs=input_layer, pool_size=[1, n_mels],
                strides=[1, n_mels])
            avg_rs = tf.squeeze(avg, [3])
            temporal_feats.append(tf.layers.conv1d(
                inputs=avg_rs, filters=(num_filters * mult) - remove,
                kernel_size=kernel_size, padding='same',
                activation=tf.nn.relu, kernel_initializer=he_init))
        timbral = tf.concat(timbral_feats, 2)
        temporal = tf.concat(temporal_feats, 2)
        # Debug trace (get_shape must be called to print the actual shape).
        print(timbral.get_shape())
        print(temporal.get_shape())
        return [timbral, temporal]
|
class BaseELM(BaseEstimator):
    """Abstract base class for Extreme Learning Machines.

    Warning: this class should not be used directly.
    Use derived classes instead.
    """
    __metaclass__ = ABCMeta

    def __init__(self, hidden_layer, regressor):
        self.hidden_layer = hidden_layer
        self.regressor = regressor

    @abstractmethod
    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like of shape [n_samples, n_outputs]
            Target values (class labels in classification, real numbers
            in regression).

        Returns
        -------
        self : object
            Returns an instance of self.
        """

    @abstractmethod
    def predict(self, X):
        """Predict values using the model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]

        Returns
        -------
        C : numpy array of shape [n_samples, n_outputs]
            Predicted values.
        """
|
class GenELMRegressor(BaseELM, RegressorMixin):
    """Regressor based on the Extreme Learning Machine.

    An Extreme Learning Machine (ELM) is a single layer feedforward
    network with random hidden layer components and ordinary linear
    least squares fitting of the hidden->output weights by default.
    [1][2]

    Parameters
    ----------
    `hidden_layer` : random_layer instance or None, optional
        If None (default), a fresh MLPRandomLayer(random_state=0) is
        created per instance.

    `regressor` : regressor instance, optional (default=None)
        If provided, this object is used to perform the regression from
        hidden unit activations to the outputs and subsequent
        predictions. If not present, an ordinary linear least squares
        fit is performed.

    Attributes
    ----------
    `coefs_` : numpy array
        Fitted regression coefficients if no regressor supplied.

    `fitted_` : bool
        Flag set when fit has been called already.

    `hidden_activations_` : numpy array of shape [n_samples, n_hidden]
        Hidden layer activations for last input.

    See Also
    --------
    RBFRandomLayer, MLPRandomLayer, ELMRegressor, ELMClassifier

    References
    ----------
    .. [1] http://www.extreme-learning-machines.org
    .. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning
           Machine: Theory and Applications", Neurocomputing, vol. 70,
           pp. 489-501, 2006.
    """

    def __init__(self, hidden_layer=None, regressor=None):
        # Build the default hidden layer per instance. The original code
        # evaluated MLPRandomLayer(random_state=0) once in the signature,
        # so every default-constructed regressor shared (and mutated via
        # fit_transform) the very same layer object.
        if hidden_layer is None:
            hidden_layer = MLPRandomLayer(random_state=0)
        super(GenELMRegressor, self).__init__(hidden_layer, regressor)
        self.coefs_ = None
        self.fitted_ = False
        self.hidden_activations_ = None

    def _fit_regression(self, y):
        """Fit the hidden->output map with the pseudo-inverse, or with
        the user-supplied regressor when one was given."""
        if self.regressor is None:
            # Moore-Penrose pseudo-inverse solves the least-squares fit.
            self.coefs_ = safe_sparse_dot(pinv2(self.hidden_activations_), y)
        else:
            self.regressor.fit(self.hidden_activations_, y)
        self.fitted_ = True

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like of shape [n_samples, n_outputs]
            Target values (real numbers in regression).

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        self.hidden_activations_ = self.hidden_layer.fit_transform(X)
        self._fit_regression(as_float_array(y, copy=True))
        return self

    def _get_predictions(self):
        """Get predictions using internal least squares coefficients or
        the supplied regressor."""
        if self.regressor is None:
            preds = safe_sparse_dot(self.hidden_activations_, self.coefs_)
        else:
            preds = self.regressor.predict(self.hidden_activations_)
        return preds

    def predict(self, X):
        """Predict values using the model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]

        Returns
        -------
        C : numpy array of shape [n_samples, n_outputs]
            Predicted values.

        Raises
        ------
        ValueError
            If fit has not been called yet.
        """
        if not self.fitted_:
            raise ValueError('ELMRegressor not fitted')
        self.hidden_activations_ = self.hidden_layer.transform(X)
        return self._get_predictions()
|
class GenELMClassifier(BaseELM, ClassifierMixin):
    """Classifier based on the Extreme Learning Machine.

    An Extreme Learning Machine (ELM) is a single layer feedforward
    network with random hidden layer components and ordinary linear
    least squares fitting of the hidden->output weights by default.
    [1][2]

    Parameters
    ----------
    `hidden_layer` : random_layer instance or None, optional
        If None (default), a fresh MLPRandomLayer(random_state=0) is
        created per instance.

    `binarizer` : LabelBinarizer or None, optional
        If None (default), a fresh LabelBinarizer(neg_label=-1,
        pos_label=1) is created per instance.

    `regressor` : regressor instance, optional (default=None)
        If provided, this object is used to perform the regression from
        hidden unit activations to the outputs and subsequent
        predictions. If not present, an ordinary linear least squares
        fit is performed.

    Attributes
    ----------
    `classes_` : numpy array of shape [n_classes]
        Array of class labels.

    `genelm_regressor_` : ELMRegressor instance
        Performs actual fit of binarized values.

    See Also
    --------
    RBFRandomLayer, MLPRandomLayer, ELMRegressor, ELMClassifier

    References
    ----------
    .. [1] http://www.extreme-learning-machines.org
    .. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning
           Machine: Theory and Applications", Neurocomputing, vol. 70,
           pp. 489-501, 2006.
    """

    def __init__(self, hidden_layer=None, binarizer=None, regressor=None):
        # The original defaults built one MLPRandomLayer and one
        # LabelBinarizer at class-definition time, shared by every
        # default-constructed classifier; fitting one instance then
        # clobbered the hidden weights / label mapping of the others.
        # Build fresh per-instance objects instead.
        if hidden_layer is None:
            hidden_layer = MLPRandomLayer(random_state=0)
        super(GenELMClassifier, self).__init__(hidden_layer, regressor)
        if binarizer is None:
            binarizer = LabelBinarizer(neg_label=-1, pos_label=1)
        self.binarizer = binarizer
        self.classes_ = None
        self.genelm_regressor_ = GenELMRegressor(hidden_layer, regressor)

    def decision_function(self, X):
        """Return the decision function values related to each class on
        an array of test vectors X.

        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]

        Returns
        -------
        C : array of shape [n_samples, n_classes] or [n_samples,]
            Decision function values related to each class, per sample.
            In the two-class case, the shape is [n_samples,].
        """
        return self.genelm_regressor_.predict(X)

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like of shape [n_samples, n_outputs]
            Target values (class labels).

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        self.classes_ = np.unique(y)
        y_bin = self.binarizer.fit_transform(y)
        self.genelm_regressor_.fit(X, y_bin)
        return self

    def predict(self, X):
        """Predict values using the model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]

        Returns
        -------
        C : numpy array of shape [n_samples, n_outputs]
            Predicted class labels.
        """
        raw_predictions = self.decision_function(X)
        class_predictions = self.binarizer.inverse_transform(raw_predictions)
        return class_predictions
|
class ELMRegressor(BaseEstimator, RegressorMixin):
    """Extreme Learning Machine regressor.

    An ELM is a single layer feedforward network with random hidden
    layer components and ordinary linear least squares fitting of the
    hidden->output weights by default. [1][2]

    ELMRegressor is a wrapper for a GenELMRegressor that uses a
    RandomLayer and passes the __init__ parameters through to the
    hidden layer generated by the fit() method.

    Parameters
    ----------
    `n_hidden` : int, optional (default=20)
        Number of units to generate in the SimpleRandomLayer.

    `alpha` : float, optional (default=0.5)
        Mixing coefficient for distance and dot product input
        activations:
        activation = alpha*mlp_activation + (1-alpha)*rbf_width*rbf_activation

    `rbf_width` : float, optional (default=1.0)
        Multiplier on rbf_activation.

    `activation_func` : {callable, string} optional (default='tanh')
        Function used to transform input activation. One of 'tanh',
        'sine', 'tribas', 'inv_tribase', 'sigmoid', 'hardlim',
        'softlim', 'gaussian', 'multiquadric', 'inv_multiquadric',
        'reclinear' or a callable.

    `activation_args` : dictionary, optional (default=None)
        Supplies keyword arguments for a callable activation_func.

    `user_components` : dictionary, optional (default=None)
        Values for components that would otherwise be randomly
        generated; valid keys are 'radii', 'centers', 'biases',
        'weights'.

    `regressor` : regressor instance, optional (default=None)
        If provided, used for the regression from hidden unit
        activations to the outputs; otherwise an ordinary linear least
        squares fit is performed.

    `random_state` : int, RandomState instance or None (default=None)
        Controls the pseudo random generation of hidden unit weights.

    Attributes
    ----------
    `_genelm_regressor` : GenELMRegressor object
        Wrapped object that actually performs the fit.

    See Also
    --------
    RandomLayer, RBFRandomLayer, MLPRandomLayer,
    GenELMRegressor, GenELMClassifier, ELMClassifier

    References
    ----------
    .. [1] http://www.extreme-learning-machines.org
    .. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning
           Machine: Theory and Applications", Neurocomputing, vol. 70,
           pp. 489-501, 2006.
    """

    def __init__(self, n_hidden=20, alpha=0.5, rbf_width=1.0,
                 activation_func='tanh', activation_args=None,
                 user_components=None, regressor=None, random_state=None):
        self.n_hidden = n_hidden
        self.alpha = alpha
        self.random_state = random_state
        self.activation_func = activation_func
        self.activation_args = activation_args
        self.user_components = user_components
        self.rbf_width = rbf_width
        self.regressor = regressor
        self._genelm_regressor = None

    def _create_random_layer(self):
        """Build a RandomLayer from the stored constructor params."""
        return RandomLayer(n_hidden=self.n_hidden,
                           alpha=self.alpha,
                           random_state=self.random_state,
                           activation_func=self.activation_func,
                           activation_args=self.activation_args,
                           user_components=self.user_components,
                           rbf_width=self.rbf_width)

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training vectors.

        y : array-like of shape [n_samples, n_outputs]
            Target values.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        hidden = self._create_random_layer()
        self._genelm_regressor = GenELMRegressor(hidden_layer=hidden,
                                                 regressor=self.regressor)
        self._genelm_regressor.fit(X, y)
        return self

    def predict(self, X):
        """Predict values using the model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]

        Returns
        -------
        C : numpy array of shape [n_samples, n_outputs]
            Predicted values.

        Raises
        ------
        ValueError
            If fit has not been called yet.
        """
        if self._genelm_regressor is None:
            raise ValueError('SimpleELMRegressor not fitted')
        return self._genelm_regressor.predict(X)
|
class ELMClassifier(ELMRegressor):
    """Extreme Learning Machine classifier.

    ELMClassifier is an ELMRegressor subclass that first binarizes the
    labels, then uses the superclass to compute the decision function
    that is then unbinarized to yield the prediction.

    The params for the RandomLayer used in the input transform are
    exposed in the ELMClassifier constructor.

    Parameters
    ----------
    `n_hidden` : int, optional (default=20)
        Number of units to generate in the SimpleRandomLayer.

    `activation_func` : {callable, string} optional (default='tanh')
        Function used to transform input activation. One of 'tanh',
        'sine', 'tribas', 'inv_tribase', 'sigmoid', 'hardlim',
        'softlim', 'gaussian', 'multiquadric', 'inv_multiquadric',
        'reclinear' or a callable.

    `activation_args` : dictionary, optional (default=None)
        Supplies keyword arguments for a callable activation_func.

    `binarizer` : LabelBinarizer or None, optional
        If None (default), a fresh LabelBinarizer(neg_label=-1,
        pos_label=1) is created per instance.

    `random_state` : int, RandomState instance or None (default=None)
        Controls the pseudo random generation of hidden unit weights.

    Attributes
    ----------
    `classes_` : numpy array of shape [n_classes]
        Array of class labels.

    See Also
    --------
    RandomLayer, RBFRandomLayer, MLPRandomLayer,
    GenELMRegressor, GenELMClassifier, ELMClassifier

    References
    ----------
    .. [1] http://www.extreme-learning-machines.org
    .. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning
           Machine: Theory and Applications", Neurocomputing, vol. 70,
           pp. 489-501, 2006.
    """

    def __init__(self, n_hidden=20, alpha=0.5, rbf_width=1.0,
                 activation_func='tanh', activation_args=None,
                 user_components=None, regressor=None,
                 binarizer=None, random_state=None):
        super(ELMClassifier, self).__init__(n_hidden=n_hidden,
                                            alpha=alpha,
                                            random_state=random_state,
                                            activation_func=activation_func,
                                            activation_args=activation_args,
                                            user_components=user_components,
                                            rbf_width=rbf_width,
                                            regressor=regressor)
        self.classes_ = None
        # Build a per-instance binarizer. The original default was one
        # LabelBinarizer instance shared by every default-constructed
        # classifier, so fitting one clobbered the label mapping of the
        # others.
        if binarizer is None:
            binarizer = LabelBinarizer(neg_label=-1, pos_label=1)
        self.binarizer = binarizer

    def decision_function(self, X):
        """Return the decision function values related to each class on
        an array of test vectors X.

        Parameters
        ----------
        X : array-like of shape [n_samples, n_features]

        Returns
        -------
        C : array of shape [n_samples, n_classes] or [n_samples,]
            Decision function values related to each class, per sample.
            In the two-class case, the shape is [n_samples,].
        """
        return super(ELMClassifier, self).predict(X)

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training vectors.

        y : array-like of shape [n_samples, n_outputs]
            Target values (class labels).

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        self.classes_ = np.unique(y)
        y_bin = self.binarizer.fit_transform(y)
        super(ELMClassifier, self).fit(X, y_bin)
        return self

    def predict(self, X):
        """Predict class labels using the model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]

        Returns
        -------
        C : numpy array of shape [n_samples, n_outputs]
            Predicted class labels.
        """
        raw_predictions = self.decision_function(X)
        class_predictions = self.binarizer.inverse_transform(raw_predictions)
        return class_predictions

    def score(self, X, y):
        """Force use of accuracy score since we don't inherit from
        ClassifierMixin."""
        from sklearn.metrics import accuracy_score
        return accuracy_score(y, self.predict(X))
|
def wave_frontend(x, is_training):
    """Front-end proposed by Lee et al. 2017.

    Lee, et al. "Sample-level Deep Convolutional Neural Networks for
    Music Auto-tagging Using Raw Waveforms."
    arXiv preprint arXiv:1703.01789 (2017).

    - 'x': placeholder with the input waveform.
    - 'is_training': placeholder indicating whether it is the training
      or test phase, for batch normalization.
    """
    initializer = tf.contrib.layers.variance_scaling_initializer()

    def conv_bn(net, n_filters, strides):
        # 3-sample 'valid' conv followed by batch normalization.
        conv = tf.layers.conv1d(inputs=net, filters=n_filters,
                                kernel_size=3, strides=strides,
                                padding='valid', activation=tf.nn.relu,
                                kernel_initializer=initializer)
        return tf.layers.batch_normalization(conv, training=is_training)

    # Strided conv collapses raw samples into frames (time / 3).
    net = conv_bn(x, 64, 3)
    # Six conv/batch-norm/max-pool stages with growing filter counts;
    # iteration order matches the original hand-unrolled layer order.
    for n_filters in (64, 64, 128, 128, 128, 256):
        net = tf.layers.max_pooling1d(conv_bn(net, n_filters, 1),
                                      pool_size=3, strides=3)
    # Add a singleton channel axis for the 2-D back-end.
    return tf.expand_dims(net, [3])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.