| repo (string, 2–99 chars) | file (string, 13–225 chars) | code (string, 0–18.3M chars) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (string, 1 distinct value) |
|---|---|---|---|---|---|---|
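The three numeric columns are per-file statistics derived from the `code` string. A minimal sketch of how such values could be recomputed is shown below; the exact aggregation used when this table was built is not documented here, so the definitions (character count for `file_length`, mean and maximum line length over the newline-split lines) are assumptions, and the helper name is hypothetical.

```python
def file_stats(code: str):
    """Hypothetical helper: recompute the numeric columns for one row.

    Assumed definitions: file_length is the character count of the code
    string; the line-length statistics are taken over its newline-split lines.
    """
    lines = code.split('\n')          # a file with no newline still yields one line
    file_length = len(code)
    avg_line_length = sum(len(line) for line in lines) / len(lines)
    max_line_length = max(len(line) for line in lines)
    return file_length, avg_line_length, max_line_length
```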
| chainer | chainer-master/tests/chainer_tests/utils_tests/__init__.py |  | 0 | 0 | 0 | py |
| chainer | chainer-master/tests/chainer_tests/utils_tests/test_walker_alias.py |
import unittest
import numpy
from chainer import backend
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainer import utils
class TestWalkerAlias(unittest.TestCase):
def setUp(self):
self.ps = numpy.array([5, 3, 4, 1, 2], dtype=numpy.int32)
self.sampler = utils.WalkerAlias(self.ps)
def check_sample(self):
counts = numpy.zeros(len(self.ps), numpy.float32)
for _ in range(1000):
vs = self.sampler.sample((4, 3))
numpy.add.at(counts, cuda.to_cpu(vs), 1)
counts /= (1000 * 12)
counts *= sum(self.ps)
testing.assert_allclose(self.ps, counts, atol=0.1, rtol=0.1)
def test_sample_cpu(self):
assert not self.sampler.use_gpu
self.check_sample()
@attr.gpu
def test_sample_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.sampler.to_gpu()
assert self.sampler.use_gpu
assert isinstance(self.sampler._device.device, cuda.Device)
self.check_sample()
@attr.gpu
def test_to_cpu(self):
with testing.assert_warns(DeprecationWarning):
self.sampler.to_gpu()
with testing.assert_warns(DeprecationWarning):
self.sampler.to_cpu()
assert not self.sampler.use_gpu
assert isinstance(
self.sampler._device, backend.CpuDevice)
self.check_sample()
testing.run_module(__name__, __file__)
| 1,483 | 27.538462 | 68 | py |
| chainer | chainer-master/tests/chainer_tests/utils_tests/test_experimental.py |
import unittest
import warnings
import chainer
from chainer import testing
from chainer import utils
def f():
utils.experimental('f')
class C(object):
@staticmethod
def static_method():
utils.experimental('static_method')
@classmethod
def class_method(cls):
utils.experimental('C.class_method')
def __init__(self):
utils.experimental('C')
def f(self):
utils.experimental('C.f')
class TestExperimental(unittest.TestCase):
def setUp(self):
self.original = chainer.disable_experimental_feature_warning
chainer.disable_experimental_feature_warning = False
def tearDown(self):
chainer.disable_experimental_feature_warning = self.original
def test_experimental_with_api_name(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
f()
self.assertEqual(len(w), 1)
self.assertIs(w[0].category, FutureWarning)
self.assertIn('f is experimental.', str(w[0].message))
def test_experimental_with_no_api_name_2(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
C()
self.assertEqual(len(w), 1)
self.assertIs(w[0].category, FutureWarning)
self.assertIn('C is experimental.', str(w[0].message))
def test_experimental_with_no_api_name_3(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
c = C()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
c.f()
self.assertEqual(len(w), 1)
self.assertIs(w[0].category, FutureWarning)
self.assertIn('C.f is experimental.', str(w[0].message))
def test_experimental_static_method(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
C.static_method()
self.assertEqual(len(w), 1)
self.assertIs(w[0].category, FutureWarning)
self.assertIn('static_method is experimental.',
str(w[0].message))
def test_experimental_class_method(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
C.class_method()
self.assertEqual(len(w), 1)
self.assertIs(w[0].category, FutureWarning)
self.assertIn('C.class_method is experimental.',
str(w[0].message))
class TestDisableExperimentalWarning(unittest.TestCase):
def setUp(self):
self.original = chainer.disable_experimental_feature_warning
chainer.disable_experimental_feature_warning = True
def tearDown(self):
chainer.disable_experimental_feature_warning = self.original
def test_experimental(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
f()
self.assertEqual(len(w), 0)
testing.run_module(__name__, __file__)
| 3,064 | 27.119266 | 68 | py |
| chainer | chainer-master/tests/chainer_tests/utils_tests/test_argument.py |
import unittest
import six
from chainer import testing
from chainer.utils.argument import parse_kwargs
class TestArgument(unittest.TestCase):
def test_parse_kwargs(self):
def test(**kwargs):
return parse_kwargs(kwargs, ('foo', 1), ('bar', 2))
self.assertEqual(test(), (1, 2))
self.assertEqual(test(bar=1, foo=2), (2, 1))
re = r'test\(\) got unexpected keyword argument\(s\) \'ham\', \'spam\''
with six.assertRaisesRegex(self, TypeError, re):
test(spam=1, ham=2)
testing.run_module(__name__, __file__)
| 579 | 22.2 | 79 | py |
| chainer | chainer-master/tests/chainer_tests/utils_tests/test_utils.py |
import unittest
import numpy
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
'dtype': [None, numpy.float16, numpy.float32, numpy.float64],
}))
class TestForceArray(unittest.TestCase):
def test_scalar(self):
x = utils.force_array(numpy.float32(1), dtype=self.dtype)
self.assertIsInstance(x, numpy.ndarray)
if self.dtype is None:
self.assertEqual(x.dtype, numpy.float32)
else:
self.assertEqual(x.dtype, self.dtype)
def test_0dim_array(self):
x = utils.force_array(numpy.array(1, numpy.float32), dtype=self.dtype)
self.assertIsInstance(x, numpy.ndarray)
if self.dtype is None:
self.assertEqual(x.dtype, numpy.float32)
else:
self.assertEqual(x.dtype, self.dtype)
def test_array(self):
x = utils.force_array(numpy.array([1], numpy.float32),
dtype=self.dtype)
self.assertIsInstance(x, numpy.ndarray)
if self.dtype is None:
self.assertEqual(x.dtype, numpy.float32)
else:
self.assertEqual(x.dtype, self.dtype)
class TestForceType(unittest.TestCase):
def test_force_type_scalar(self):
x = numpy.int32(1)
y = utils.force_type(numpy.dtype(numpy.float32), x)
self.assertEqual(y.dtype, numpy.float32)
def test_force_type_array(self):
x = numpy.array([1], dtype=numpy.int32)
y = utils.force_type(numpy.dtype(numpy.float32), x)
self.assertEqual(y.dtype, numpy.float32)
def test_force_type_array_no_change(self):
x = numpy.array([1], dtype=numpy.float32)
y = utils.force_type(numpy.dtype(numpy.float32), x)
self.assertEqual(y.dtype, numpy.float32)
testing.run_module(__name__, __file__)
| 1,830 | 30.033898 | 78 | py |
| chainer | chainer-master/tests/chainer_tests/utils_tests/test_conv.py |
import unittest
import numpy
from six import moves
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainer.utils import conv
class TestConv(unittest.TestCase):
def check_conv_outsize(self, size, k, s, p, d):
# When cover_all == False, `outsize` is the maximum integer that
# satisfies "(outsize - 1) * s + k <= w"
w = size + p * 2
dk = k + (k - 1) * (d - 1)
outsize = conv.get_conv_outsize(size, k, s, p, cover_all=False, d=d)
self.assertTrue((outsize - 1) * s + dk <= w < outsize * s + dk)
def check_conv_outsize_cover_all(self, size, k, s, p, d):
# When cover_all == True, `outsize` is the minimum integer that
# satisfies "w <= (outsize - 1) * s + k"
w = size + p * 2
dk = k + (k - 1) * (d - 1)
outsize = conv.get_conv_outsize(size, k, s, p, cover_all=True, d=d)
self.assertTrue((outsize - 2) * s + dk < w <= (outsize - 1) * s + dk)
def test_conv_outsize1(self):
self.check_conv_outsize(10, 4, 3, 2, 1)
def test_conv_outsize2(self):
self.check_conv_outsize(10, 4, 4, 2, 1)
def test_conv_outsize3(self):
self.check_conv_outsize(10, 4, 3, 2, 2)
def test_conv_outsize_cover_all1(self):
self.check_conv_outsize_cover_all(10, 4, 3, 2, 1)
def test_conv_outsize_cover_all2(self):
self.check_conv_outsize_cover_all(10, 4, 4, 2, 1)
def test_conv_outsize_cover_all3(self):
self.check_conv_outsize_cover_all(10, 4, 3, 2, 2)
@testing.parameterize(*testing.product({
'params': [
(1, 1, 1, 1, 1, 1, 1, 1),
(2, 2, 2, 2, 2, 2, 2, 2),
(1, 2, 2, 1, 1, 2, 1, 1),
(1, 2, 3, 4, 1, 2, 1, 1),
(1, 2, 3, 4, 4, 5, 2, 3),
(3, 3, 2, 2, 1, 1, 1, 1),
],
}))
class TestIm2Col(unittest.TestCase):
def setUp(self):
self.dtype = numpy.float32
self.w = 10
self.h = 8
shape = (2, 3, self.h, self.w)
self.img = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
def check_im2col(self, kh, kw, sy, sx, ph, pw, dy, dx, gpu):
if gpu:
img = cuda.to_gpu(self.img)
else:
img = self.img
col = conv.im2col(img, kh, kw, sy, sx, ph, pw, dy=dy, dx=dx)
col_h = conv.get_conv_outsize(self.h, kh, sy, ph, d=dy)
col_w = conv.get_conv_outsize(self.w, kw, sx, pw, d=dx)
self.assertEqual(col.shape, (2, 3, kh, kw, col_h, col_w))
col = cuda.to_cpu(col)
for y in moves.range(col_h):
for x in moves.range(col_w):
for ky in moves.range(kh):
for kx in moves.range(kw):
oy = y * sy - ph + ky * dy
ox = x * sx - pw + kx * dx
if 0 <= oy < self.h and 0 <= ox < self.w:
testing.assert_allclose(
col[:, :, ky, kx, y, x],
self.img[:, :, oy, ox])
else:
testing.assert_allclose(
col[:, :, ky, kx, y, x],
numpy.zeros((2, 3), self.dtype))
def test_im2col_cpu(self):
self.check_im2col(*self.params, gpu=False)
@attr.gpu
def test_im2col_gpu(self):
self.check_im2col(*self.params, gpu=True)
@testing.parameterize(*testing.product({
'params': [
(1, 1, 1, 1, 1, 1, 1, 1),
(2, 2, 2, 2, 2, 2, 2, 2),
(1, 2, 2, 1, 1, 2, 1, 1),
(1, 2, 3, 4, 1, 2, 1, 1),
(1, 2, 3, 4, 4, 5, 2, 3),
(3, 3, 2, 2, 1, 1, 1, 1),
],
}))
class TestCol2Im(unittest.TestCase):
def setUp(self):
self.dtype = numpy.float32
self.w = 10
self.h = 8
def check_col2im(self, kh, kw, sy, sx, ph, pw, dy, dx, gpu):
col_h = conv.get_conv_outsize(self.h, kh, sy, ph, d=dy)
col_w = conv.get_conv_outsize(self.w, kw, sx, pw, d=dx)
shape = (2, 3, kh, kw, col_h, col_w)
col = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
if gpu:
col_data = cuda.to_gpu(col)
else:
col_data = col
img = conv.col2im(
col_data, sy, sx, ph, pw, self.h, self.w, dy=dy, dx=dx)
img = cuda.to_cpu(img)
self.assertEqual(img.shape, (2, 3, self.h, self.w))
for y in moves.range(self.h):
for x in moves.range(self.w):
v = numpy.zeros((2, 3), self.dtype)
for ky in moves.range(kh):
for kx in moves.range(kw):
oy = (y + ph - ky * dy) // sy
ox = (x + pw - kx * dx) // sx
if ((y + ph - ky * dy) % sy == 0 and
(x + pw - kx * dx) % sx == 0 and
0 <= oy < col_h and 0 <= ox < col_w):
v += col[:, :, ky, kx, oy, ox]
testing.assert_allclose(img[:, :, y, x], v)
def test_col2im_cpu(self):
self.check_col2im(*self.params, gpu=False)
@attr.gpu
def test_col2im_gpu(self):
self.check_col2im(*self.params, gpu=True)
testing.run_module(__name__, __file__)
| 5,281 | 32.43038 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/__init__.py |  | 0 | 0 | 0 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/test_selu.py |
import random
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestSELU(testing.FunctionTestCase):
dodge_nondifferentiable = True
def setUp(self):
self.alpha = random.random()
self.scale = random.random()
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 5e-4, 'rtol': 5e-3})
self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-3})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.selu(x, alpha=self.alpha, scale=self.scale),
def forward_expected(self, inputs):
x, = inputs
expected = numpy.where(x >= 0, x, self.alpha * (numpy.exp(x) - 1))
expected *= self.scale
return expected.astype(x.dtype),
testing.run_module(__name__, __file__)
| 1,518 | 25.189655 | 76 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/test_leaky_relu.py |
import random
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'slope': ['random', 0.0],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestLeakyReLU(testing.FunctionTestCase):
def setUp(self):
if self.slope == 'random':
self.slope = random.random()
self.check_forward_options = {}
self.check_backward_options = {}
self.check_double_backward_options = {}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.leaky_relu(x, slope=self.slope),
def forward_expected(self, inputs):
x, = inputs
expected = numpy.where(x >= 0, x, x * self.slope)
return expected.astype(self.dtype),
testing.run_module(__name__, __file__)
| 1,690 | 26.721311 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/test_prelu.py |
import numpy
import chainer
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(4, 3, 2), (1,), (1, 2, 3, 4, 5, 6)],
'Wdim': [0, 1, 3],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@chainer.testing.backend.inject_backend_tests(
None,
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestPReLU(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-3})
self.check_double_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
if self.dtype == numpy.float16:
self.check_double_backward_options.update(
{'atol': 5e-3, 'rtol': 5e-2})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
x[(-0.05 < x) & (x < 0.05)] = 0.5
W = numpy.random.uniform(
-1, 1, self.shape[1:1 + self.Wdim]).astype(self.dtype)
return x, W
def forward_expected(self, inputs):
x, W = inputs
y_expect = x.copy()
masked = numpy.ma.masked_greater_equal(y_expect, 0, copy=False)
shape = (1,) + W.shape + (1,) * (x.ndim - W.ndim - 1)
masked *= W.reshape(shape)
return y_expect,
def forward(self, inputs, device):
x, W = inputs
y = functions.prelu(x, W)
return y,
testing.run_module(__name__, __file__)
| 1,845 | 29.262295 | 76 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/test_hard_sigmoid.py |
import numpy
from chainer import functions
from chainer import testing
from chainer import utils
def _hard_sigmoid(x):
return (x * 0.2 + 0.5).clip(0, 1)
@testing.parameterize(*testing.product({
'shape': [(3, 4), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{}
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestHardSigmoid(testing.FunctionTestCase):
dodge_nondifferentiable = True
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.hard_sigmoid(x),
def forward_expected(self, inputs):
x, = inputs
y = utils.force_array(_hard_sigmoid(x), self.dtype)
return y,
testing.run_module(__name__, __file__)
| 1,417 | 23.033898 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/test_tanh.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer import utils
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'contiguous': [None, 'C'],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'}
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ('never', 'always'),
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1']
})
)
class TestTanh(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
def generate_inputs(self):
x = numpy.random.uniform(-.5, .5, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.tanh(x),
def forward_expected(self, inputs):
x, = inputs
return utils.force_array(numpy.tanh(x)),
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestTanhCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.expect = chainer.should_use_cudnn('==always')
def forward(self):
x = chainer.Variable(self.x)
return functions.tanh(x)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
default_func = cuda.cupy.cudnn.activation_forward
with testing.patch('cupy.cudnn.activation_forward') as func:
func.side_effect = default_func
self.forward()
self.assertEqual(func.called, self.expect)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
default_func = cuda.cupy.cudnn.activation_backward
with testing.patch('cupy.cudnn.activation_backward') as func:
func.side_effect = default_func
y.backward()
self.assertEqual(func.called, self.expect)
testing.run_module(__name__, __file__)
| 2,832 | 28.821053 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/test_log_softmax.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product_dict(
testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}),
testing.product({
'shape': [None, (2, 3), (2, 2, 3), (2, 2, 2, 3)],
'axis': [1],
}) + [
{'shape': (2, 3), 'axis': 0},
{'shape': (2, 2, 3), 'axis': -1},
{'shape': (2, 2, 2, 3), 'axis': -4},
],
))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestLogSoftmax(testing.FunctionTestCase):
def setUp(self):
self.check_forward_options = {}
self.check_backward_options = {}
self.check_double_backward_options = {}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.check_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.check_double_backward_options = {'atol': 1e-2, 'rtol': 1e-1}
def generate_inputs(self):
if self.shape is None:
# For checking numerical stability
value = -5 if self.dtype == numpy.float16 else -1000
x = numpy.array([[value, 1]], dtype=self.dtype)
else:
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.log_softmax(x, axis=self.axis),
def forward_expected(self, inputs):
x, = inputs
log_z = numpy.ufunc.reduce(
numpy.logaddexp, x, axis=self.axis, keepdims=True)
y_expect = x - log_z
return y_expect,
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestLogSoftmaxCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.expect = chainer.should_use_cudnn('>=auto')
def forward(self):
x = chainer.Variable(self.x)
return functions.log_softmax(x)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cudnn.softmax_forward') as func:
self.forward()
self.assertEqual(func.called, self.expect)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
with testing.patch('cupy.cudnn.softmax_backward') as func:
y.backward()
self.assertEqual(func.called, self.expect)
testing.run_module(__name__, __file__)
| 3,333 | 29.587156 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/test_rrelu.py |
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
@testing.parameterize(*testing.product({
'train': [True, False],
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestRReLU(testing.FunctionTestCase):
def setUp(self):
# Assumption l < u
self.l = numpy.random.uniform(0, 1)
self.u = numpy.random.uniform(0, 1)
if self.l >= self.u:
self.l, self.u = self.u, self.l
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_double_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
# cast self.r later because check_backward casts only x
self.r = numpy.random.uniform(self.l, self.u, self.shape)
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
if self.test_name in ('test_backward', 'test_double_backward'):
x[(-0.05 < x) & (x < 0.05)] = 0.5
return x,
def forward(self, inputs, device):
x, = inputs
r = self.r.astype(x.dtype)
r = device.send(r)
with chainer.using_config('train', self.train):
y = functions.rrelu(x, l=self.l, u=self.u, r=r)
return y,
def forward_expected(self, inputs):
x, = inputs
r = self.r.astype(self.dtype)
if self.train:
expected = numpy.where(x >= 0, x, x * r)
else:
r_test = numpy.mean([self.l, self.u]).astype(self.dtype)
expected = numpy.where(x >= 0, x, x * r_test)
return expected,
@testing.parameterize(*testing.product({
'specify_r': [True, False],
'return_r': [True, False],
'train': [True, False],
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestRReLUR(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
# Assumption l < u
self.l = numpy.random.uniform(0, 1)
self.u = numpy.random.uniform(0, 1)
if self.l >= self.u:
self.l, self.u = self.u, self.l
self.r = numpy.random.uniform(
self.l, self.u, self.x.shape).astype(self.x.dtype)
def _check(self):
r = self.r if self.specify_r else None
return_r = self.return_r
with chainer.using_config('train', self.train):
out = functions.rrelu(
self.x, self.l, self.u, r=r, return_r=return_r)
if not return_r:
return
out, out_r = out
assert isinstance(out_r, type(out.array))
if r is None:
assert out_r.shape == out.array.shape
else:
if self.train:
assert out_r is r
def test_cpu(self):
with chainer.using_config('use_ideep', 'never'):
self._check()
@attr.gpu
def test_gpu(self):
self.x = cuda.to_gpu(self.x)
self.r = cuda.to_gpu(self.r)
self._check()
testing.run_module(__name__, __file__)
| 3,598 | 27.792 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/test_crelu.py |
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product_dict(
[
{'shape': (5, 4), 'y_shape': (10, 4), 'axis': 0},
{'shape': (5, 4), 'y_shape': (5, 8), 'axis': 1},
{'shape': (5, 4), 'y_shape': (5, 8), 'axis': -1},
{'shape': (5, 4), 'y_shape': (10, 4), 'axis': -2},
{'shape': (5, 4, 3, 2), 'y_shape': (10, 4, 3, 2), 'axis': 0},
{'shape': (5, 4, 3, 2), 'y_shape': (5, 8, 3, 2), 'axis': 1},
{'shape': (5, 4, 3, 2), 'y_shape': (5, 4, 6, 2), 'axis': 2},
{'shape': (5, 4, 3, 2), 'y_shape': (5, 4, 3, 4), 'axis': 3},
{'shape': (5, 4, 3, 2), 'y_shape': (5, 4, 3, 4), 'axis': -1},
{'shape': (5, 4, 3, 2), 'y_shape': (5, 4, 6, 2), 'axis': -2},
{'shape': (5, 4, 3, 2), 'y_shape': (5, 8, 3, 2), 'axis': -3},
{'shape': (5, 4, 3, 2), 'y_shape': (10, 4, 3, 2), 'axis': -4},
], [
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
]
))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestCReLU(testing.FunctionTestCase):
dodge_nondifferentiable = True
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.crelu(x, axis=self.axis),
def forward_expected(self, inputs):
x, = inputs
expected_former = numpy.maximum(x, 0)
expected_latter = numpy.maximum(-x, 0)
expected = numpy.concatenate(
(expected_former, expected_latter), axis=self.axis)
assert expected.shape == self.y_shape
return expected,
testing.run_module(__name__, __file__)
| 2,144 | 30.544118 | 70 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/test_softplus.py |
import numpy
from chainer import functions
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestSoftplus(testing.FunctionTestCase):
def setUp(self):
self.beta = numpy.random.uniform(1, 2, ())
self.check_forward_options = {}
self.check_backward_options = {}
self.check_double_backward_options = {}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.check_double_backward_options = {'atol': 5e-2, 'rtol': 5e-1}
def generate_inputs(self):
x = numpy.random.uniform(-.5, .5, self.shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
y = numpy.log(1 + numpy.exp(self.beta * x)) / self.beta
return utils.force_array(y).astype(self.dtype),
def forward(self, inputs, device):
x, = inputs
return functions.softplus(x, beta=self.beta),
testing.run_module(__name__, __file__)
| 1,554 | 26.280702 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/test_swish.py |
import numpy
from chainer import functions
from chainer import testing
def _sigmoid(x):
half = x.dtype.type(0.5)
return numpy.tanh(x * half) * half + half
def _broadcast_to(array, shape):
if hasattr(numpy, 'broadcast_to'):
return numpy.broadcast_to(array, shape)
dummy = numpy.empty(shape, array.dtype)
return numpy.broadcast_arrays(array, dummy)[0]
@testing.parameterize(*testing.product_dict(
[
{'x_shape': (4, 3, 2), 'beta_shape': (3,),
'extended_beta_shape': (1, 3, 1)},
{'x_shape': (4, 3, 2), 'beta_shape': (3, 2),
'extended_beta_shape': (1, 3, 2)},
], [
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
class TestSwish(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-3})
self.check_double_backward_options.update(
{'atol': 5e-3, 'rtol': 5e-2})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
beta = numpy.random.uniform(
-1, 1, self.beta_shape).astype(self.dtype)
return x, beta
def forward_expected(self, inputs):
x, beta = inputs
beta = _broadcast_to(beta.reshape(self.extended_beta_shape),
x.shape)
y = x * _sigmoid(beta * x)
return y,
def forward(self, inputs, device):
x, beta = inputs
return functions.swish(x, beta=beta),
testing.run_module(__name__, __file__)
| 2,087 | 26.473684 | 76 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/test_clipped_relu.py |
import unittest
import mock
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer import utils
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
[
# CPU tests
{},
# GPU tests
{'use_cuda': True, 'use_cudnn': 'never'},
{'use_cuda': True, 'use_cudnn': 'always'},
# ChainerX tests
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
])
class TestClippedReLU(testing.FunctionTestCase):
dodge_nondifferentiable = True
z = 0.75
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
y = functions.clipped_relu(x, self.z)
return y,
def forward_expected(self, inputs):
x, = inputs
y = utils.force_array(x.clip(0, self.z), x.dtype)
return y,
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestClippedReLUCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
self.z = 0.75
with chainer.using_config('use_cudnn', self.use_cudnn):
self.expect = chainer.should_use_cudnn('==always')
def forward(self):
x = chainer.Variable(self.x)
return functions.clipped_relu(x, self.z)
def test_call_cudnn_forward(self):
default_func = cuda.cupy.cudnn.activation_forward
with chainer.using_config('use_cudnn', self.use_cudnn):
with mock.patch('cupy.cudnn.activation_forward') as func:
func.side_effect = default_func
self.forward()
self.assertEqual(func.called, self.expect)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
default_func = cuda.cupy.cudnn.activation_backward
with mock.patch('cupy.cudnn.activation_backward') as func:
func.side_effect = default_func
y.backward()
self.assertEqual(func.called, self.expect)
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
[
# CPU tests
{},
# GPU tests
{'use_cuda': True, 'use_cudnn': 'never'},
{'use_cuda': True, 'use_cudnn': 'always'},
# ChainerX tests
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
])
class TestReLU6(testing.FunctionTestCase):
dodge_nondifferentiable = True
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
y = functions.relu6(x)
return y,
def forward_expected(self, inputs):
x, = inputs
y = utils.force_array(x.clip(0, 6.0), x.dtype)
return y,
testing.run_module(__name__, __file__)
| 3,653 | 28 | 76 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/__init__.py |  | 0 | 0 | 0 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/test_elu.py |
import random
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'alpha_range': [(-2.0, 0.0), 0.0, (0.0, 2.0)],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestELU(testing.FunctionTestCase):
def setUp(self):
if isinstance(self.alpha_range, tuple):
l, u = self.alpha_range
self.alpha = random.uniform(l, u)
else:
self.alpha = self.alpha_range
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 5e-4, 'rtol': 5e-3})
self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-3})
self.check_double_backward_options.update(
{'atol': 5e-4, 'rtol': 5e-3})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
if self.test_name in ('test_backward', 'test_double_backward'):
x[(-0.01 < x) & (x < 0.01)] = 0.5
return x,
def forward(self, inputs, device):
x, = inputs
return functions.elu(x, alpha=self.alpha),
def forward_expected(self, inputs):
x, = inputs
expected = x.astype(numpy.float64, copy=True)
for i in numpy.ndindex(x.shape):
if x[i] < 0:
expected[i] = self.alpha * numpy.expm1(expected[i])
return expected.astype(x.dtype),
testing.run_module(__name__, __file__)
| 1,927 | 27.776119 | 76 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/test_relu.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'contiguous': [None, 'C'],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestReLU(testing.FunctionTestCase):
dodge_nondifferentiable = True
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.relu(x),
def forward_expected(self, inputs):
x, = inputs
expected = x.copy()
expected[expected < 0] = 0
return expected,
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestReLUCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.expect = chainer.should_use_cudnn('>=auto')
def forward(self):
x = chainer.Variable(self.x)
return functions.relu(x)
def test_call_cudnn_forward(self):
default_func = cuda.cupy.cudnn.activation_forward
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cudnn.activation_forward') as func:
func.side_effect = default_func
self.forward()
self.assertEqual(func.called, self.expect)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
default_func = cuda.cupy.cudnn.activation_backward
with testing.patch('cupy.cudnn.activation_backward') as func:
func.side_effect = default_func
y.backward()
self.assertEqual(func.called, self.expect)
testing.run_module(__name__, __file__)
| 2,738 | 28.451613 | 76 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/test_sigmoid.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer import utils
def _sigmoid(x):
half = x.dtype.type(0.5)
return numpy.tanh(x * half) * half + half
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'contiguous': [None, 'C'],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': True},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestSigmoid(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {'atol': 1e-2, 'rtol': 5e-2}
self.check_double_backward_options = {'atol': 1e-2, 'rtol': 5e-2}
def generate_inputs(self):
x = numpy.random.uniform(-.5, .5, self.shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
y = _sigmoid(x)
y = utils.force_array(y)
return y,
def forward(self, inputs, device):
x, = inputs
return functions.sigmoid(x),
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestSigmoidCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.expect = chainer.should_use_cudnn('==always')
def forward(self):
x = chainer.Variable(self.x)
return functions.sigmoid(x)
def test_call_cudnn_forward(self):
default_func = cuda.cupy.cudnn.activation_forward
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cudnn.activation_forward') as func:
func.side_effect = default_func
self.forward()
self.assertEqual(func.called, self.expect)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
default_func = cuda.cupy.cudnn.activation_backward
with testing.patch('cupy.cudnn.activation_backward') as func:
func.side_effect = default_func
y.backward()
self.assertEqual(func.called, self.expect)
testing.run_module(__name__, __file__)
| 3,029 | 28.417476 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/test_maxout.py |
import unittest
import numpy
import chainer
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
def _maxout(x, pool_size, axis):
shape = (x.shape[:axis] + (x.shape[axis] // pool_size, pool_size) +
x.shape[axis + 1:])
x = x.reshape(shape)
return x.max(axis=axis + 1)
@testing.parameterize(*testing.product_dict(
[
{'x_shape': (7, 12), 'pool_size': 2, 'axis': 1, 'y_shape': (7, 6)},
{'x_shape': (7, 12), 'pool_size': 12, 'axis': 1, 'y_shape': (7, 1)},
{'x_shape': (7, 3, 4), 'pool_size': 7, 'axis': 0,
'y_shape': (1, 3, 4)},
{'x_shape': (7, 3, 4), 'pool_size': 3, 'axis': 1,
'y_shape': (7, 1, 4)},
{'x_shape': (7, 3, 4), 'pool_size': 4, 'axis': 2,
'y_shape': (7, 3, 1)},
{'x_shape': (7, 2, 3, 4), 'pool_size': 2, 'axis': 3,
'y_shape': (7, 2, 3, 2)},
], [
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
]
))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestNonparameterizedMaxout(testing.FunctionTestCase):
dodge_nondifferentiable = True
def generate_inputs(self):
x_size = numpy.prod(self.x_shape)
x = numpy.random.permutation(numpy.arange(x_size))\
.reshape(self.x_shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.maxout(x, self.pool_size, self.axis),
def forward_expected(self, inputs):
x, = inputs
expected = _maxout(x, self.pool_size, self.axis)
assert expected.shape == self.y_shape
return expected,
@testing.parameterize(
{'x_shape': (2, 3, 4), 'pool_size': 5, 'error': type_check.InvalidType},
{'x_shape': (2, 3, 4), 'pool_size': -1, 'error': ValueError}
)
class InvalidArgument(unittest.TestCase):
def setUp(self):
self.x = chainer.Variable(
numpy.random.uniform(-1, 1, self.x_shape).astype(numpy.float32))
def test_invalid_shape_cpu(self):
with self.assertRaises(self.error):
functions.maxout(self.x, self.pool_size)
@attr.gpu
def test_invalid_shape_gpu(self):
self.x.to_gpu()
with self.assertRaises(self.error):
functions.maxout(self.x, self.pool_size)
testing.run_module(__name__, __file__)
| 2,835 | 27.938776 | 76 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/activation_tests/test_softmax.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product_dict(
[
{'shape': None, 'axis': 1},
{'shape': (5,), 'axis': 0},
{'shape': (2, 3), 'axis': 0},
{'shape': (2, 3), 'axis': 1},
{'shape': (2, 3, 4), 'axis': 0},
{'shape': (2, 3, 4), 'axis': -1},
{'shape': (2, 3, 2, 3), 'axis': -3},
{'shape': (2, 3, 2, 3), 'axis': 3},
],
testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}),
))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestSoftmax(testing.FunctionTestCase):
dodge_nondifferentiable = True
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 1e-3, 'rtol': 1e-2})
self.check_backward_options.update({'atol': 1e-3, 'rtol': 1e-2})
self.check_double_backward_options \
.update({'atol': 1e-3, 'rtol': 1e-2})
def generate_inputs(self):
if self.shape is None:
# For checking numerical stability
value = -5 if self.dtype == numpy.float16 else -1000
x = numpy.array([[value, 1]], dtype=self.dtype)
else:
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.softmax(x, axis=self.axis),
def forward_expected(self, inputs):
x, = inputs
expected = numpy.exp(x)
expected = numpy.rollaxis(expected, self.axis, expected.ndim)
for i in numpy.ndindex(expected.shape[:-1]):
expected[i] /= expected[i].sum()
expected = numpy.rollaxis(expected, expected.ndim-1, self.axis)
return expected.astype(x.dtype),
@testing.parameterize(*testing.product({
'axis': [0],
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestSoftmaxCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.expect = chainer.should_use_cudnn('>=auto')
def forward(self):
x = chainer.Variable(self.x)
return functions.softmax(x, axis=self.axis)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cudnn.softmax_forward') as func:
self.forward()
self.assertEqual(func.called, self.expect)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
with testing.patch('cupy.cudnn.softmax_backward') as func:
y.backward()
self.assertEqual(func.called, self.expect)
testing.run_module(__name__, __file__)
| 3,610 | 30.675439 | 76 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_triplet.py |
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'batchsize': [5, 10],
'input_dim': [2, 3],
'margin': [0.1, 0.5],
'reduce': ['mean', 'no']
}))
class TestTriplet(unittest.TestCase):
def setUp(self):
if self.dtype == numpy.float16:
eps = 1e-2
self.check_forward_options = {'rtol': 5e-3, 'atol': 5e-3}
self.check_backward_options = {
'eps': eps, 'rtol': 5e-2, 'atol': 5e-2}
self.check_double_backward_options = {
'eps': eps, 'rtol': 5e-2, 'atol': 5e-2}
elif self.dtype == numpy.float32:
eps = 1e-3
self.check_forward_options = {'rtol': 1e-4, 'atol': 1e-4}
self.check_backward_options = {
'eps': eps, 'rtol': 5e-4, 'atol': 5e-4}
self.check_double_backward_options = {
'eps': eps, 'rtol': 1e-3, 'atol': 1e-3}
elif self.dtype == numpy.float64:
eps = 1e-3
self.check_forward_options = {'rtol': 1e-4, 'atol': 1e-4}
self.check_backward_options = {
'eps': eps, 'rtol': 5e-4, 'atol': 5e-4}
self.check_double_backward_options = {
'eps': eps, 'rtol': 1e-3, 'atol': 1e-3}
else:
assert False
# Sample differentiable inputs
x_shape = (self.batchsize, self.input_dim)
while True:
self.a = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.p = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.n = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
if (abs(self.a - self.p) < 2 * eps).any():
continue
if (abs(self.a - self.n) < 2 * eps).any():
continue
dist = numpy.sum(
(self.a - self.p) ** 2 - (self.a - self.n) ** 2,
axis=1) + self.margin
# TODO(imanishi): Investigate whether this condition is enough
# to dodge non-differentiable points.
if (abs(dist) < 4 * eps).any():
continue
break
if self.reduce == 'mean':
gy_shape = ()
else:
gy_shape = self.batchsize,
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
self.gga = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.ggp = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.ggn = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
def check_forward(self, a_data, p_data, n_data):
a_val = chainer.Variable(a_data)
p_val = chainer.Variable(p_data)
n_val = chainer.Variable(n_data)
loss = functions.triplet(a_val, p_val, n_val, self.margin, self.reduce)
if self.reduce == 'mean':
self.assertEqual(loss.data.shape, ())
else:
self.assertEqual(loss.data.shape, (self.batchsize,))
self.assertEqual(loss.data.dtype, self.dtype)
loss_value = cuda.to_cpu(loss.data)
#
# Compute expected value
#
loss_expect = numpy.empty((self.a.shape[0],), dtype=self.dtype)
for i in six.moves.range(self.a.shape[0]):
ad, pd, nd = self.a[i], self.p[i], self.n[i]
dp = numpy.sum((ad - pd) ** 2)
dn = numpy.sum((ad - nd) ** 2)
loss_expect[i] = max((dp - dn + self.margin), 0)
if self.reduce == 'mean':
loss_expect = loss_expect.mean()
numpy.testing.assert_allclose(
loss_expect, loss_value, **self.check_forward_options)
def test_negative_margin(self):
self.margin = -1
self.assertRaises(ValueError, self.check_forward,
self.a, self.p, self.n)
self.assertRaises(ValueError, self.check_backward,
self.a, self.p, self.n, self.gy)
def test_forward_cpu(self):
self.check_forward(self.a, self.p, self.n)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.a), cuda.to_gpu(self.p),
cuda.to_gpu(self.n))
def check_backward(self, a_data, p_data, n_data, gy_data):
def f(a, p, n):
return functions.triplet(
a, p, n, margin=self.margin, reduce=self.reduce)
gradient_check.check_backward(
f, (a_data, p_data, n_data), gy_data, dtype=numpy.float64,
**self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.a, self.p, self.n, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.a), cuda.to_gpu(self.p),
cuda.to_gpu(self.n), cuda.to_gpu(self.gy))
def check_double_backward(self, a_data, p_data, n_data, gy_data, gga_data,
ggp_data, ggn_data):
def f(a, p, n):
return functions.triplet(
a, p, n, margin=self.margin, reduce=self.reduce)
gradient_check.check_double_backward(
f, (a_data, p_data, n_data), gy_data,
(gga_data, ggp_data, ggn_data),
dtype=numpy.float64, **self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(
self.a, self.p, self.n, self.gy, self.gga, self.ggp, self.ggn)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.p), cuda.to_gpu(self.n),
cuda.to_gpu(self.gy), cuda.to_gpu(self.gga), cuda.to_gpu(self.ggp),
cuda.to_gpu(self.ggn))
class TestContrastiveInvalidReductionOption(unittest.TestCase):
def setUp(self):
self.a = numpy.random.uniform(-1, 1, (5, 10)).astype(numpy.float32)
self.p = numpy.random.uniform(-1, 1, (5, 10)).astype(numpy.float32)
self.n = numpy.random.randint(-1, 1, (5, 10)).astype(numpy.float32)
def check_invalid_option(self, xp):
a = xp.asarray(self.a)
p = xp.asarray(self.p)
n = xp.asarray(self.n)
with self.assertRaises(ValueError):
functions.triplet(a, p, n, reduce='invalid_option')
def test_invalid_option_cpu(self):
self.check_invalid_option(numpy)
@attr.gpu
def test_invalid_option_gpu(self):
self.check_invalid_option(cuda.cupy)
testing.run_module(__name__, __file__)
| 6,748 | 36.082418 | 79 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_mean_squared_error.py |
import unittest
import numpy
import chainer
from chainer import functions
from chainer import testing
from chainer import utils
from chainer.utils import type_check
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
@testing.parameterize(
{'dtype': numpy.float16},
{'dtype': numpy.float32},
)
class TestMeanSquaredError(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 1e-3, 'rtol': 1e-3})
self.check_backward_options.update({'atol': 5e-2, 'rtol': 5e-2})
self.check_double_backward_options.update(
{'atol': 5e-2, 'rtol': 5e-2})
def generate_inputs(self):
dtype = self.dtype
x0 = numpy.random.uniform(-1, 1, (4, 3)).astype(dtype)
x1 = numpy.random.uniform(-1, 1, (4, 3)).astype(dtype)
return x0, x1
def forward(self, inputs, device):
x0, x1 = inputs
loss = functions.mean_squared_error(x0, x1)
return loss,
def forward_expected(self, inputs):
x0, x1 = inputs
loss = 0.
for i in numpy.ndindex(x0.shape):
loss += (x0[i] - x1[i]) ** 2
loss /= x0.size
loss = utils.force_array(loss).astype(x0.dtype)
return loss,
class TestMeanSquaredErrorTypeCheck(unittest.TestCase):
def test_invalid_dtype1(self):
x0 = chainer.Variable(
numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.int32))
x1 = chainer.Variable(
numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.int32))
with self.assertRaises(type_check.InvalidType):
functions.mean_squared_error(x0, x1)
def test_invalid_dtype2(self):
x0 = chainer.Variable(
numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32))
x1 = chainer.Variable(
numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float16))
with self.assertRaises(type_check.InvalidType):
functions.mean_squared_error(x0, x1)
testing.run_module(__name__, __file__)
| 2,462 | 27.976471 | 76 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_mean_absolute_error.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer import utils
from chainer.testing import attr
from chainer.utils import type_check
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
@testing.parameterize(
{'dtype': numpy.float16},
{'dtype': numpy.float32},
)
class TestMeanAbsoluteError(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 1e-3, 'rtol': 1e-3})
self.check_backward_options.update({'atol': 5e-2, 'rtol': 5e-2})
self.check_double_backward_options.update(
{'atol': 5e-2, 'rtol': 5e-2})
def generate_inputs(self):
dtype = self.dtype
x0 = numpy.random.uniform(-1, 1, (4, 3)).astype(dtype)
diff = numpy.random.uniform(-1, 1, (4, 3)).astype(dtype)
diff[abs(diff) < 0.01] = 0.5
x1 = x0 + diff
return x0, x1
def forward(self, inputs, device):
x0, x1 = inputs
loss = functions.mean_absolute_error(x0, x1)
return loss,
def forward_expected(self, inputs):
x0, x1 = inputs
loss = 0.
for i in numpy.ndindex(x0.shape):
loss += numpy.abs(x0[i] - x1[i])
loss /= x0.size
loss = utils.force_array(loss).astype(x0.dtype)
return loss,
class TestMeanAbsoluteErrorTypeCheck(unittest.TestCase):
def test_invalid_dtype1(self):
x0 = chainer.Variable(
numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.int32))
x1 = chainer.Variable(
numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.int32))
with self.assertRaises(type_check.InvalidType):
functions.mean_absolute_error(x0, x1)
def test_invalid_dtype2(self):
x0 = chainer.Variable(
numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32))
x1 = chainer.Variable(
numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float16))
with self.assertRaises(type_check.InvalidType):
functions.mean_absolute_error(x0, x1)
# See chainer#6702.
class TestMeanAbsoluteErrorFP16Overflow(unittest.TestCase):
def check_fp16_overflow(self, xp):
x0 = chainer.Variable(xp.full(
shape=(64, 1, 16, 16), fill_value=2, dtype=xp.float16))
x1 = chainer.Variable(xp.full(
shape=(64, 1, 16, 16), fill_value=-2, dtype=xp.float16))
loss = functions.mean_absolute_error(x0, x1)
self.assertFalse(xp.isinf(loss.array))
def test_fp16_overflow_cpu(self):
self.check_fp16_overflow(numpy)
@attr.gpu
def test_fp16_overflow_gpu(self):
self.check_fp16_overflow(cuda.cupy)
testing.run_module(__name__, __file__)
| 3,213 | 28.759259 | 76 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_ctc.py |
import math
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class CTCTestBase(object):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (4, 2, 3)).astype(self.dtype)
self.t = numpy.array([[0, 1], [1, 0]]).astype(numpy.int32)
self.l = numpy.array([[2, 0, 2, 1, 2],
[2, 1, 2, 0, 2]]).astype(numpy.int32)
self.blank_symbol = 2
self.x_length = numpy.full((len(self.x[0]),), len(self.x), dtype='i')
self.l_length = numpy.full((len(self.t),), len(self.t[0]), dtype='i')
self.use_length = True
if self.reduce == 'mean':
self.gy = numpy.random.uniform(-1, 1, ()).astype(self.dtype)
else:
self.gy = numpy.random.uniform(-1, 1, (2,)).astype(self.dtype)
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-2}
self.check_backward_options = {
'atol': 1e-3, 'dtype': numpy.float64}
else:
self.check_forward_options = {}
self.check_backward_options = {'atol': 1e-4}
# recursive forward computation.
def alpha(self, x, l, t, u):
if u < 0:
return 0.0
if t == 0:
if u == 0:
return x[0][self.blank_symbol]
elif u == 1:
return x[0][l[1]]
else:
return 0.0
elif l[u] == self.blank_symbol or l[u] == l[u - 2]:
return (x[t][l[u]] *
(self.alpha(x, l, t - 1, u - 1) +
self.alpha(x, l, t - 1, u)))
else:
return (x[t][l[u]] *
(self.alpha(x, l, t - 1, u - 2) +
self.alpha(x, l, t - 1, u - 1) +
self.alpha(x, l, t - 1, u)))
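    # The recursion above is the standard CTC forward (alpha) recursion of
    # Graves et al. (2006) over the blank-augmented label sequence l:
    # alpha(t, u) accumulates the probability of all paths of length t + 1
    # ending at position u; the u - 2 term is dropped when l[u] is the blank
    # or repeats l[u - 2]. check_forward below uses
    # alpha(T - 1, U - 1) + alpha(T - 1, U - 2) as the total sequence
    # likelihood and takes its negative log as the expected loss.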
def check_forward(self, t_data, xs_data, l_length, x_length,
wrap_variable=True):
if wrap_variable:
x = tuple(chainer.Variable(x_data) for x_data in xs_data)
t = chainer.Variable(t_data)
else:
x = xs_data
t = t_data
args = (x, t, self.blank_symbol)
if self.use_length:
if wrap_variable:
args += (chainer.Variable(x_length),
chainer.Variable(l_length))
else:
args += (x_length, l_length)
loss = functions.connectionist_temporal_classification(
*args, reduce=self.reduce).data
# compute expected value by recursive computation.
xp = backend.get_array_module(self.x)
xt = xp.swapaxes(self.x, 0, 1)
for b in range(xt.shape[0]):
for t in range(xt.shape[1]):
xt[b][t] = numpy.exp(xt[b][t]) / numpy.sum(numpy.exp(xt[b][t]))
batch_size = xt.shape[0]
path_length = 2 * l_length + 1
loss_expect = xp.zeros((batch_size,), dtype=self.dtype)
for i in range(batch_size):
xtb, lb, xlb, plb = xt[i], self.l[i], x_length[i], path_length[i]
loss_expect[i] = -math.log(
self.alpha(xtb, lb, int(xlb - 1), int(plb - 1)) +
self.alpha(xtb, lb, int(xlb - 1), int(plb - 2)))
if self.reduce == 'mean':
loss_expect = xp.mean(loss_expect)
testing.assert_allclose(
loss_expect, loss, **self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(self.t, tuple(self.x),
self.l_length, self.x_length)
def test_forward_without_wrap_cpu(self):
self.check_forward(self.t, tuple(self.x),
self.l_length, self.x_length, wrap_variable=False)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.t),
tuple(cuda.to_gpu(x_data) for x_data in self.x),
cuda.to_gpu(self.l_length),
cuda.to_gpu(self.x_length))
@attr.gpu
def test_forward_without_wrap_gpu(self):
self.check_forward(cuda.to_gpu(self.t),
tuple(cuda.to_gpu(x_data) for x_data in self.x),
cuda.to_gpu(self.l_length),
cuda.to_gpu(self.x_length),
wrap_variable=False)
    # Check the analytical gradients against numerical differentiation.
def check_backward(self, t_data, xs_data, l_length, x_length, gy_data):
def f(input_length, label_length, t, *x):
return functions.connectionist_temporal_classification(
x, t, self.blank_symbol, x_length, l_length,
reduce=self.reduce)
gradient_check.check_backward(
f, (x_length, l_length, t_data) + xs_data, gy_data,
eps=1e-2, **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.t, tuple(self.x),
self.l_length, self.x_length,
self.gy)
@condition.retry(3)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.t),
tuple(cuda.to_gpu(x_data) for x_data in self.x),
cuda.to_gpu(self.l_length),
cuda.to_gpu(self.x_length),
cuda.to_gpu(self.gy))
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'reduce': ['mean', 'no'],
}))
class TestCTC(unittest.TestCase, CTCTestBase):
def setUp(self):
CTCTestBase.setUp(self)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'reduce': ['mean', 'no'],
}))
class TestCTCWithoutLength(unittest.TestCase, CTCTestBase):
def setUp(self):
CTCTestBase.setUp(self)
self.use_length = False
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'reduce': ['mean', 'no'],
}))
class TestCTCWithLabelPadding(unittest.TestCase, CTCTestBase):
def setUp(self):
CTCTestBase.setUp(self)
self.l_length[0] = 1
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'reduce': ['mean', 'no'],
}))
class TestCTCWithInputPadding(unittest.TestCase, CTCTestBase):
def setUp(self):
CTCTestBase.setUp(self)
self.x_length[0] = 3
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'reduce': ['mean', 'no'],
}))
class TestCTCWithAllPadding(unittest.TestCase, CTCTestBase):
def setUp(self):
CTCTestBase.setUp(self)
self.x_length[...] = 3
self.l_length[...] = 1
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'reduce': ['mean', 'no'],
}))
class TestCTCWithRepeatedLabel(unittest.TestCase, CTCTestBase):
def setUp(self):
CTCTestBase.setUp(self)
self.t = numpy.array([[0, 1, 1], [0, 1, 0]]).astype(numpy.int32)
self.l = numpy.array([[2, 0, 2, 1, 2, 1, 2],
[2, 0, 2, 1, 2, 0, 2]]).astype(numpy.int32)
self.l_length = numpy.full((len(self.t),), len(self.t[0]), dtype='i')
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'reduce': ['mean', 'no'],
}))
class TestCTCBlankSymbol(unittest.TestCase, CTCTestBase):
def setUp(self):
CTCTestBase.setUp(self)
self.x = numpy.random.uniform(-1, 1, (4, 2, 4)).astype(self.dtype)
self.l = numpy.array([[3, 0, 3, 1, 3],
[3, 1, 3, 0, 3]]).astype(numpy.int32)
self.blank_symbol = 3
class TestCTCUseNoBackpropMode(unittest.TestCase):
def test_no_backprop_mode(self):
xs_data = numpy.random.uniform(-1, 1, (4, 2, 3)).astype(numpy.float32)
t_data = numpy.array([[0, 1], [1, 0]]).astype(numpy.int32)
with chainer.no_backprop_mode():
x = [chainer.Variable(x_data) for x_data in xs_data]
t = chainer.Variable(t_data)
functions.connectionist_temporal_classification(x, t, 2)
class TestCTCError(unittest.TestCase):
def test_not_iterable(self):
x = chainer.Variable(numpy.zeros((4, 2, 3), numpy.float32))
t = chainer.Variable(numpy.zeros((2, 2), numpy.int32))
with self.assertRaises(TypeError):
functions.connectionist_temporal_classification(x, t, 0)
class TestCTCInvalidReductionOption(unittest.TestCase):
def test_not_iterable(self):
x = chainer.Variable(numpy.zeros((4, 2, 3), numpy.float32))
t = chainer.Variable(numpy.zeros((2, 2), numpy.int32))
with self.assertRaises(ValueError):
functions.connectionist_temporal_classification(
tuple(x), t, 0, reduce='invalid_option')
testing.run_module(__name__, __file__)
| 9,219
| 34.057034
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_vae.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions as F
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(
*testing.product({
'wrap_m': [True, False],
'wrap_v': [True, False],
'reduce': ['no', 'sum', 'mean']
})
)
class TestGaussianKLDivergence(unittest.TestCase):
def setUp(self):
self.mean = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
self.ln_var = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
# Refer to Appendix B in the original paper
# Auto-Encoding Variational Bayes (https://arxiv.org/abs/1312.6114)
loss = -(1 + self.ln_var -
self.mean * self.mean -
numpy.exp(self.ln_var)) * 0.5
if self.reduce == 'sum':
self.expect = numpy.sum(loss)
elif self.reduce == 'mean':
self.expect = numpy.mean(loss)
elif self.reduce == 'no':
self.expect = loss
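    # The closed form above is the elementwise KL divergence
    # D_KL(N(mean, exp(ln_var)) || N(0, 1)) = 0.5 * (mean**2 + exp(ln_var)
    # - ln_var - 1), i.e. the Appendix B expression with the sign and terms
    # rearranged; 'sum' and 'mean' only change how the elementwise values
    # are reduced.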
def check_gaussian_kl_divergence(self, mean, ln_var):
if self.wrap_m:
mean = chainer.Variable(mean)
if self.wrap_v:
ln_var = chainer.Variable(ln_var)
actual = cuda.to_cpu(
F.gaussian_kl_divergence(mean, ln_var, self.reduce).data)
testing.assert_allclose(self.expect, actual)
@condition.retry(3)
def test_gaussian_kl_divergence_cpu(self):
self.check_gaussian_kl_divergence(self.mean, self.ln_var)
@attr.gpu
@condition.retry(3)
def test_gaussian_kl_divergence_gpu(self):
self.check_gaussian_kl_divergence(cuda.to_gpu(self.mean),
cuda.to_gpu(self.ln_var))
class TestGaussianKLDivergenceInvalidReductionOption(unittest.TestCase):
def setUp(self):
self.mean = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
self.ln_var = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
def check_invalid_option(self, xp):
m = chainer.Variable(xp.asarray(self.mean))
v = chainer.Variable(xp.asarray(self.ln_var))
with self.assertRaises(ValueError):
F.gaussian_kl_divergence(m, v, 'invalid_option')
def test_invalid_option_cpu(self):
self.check_invalid_option(numpy)
@attr.gpu
def test_invalid_option_gpu(self):
self.check_invalid_option(cuda.cupy)
@testing.parameterize(
*testing.product({
'wrap_x': [True, False],
'wrap_y': [True, False],
'reduce': ['no', 'sum', 'mean']
})
)
class TestBernoulliNLL(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
self.y = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
# Refer to Appendix C.1 in the original paper
# Auto-Encoding Variational Bayes (https://arxiv.org/abs/1312.6114)
p = 1 / (1 + numpy.exp(-self.y))
self.expect = -(self.x * numpy.log(p) +
(1 - self.x) * numpy.log(1 - p))
if self.reduce == 'sum':
self.expect = numpy.sum(self.expect)
elif self.reduce == 'mean':
self.expect = numpy.mean(self.expect)
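    # y holds pre-sigmoid logits, so p = sigmoid(y) and the expected value is
    # the elementwise Bernoulli negative log-likelihood
    # -(x * log(p) + (1 - x) * log(1 - p)); F.bernoulli_nll takes the logits
    # y directly and applies the sigmoid internally.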
def check_bernoulli_nll(self, x, y):
if self.wrap_x:
x = chainer.Variable(x)
if self.wrap_y:
y = chainer.Variable(y)
actual = cuda.to_cpu(F.bernoulli_nll(x, y, self.reduce).data)
testing.assert_allclose(self.expect, actual)
@condition.retry(3)
def test_bernoulli_nll_cpu(self):
self.check_bernoulli_nll(self.x, self.y)
@attr.gpu
@condition.retry(3)
def test_bernoulli_nll_gpu(self):
self.check_bernoulli_nll(cuda.to_gpu(self.x),
cuda.to_gpu(self.y))
class TestBernoulliNLLInvalidReductionOption(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
self.y = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
def check_invalid_option(self, xp):
x = chainer.Variable(xp.asarray(self.x))
y = chainer.Variable(xp.asarray(self.y))
with self.assertRaises(ValueError):
F.bernoulli_nll(x, y, 'invalid_option')
def test_invalid_option_cpu(self):
self.check_invalid_option(numpy)
@attr.gpu
def test_invalid_option_gpu(self):
self.check_invalid_option(cuda.cupy)
@testing.parameterize(
*testing.product({
'wrap_x': [True, False],
'wrap_m': [True, False],
'wrap_v': [True, False],
'reduce': ['no', 'sum', 'mean']
})
)
class TestGaussianNLL(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
self.mean = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
self.ln_var = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
# Refer to Appendix C.2 in the original paper
# Auto-Encoding Variational Bayes (https://arxiv.org/abs/1312.6114)
x_d = self.x - self.mean
var = numpy.exp(self.ln_var)
self.expect = (0.5 * numpy.log(2 * numpy.pi) +
0.5 * self.ln_var +
x_d * x_d / var * 0.5)
if self.reduce == 'sum':
self.expect = numpy.sum(self.expect)
elif self.reduce == 'mean':
self.expect = numpy.mean(self.expect)
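    # The expected value is the elementwise negative log-density of x under
    # N(mean, exp(ln_var)):
    # 0.5 * log(2 * pi) + 0.5 * ln_var + (x - mean)**2 / (2 * exp(ln_var)),
    # with the variance parameterized through its log (Appendix C.2).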
def check_gaussian_nll(self, x, mean, ln_var):
if self.wrap_x:
x = chainer.Variable(x)
if self.wrap_m:
mean = chainer.Variable(mean)
if self.wrap_v:
ln_var = chainer.Variable(ln_var)
actual = cuda.to_cpu(F.gaussian_nll(x, mean, ln_var, self.reduce).data)
testing.assert_allclose(self.expect, actual)
@condition.retry(3)
def test_gaussian_nll_cpu(self):
self.check_gaussian_nll(self.x, self.mean, self.ln_var)
@attr.gpu
@condition.retry(3)
def test_gaussian_nll_gpu(self):
self.check_gaussian_nll(cuda.to_gpu(self.x),
cuda.to_gpu(self.mean),
cuda.to_gpu(self.ln_var))
class TestGaussianNLLInvalidReductionOption(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
self.mean = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
self.ln_var = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
def check_invalid_option(self, xp):
x = chainer.Variable(xp.asarray(self.x))
mean = chainer.Variable(xp.asarray(self.mean))
ln_var = chainer.Variable(xp.asarray(self.ln_var))
with self.assertRaises(ValueError):
F.gaussian_nll(x, mean, ln_var, 'invalid_option')
def test_invalid_option_cpu(self):
self.check_invalid_option(numpy)
@attr.gpu
def test_invalid_option_gpu(self):
self.check_invalid_option(cuda.cupy)
testing.run_module(__name__, __file__)
| 7,213
| 32.398148
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_decov.py
|
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
def _decov(h):
h_mean = h.mean(axis=0)
N, M = h.shape
loss_expect = numpy.zeros((M, M), dtype=h.dtype)
for i in six.moves.range(M):
for j in six.moves.range(M):
if i != j:
for n in six.moves.range(N):
loss_expect[i, j] += (h[n, i] - h_mean[i]) * (
h[n, j] - h_mean[j])
return loss_expect / N
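# _decov above is a naive O(N * M**2) reference for the DeCov regularizer
# (Cogswell et al.): the sample covariance matrix of the activations h with
# the diagonal (per-unit variances) zeroed out. With reduce='no' the function
# under test should return this matrix; with 'half_squared_sum' it should
# return half of its squared Frobenius norm, as forward_expected computes.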
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'reduce': ['half_squared_sum', 'no'],
}))
@backend.inject_backend_tests(
None,
# CPU tests
[{}]
# GPU tests
+ [{
'use_cuda': True,
}]
)
class TestDeCov(testing.FunctionTestCase):
skip_double_backward_test = True
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({'atol': 3e-2, 'eps': 0.02})
else:
self.check_forward_options.update({'rtol': 1e-4, 'atol': 1e-4})
self.check_backward_options.update({'atol': 1e-3, 'eps': 0.02})
def generate_inputs(self):
h = numpy.random.uniform(-1, 1, (4, 3)).astype(self.dtype)
return h,
def forward_expected(self, inputs):
h, = inputs
loss_expect = _decov(h)
if self.reduce == 'half_squared_sum':
loss_expect = (loss_expect ** 2).sum() * 0.5
return chainer.utils.force_array(loss_expect, self.dtype),
def forward(self, inputs, device):
h, = inputs
loss = functions.decov(h, self.reduce)
return loss,
class TestDeCovInvalidReductionOption(unittest.TestCase):
def setUp(self):
self.h = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
def check_invalid_option(self, xp):
h = xp.asarray(self.h)
with self.assertRaises(ValueError):
functions.decov(h, 'invalid_option')
def test_invalid_option_cpu(self):
self.check_invalid_option(numpy)
@attr.gpu
def test_invalid_option_gpu(self):
self.check_invalid_option(cuda.cupy)
testing.run_module(__name__, __file__)
| 2,401
| 25.988764
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_squared_error.py
|
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'in_shape': [(5, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'function': [functions.squared_error, functions.squared_difference],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestSquaredError(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 1e-3, 'rtol': 1e-2})
self.check_backward_options.update({'atol': 1e-3, 'rtol': 1e-2})
self.check_double_backward_options \
.update({'atol': 1e-3, 'rtol': 1e-2})
def generate_inputs(self):
x1 = numpy.random.uniform(-1, 1, self.in_shape).astype(self.dtype)
x2 = numpy.random.uniform(-1, 1, self.in_shape).astype(self.dtype)
return x1, x2
def forward(self, inputs, device):
x1, x2 = inputs
return self.function(x1, x2),
def forward_expected(self, inputs):
x1, x2 = inputs
        expected = (x1 - x2) ** 2
expected = numpy.asarray(expected)
return expected.astype(self.dtype),
testing.run_module(__name__, __file__)
| 1,684
| 28.051724
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_contrastive.py
|
import math
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product_dict(
[{'dtype': numpy.float16,
'forward_options': {'rtol': 1e-2, 'atol': 1e-2},
'backward_options': {'rtol': 1e-2, 'atol': 1e-2},
'double_backward_options': {'rtol': 3e-1, 'atol': 3e-1}},
{'dtype': numpy.float32,
'forward_options': {'rtol': 1e-2},
'backward_options': {'rtol': 1e-2, 'atol': 1e-3},
'double_backward_options': {'rtol': 1e-2, 'atol': 1e-3}},
{'dtype': numpy.float64,
'forward_options': {'rtol': 1e-2},
'backward_options': {'rtol': 1e-2, 'atol': 1e-3},
'double_backward_options': {'rtol': 1e-2, 'atol': 1e-3}},
],
testing.product({
'batchsize': [5, 10],
'input_dim': [2, 3],
'margin': [1, 2],
'reduce': ['mean', 'no'],
'label_dtype': [numpy.int32, numpy.int64]
})
))
class TestContrastive(unittest.TestCase):
def setUp(self):
x_shape = (self.batchsize, self.input_dim)
retry = 0
while True:
self.x0 = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.x1 = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
d = numpy.sqrt(numpy.sum((self.x0 - self.x1) ** 2, axis=1))
if (d > 3e-2).all() and (numpy.abs(d - self.margin) > 1e-2).all():
break
retry += 1
assert retry <= 10, 'Too many retries to generate inputs'
self.t = numpy.random.randint(
0, 2, (self.batchsize,)).astype(self.label_dtype)
if self.reduce == 'mean':
self.gy = numpy.random.uniform(-1, 1, ()).astype(self.dtype)
else:
self.gy = numpy.random.uniform(
-1, 1, (self.batchsize,)).astype(self.dtype)
self.gx0 = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.gx1 = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
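    # The retry loop above regenerates inputs until every pairwise distance
    # d = ||x0 - x1|| is well away from both 0 and the margin: the loss uses
    # the square root of the squared distance (non-differentiable at d = 0)
    # and max(margin - d, 0) (non-differentiable at d = margin), so gradient
    # checks near those points would be unreliable.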
def check_forward(self, x0_data, x1_data, t_data):
x0_val = chainer.Variable(x0_data)
x1_val = chainer.Variable(x1_data)
t_val = chainer.Variable(t_data)
loss = functions.contrastive(
x0_val, x1_val, t_val, self.margin, self.reduce)
self.assertEqual(loss.data.dtype, self.dtype)
if self.reduce == 'mean':
self.assertEqual(loss.data.shape, ())
else:
self.assertEqual(loss.data.shape, (self.batchsize,))
loss_value = cuda.to_cpu(loss.data)
# Compute expected value
loss_expect = numpy.empty((self.batchsize,), self.dtype)
for i in six.moves.range(self.x0.shape[0]):
x0d, x1d, td = self.x0[i], self.x1[i], self.t[i]
d = numpy.sum((x0d - x1d) ** 2)
if td == 1: # similar pair
loss_expect[i] = d
elif td == 0: # dissimilar pair
loss_expect[i] = max(self.margin - math.sqrt(d), 0) ** 2
loss_expect[i] /= 2.
if self.reduce == 'mean':
loss_expect = numpy.sum(loss_expect) / self.t.shape[0]
numpy.testing.assert_allclose(
loss_expect, loss_value, **self.forward_options)
def test_negative_margin(self):
self.margin = -1
self.assertRaises(ValueError, self.check_forward,
self.x0, self.x1, self.t)
self.assertRaises(ValueError, self.check_backward,
self.x0, self.x1, self.t, self.gy)
def test_forward_cpu(self):
self.check_forward(self.x0, self.x1, self.t)
@attr.gpu
def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x0), cuda.to_gpu(self.x1),
cuda.to_gpu(self.t))
def check_backward(self, x0_data, x1_data, t_data, gy_data):
def f(x0, x1, t):
return functions.contrastive(x0, x1, t, self.margin, self.reduce)
gradient_check.check_backward(
f, (x0_data, x1_data, t_data), gy_data, dtype='d',
**self.backward_options)
def test_backward_cpu(self):
self.check_backward(self.x0, self.x1, self.t, self.gy)
@attr.gpu
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x0), cuda.to_gpu(self.x1),
cuda.to_gpu(self.t), cuda.to_gpu(self.gy))
def test_backward_zero_dist_cpu(self):
self.check_backward(self.x0, self.x0, self.t, self.gy)
@attr.gpu
def test_backward_zero_dist_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x0), cuda.to_gpu(self.x0),
cuda.to_gpu(self.t), cuda.to_gpu(self.gy))
def check_double_backward(
self, x0_data, x1_data, t_data, gy_data, gx0_data, gx1_data):
def f(x0, x1):
return functions.contrastive(
x0, x1, t_data, self.margin, self.reduce)
gradient_check.check_double_backward(
f, (x0_data, x1_data), gy_data,
(gx0_data, gx1_data),
dtype='f', **self.double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(
self.x0, self.x1, self.t, self.gy, self.gx0, self.gx1)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x0), cuda.to_gpu(self.x1),
cuda.to_gpu(self.t), cuda.to_gpu(self.gy),
cuda.to_gpu(self.gx0), cuda.to_gpu(self.gx1))
class TestContrastiveInvalidReductionOption(unittest.TestCase):
def setUp(self):
self.x0 = numpy.random.uniform(-1, 1, (5, 10)).astype(numpy.float32)
self.x1 = numpy.random.uniform(-1, 1, (5, 10)).astype(numpy.float32)
self.t = numpy.random.randint(0, 2, (5,)).astype(numpy.int32)
def check_invalid_option(self, xp):
x0 = xp.asarray(self.x0)
x1 = xp.asarray(self.x1)
t = xp.asarray(self.t)
with self.assertRaises(ValueError):
functions.contrastive(x0, x1, t, 1, 'invalid_option')
def test_invalid_option_cpu(self):
self.check_invalid_option(numpy)
@attr.gpu
def test_invalid_option_gpu(self):
self.check_invalid_option(cuda.cupy)
testing.run_module(__name__, __file__)
| 6,422
| 35.913793
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_crf1d.py
|
import itertools
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product_dict(
[
{'lengths': [3, 3], 'batches': [2, 2, 2]},
{'lengths': [3, 2, 1], 'batches': [3, 2, 1]},
{'lengths': [3, 1, 1], 'batches': [3, 1, 1]},
{'lengths': [1, 1], 'batches': [2]},
],
[
{'reduce': 'mean'},
{'reduce': 'no'},
]
))
class TestCRF1d(unittest.TestCase):
n_label = 3
def setUp(self):
self.cost = numpy.random.uniform(
-1, 1, (self.n_label, self.n_label)).astype(numpy.float32)
self.xs = [numpy.random.uniform(
-1, 1, (b, 3)).astype(numpy.float32) for b in self.batches]
self.ys = [
numpy.random.randint(
0, self.n_label, (b,)).astype(numpy.int32)
for b in self.batches]
self.g = numpy.random.uniform(
-1, 1, (len(self.lengths))).astype(numpy.float32)
def _calc_score(self, batch, ys):
return sum(x[batch, y] for x, y in zip(self.xs, ys)) + \
sum(self.cost[y1, y2] for y1, y2 in zip(ys[:-1], ys[1:]))
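    # _calc_score is the unnormalized log-score of a label sequence ys for one
    # batch element: emission scores x_t[batch, y_t] plus transition costs
    # cost[y_t, y_{t+1}]. check_forward brute-forces the partition function Z
    # over all n_label ** length sequences, so the expected loss is the usual
    # linear-chain CRF negative log-likelihood -(score(gold) - log(Z)).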
def check_forward(self, cost_data, xs_data, ys_data):
cost = chainer.Variable(cost_data)
xs = [chainer.Variable(x) for x in xs_data]
ys = [chainer.Variable(y) for y in ys_data]
actual = functions.crf1d(cost, xs, ys, reduce=self.reduce)
z = numpy.zeros((self.batches[0],), numpy.float32)
for b, length in enumerate(self.lengths):
for ys in itertools.product(range(self.n_label), repeat=length):
z[b] += numpy.exp(self._calc_score(b, ys))
score = numpy.zeros((self.batches[0],), numpy.float32)
for b, length in enumerate(self.lengths):
ys = [self.ys[i][b] for i in range(length)]
score[b] = self._calc_score(b, ys)
loss = -(score - numpy.log(z))
if self.reduce == 'mean':
expect = numpy.sum(loss) / self.batches[0]
elif self.reduce == 'no':
expect = loss
testing.assert_allclose(actual.data, expect)
def test_forward_cpu(self):
self.check_forward(self.cost, self.xs, self.ys)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.cost),
[cuda.to_gpu(x) for x in self.xs],
[cuda.to_gpu(y) for y in self.ys])
def check_backward(self, cost_data, xs_data, ys_data, g_data):
def f(cost, *args):
xs = args[:len(args) // 2]
ys = args[len(args) // 2:]
return functions.crf1d(cost, xs, ys, reduce=self.reduce)
args = [cost_data] + xs_data + ys_data
if self.reduce == 'mean':
grad = None
elif self.reduce == 'no':
grad = g_data
gradient_check.check_backward(
f, args, grad, rtol=1e-3, atol=1e-3)
def test_backward_cpu(self):
self.check_backward(self.cost, self.xs, self.ys, self.g)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.cost),
[cuda.to_gpu(x) for x in self.xs],
[cuda.to_gpu(y) for y in self.ys],
cuda.to_gpu(self.g))
def check_argmax(self, cost_data, xs_data):
cost = chainer.Variable(cost_data)
xs = [chainer.Variable(x) for x in xs_data]
s, path = functions.loss.crf1d.argmax_crf1d(cost, xs)
self.assertIsInstance(s, chainer.Variable)
self.assertIsInstance(path, list)
self.assertEqual(s.shape, (self.batches[0],))
self.assertEqual(len(path), len(self.batches))
for b, p in zip(self.batches, path):
self.assertEqual(p.shape, (b,))
best_paths = [numpy.empty((length,), numpy.int32)
for length in self.batches]
for b, length in enumerate(self.lengths):
best_path = None
best_score = 0
for ys in itertools.product(range(self.n_label), repeat=length):
score = self._calc_score(b, ys)
if best_path is None or best_score < score:
best_path = ys
best_score = score
for i, p in enumerate(best_path):
best_paths[i][b] = p
testing.assert_allclose(s.data[b], best_score)
for t in range(len(self.batches)):
numpy.testing.assert_array_equal(
cuda.to_cpu(path[t]), best_paths[t])
def test_argmax_cpu(self):
self.check_argmax(self.cost, self.xs)
@attr.gpu
def test_argmax_gpu(self):
self.check_argmax(cuda.to_gpu(self.cost),
[cuda.to_gpu(x) for x in self.xs])
def check_invalid_option(self, cost_data, xs_data, ys_data):
with self.assertRaises(ValueError):
functions.crf1d(cost_data, xs_data, ys_data, 'invalid_option')
def test_invalid_option_cpu(self):
self.check_invalid_option(self.cost, self.xs, self.ys)
@attr.gpu
def test_invalid_option_gpu(self):
self.check_invalid_option(
cuda.to_gpu(self.cost),
[cuda.to_gpu(x) for x in self.xs],
[cuda.to_gpu(y) for y in self.ys])
testing.run_module(__name__, __file__)
| 5,482
| 33.702532
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_absolute_error.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer import utils
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'shape': [(), (1,), (1, 1), (4,), (4, 3), (4, 3, 2)],
}))
@testing.inject_backend_tests(
['test_forward', 'test_backward', 'test_double_backward'],
# CPU
[{}]
# GPU
+ testing.product({
'use_cuda': [True],
})
# ChainerX
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
)
class TestAbsoluteError(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 1e-3, 'rtol': 1e-3})
self.check_backward_options.update({'atol': 5e-2, 'rtol': 5e-2})
self.check_double_backward_options.update(
{'atol': 3e-1, 'rtol': 3e-1})
def generate_inputs(self):
x0 = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
# Add sufficient margin to prevent computational error
diff = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
diff[abs(diff) < 0.02] = 0.5
x1 = numpy.asarray(x0 + diff)
return (x0, x1)
def forward_expected(self, inputs):
x0, x1 = inputs
return utils.force_array(numpy.abs(x0 - x1), self.dtype),
def forward(self, inputs, device):
x0, x1 = inputs
return functions.absolute_error(x0, x1),
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'shape': [(), (1,), (1, 1), (4,), (4, 3), (4, 3, 2)],
}))
class TestNonDefaultGPU(unittest.TestCase):
# This test is for https://github.com/chainer/chainer/issues/4669
def setUp(self):
self.x0 = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
# Add sufficient margin to prevent computational error
diff = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
diff[abs(diff) < 0.02] = 0.5
self.x1 = numpy.asarray(self.x0 + diff)
self.gy = numpy.random.random(self.shape).astype(self.dtype)
@attr.multi_gpu(2)
def test_backward_non_default_gpu(self):
x0 = chainer.Variable(cuda.to_gpu(self.x0, 1))
x1 = chainer.Variable(cuda.to_gpu(self.x1, 1))
gy = cuda.to_gpu(self.gy, 1)
with cuda.get_device_from_id(0):
y = functions.absolute_error(x0, x1)
y.grad = gy
y.backward()
testing.run_module(__name__, __file__)
| 2,692
| 30.682353
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_negative_sampling.py
|
import unittest
import numpy
import pytest
import six
import chainer
from chainer.backend import CpuDevice
from chainer.backends import cuda
from chainer import functions
from chainer.functions.loss import negative_sampling
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
def make_sampler(backend_config, high):
    # Use a deterministic sampler so that the drawn samples are reproducible.
def sampler(shape):
s = numpy.arange(numpy.prod(shape)) % high
s = s.reshape(shape).astype(numpy.int32)
return backend_config.get_array(s)
return sampler
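# make_sampler returns a deterministic stand-in for the word sampler that
# negative_sampling normally receives: it cycles through 0, 1, ..., high - 1
# over the requested shape, so the drawn "negative" samples are reproducible
# and the tests can compare against a hand-computed reference loss.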
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
't': [[0, 2], [-1, 1, 2]],
'reduce': ['sum', 'no'],
}))
@testing.backend.inject_backend_tests(
None,
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestNegativeSamplingFunction(unittest.TestCase):
in_size = 3
sample_size = 2
label_size = 5
def setUp(self):
batch = len(self.t)
x_shape = (batch, self.in_size)
w_shape = (self.label_size, self.in_size)
self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.t = numpy.array(self.t).astype(numpy.int32)
self.w = numpy.random.uniform(-1, 1, w_shape).astype(self.dtype)
if self.reduce == 'no':
g_shape = self.t.shape
elif self.reduce == 'sum':
g_shape = ()
self.gy = numpy.random.uniform(-1, 1, g_shape).astype(self.dtype)
self.ggx = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.ggw = numpy.random.uniform(-1, 1, w_shape).astype(self.dtype)
self.check_forward_options = {}
self.check_backward_options = {
'eps': 1e-2, 'atol': 5e-4, 'rtol': 5e-3}
self.check_double_backward_options = {
'eps': 1e-2, 'atol': 1e-3, 'rtol': 1e-2}
if self.dtype == numpy.float16:
self.check_forward_options.update(
{'atol': 1e-3, 'rtol': 1e-3})
self.check_backward_options.update(
{'rtol': 5e-2, 'dtype': numpy.float64})
self.check_double_backward_options.update(
{'dtype': numpy.float64})
def test_forward(self, backend_config):
sampler = make_sampler(backend_config, self.label_size)
x_data = backend_config.get_array(self.x)
t_data = backend_config.get_array(self.t)
w_data = backend_config.get_array(self.w)
batch_size = len(self.t)
x = chainer.Variable(x_data)
t = chainer.Variable(t_data, requires_grad=False)
w = chainer.Variable(w_data)
# return_samples=False
y = functions.negative_sampling(
x, t, w, sampler, self.sample_size, reduce=self.reduce)
assert y.dtype == self.dtype
# return_samples=True
y_, samples = functions.negative_sampling(
x, t, w, sampler, self.sample_size, reduce=self.reduce,
return_samples=True)
xp = chainer.backend.get_array_module(x)
assert isinstance(samples, xp.ndarray)
assert samples.dtype == numpy.int32
assert samples.shape == (batch_size, self.sample_size + 1)
        # The sampler is deterministic, so y and y_ should be equal.
assert y.dtype == y_.dtype
cpu_device = CpuDevice()
numpy.testing.assert_array_equal(
cpu_device.send(y.array), cpu_device.send(y_.array))
assert y.shape == self.gy.shape
samples = cpu_device.send(samples)
loss = numpy.empty((len(self.x),), self.dtype)
for i in six.moves.range(len(self.x)):
ix = self.x[i]
it = self.t[i]
if it == -1:
loss[i] = 0
else:
iw = self.w[samples[i]]
f = iw.dot(ix)
                # The first entry is the positive example.
f[0] *= -1
loss[i] = numpy.logaddexp(f, 0).sum()
if self.reduce == 'sum':
loss = loss.sum()
assert y.dtype == loss.dtype
testing.assert_allclose(y.data, loss, **self.check_forward_options)
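    # The reference loop above mirrors the negative sampling objective: f holds
    # the dot products between the input and the sampled rows of W, the sign of
    # the positive (first) entry is flipped, and the loss is
    # sum(logaddexp(f, 0)) = sum(softplus(f)), i.e.
    # -log(sigmoid(w_pos . x)) - sum(log(sigmoid(-w_neg . x))); rows whose
    # label is -1 are ignored.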
def test_backward(self, backend_config):
sampler = make_sampler(backend_config, self.label_size)
x_data = backend_config.get_array(self.x)
t_data = backend_config.get_array(self.t)
w_data = backend_config.get_array(self.w)
y_grad = backend_config.get_array(self.gy)
def f(x, w):
return functions.negative_sampling(
x, t_data, w, sampler, self.sample_size, reduce=self.reduce)
with backend_config:
gradient_check.check_backward(
f, (x_data, w_data), y_grad, **self.check_backward_options)
def test_double_backward(self, backend_config):
sampler = make_sampler(backend_config, self.label_size)
x_data = backend_config.get_array(self.x)
t_data = backend_config.get_array(self.t)
w_data = backend_config.get_array(self.w)
y_grad = backend_config.get_array(self.gy)
x_grad_grad = backend_config.get_array(self.ggx)
w_grad_grad = backend_config.get_array(self.ggw)
def f(x, w):
return functions.negative_sampling(
x, t_data, w, sampler, self.sample_size, reduce=self.reduce)
with backend_config:
gradient_check.check_double_backward(
f, (x_data, w_data), y_grad, (x_grad_grad, w_grad_grad),
**self.check_double_backward_options)
class TestNegativeSamplingInvalidReductionOption(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
self.t = numpy.random.randint(0, 2, (2,)).astype(numpy.int32)
self.w = numpy.random.uniform(-1, 1, (5, 3)).astype(numpy.float32)
def check_invalid_option(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
w = xp.asarray(self.w)
with pytest.raises(ValueError):
negative_sampling.negative_sampling(
x, t, w, make_sampler(xp, 5), 2, reduce='invalid_option')
def test_invalid_option_cpu(self):
self.check_invalid_option(numpy)
@attr.gpu
def test_invalid_option_gpu(self):
self.check_invalid_option(cuda.cupy)
testing.run_module(__name__, __file__)
| 6,685
| 32.939086
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_black_out.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(
{'reduce': 'mean'},
{'reduce': 'no'}
)
class TestBlackOut(unittest.TestCase):
batch_size = 5
in_size = 4
n_vocab = 3
n_samples = 2
def setUp(self):
x_shape = (self.batch_size, self.in_size)
self.x = numpy.random.uniform(
-1, 1, x_shape).astype(numpy.float32)
self.t = numpy.random.randint(
self.n_vocab, size=self.batch_size).astype(numpy.int32)
w_shape = (self.n_vocab, self.in_size)
self.W = numpy.random.uniform(
-1, 1, w_shape).astype(numpy.float32)
self.samples = numpy.random.randint(
self.n_vocab, size=self.batch_size * self.n_samples) \
.astype(numpy.int32).reshape((self.batch_size, self.n_samples))
if self.reduce == 'no':
self.gy = numpy.random.uniform(
-1, 1, (self.batch_size,)).astype(numpy.float32)
else:
self.gy = numpy.random.uniform(-1, 1, ()).astype(numpy.float32)
def check_forward(self, x_data, t_data, w_data, samples_data):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
w = chainer.Variable(w_data)
samples = chainer.Variable(samples_data)
y = functions.black_out(x, t, w, samples, self.reduce)
expect_y = numpy.empty((self.batch_size), dtype=numpy.float32)
for b in range(self.batch_size):
z = 0
for i in range(self.n_samples):
w = self.samples[b, i]
z += numpy.exp(self.W[w].dot(self.x[b]))
y0 = self.W[self.t[b]].dot(self.x[b])
z += numpy.exp(y0)
l = y0 - numpy.log(z)
for i in range(self.n_samples):
w = self.samples[b, i]
l += numpy.log(1 - numpy.exp(self.W[w].dot(self.x[b])) / z)
expect_y[b] = l
if self.reduce == 'mean':
loss = -numpy.sum(expect_y) / self.batch_size
else:
loss = -expect_y
testing.assert_allclose(y.data, loss, atol=1.e-4)
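    # The loop above follows the BlackOut objective (Ji et al., 2016): with
    # z = exp(s_target) + sum_k exp(s_k) over the sampled words, each example
    # contributes log(exp(s_target) / z) + sum_k log(1 - exp(s_k) / z), and
    # the reported loss is its negative, averaged over the batch when
    # reduce == 'mean'.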
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, self.t, self.W, self.samples)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t), cuda.to_gpu(self.W),
cuda.to_gpu(self.samples))
def check_backward(self, x_data, t_data, w_data, samples_data, gy_data):
def _black_out(x, t, W, samples):
return functions.black_out(x, t, W, samples, self.reduce)
gradient_check.check_backward(
_black_out, (x_data, t_data, w_data, samples_data),
gy_data, dtype='d', atol=1e-2)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.t, self.W, self.samples, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t), cuda.to_gpu(self.W),
cuda.to_gpu(self.samples), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| 3,392
| 31.314286
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/loss_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_hinge.py
|
import unittest
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product_dict(
[{'dtype': numpy.float16,
'forward_options': {'rtol': 3e-3, 'atol': 3e-3},
'backward_options': {'rtol': 1e-1, 'atol': 1e-1}},
{'dtype': numpy.float32,
'forward_options': {},
'backward_options': {'rtol': 1e-1, 'atol': 1e-1}},
{'dtype': numpy.float64,
'forward_options': {},
'backward_options': {'rtol': 1e-1, 'atol': 1e-1}},
],
[{'reduce': 'no'},
{'reduce': 'mean'},
],
[{'norm': 'L1'},
{'norm': 'L2'},
],
[{'label_dtype': numpy.int8},
{'label_dtype': numpy.int16},
{'label_dtype': numpy.int32},
{'label_dtype': numpy.int64},
],
))
class TestHinge(unittest.TestCase):
def setUp(self):
self._config_user = chainer.using_config('dtype', self.dtype)
self._config_user.__enter__()
shape = (10, 5)
self.x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
# Avoid values around -1.0 for stability
self.x[numpy.logical_and(-1.01 < self.x, self.x < -0.99)] = 0.5
self.t = numpy.random.randint(
0, shape[1], shape[:1]).astype(self.label_dtype)
if self.reduce == 'no':
self.gy = numpy.random.uniform(
-1, 1, self.x.shape).astype(self.dtype)
def tearDown(self):
self._config_user.__exit__(None, None, None)
def check_forward(self, x_data, t_data):
x_val = chainer.Variable(x_data)
t_val = chainer.Variable(t_data, requires_grad=False)
loss = functions.hinge(x_val, t_val, self.norm, self.reduce)
if self.reduce == 'mean':
self.assertEqual(loss.data.shape, ())
else:
self.assertEqual(loss.data.shape, self.x.shape)
self.assertEqual(loss.data.dtype, self.dtype)
loss_value = cuda.to_cpu(loss.data)
# Compute expected value
for i in six.moves.range(self.x.shape[0]):
self.x[i, self.t[i]] *= -1
for i in six.moves.range(self.x.shape[0]):
for j in six.moves.range(self.x.shape[1]):
self.x[i, j] = max(0, 1.0 + self.x[i, j])
if self.norm == 'L1':
loss_expect = self.x
elif self.norm == 'L2':
loss_expect = self.x ** 2
if self.reduce == 'mean':
loss_expect = numpy.sum(loss_expect) / self.x.shape[0]
testing.assert_allclose(
loss_expect, loss_value, **self.forward_options)
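    # The expected value above is the multiclass hinge loss: the score of the
    # correct class is negated, every entry becomes max(0, 1 + x), norm 'L2'
    # squares the elementwise values, and reduce='mean' sums the result and
    # divides by the batch size.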
def test_forward_cpu(self):
self.check_forward(self.x, self.t)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@attr.chainerx
def test_forward_chainerx_native(self):
self.check_forward(
backend.to_chx(self.x), backend.to_chx(self.t))
@attr.gpu
@attr.chainerx
def test_forward_chainerx_cuda(self):
self.check_forward(
backend.to_chx(cuda.to_gpu(self.x)),
backend.to_chx(cuda.to_gpu(self.t)))
def check_backward(self, x_data, t_data):
def f(x, t):
return functions.hinge(x, t, self.norm)
gradient_check.check_backward(
f, (x_data, t_data), None, dtype='d', **self.backward_options)
def check_backward_chainerx(self, x_data, t_data):
# TODO(niboshi): gradient_check does not support integer input
# (no_grads) for ChainerX. Support it and merge this method with
# `self.check_backward`.
def f(x):
return functions.hinge(x, t_data, self.norm)
gradient_check.check_backward(
f, (x_data,), None, dtype='d', **self.backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.t)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@attr.chainerx
def test_backward_chainerx_native(self):
self.check_backward_chainerx(
backend.to_chx(self.x),
backend.to_chx(self.t))
@attr.gpu
@attr.chainerx
def test_backward_chainerx_cuda(self):
self.check_backward_chainerx(
backend.to_chx(cuda.to_gpu(self.x)),
backend.to_chx(cuda.to_gpu(self.t)))
class TestHingeInvalidOption(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (10, 5)).astype(numpy.float32)
self.t = numpy.random.randint(0, 5, (10,)).astype(numpy.int32)
def check_invalid_norm_option(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
with self.assertRaises(NotImplementedError):
functions.hinge(x, t, 'invalid_norm', 'mean')
def test_invalid_norm_option_cpu(self):
self.check_invalid_norm_option(numpy)
@attr.gpu
def test_invalid_norm_option_gpu(self):
self.check_invalid_norm_option(cuda.cupy)
def check_invalid_reduce_option(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
with self.assertRaises(ValueError):
functions.hinge(x, t, 'L1', 'invalid_option')
def test_invalid_reduce_option_cpu(self):
self.check_invalid_reduce_option(numpy)
@attr.gpu
def test_invalid_reduce_option_gpu(self):
self.check_invalid_reduce_option(cuda.cupy)
testing.run_module(__name__, __file__)
| 5,596
| 30.801136
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_huber_loss.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer import utils
@testing.parameterize(*testing.product_dict(
[{'dtype': numpy.float16,
'forward_options': {'rtol': 5e-3, 'atol': 5e-3},
'backward_options': {'eps': 1e-1, 'rtol': 1e-1, 'atol': 1e-1},
'double_backward_options': {'eps': 1e-1, 'rtol': 1e-1, 'atol': 1e-1}},
{'dtype': numpy.float32,
'forward_options': {},
'backward_options': {'eps': 1e-3, 'rtol': 1e-2, 'atol': 1e-2},
'double_backward_options': {'eps': 1e-3, 'rtol': 1e-3, 'atol': 1e-3}},
{'dtype': numpy.float64,
'forward_options': {},
'backward_options': {'eps': 1e-3, 'rtol': 1e-2, 'atol': 1e-2},
'double_backward_options': {'eps': 1e-3, 'rtol': 1e-3, 'atol': 1e-3}},
],
testing.product({
'shape': [(), (3,)],
'reduce': ['no'],
}) + testing.product({
'shape': [(4, 10), (2, 5, 3, 3)],
'reduce': ['no', 'sum_along_second_axis'],
}),
))
class TestHuberLoss(unittest.TestCase):
def setUp(self):
self._config_user = chainer.using_config('dtype', self.dtype)
self._config_user.__enter__()
self.x = utils.force_array(
(numpy.random.random(self.shape) - 0.5) * 3, self.dtype)
self.t = utils.force_array(
(numpy.random.random(self.shape) - 0.5), self.dtype)
if self.reduce == 'sum_along_second_axis':
gy_shape = self.shape[:1] + self.shape[2:]
else:
gy_shape = self.shape
self.gy = utils.force_array(numpy.random.random(gy_shape), self.dtype)
self.ggx = utils.force_array(
numpy.random.uniform(-1, 1, self.x.shape), self.dtype)
self.ggt = utils.force_array(
numpy.random.uniform(-1, 1, self.t.shape), self.dtype)
def tearDown(self):
self._config_user.__exit__(None, None, None)
def check_forward(self, x_data, t_data):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
loss = functions.huber_loss(x, t, delta=1, reduce=self.reduce)
self.assertEqual(loss.data.dtype, self.dtype)
loss_value = cuda.to_cpu(loss.data)
diff_data = cuda.to_cpu(x_data) - cuda.to_cpu(t_data)
loss_expect = numpy.zeros(self.shape)
mask = numpy.abs(diff_data) < 1
loss_expect[mask] = 0.5 * diff_data[mask] ** 2
loss_expect[~mask] = numpy.abs(diff_data[~mask]) - 0.5
if self.reduce == 'sum_along_second_axis':
loss_expect = numpy.sum(loss_expect, axis=1)
testing.assert_allclose(
loss_value, loss_expect, **self.forward_options)
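    # With delta = 1 the reference above is the standard Huber loss on the
    # residual d = x - t: 0.5 * d**2 where |d| < 1 and |d| - 0.5 elsewhere,
    # i.e. quadratic near zero and linear in the tails;
    # 'sum_along_second_axis' additionally sums over axis 1.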
def test_forward_cpu(self):
self.check_forward(self.x, self.t)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
def check_backward(self, x_data, t_data, y_grad):
def f(x, t):
return functions.huber_loss(x, t, delta=1, reduce=self.reduce)
gradient_check.check_backward(
f, (x_data, t_data), y_grad, **self.backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.t, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t),
cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, t_data, y_grad, x_grad_grad,
t_grad_grad):
delta = 1
eps = self.double_backward_options['eps'] * 2
xp = chainer.backend.get_array_module(x_data)
mask = xp.abs(xp.abs(x_data - t_data) - delta) < eps
x_data[mask] = 0
t_data[mask] = 0
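        # Entries where |x - t| lies within eps of delta are zeroed out
        # because the Huber loss switches between its quadratic and linear
        # branches there, so a numerical double-backward check would straddle
        # the kink and give unstable results.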
def f(x, t):
return functions.huber_loss(x, t, delta=delta, reduce=self.reduce)
gradient_check.check_double_backward(
f, (x_data, t_data), y_grad, (x_grad_grad, t_grad_grad),
**self.double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.t, self.gy, self.ggx, self.ggt)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t), cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx), cuda.to_gpu(self.ggt))
class TestHuberLossInvalidReductionOption(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (4, 10)).astype(numpy.float32)
self.t = numpy.random.uniform(-1, 1, (4, 10)).astype(numpy.float32)
def check_invalid_option(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
with self.assertRaises(ValueError):
functions.huber_loss(x, t, 1, 'invalid_option')
def test_invalid_option_cpu(self):
self.check_invalid_option(numpy)
@attr.gpu
def test_invalid_option_gpu(self):
self.check_invalid_option(cuda.cupy)
testing.run_module(__name__, __file__)
| 5,125
| 34.109589
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_discriminative_margin_based_clustering_loss.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'delta_v': [0.5],
'delta_d': [5],
'alpha': [1],
'beta': [1],
'gamma': [0.001],
'norm': [1],
'result_l_dist': [6.0776708],
'result_l_var': [64.0],
'result_l_reg': [0.03419368]
}) + testing.product(({
'delta_v': [3],
'delta_d': [10],
'alpha': [0.1],
'beta': [0.1],
'gamma': [0.1],
'max_n_clusters': [2],
'norm': [2],
'result_l_dist': [0.0],
'result_l_var': [26.56423595],
'result_l_reg': [1.55665027]
})))
class TestDiscriminativeMarginBasedClusteringLoss(unittest.TestCase):
def setUp(self):
self.max_n_clusters = 5
self.batch = 5
self.width = 10
self.height = 10
shape = (self.batch, self.max_n_clusters,
self.width, self.height)
input_arr = numpy.linspace(0, 100,
shape[0] * shape[1] *
shape[2] * shape[3])
self.input = input_arr.reshape(shape)
g_s = (self.batch, self.width, self.height)
self.gt = numpy.linspace(0, 10,
g_s[0] * g_s[1] * g_s[2]).astype(numpy.int32)
self.gt = numpy.reshape(self.gt, g_s)
self.y = (numpy.asarray(self.result_l_dist),
numpy.asarray(self.result_l_var),
numpy.asarray(self.result_l_reg))
def get_result(self, embeddings, labels):
out = functions.discriminative_margin_based_clustering_loss(
embeddings, labels,
self.delta_v, self.delta_d, self.max_n_clusters,
self.norm, self.alpha, self.beta, self.gamma)
return out
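    # The three returned terms follow the discriminative instance-segmentation
    # loss (De Brabandere et al., 2017): l_var pulls embeddings toward their
    # cluster mean once they are more than delta_v away, l_dist pushes cluster
    # means apart until they are at least delta_d apart, and l_reg is a small
    # penalty on the norms of the cluster means; alpha, beta and gamma weight
    # the three terms.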
def check_forward_cpu(self, embeddings, labels, t_data):
t_dist, t_var, t_reg = \
chainer.Variable(t_data[0]), \
chainer.Variable(t_data[1]), \
chainer.Variable(t_data[2])
l_dist, l_var, l_reg = self.get_result(embeddings, labels)
numpy.testing.assert_almost_equal(l_dist.data, t_dist.data)
numpy.testing.assert_almost_equal(l_var.data, t_var.data)
numpy.testing.assert_almost_equal(l_reg.data, t_reg.data)
def check_forward_gpu(self, embeddings, labels, t_data):
t_dist, t_var, t_reg = \
chainer.Variable(t_data[0]), \
chainer.Variable(t_data[1]), \
chainer.Variable(t_data[2])
l_dist, l_var, l_reg = self.get_result(embeddings, labels)
l_dist.to_cpu()
l_var.to_cpu()
l_reg.to_cpu()
t_dist.to_cpu()
t_var.to_cpu()
t_reg.to_cpu()
numpy.testing.assert_almost_equal(l_dist.data, t_dist.data)
numpy.testing.assert_almost_equal(l_var.data, t_var.data)
numpy.testing.assert_almost_equal(l_reg.data, t_reg.data)
def test_forward_cpu(self):
self.check_forward_cpu(cuda.to_cpu(self.input), cuda.to_cpu(self.gt),
self.y)
@attr.gpu
def test_forward_gpu(self):
self.check_forward_gpu(cuda.to_gpu(self.input), cuda.to_gpu(self.gt),
self.y)
@attr.gpu
def test_forward_gpu_cpu(self):
cpu_res = self.get_result(cuda.to_cpu(self.input),
cuda.to_cpu(self.gt))
gpu_res = self.get_result(cuda.to_gpu(self.input),
cuda.to_gpu(self.gt))
for idx in range(len(gpu_res)):
gpu_res[idx].to_cpu()
numpy.testing.assert_almost_equal(cpu_res[idx].data,
gpu_res[idx].data)
def check_backward(self, x0_data, x1_data, y_grad):
gradient_check.check_backward(
functions.squared_error,
(x0_data, x1_data), y_grad, eps=1e-2,
**self.check_backward_options)
testing.run_module(__name__, __file__)
| 4,064
| 32.875
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_cross_covariance.py
|
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
def _cross_covariance(y, z, dtype):
row = y.shape[1]
col = z.shape[1]
y, z = cuda.to_cpu(y), cuda.to_cpu(z)
y_mean = y.mean(axis=0)
z_mean = z.mean(axis=0)
N = y.shape[0]
loss_expect = numpy.zeros((row, col), dtype=dtype)
for i in six.moves.xrange(row):
for j in six.moves.xrange(col):
for n in six.moves.xrange(N):
loss_expect[i, j] += (y[n, i] - y_mean[i]) * (
z[n, j] - z_mean[j])
loss_expect /= N
return loss_expect
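# _cross_covariance is a naive triple-loop reference: entry (i, j) is the
# sample cross-covariance between feature i of y and feature j of z,
# the mean over n of (y[n, i] - mean(y[:, i])) * (z[n, j] - mean(z[:, j])).
# With reduce='half_squared_sum' the function under test collapses this
# matrix to half of its squared Frobenius norm.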
@testing.parameterize(*testing.product_dict(
[{'dtype': numpy.float16,
'forward_options': {'rtol': 1e-3, 'atol': 1e-3},
'backward_options': {'rtol': 3e-2, 'atol': 3e-2},
'double_backward_options': {'rtol': 5e-1, 'atol': 5e-1}},
{'dtype': numpy.float32,
'forward_options': {'rtol': 1e-4, 'atol': 1e-4},
'backward_options': {'rtol': 1e-4, 'atol': 1e-4},
'double_backward_options': {'rtol': 1e-4, 'atol': 1e-4}},
{'dtype': numpy.float64,
'forward_options': {'rtol': 1e-4, 'atol': 1e-4},
'backward_options': {'rtol': 1e-4, 'atol': 1e-4},
'double_backward_options': {'rtol': 1e-4, 'atol': 1e-4}},
],
[{'reduce': 'half_squared_sum'},
{'reduce': 'no'},
]
))
class TestCrossCovariance(unittest.TestCase):
def setUp(self):
self.y = numpy.random.uniform(-1, 1, (4, 3)).astype(self.dtype)
self.z = numpy.random.uniform(-1, 1, (4, 2)).astype(self.dtype)
if self.reduce == 'half_squared_sum':
gloss_shape = ()
else:
gloss_shape = (3, 2)
self.gloss = numpy.random.uniform(
-1, 1, gloss_shape).astype(self.dtype)
self.ggy = numpy.random.uniform(-1, 1, (4, 3)).astype(self.dtype)
self.ggz = numpy.random.uniform(-1, 1, (4, 2)).astype(self.dtype)
def check_forward(self, y_data, z_data):
y = chainer.Variable(y_data)
z = chainer.Variable(z_data)
loss = functions.cross_covariance(y, z, self.reduce)
self.assertEqual(loss.shape, self.gloss.shape)
self.assertEqual(loss.data.dtype, self.dtype)
loss_value = cuda.to_cpu(loss.data)
# Compute expected value
loss_expect = _cross_covariance(y_data, z_data, dtype=self.dtype)
if self.reduce == 'half_squared_sum':
loss_expect = numpy.sum(loss_expect ** 2) * 0.5
numpy.testing.assert_allclose(
loss_expect, loss_value, **self.forward_options)
def test_forward_cpu(self):
self.check_forward(self.y, self.z)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.y), cuda.to_gpu(self.z))
def check_backward(self, y_data, z_data, gloss_data):
def f(y, z):
return functions.cross_covariance(y, z, self.reduce)
gradient_check.check_backward(
f, (y_data, z_data), gloss_data, eps=0.02, **self.backward_options)
def test_backward_cpu(self):
self.check_backward(self.y, self.z, self.gloss)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.y), cuda.to_gpu(self.z),
cuda.to_gpu(self.gloss))
def check_type(self, y_data, z_data, gloss_data):
y = chainer.Variable(y_data)
z = chainer.Variable(z_data)
loss = functions.cross_covariance(y, z, self.reduce)
loss.grad = gloss_data
loss.backward()
self.assertEqual(y_data.dtype, y.grad.dtype)
self.assertEqual(z_data.dtype, z.grad.dtype)
def test_backward_type_cpu(self):
self.check_type(self.y, self.z, self.gloss)
@attr.gpu
def test_backward_type_gpu(self):
self.check_type(cuda.to_gpu(self.y), cuda.to_gpu(self.z),
cuda.to_gpu(self.gloss))
def check_double_backward(self, y_data, z_data, gloss_data, ggy_data,
ggz_data):
def f(y, z):
return functions.cross_covariance(y, z, self.reduce)
gradient_check.check_double_backward(
f, (y_data, z_data), gloss_data, (ggy_data, ggz_data),
**self.double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(
self.y, self.z, self.gloss, self.ggy, self.ggz)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.y), cuda.to_gpu(self.z), cuda.to_gpu(self.gloss),
cuda.to_gpu(self.ggy), cuda.to_gpu(self.ggz))
class TestCrossCovarianceInvalidReductionOption(unittest.TestCase):
def setUp(self):
self.y = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
self.z = numpy.random.uniform(-1, 1, (4, 2)).astype(numpy.float32)
def check_invalid_option(self, xp):
y = xp.asarray(self.y)
z = xp.asarray(self.z)
with self.assertRaises(ValueError):
functions.cross_covariance(y, z, 'invalid_option')
def test_invalid_option_cpu(self):
self.check_invalid_option(numpy)
@attr.gpu
def test_invalid_option_gpu(self):
self.check_invalid_option(cuda.cupy)
testing.run_module(__name__, __file__)
| 5,427
| 32.925
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_sigmoid_cross_entropy.py
|
import math
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer import utils
@testing.parameterize(*(testing.product({
# Test dtype
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'shape': [(8, 7)],
'normalize': [True],
'label_dtype': [numpy.int32],
}) + [
# Test normalize
{'dtype': numpy.float32,
'shape': (8, 7),
'normalize': False,
'label_dtype': numpy.int32,
},
# Test ignore_all
{'dtype': numpy.float32,
'shape': (8, 7),
'normalize': True,
'ignore_all': True,
'label_dtype': numpy.int32,
},
# too large shape causes int32 -> float64 issue
{'dtype': numpy.float32,
'shape': (65536, 1),
'normalize': False,
'label_dtype': numpy.int32,
},
] + testing.product({
# Test label_dtype
'dtype': [numpy.float32],
'shape': [(8, 7)],
'normalize': [True],
'label_dtype': [numpy.int8, numpy.int16, numpy.int32, numpy.int64],
})))
class TestSigmoidCrossEntropy(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
if getattr(self, 'ignore_all', False):
self.t = -numpy.ones(self.shape).astype(self.label_dtype)
else:
self.t = numpy.random.randint(-1, 2,
self.shape).astype(self.label_dtype)
self.gy = numpy.random.random(self.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(
-1, 1, self.shape).astype(self.dtype)
if self.dtype == numpy.float16:
self.places = 2
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 5e-2, 'rtol': 5e-2}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 5e-2, 'rtol': 5e-2}
else:
self.places = 5
self.check_backward_options = {'atol': 5e-3, 'rtol': 5e-3}
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-3}
def check_forward(self, x_data, t_data, use_cudnn='always'):
x_val = chainer.Variable(x_data)
t_val = chainer.Variable(t_data)
with chainer.using_config('use_cudnn', use_cudnn):
loss = functions.sigmoid_cross_entropy(x_val, t_val,
self.normalize)
self.assertEqual(loss.data.shape, ())
self.assertEqual(loss.data.dtype, self.dtype)
loss_value = float(cuda.to_cpu(loss.data))
# Compute expected value
loss_expect = 0
non_ignore_count = 0
for i in six.moves.range(self.x.shape[0]):
for j in six.moves.range(self.x.shape[1]):
xd, td = self.x[i, j], self.t[i, j]
if td == -1:
continue
loss_expect -= xd * (td - (xd >= 0)) \
- math.log(1 + math.exp(-numpy.abs(xd)))
non_ignore_count += 1
if non_ignore_count == 0:
loss_expect = 0
elif self.normalize:
loss_expect /= non_ignore_count
else:
loss_expect /= self.t.shape[0]
self.assertAlmostEqual(loss_expect, loss_value, places=self.places)
def check_forward_no_reduction(self, x_data, t_data):
x_val = chainer.Variable(x_data)
t_val = chainer.Variable(t_data)
loss = functions.sigmoid_cross_entropy(
x_val, t_val, self.normalize, reduce='no')
self.assertEqual(loss.data.shape, self.x.shape)
self.assertEqual(loss.data.dtype, self.dtype)
loss_value = cuda.to_cpu(loss.data)
# Compute expected value
if not getattr(self, 'ignore_all', False):
for i in six.moves.range(self.x.shape[0]):
for j in six.moves.range(self.x.shape[1]):
xd, td = self.x[i, j], self.t[i, j]
if td == -1:
loss_expect = 0
else:
loss_expect = -(
xd * (td - (xd >= 0)) -
math.log(1 + math.exp(-numpy.abs(xd))))
self.assertAlmostEqual(
loss_expect, loss_value[i, j], places=self.places)
def test_forward_cpu(self):
with chainer.using_config('use_cudnn', 'always'):
self.check_forward(self.x, self.t)
def test_forward_no_reduction_cpu(self):
with chainer.using_config('use_cudnn', 'always'):
self.check_forward_no_reduction(self.x, self.t)
@attr.gpu
def test_forward_gpu(self):
with chainer.using_config('use_cudnn', 'always'):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@attr.gpu
def test_forward_no_reduction_gpu(self):
with chainer.using_config('use_cudnn', 'always'):
self.check_forward_no_reduction(
cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@attr.gpu
def test_forward_gpu_no_cudnn(self):
with chainer.using_config('use_cudnn', 'never'):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@attr.gpu
def test_forward_no_reduction_gpu_no_cudnn(self):
with chainer.using_config('use_cudnn', 'never'):
self.check_forward_no_reduction(
cuda.to_gpu(self.x), cuda.to_gpu(self.t))
def check_backward(self, x_data, t_data):
        # Skip the too-large case; it would take a long time.
if self.shape[0] == 65536:
return
gradient_check.check_backward(
functions.sigmoid_cross_entropy,
(x_data, t_data), None, **self.check_backward_options)
def check_backward_no_reduction(
self, x_data, t_data, y_grad):
        # Skip the too-large case; it would take a long time.
if self.shape[0] == 65536:
return
def f(x, t):
return chainer.functions.sigmoid_cross_entropy(x, t, reduce='no')
gradient_check.check_backward(
f, (x_data, t_data), y_grad, **self.check_backward_options)
def test_backward_cpu(self):
with chainer.using_config('use_cudnn', 'never'):
self.check_backward(self.x, self.t)
def test_backward_no_reduction_cpu(self):
with chainer.using_config('use_cudnn', 'never'):
self.check_backward_no_reduction(self.x, self.t, self.gy)
@attr.gpu
def test_backward_gpu(self):
with chainer.using_config('use_cudnn', 'always'):
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@attr.gpu
def test_backward_no_reduction_gpu(self):
with chainer.using_config('use_cudnn', 'always'):
self.check_backward_no_reduction(
cuda.to_gpu(self.x), cuda.to_gpu(self.t), cuda.to_gpu(self.gy))
@attr.gpu
def test_backward_gpu_no_cudnn(self):
with chainer.using_config('use_cudnn', 'never'):
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@attr.gpu
def test_backward_no_reduction_gpu_no_cudnn(self):
with chainer.using_config('use_cudnn', 'never'):
self.check_backward_no_reduction(
cuda.to_gpu(self.x), cuda.to_gpu(self.t),
cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, t_data, y_grad, gx_grad,
normalize=True, reduce='mean'):
        # Skip the too-large case; it would take a long time.
if self.shape[0] == 65536:
return
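        # With reduce='mean' the loss is a scalar, so the upstream gradient
        # has to be a scalar as well.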
if reduce == 'mean':
y_grad = utils.force_array(y_grad.sum())
def f(x, t):
return chainer.functions.sigmoid_cross_entropy(
x, t, normalize=normalize, reduce=reduce)
gradient_check.check_double_backward(
f, (x_data, t_data), y_grad, (gx_grad,),
**self.check_double_backward_options)
def test_double_backward_cpu(self):
with chainer.using_config('use_cudnn', 'never'):
self.check_double_backward(self.x, self.t, self.gy, self.ggx,
normalize=self.normalize, reduce='mean')
@attr.gpu
def test_double_backward_gpu(self):
with chainer.using_config('use_cudnn', 'always'):
self.check_double_backward(cuda.to_gpu(self.x),
cuda.to_gpu(self.t),
cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx),
normalize=self.normalize,
reduce='mean')
def test_double_backward_no_reduction_cpu(self):
with chainer.using_config('use_cudnn', 'never'):
self.check_double_backward(self.x, self.t, self.gy, self.ggx,
normalize=self.normalize, reduce='no')
@attr.gpu
def test_double_backward_no_reduction_gpu(self):
with chainer.using_config('use_cudnn', 'always'):
self.check_double_backward(cuda.to_gpu(self.x),
cuda.to_gpu(self.t),
cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx),
normalize=self.normalize,
reduce='no')
@testing.parameterize(
{'use_cudnn': 'always'},
{'use_cudnn': 'auto'},
{'use_cudnn': 'never'},
)
@attr.cudnn
class TestSigmoidCrossEntropyCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
self.t = cuda.cupy.random.randint(0, 3, (4, 3)).astype(numpy.int32)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.expect = chainer.should_use_cudnn('==always')
def forward(self):
x = chainer.Variable(self.x)
t = chainer.Variable(self.t)
return functions.sigmoid_cross_entropy(x, t)
# Note that SigmoidCrossEntropy does not use cudnn on forward
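    # The backward pass evaluates sigmoid(x), which is where cuDNN's
    # activation_forward may be invoked.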
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
with testing.patch('cupy.cudnn.activation_forward') as func:
y.backward()
self.assertEqual(func.called, self.expect)
testing.run_module(__name__, __file__)
| 10,640
| 36.076655
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/loss_tests/test_softmax_cross_entropy.py
|
import unittest
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
import chainerx
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
@testing.parameterize(
*testing.product({
# test each option flags
'reduce': ['mean', 'no'],
'cache_score': [True, False],
'normalize': [True, False],
'weight_apply': [True, False],
'shape_ignore': ['special',
((2, 3), (0,)),
((2, 3, 2, 2), (0, 1, 0))],
'dtype': [numpy.float32],
'label_dtype': [numpy.int32],
}) + testing.product({
# test floating dtypes
'reduce': ['mean', 'no'],
'cache_score': [False],
'normalize': [True],
'weight_apply': [True],
'shape_ignore': ['special',
((2, 3), (slice(None),)),
((2, 3, 2), (0,)),
((2, 3, 2, 2), (0, 1, 0))],
'dtype': [numpy.float16, numpy.float64],
'label_dtype': [numpy.int32],
}) + testing.product({
# test label dtypes
'reduce': ['mean', 'no'],
'cache_score': [False],
'normalize': [True],
'weight_apply': [True],
'shape_ignore': ['special',
((2, 3), (slice(None),)),
((2, 3, 2), (0,)),
((2, 3, 2, 2), (0, 1, 0))],
'dtype': [numpy.float32],
'label_dtype': [numpy.int8, numpy.int16, numpy.int64],
}) + testing.product({
# Test float16 does not under/overflow in reduction for large batch
'reduce': ['mean'],
'cache_score': [False],
'normalize': [False, True],
'weight_apply': [True],
'shape_ignore': [
((300000, 2), None)],
'dtype': [numpy.float16],
'label_dtype': [numpy.int64],
}))
@testing.parameterize(
*testing.product({'enable_double_backprop': [False, True]}))
@testing.fix_random()
class TestSoftmaxCrossEntropy(testing.FunctionTestCase):
def setUp(self):
# Skip double-backward test if double-backprop is disabled
if not self.enable_double_backprop:
self.skip_double_backward_test = True
# shape and ignore_index
if self.shape_ignore == 'special':
shape = (1, 2)
ignore_index = None
else:
shape, ignore_index = self.shape_ignore
self.shape = shape
self.ignore_index = ignore_index
# t
label_dtype = self.label_dtype
if self.shape_ignore == 'special':
t = numpy.array([0], dtype=label_dtype)
else:
out_shape = (shape[0],) + shape[2:]
t = numpy.random.randint(0, shape[1], out_shape)
t = t.astype(label_dtype)
if ignore_index is not None and len(ignore_index) <= t.ndim:
t[ignore_index] = -1
self.t = t
# class_weight
if self.weight_apply:
class_weight = numpy.random.uniform(0, 10, (shape[1],))
class_weight = class_weight.astype(self.dtype)
else:
class_weight = None
self.class_weight = class_weight
# numeric tolerances
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
else:
self.check_forward_options = {}
self.check_backward_options = {}
self.check_double_backward_options = {}
def generate_inputs(self):
shape = self.shape
dtype = self.dtype
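        # The 'special' case uses an extreme logit to exercise numerical
        # stability; float16 uses a smaller magnitude to avoid overflow.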
if self.shape_ignore == 'special':
if dtype == numpy.float16:
x = numpy.array([[-5, 1]], dtype=dtype)
else:
x = numpy.array([[-1000, 1]], dtype=dtype)
else:
x = numpy.random.uniform(-1, 1, shape).astype(dtype)
return x,
def forward(self, inputs, device):
x, = inputs
t = device.send(self.t)
class_weight = device.send(self.class_weight)
loss = functions.softmax_cross_entropy(
x, t, normalize=self.normalize, reduce=self.reduce,
cache_score=self.cache_score, class_weight=class_weight,
enable_double_backprop=self.enable_double_backprop)
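        # With cache_score=True the plain (non-double-backprop) function
        # node keeps the softmax output, which is checked below; this does
        # not apply to the double-backprop or ChainerX paths.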
if not (self.enable_double_backprop or device.xp is chainerx):
assert (loss.creator.y is not None) == self.cache_score
# All the loss values except those corresponding to the ignored label
# must be positive.
# TODO(niboshi): Use device.xp.where once chainerx supports it.
assert numpy.where(
backend.CpuDevice().send(t == -1),
True,
backend.CpuDevice().send(loss.array) > 0).all()
return loss,
def forward_expected(self, inputs):
x, = inputs
t = self.t
class_weight = self.class_weight
if self.reduce == 'mean':
loss = self.expected_forward_with_reduce(x, t, class_weight)
else:
loss = self.expected_forward_without_reduce(x, t, class_weight)
return loss,
def expected_forward_with_reduce(self, x_data, t_data, class_weight):
# Compute expected value
loss_expect = 0.0
count = 0
x = numpy.rollaxis(x_data, 1, x_data.ndim).reshape(
(t_data.size, x_data.shape[1]))
t = t_data.ravel()
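        # For each non-ignored sample the loss is log_z - x[t] (optionally
        # class-weighted), where log_z = logsumexp(x) is accumulated with
        # pairwise logaddexp for numerical stability.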
for xi, ti in six.moves.zip(x, t):
if ti == -1:
continue
log_z = numpy.ufunc.reduce(numpy.logaddexp, xi)
if class_weight is None:
loss_expect -= (xi - log_z)[ti]
else:
loss_expect -= (xi - log_z)[ti] * class_weight[ti]
count += 1
if self.normalize:
if count == 0:
loss_expect = 0.0
else:
loss_expect /= count
else:
if len(t_data) == 0:
loss_expect = 0.0
else:
loss_expect /= len(t_data)
return numpy.asarray(loss_expect, dtype=x.dtype)
def expected_forward_without_reduce(self, x_data, t_data, class_weight):
x = numpy.rollaxis(x_data, 1, x_data.ndim).reshape(
(t_data.size, x_data.shape[1]))
t = t_data.ravel()
loss_shape = x_data.shape[0:1] + x_data.shape[2:]
loss_expect = numpy.zeros(loss_shape, x_data.dtype)
for i, (ti, loss_idx) in enumerate(zip(t, numpy.ndindex(*loss_shape))):
xi = x[i]
if ti == -1:
continue
log_z = numpy.ufunc.reduce(numpy.logaddexp, xi)
if class_weight is None:
loss_expect[loss_idx] = -(xi - log_z)[ti]
else:
loss_expect[loss_idx] = -(xi - log_z)[ti] * class_weight[ti]
return numpy.asarray(loss_expect, dtype=x.dtype)
@testing.parameterize(*testing.product_dict(
[
{'t_value': -2, 'valid': False},
{'t_value': 3, 'valid': False},
{'t_value': -1, 'valid': True} # -1 is ignore_label
],
[
{'enable_double_backprop': True},
{'enable_double_backprop': False}
]
))
class TestSoftmaxCrossEntropyValueCheck(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 2)).astype(numpy.float32)
# `0` is required to avoid NaN
self.t = numpy.array([self.t_value, 0], dtype=numpy.int32)
self.original_debug = chainer.is_debug()
chainer.set_debug(True)
def tearDown(self):
chainer.set_debug(self.original_debug)
def check_value_check(self, x_data, t_data, use_cudnn):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
with chainer.using_config('use_cudnn', use_cudnn):
if self.valid:
# Check if it throws nothing
functions.softmax_cross_entropy(
x, t, enable_double_backprop=self.enable_double_backprop)
else:
with self.assertRaises(ValueError):
functions.softmax_cross_entropy(
x, t,
enable_double_backprop=self.enable_double_backprop)
def test_value_check_cpu(self):
self.check_value_check(self.x, self.t, 'never')
@attr.gpu
def test_value_check_gpu(self):
self.check_value_check(self.x, self.t, 'never')
@attr.gpu
def test_value_check_gpu_cudnn(self):
self.check_value_check(cuda.to_gpu(self.x), cuda.to_gpu(self.t),
'always')
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestSoftmaxCrossEntropyCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.random.uniform(-1, 1, (4, 3)).astype(self.dtype)
self.t = cuda.cupy.random.randint(0, 3, (4,)).astype(numpy.int32)
def forward(self):
x = chainer.Variable(self.x)
t = chainer.Variable(self.t)
return functions.softmax_cross_entropy(
x, t, enable_double_backprop=False)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cudnn.softmax_forward') as func:
self.forward()
self.assertEqual(func.called,
chainer.should_use_cudnn('>=auto'))
# Note that SoftmaxCrossEntropy does not use cudnn on backward
@testing.parameterize(
{'enable_double_backprop': True},
{'enable_double_backprop': False},
)
class TestClassWeightAssertion(unittest.TestCase):
def setUp(self):
self.x = numpy.array([[0, 1], [2, 3]])
self.t = numpy.array([0, 1])
def test_ndim_assertion(self):
wrong_ndim_class_weight = numpy.array([[0, 0]], dtype='f')
with self.assertRaises(ValueError):
functions.softmax_cross_entropy(
self.x, self.t, class_weight=wrong_ndim_class_weight,
enable_double_backprop=self.enable_double_backprop)
def test_dtype_assertion(self):
wrong_dtype_class_weight = numpy.array([0, 0], dtype=numpy.int32)
with self.assertRaises(ValueError):
functions.softmax_cross_entropy(
self.x, self.t, class_weight=wrong_dtype_class_weight,
enable_double_backprop=self.enable_double_backprop)
def test_variable_assertion(self):
wrong_inst_class_weight = chainer.Variable(
numpy.array([0, 0], dtype='f'))
with self.assertRaises(ValueError):
functions.softmax_cross_entropy(
self.x, self.t, class_weight=wrong_inst_class_weight,
enable_double_backprop=self.enable_double_backprop)
@testing.parameterize(*testing.product({
'enable_double_backprop': [True, False],
}))
class TestSoftmaxCrossEntropyInvalidReduce(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype('f')
self.t = numpy.zeros((2,), 'i')
def check_invalid_reduce(self, x, t):
with self.assertRaises(ValueError):
functions.softmax_cross_entropy(
x, t,
reduce='unknown_reduce_type',
enable_double_backprop=self.enable_double_backprop)
def test_invalid_reduce_cpu(self):
self.check_invalid_reduce(self.x, self.t)
@attr.gpu
def test_invalid_reduce_gpu(self):
self.check_invalid_reduce(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@testing.parameterize(*testing.product({
'ignore_label': [-2, 9],
'reduce': ['mean', 'no'],
'enable_double_backprop': [False, True],
'class_weight': [None, numpy.ones((3,), dtype=numpy.float32)]})
)
class TestNonDefaultIgnoreLabel(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
self.t = numpy.full((2,), self.ignore_label, dtype=numpy.int32)
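        # Every target equals the (non-default) ignore label, so all
        # elements are ignored and the expected loss is zero.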
if self.reduce == 'mean':
gy_shape = ()
else:
gy_shape = (2,)
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(numpy.float32)
self.ggx = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
def check_forward(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
if self.class_weight is not None:
class_weight = xp.asarray(self.class_weight)
else:
class_weight = None
loss = functions.softmax_cross_entropy(
x, t, reduce=self.reduce,
class_weight=class_weight,
ignore_label=self.ignore_label,
enable_double_backprop=self.enable_double_backprop)
if self.reduce == 'mean':
expect = 0.
else:
expect = numpy.zeros((2,), dtype=numpy.float32)
testing.assert_allclose(loss.data, expect)
def test_forward_cpu(self):
self.check_forward(numpy)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.cupy)
def check_backward(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
gy = xp.asarray(self.gy)
if self.class_weight is not None:
class_weight = xp.asarray(self.class_weight)
else:
class_weight = None
def f(x_, t_):
return functions.softmax_cross_entropy(
x_, t_, class_weight=class_weight, reduce=self.reduce,
ignore_label=self.ignore_label,
enable_double_backprop=self.enable_double_backprop)
gradient_check.check_backward(f, (x, t), gy)
def test_backward_cpu(self):
self.check_backward(numpy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.cupy)
def check_double_backward(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
gy = xp.asarray(self.gy)
ggx = xp.asarray(self.ggx)
if self.class_weight is not None:
class_weight = xp.asarray(self.class_weight)
else:
class_weight = None
def f(x_):
return functions.softmax_cross_entropy(
x_, t, class_weight=class_weight, reduce=self.reduce,
ignore_label=self.ignore_label,
enable_double_backprop=True)
gradient_check.check_double_backward(f, x, gy, ggx)
def test_double_backward_cpu(self):
self.check_double_backward(numpy)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(cuda.cupy)
@testing.parameterize(*(testing.product({
'shape_ignore': [(None, None),
((2, 3), (slice(None),)),
((2, 3, 2), (0,)),
((2, 3, 2, 2), (0, 1, 0))],
'normalize': [True, False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'weight_apply': [False, True],
})))
class TestForwardConsistency(unittest.TestCase):
    # This test case checks that forward propagation of the
    # double-backpropable and the non-double-backpropable implementations
    # agree.
def setUp(self):
self.shape, self.ignore_index = self.shape_ignore
if self.shape is None:
if self.dtype == numpy.float16:
self.x = numpy.array([[-5, 1]], dtype=self.dtype)
else:
self.x = numpy.array([[-1000, 1]], dtype=self.dtype)
self.t = numpy.array([0], dtype=numpy.int32)
else:
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
out_shape = (self.shape[0],) + self.shape[2:]
self.t = numpy.random.randint(
0, self.shape[1], out_shape).astype(numpy.int32)
if (self.ignore_index is not None and
len(self.ignore_index) <= self.t.ndim):
self.t[self.ignore_index] = -1
if self.weight_apply:
self.class_weight = numpy.random.uniform(
0, 10, (self.x.shape[1],)).astype(self.dtype)
else:
self.class_weight = None
def check_consistency(self, xp):
if self.class_weight is None:
class_weight = None
else:
class_weight = xp.asarray(self.class_weight)
x = xp.asarray(self.x)
t = xp.asarray(self.t)
def f(enable_double_backprop):
kwargs = {
'normalize': self.normalize,
'class_weight': class_weight,
'enable_double_backprop': enable_double_backprop
}
return functions.softmax_cross_entropy(x, t, **kwargs).data
loss_single = f(False)
loss_double = f(True)
check_forward_options = {}
if self.dtype == numpy.float16:
check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
testing.assert_allclose(
loss_single, loss_double, **check_forward_options)
def test_consistency_cpu(self):
self.check_consistency(numpy)
@attr.gpu
def test_consistency_gpu_always(self):
with chainer.using_config('use_cudnn', 'always'):
self.check_consistency(cuda.cupy)
@attr.gpu
def test_consistency_gpu_auto(self):
with chainer.using_config('use_cudnn', 'auto'):
self.check_consistency(cuda.cupy)
@attr.gpu
def test_consistency_gpu_never(self):
with chainer.using_config('use_cudnn', 'never'):
self.check_consistency(cuda.cupy)
class BaseSoftTarget(object):
def setUp(self):
x_shape = (self.nb,) + self.shape
self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
if self.reduce == 'mean':
self.gy = numpy.random.uniform(-1, 1, ()).astype(self.dtype)
else:
y_shape = (self.nb,) + self.shape[1:]
self.gy = numpy.random.uniform(-1, 1, y_shape).astype(self.dtype)
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.check_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
else:
self.check_forward_options = {}
self.check_backward_options = {}
def check_forward(self, xp):
raise NotImplementedError
def test_forward_cpu(self):
self.check_forward(numpy)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.cupy)
def check_backward(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
gy = None
if self.reduce == 'no':
gy = xp.asarray(self.gy)
def f(x_, t_):
return functions.softmax_cross_entropy(
x_, t_, reduce=self.reduce)
gradient_check.check_backward(f, (x, t), gy, dtype=numpy.float64,
no_grads=(False, True),
**self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(numpy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.cupy)
@testing.parameterize(*(testing.product({
'nb': [1, 2, 4],
'shape': [(3,), (3, 2), (3, 2, 2)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'reduce': ['mean', 'no'],
'soft_target_loss': ['cross-entropy', 'kl-divergence'],
})))
class TestSoftTargetCompareToHard(BaseSoftTarget, unittest.TestCase):
def setUp(self):
BaseSoftTarget.setUp(self)
t_hard_shape = (self.nb,) + self.shape[1:]
self.t_hard = numpy.random.randint(
0, self.shape[0], t_hard_shape).astype(numpy.int32)
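        # Build soft targets as one-hot distributions over the class axis,
        # equivalent to the hard labels above.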
t = numpy.zeros(self.x.size).astype(self.dtype)
t = t.reshape(self.shape[0], -1)
t[[self.t_hard.ravel()], [range(t.shape[1])]] = 1.0
t = t.reshape((self.shape[0], self.nb,) + self.shape[1:])
self.t = t.swapaxes(0, 1)
def check_forward(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
loss = functions.softmax_cross_entropy(x, t, reduce=self.reduce)
expect = functions.softmax_cross_entropy(
x, xp.asarray(self.t_hard), reduce=self.reduce,
soft_target_loss=self.soft_target_loss)
testing.assert_allclose(loss.data, expect.data,
**self.check_forward_options)
@testing.parameterize(*(testing.product({
'nb': [1, 2, 4],
'shape': [(3,), (3, 2), (3, 2, 2)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'reduce': ['mean', 'no'],
'soft_target_loss': ['kl-divergence'],
})))
class TestSoftTargetKLDivergence(BaseSoftTarget, unittest.TestCase):
def setUp(self):
BaseSoftTarget.setUp(self)
self.t = functions.softmax(self.x).array
def check_forward(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
loss = functions.softmax_cross_entropy(
x, t, reduce=self.reduce, soft_target_loss=self.soft_target_loss)
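        # The soft target equals softmax(x), so the KL-divergence loss is
        # expected to vanish.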
if self.reduce == 'mean':
expect = 0.
else:
expect = numpy.zeros(self.gy.shape, dtype=self.dtype)
testing.assert_allclose(loss.data, expect,
**self.check_forward_options)
@testing.parameterize(*(testing.product({
'nb': [1, 2, 4],
'shape': [(3,), (3, 2), (3, 2, 2)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'reduce': ['mean', 'no'],
'soft_target_loss': ['cross-entropy'],
})))
class TestSoftTargetCrossEntropy(BaseSoftTarget, unittest.TestCase):
def setUp(self):
BaseSoftTarget.setUp(self)
self.t = functions.softmax(self.x).array
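        # Reference value: -sum_c t_c * log_softmax(x)_c over the class
        # axis, i.e. the cross entropy against the soft target.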
self.expect = numpy.sum(-self.t * functions.log_softmax(self.x).array,
axis=1)
if self.reduce == 'mean':
self.expect = numpy.average(self.expect)
def check_forward(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
loss = functions.softmax_cross_entropy(
x, t, reduce=self.reduce, soft_target_loss=self.soft_target_loss)
testing.assert_allclose(loss.data, self.expect,
**self.check_forward_options)
testing.run_module(__name__, __file__)
| 23,058
| 33.519461
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/pooling_tests/test_spatial_pyramid_pooling_2d.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import type_check
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper
@testing.parameterize(*testing.product_dict(
[
{'pyramid_height': 3, 'output_dim': 63, 'n': 2, 'c': 3, 'h': 9, 'w': 8}
],
[
{'pooling': 'max'},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64}
]
))
class TestSpatialPyramidPooling2D(unittest.TestCase):
def setUp(self):
        # Spatial pyramid pooling uses max pooling in its implementation.
        # To avoid instability of the numerical gradient, use sufficiently
        # distinct values.
shape = (self.n, self.c, self.h, self.w)
self.x = pooling_nd_helper.shuffled_linspace(shape, self.dtype)
self.one = numpy.ones(
(self.n, self.c, self.h, self.w)).astype(self.dtype)
self.gy = numpy.random.uniform(
-1, 1, (self.n, self.output_dim, 1, 1)).astype(self.dtype)
self.ggx = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
def func(self, x):
return functions.spatial_pyramid_pooling_2d(
x, self.pyramid_height, pooling=self.pooling)
def check_forward(self, x_data, use_cudnn='always'):
x = chainer.Variable(x_data)
with chainer.using_config('use_cudnn', use_cudnn):
y = self.func(x)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
def check_forward_ones(self, x_data, use_cudnn='always'):
x = chainer.Variable(x_data)
with chainer.using_config('use_cudnn', use_cudnn):
y = self.func(x)
y_data = cuda.to_cpu(y.data)
self.assertEqual(y_data.shape, (self.n, self.output_dim, 1, 1))
self.assertEqual(y_data.dtype, self.dtype)
testing.assert_allclose(y_data, numpy.ones_like(y_data))
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
self.check_forward_ones(self.one)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
self.check_forward_ones(cuda.to_gpu(self.one))
@attr.gpu
@condition.retry(3)
def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x), 'never')
self.check_forward_ones(cuda.to_gpu(self.one), 'never')
def check_backward(self, x_data, y_grad, use_cudnn='always'):
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_backward(
self.func, x_data, y_grad,
dtype=numpy.float64, atol=5e-4, rtol=5e-3)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), 'never')
def check_double_backward(self, x_data, y_grad, x_grad_grad,
use_cudnn='always'):
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_double_backward(
self.func, x_data, y_grad, x_grad_grad,
dtype=numpy.float64, atol=5e-3, rtol=5e-3)
@condition.retry(3)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx, 'never')
@attr.gpu
@condition.retry(3)
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
@attr.gpu
@condition.retry(3)
def test_double_backward_gpu_non_contiguous(self):
self.check_double_backward(
cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.ggx)))
@attr.gpu
@condition.retry(3)
def test_double_backward_gpu_no_cudnn(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),
'never')
class TestInvalidDtype(unittest.TestCase):
def setUp(self):
self.x = numpy.random.randn(5, 3, 5, 5)
self.v = chainer.Variable(self.x.astype(numpy.int32))
def check_invalid_dtype(self):
functions.spatial_pyramid_pooling_2d(
self.v, 3, pooling='max')
def test_invalid_dtype_cpu(self):
with self.assertRaises(type_check.InvalidType):
self.check_invalid_dtype()
@attr.gpu
def test_invalid_dtype_gpu(self):
self.v.to_gpu()
with self.assertRaises(type_check.InvalidType):
self.check_invalid_dtype()
class TestInvalidArguments(unittest.TestCase):
def setUp(self):
self.x = numpy.random.randn(5, 3, 5, 5)
self.v = chainer.Variable(self.x.astype(numpy.float32))
def check_ambiguous_poolings(self):
with self.assertRaises(ValueError):
functions.spatial_pyramid_pooling_2d(self.v, 3)
def check_invalid_poolings(self):
with self.assertRaises(ValueError):
functions.spatial_pyramid_pooling_2d(self.v, 3, pooling='avg')
def test_ambiguous_pooling(self):
self.check_ambiguous_poolings()
def test_invalid_pooling(self):
self.check_invalid_poolings()
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.gpu
@attr.cudnn
class TestMaxPooling2DCudnnCall(unittest.TestCase):
def setUp(self):
shape = (2, 3, 9, 8)
size = 2 * 3 * 9 * 8
self.x = cuda.cupy.arange(size, dtype=self.dtype).reshape(shape)
self.gy = cuda.cupy.random.uniform(
-1, 1, (2, 63, 1, 1)).astype(self.dtype)
def forward(self):
x = chainer.Variable(self.x)
return functions.spatial_pyramid_pooling_2d(
x, 3, pooling='max')
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cudnn.pooling_forward') as func:
self.forward()
self.assertEqual(func.called,
chainer.should_use_cudnn('>=auto'))
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
expect = chainer.should_use_cudnn('>=auto')
y = self.forward()
y.grad = self.gy
            # should be consistent with forward regardless of use_cudnn config
with testing.patch('cupy.cudnn.pooling_backward') as func:
y.backward()
self.assertEqual(func.called, expect)
testing.run_module(__name__, __file__)
| 7,222
| 32.133028
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/pooling_tests/test_unpooling_nd.py
|
import itertools
import unittest
import numpy
import six
import chainer
from chainer import backend
from chainer import functions
from chainer import testing
from chainer.utils import conv
from chainer.utils import type_check
def xs_iter(dims):
return itertools.product(*[range(d) for d in dims])
def kxs_iter(x, outs, ksize, stride, pad):
return itertools.product(
*[range(max(0, -p + s * _x), min(-p + s * _x + k, out))
for (_x, out, k, s, p) in zip(x, outs, ksize, stride, pad)])
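# Naive reference implementation of N-dimensional unpooling: every input
# element is added to each output position covered by its pooling window.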
def expected_unpooling_nd(x_data, outs, ksize, stride, pad):
N, c = x_data.shape[:2]
dims = x_data.shape[2:]
y_expected_shape = (N, c) + outs
y_expected = numpy.zeros(y_expected_shape, dtype=x_data.dtype)
for i in six.moves.range(N):
for _c in six.moves.range(c):
for x in xs_iter(dims):
x_idx = (i, _c) + x
for kx in kxs_iter(x, outs, ksize, stride, pad):
y_idx = (i, _c) + kx
y_expected[y_idx] += x_data[x_idx]
return y_expected
@testing.parameterize(*(testing.product({
'dims': [(5,), (2, 3, 4)],
'_ksize': [3],
'_stride': [3],
'_pad': [1],
'cover_all': [True],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}) + testing.product({
'dims': [(3, 2)],
'_ksize': [1, 2, 3],
'_stride': [1, 2, 3],
'_pad': [0, 1],
'cover_all': [True, False],
'dtype': [numpy.float32],
})))
@testing.inject_backend_tests(
['test_forward', 'test_backward', 'test_double_backward',
'test_consistency_regression_forward',
'test_consistency_regression_backward'],
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestUnpoolingND(testing.FunctionTestCase):
def setUp(self):
N = 2
c = 3
self.ndim = len(self.dims)
self.ksize = (self._ksize,) * self.ndim
self.stride = (self._stride,) * self.ndim
self.pad = (self._pad,) * self.ndim
self.x_shape = (N, c) + self.dims
self.outs = tuple(
conv.get_deconv_outsize(d, k, s, p, cover_all=self.cover_all)
for (d, k, s, p)
in zip(self.dims, self.ksize, self.stride, self.pad))
self.gy_shape = (N, c) + self.outs
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 2 ** -4, 'rtol': 2 ** -4}
self.check_backward_options = {'atol': 2 ** -4, 'rtol': 2 ** -4}
self.check_double_backward_options = {}
else:
self.check_forward_options = {}
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-3}
self.check_double_backward_options = {'atol': 3e-3, 'rtol': 3e-2}
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
outs = self.gy_shape[2:]
y_expected = expected_unpooling_nd(
x, outs, self.ksize, self.stride, self.pad)
return y_expected,
def forward(self, inputs, device):
x, = inputs
y = functions.unpooling_nd(
x, self.ksize, self.stride, self.pad, cover_all=self.cover_all)
return y,
def check_forward_consistency_regression(self, backend_config):
        # Regression test against the two-dimensional unpooling layer.
inputs, = self.generate_inputs()
x = chainer.Variable(backend_config.get_array(inputs))
ksize = self.ksize
stride = self.stride
pad = self.pad
y_nd = functions.unpooling_nd(x, ksize, stride=stride, pad=pad,
cover_all=self.cover_all)
y_2d = functions.unpooling_2d(x, ksize, stride=stride, pad=pad,
cover_all=self.cover_all)
testing.assert_allclose(
y_nd.array, y_2d.array, **self.check_forward_options)
def test_consistency_regression_forward(self, backend_config):
if len(self.dims) == 2:
self.check_forward_consistency_regression(backend_config)
def check_backward_consistency_regression(self, backend_config):
        # Regression test against the two-dimensional unpooling layer.
x_data, = self.generate_inputs()
gy_data = numpy.random.uniform(-1, 1, self.gy_shape).astype(self.dtype)
ksize = self.ksize
stride = self.stride
pad = self.pad
xp = backend.get_array_module(x_data)
# Backward computation for N-dimensional unpooling layer.
x_nd = chainer.Variable(xp.array(x_data))
y_nd = functions.unpooling_nd(
x_nd, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
y_nd.grad = gy_data
y_nd.backward()
# Backward computation for two-dimensional unpooling layer.
x_2d = chainer.Variable(xp.array(x_data))
y_2d = functions.unpooling_2d(
x_2d, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
y_2d.grad = gy_data
y_2d.backward()
# Test that the two result gradients are close enough.
opt = self.check_backward_options
testing.assert_allclose(
x_nd.grad, x_2d.grad, atol=opt['atol'], rtol=opt['rtol'])
def test_consistency_regression_backward(self, backend_config):
ndim = len(self.dims)
if ndim == 2:
self.check_backward_consistency_regression(backend_config)
@testing.parameterize(*testing.product({
'outsize': [(10,), (10, 9), (10, 9, 8)],
'_ksize': [1, 2, 3],
'_stride': [1, 2, 3],
'_pad': [0, 1],
'cover_all': [True, False],
}))
class TestUnpoolingNDOutsize(unittest.TestCase):
def setUp(self):
self.N = 2
self.c = 3
ndim = len(self.outsize)
self.ksize = (self._ksize,) * ndim
self.stride = (self._stride,) * ndim
self.pad = (self._pad,) * ndim
def test_valid_insize(self):
N = self.N
c = self.c
ksize = self.ksize
stride = self.stride
pad = self.pad
outs = self.outsize
cover_all = self.cover_all
# Make input.
dims = tuple(conv.get_conv_outsize(out, k, s, p, cover_all=cover_all)
for (out, k, s, p) in zip(outs, ksize, stride, pad))
x_shape = (N, c) + dims
x_data = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
x = chainer.Variable(x_data)
# Compute unpooling.
y = functions.unpooling_nd(
x, ksize, stride, pad, outsize=outs, cover_all=cover_all)
# Test output's value.
y_expected = expected_unpooling_nd(x_data, outs, ksize, stride, pad)
testing.assert_allclose(y_expected, y.data)
def test_invalid_insize(self):
ksize = self.ksize
stride = self.stride
pad = self.pad
outs = self.outsize
cover_all = self.cover_all
# Make input with invalid shape.
dims = tuple(conv.get_conv_outsize(out, k, s, p, cover_all=cover_all)
for (out, k, s, p) in zip(outs, ksize, stride, pad))
dims = tuple(d + 1 for d in dims) # Make invalid input shape.
x_shape = (self.N, self.c) + dims
x_data = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
x = chainer.Variable(x_data)
# Computing unpooling raises exception.
with self.assertRaises(type_check.InvalidType):
functions.unpooling_nd(
x, ksize, stride, pad, outsize=outs, cover_all=cover_all)
class TestUnpoolingNDWrappers(unittest.TestCase):
def _get_data(self, ndim):
x_shape = (2, 3) + (3,) * ndim
dtype = numpy.float32
x = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
ksize = (2,) * ndim
return x, ksize
def test_unpooling_1d(self):
(x, ksize) = self._get_data(1)
testing.assert_allclose(
functions.unpooling_nd(x, ksize).data,
functions.unpooling_1d(x, ksize).data)
def test_unpooling_1d_invalid(self):
(x, ksize) = self._get_data(2)
with self.assertRaises(ValueError):
functions.unpooling_1d(x, ksize)
def test_unpooling_3d(self):
(x, ksize) = self._get_data(3)
testing.assert_allclose(
functions.unpooling_nd(x, ksize).data,
functions.unpooling_3d(x, ksize).data)
def test_unpooling_3d_invalid(self):
(x, ksize) = self._get_data(2)
with self.assertRaises(ValueError):
functions.unpooling_3d(x, ksize)
testing.run_module(__name__, __file__)
| 9,058
| 32.18315
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/pooling_tests/pooling_nd_helper.py
|
import itertools
import numpy
import six
from chainer import testing
import chainer.utils
def pooling_patches(dims, ksize, stride, pad, cover_all):
"""Return tuples of slices that indicate pooling patches."""
# Left-top indexes of each pooling patch.
if cover_all:
xss = itertools.product(
*[six.moves.range(-p, d + p - k + s, s)
for (d, k, s, p) in six.moves.zip(dims, ksize, stride, pad)])
else:
xss = itertools.product(
*[six.moves.range(-p, d + p - k + 1, s)
for (d, k, s, p) in six.moves.zip(dims, ksize, stride, pad)])
# Tuples of slices for pooling patches.
return [tuple(slice(max(x, 0), min(x + k, d))
for (x, d, k) in six.moves.zip(xs, dims, ksize))
for xs in xss]
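# shuffled_linspace() produces pairwise-distinct values spread over roughly
# (-1, 1); distinct values avoid ties that would destabilize the numerical
# gradients of max pooling.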
def shuffled_linspace(shape, dtype):
size = chainer.utils.size_of_shape(shape)
x = numpy.random.permutation(size) + numpy.random.uniform(0.3, 0.7, size)
x = (2 * x / max(1, size) - 1).astype(dtype)
return x.reshape(shape)
testing.run_module(__name__, __file__)
| 1,082
| 29.942857
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/pooling_tests/test_roi_max_pooling_2d.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper
def _pair(x):
if isinstance(x, chainer.utils.collections_abc.Iterable):
return x
return x, x
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64],
'outsize': [
5, 7, (5, 7),
(numpy.int32(5), numpy.int32(7))],
'spatial_scale': [
0.6, 1.0, 2, numpy.float32(0.6), numpy.int32(2)],
}))
class TestROIMaxPooling2D(unittest.TestCase):
def setUp(self):
N = 3
n_channels = 3
self.x = pooling_nd_helper.shuffled_linspace(
(N, n_channels, 12, 8), self.dtype)
self.rois = numpy.array([
[1, 1, 7, 7],
[2, 6, 12, 8],
[1, 3, 11, 6],
[3, 3, 4, 4]
], dtype=self.dtype)
self.roi_indices = numpy.array([0, 2, 1, 0], dtype=numpy.int32)
n_rois = self.rois.shape[0]
outsize = _pair(self.outsize)
self.gy = numpy.random.uniform(
-1, 1, (n_rois, n_channels,
outsize[0], outsize[1])).astype(self.dtype)
if self.dtype == numpy.float16:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
else:
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
def check_forward(self, x_data, roi_data, roi_index_data):
x = chainer.Variable(x_data)
rois = chainer.Variable(roi_data)
roi_indices = chainer.Variable(roi_index_data)
y = functions.roi_max_pooling_2d(
x, rois, roi_indices, outsize=self.outsize,
spatial_scale=self.spatial_scale)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
def test_forward_cpu(self):
self.check_forward(self.x, self.rois, self.roi_indices)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.roi_indices))
@attr.gpu
def test_forward_cpu_gpu_equal(self):
# cpu
x_cpu = chainer.Variable(self.x)
rois_cpu = chainer.Variable(self.rois)
roi_indices_cpu = chainer.Variable(self.roi_indices)
y_cpu = functions.roi_max_pooling_2d(
x_cpu, rois_cpu, roi_indices_cpu, outsize=self.outsize,
spatial_scale=self.spatial_scale)
# gpu
x_gpu = chainer.Variable(cuda.to_gpu(self.x))
rois_gpu = chainer.Variable(cuda.to_gpu(self.rois))
roi_indices_gpu = chainer.Variable(cuda.to_gpu(self.roi_indices))
y_gpu = functions.roi_max_pooling_2d(
x_gpu, rois_gpu, roi_indices_gpu, outsize=self.outsize,
spatial_scale=self.spatial_scale)
testing.assert_allclose(y_cpu.data, cuda.to_cpu(y_gpu.data))
def check_backward(self, x_data, roi_data, roi_index_data, y_grad):
def f(x, rois, roi_indices):
y = functions.roi_max_pooling_2d(
x, rois, roi_indices, outsize=self.outsize,
spatial_scale=self.spatial_scale)
xp = cuda.get_array_module(y)
# replace -inf with zero for gradient_check
y = functions.where(
xp.isinf(y.array), xp.zeros(y.shape, dtype=y.dtype), y)
return y
gradient_check.check_backward(
f, (x_data, roi_data, roi_index_data), y_grad,
no_grads=[False, True, True], **self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.rois, self.roi_indices, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.roi_indices), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| 4,127
| 33.689076
| 73
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/pooling_tests/test_average_pooling_nd.py
|
import functools
import operator
import unittest
import numpy
import pytest
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import conv
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper
@testing.parameterize(*testing.product({
'dims': [(4,), (4, 3), (4, 3, 2), (1, 1, 1, 1)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'pad_value': [None, 0],
'contiguous': ['C', None],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[{}]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
]
)
class TestAveragePoolingND(testing.FunctionTestCase):
def setUp(self):
self.ndim = len(self.dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
self.input_shape = (2, 3) + self.dims
outs = tuple(conv.get_conv_outsize(d, k, s, p, False)
for (d, k, s, p) in six.moves.zip(
self.dims, self.ksize, self.stride, self.pad))
self.output_shape = (2, 3) + outs
self.check_backward_options.update({'atol': 5e-3, 'rtol': 5e-3})
self.check_double_backward_options.update({'atol': 5e-3, 'rtol': 5e-3})
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 5e-4, 'rtol': 5e-3})
self.check_backward_options.update({
'eps': 1e-2, 'atol': 5e-3, 'rtol': 5e-2})
            self.check_double_backward_options.update({
                'eps': 1e-2, 'atol': 5e-3, 'rtol': 5e-2})
def generate_inputs(self):
return numpy.random.uniform(
-1, 1, self.input_shape).astype(self.dtype),
def forward(self, inputs, device):
x, = inputs
return functions.average_pooling_nd(
x, self.ksize, self.stride, self.pad, self.pad_value),
def forward_expected(self, inputs):
x, = inputs
patches = pooling_nd_helper.pooling_patches(
self.dims, self.ksize, self.stride, self.pad, False)
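        # With pad_value=None the average is taken over only the valid
        # (unpadded) part of each window; otherwise over the full kernel.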
def denom(idx):
if self.pad_value is None:
s = 1
for slic in idx:
s *= slic.stop - slic.start
return s
else:
return functools.reduce(operator.mul, self.ksize)
y = []
for k in six.moves.range(2):
tmp = []
for c in six.moves.range(3):
x_ = x[k, c]
expect = numpy.array(
[x_[idx].sum() / denom(idx) for idx in patches])
expect = expect.reshape(self.output_shape[2:])
tmp.append(expect)
y.append(tmp)
return numpy.asarray(y, dtype=self.dtype),
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.inject_backend_tests(
['test_forward_consistency', 'test_backward_consistency'],
# CPU tests
[{}]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
)
class TestConsistencyAveragePoolingND(unittest.TestCase):
def setUp(self):
x_shape = (2, 3, 4, 3)
self.ksize = (3, 3)
self.stride = (2, 2)
self.pad = (1, 1)
self.pad_value = 0
self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
outs = tuple(conv.get_conv_outsize(d, k, s, p, False)
for (d, k, s, p) in six.moves.zip(
x_shape[2:], self.ksize, self.stride, self.pad))
gy_shape = (2, 3) + outs
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
self.tolerance = {}
if self.dtype == numpy.float16:
self.tolerance.update({'atol': 1e-3, 'rtol': 1e-4})
def check_forward_consistency_regression(self, x_data, backend_config):
ksize = self.ksize
stride = self.stride
pad = self.pad
pad_value = self.pad_value
with backend_config:
y_nd = functions.average_pooling_nd(
x_data, ksize, stride=stride, pad=pad, pad_value=pad_value)
y_2d = functions.average_pooling_2d(
x_data, ksize, stride=stride, pad=pad)
testing.assert_allclose(y_nd.array, y_2d.array, **self.tolerance)
def test_forward_consistency(self, backend_config):
x = self.x.copy()
x = backend_config.get_array(x)
self.check_forward_consistency_regression(x, backend_config)
def check_backward_consistency_regression(
self, x_data, gy_data, backend_config):
        # Regression test against the two-dimensional average pooling layer.
ksize = self.ksize
stride = self.stride
pad = self.pad
pad_value = self.pad_value
# Backward computation for N-dimensional average pooling layer.
x_nd = chainer.Variable(x_data)
with backend_config:
y_nd = functions.average_pooling_nd(
x_nd, ksize, stride=stride, pad=pad, pad_value=pad_value)
y_nd.grad = gy_data
y_nd.backward()
# Backward computation for two-dimensional average pooling layer.
x_2d = chainer.Variable(x_data)
with backend_config:
y_2d = functions.average_pooling_2d(
x_2d, ksize, stride=stride, pad=pad)
y_2d.grad = gy_data
y_2d.backward()
# Test that the two result gradients are close enough.
testing.assert_allclose(x_nd.grad, x_2d.grad, **self.tolerance)
def test_backward_consistency(self, backend_config):
x = backend_config.get_array(self.x)
gy = backend_config.get_array(self.gy)
self.check_backward_consistency_regression(x, gy, backend_config)
@testing.parameterize(*testing.product({
'dims': [(4, 3, 2), (3, 2), (2,)],
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestAveragePoolingNDCudnnCall(unittest.TestCase):
def setUp(self):
self.ndim = len(self.dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
x_shape = (2, 3) + self.dims
self.x = cuda.cupy.arange(functools.reduce(operator.mul, x_shape),
dtype=self.dtype).reshape(x_shape)
gy_shape = (2, 3) + tuple(
conv.get_conv_outsize(d, k, s, p)
for (d, k, s, p)
in six.moves.zip(self.dims, self.ksize, self.stride, self.pad))
self.gy = cuda.cupy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
def forward(self):
x = chainer.Variable(self.x)
return functions.average_pooling_nd(
x, self.ksize, self.stride, self.pad)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cudnn.pooling_forward') as func:
self.forward()
assert func.called == (
chainer.should_use_cudnn('>=auto') and self.ndim > 1)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
expect = chainer.should_use_cudnn('>=auto') and self.ndim > 1
y = self.forward()
            # should be consistent with forward regardless of use_cudnn config
y.grad = self.gy
with testing.patch('cupy.cudnn.pooling_backward') as func:
y.backward()
assert func.called == expect
class TestAveragePoolingNDWrappers(unittest.TestCase):
def _get_data(self, ndim):
x_shape = (2, 3) + (3,) * ndim
dtype = numpy.float32
x = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
ksize = (2,) * ndim
return x, ksize
def test_average_pooling_1d(self):
(x, ksize) = self._get_data(1)
testing.assert_allclose(
functions.average_pooling_nd(x, ksize).array,
functions.average_pooling_1d(x, ksize).array)
def test_average_pooling_1d_invalid(self):
(x, ksize) = self._get_data(2)
with pytest.raises(ValueError):
functions.average_pooling_1d(x, ksize)
def test_average_pooling_3d(self):
(x, ksize) = self._get_data(3)
testing.assert_allclose(
functions.average_pooling_nd(x, ksize).data,
functions.average_pooling_3d(x, ksize).data)
def test_average_pooling_3d_invalid(self):
(x, ksize) = self._get_data(2)
with pytest.raises(ValueError):
functions.average_pooling_3d(x, ksize)
testing.run_module(__name__, __file__)
| 9,162
| 33.318352
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/pooling_tests/test_roi_pooling_2d.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
class TestROIPooling2D(unittest.TestCase):
def setUp(self):
N = 3
n_channels = 3
self.x = pooling_nd_helper.shuffled_linspace(
(N, n_channels, 12, 8), self.dtype)
self.rois = numpy.array([
[0, 1, 1, 6, 6],
[2, 6, 2, 7, 11],
[1, 3, 1, 5, 10],
[0, 3, 3, 3, 3]
], dtype=self.dtype)
n_rois = self.rois.shape[0]
self.outh, self.outw = 5, 7
self.spatial_scale = 0.6
self.gy = numpy.random.uniform(
-1, 1, (n_rois, n_channels,
self.outh, self.outw)).astype(self.dtype)
if self.dtype == numpy.float16:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
else:
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
def check_forward(self, x_data, roi_data):
x = chainer.Variable(x_data)
rois = chainer.Variable(roi_data)
y = functions.roi_pooling_2d(
x, rois, outh=self.outh, outw=self.outw,
spatial_scale=self.spatial_scale)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
def test_forward_cpu(self):
self.check_forward(self.x, self.rois)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.rois))
@attr.gpu
def test_forward_cpu_gpu_equal(self):
# cpu
x_cpu = chainer.Variable(self.x)
rois_cpu = chainer.Variable(self.rois)
y_cpu = functions.roi_pooling_2d(
x_cpu, rois_cpu, outh=self.outh, outw=self.outw,
spatial_scale=self.spatial_scale)
# gpu
x_gpu = chainer.Variable(cuda.to_gpu(self.x))
rois_gpu = chainer.Variable(cuda.to_gpu(self.rois))
y_gpu = functions.roi_pooling_2d(
x_gpu, rois_gpu, outh=self.outh, outw=self.outw,
spatial_scale=self.spatial_scale)
testing.assert_allclose(y_cpu.data, cuda.to_cpu(y_gpu.data))
def check_backward(self, x_data, roi_data, y_grad):
def f(x, rois):
return functions.roi_pooling_2d(
x, rois, outh=self.outh, outw=self.outw,
spatial_scale=self.spatial_scale)
gradient_check.check_backward(
f, (x_data, roi_data), y_grad, no_grads=[False, True],
**self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.rois, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| 3,199
| 31.989691
| 73
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/pooling_tests/test_pooling_nd_kernel.py
|
import unittest
import chainer
from chainer.functions.pooling import pooling_nd_kernel
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'ndim': [2, 3, 4],
}))
@attr.gpu
class TestPoolingNDKernelMemo(unittest.TestCase):
def setUp(self):
chainer.backends.cuda.clear_memo()
def test_pooling_nd_kernel_forward_memo(self):
ndim = self.ndim
with testing.patch(
'chainer.functions.pooling.pooling_nd_kernel.'
'PoolingNDKernelForward._generate', wraps=None) as m:
pooling_nd_kernel.PoolingNDKernelForward.generate(ndim)
m.assert_called_once_with(ndim)
pooling_nd_kernel.PoolingNDKernelForward.generate(ndim)
# Check that the mocked _generate() function is called just once
            # because the result of the generate() function is cached.
m.assert_called_once_with(ndim)
def test_pooling_nd_kernel_backward_memo(self):
ndim = self.ndim
with testing.patch(
'chainer.functions.pooling.pooling_nd_kernel.'
'PoolingNDKernelBackward._generate', wraps=None) as m:
pooling_nd_kernel.PoolingNDKernelBackward.generate(ndim)
m.assert_called_once_with(ndim)
pooling_nd_kernel.PoolingNDKernelBackward.generate(ndim)
# Check that the mocked _generate() function is called just once
            # because the result of the generate() function is cached.
m.assert_called_once_with(ndim)
testing.run_module(__name__, __file__)
| 1,601
| 35.409091
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/pooling_tests/test_max_pooling_2d.py
|
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper
_inject_backend_tests = backend.inject_backend_tests(
None,
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
)
@_inject_backend_tests
@testing.parameterize(*testing.product({
'cover_all': [True, False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'contiguous': [None, 'C'],
}))
class TestMaxPooling2D(testing.FunctionTestCase):
def setUp(self):
if self.cover_all:
self.output_shape = (2, 3, 3, 2)
else:
self.output_shape = (2, 3, 2, 2)
if self.dtype == numpy.float16:
self.check_backward_options = {
'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {
'atol': 1e-3, 'rtol': 1e-2}
else:
self.check_backward_options = {
'atol': 1e-4, 'rtol': 1e-3}
self.check_double_backward_options = {
'atol': 1e-4, 'rtol': 1e-3}
def generate_inputs(self):
return pooling_nd_helper.shuffled_linspace((2, 3, 4, 3), self.dtype),
def forward_expected(self, inputs):
x, = inputs
expect = numpy.empty(self.output_shape, dtype=self.dtype)
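        # Hand-computed pooling windows for ksize=3, stride=2, pad=1,
        # clipped at the input borders.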
for i in six.moves.range(2):
for c in six.moves.range(3):
xx = x[i, c]
if self.cover_all:
expect[i, c] = numpy.array([
[xx[0:2, 0:2].max(), xx[0:2, 1:3].max()],
[xx[1:4, 0:2].max(), xx[1:4, 1:3].max()],
[xx[3:4, 0:2].max(), xx[3:4, 1:3].max()]])
else:
expect[i, c] = numpy.array([
[xx[0:2, 0:2].max(), xx[0:2, 1:3].max()],
[xx[1:4, 0:2].max(), xx[1:4, 1:3].max()]])
return expect,
def forward(self, inputs, device):
x, = inputs
y = functions.max_pooling_2d(x, 3, stride=2, pad=1,
cover_all=self.cover_all)
return y,
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestMaxPooling2DForwardCpuWide(unittest.TestCase):
# see #120
def test_forward_cpu_wide(self):
x_data = numpy.random.rand(2, 3, 15, 15).astype(self.dtype)
x = chainer.Variable(x_data)
functions.max_pooling_2d(x, 6, stride=6, pad=0)
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestMaxPooling2DCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.arange(
2 * 3 * 4 * 3, dtype=self.dtype).reshape(2, 3, 4, 3)
self.gy = cuda.cupy.random.uniform(-1, 1,
(2, 3, 2, 2)).astype(self.dtype)
def forward(self):
x = chainer.Variable(self.x)
return functions.max_pooling_2d(
x, 3, stride=2, pad=1, cover_all=False)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cudnn.pooling_forward') as func:
self.forward()
self.assertEqual(func.called,
chainer.should_use_cudnn('>=auto'))
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
expect = chainer.should_use_cudnn('>=auto')
y = self.forward()
            # should behave consistently with forward regardless of use_cudnn
y.grad = self.gy
with testing.patch('cupy.cudnn.pooling_backward') as func:
y.backward()
self.assertEqual(func.called, expect)
class TestMaxPooling2DIndices(unittest.TestCase):
def setUp(self):
self.x = pooling_nd_helper.shuffled_linspace(
(2, 3, 4, 4), numpy.float32)
def _check(self, x):
out, indices = functions.max_pooling_2d(
x, 2, cover_all=False, return_indices=True)
assert isinstance(out, chainer.Variable)
assert isinstance(out.array, type(x))
assert isinstance(indices, type(x))
assert indices.shape == out.array.shape
        # Calculate expected indices: each entry is the raveled argmax
        # position within its 2x2 pooling window (see the sketch below).
expect = numpy.zeros(indices.shape, dtype=indices.dtype)
for i in six.moves.range(2):
for c in six.moves.range(3):
xx = x[i, c]
expect[i, c] = numpy.array([
[xx[0:2, 0:2].ravel().argmax(),
xx[0:2, 2:4].ravel().argmax()],
[xx[2:4, 0:2].ravel().argmax(),
xx[2:4, 2:4].ravel().argmax()],
])
if out.xp is cuda.cupy:
expect = cuda.to_gpu(expect)
assert (expect == indices).all()
def test_cpu(self):
self._check(self.x)
@attr.gpu
@attr.cudnn
def test_gpu(self):
x = cuda.to_gpu(self.x)
with chainer.using_config('use_cudnn', 'never'):
self._check(x)
with chainer.using_config('use_cudnn', 'always'):
self._check(x)
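# --- Added illustrative sketch (not part of the original test; helper name
# and values are ours) ---
# What the indices returned with return_indices=True mean: each entry is the
# raveled argmax position inside its pooling window, and it can be fed back
# to functions.upsampling_2d to place every maximum at its original location.
def _indices_roundtrip_sketch():
    x = numpy.arange(16, dtype=numpy.float32).reshape(1, 1, 4, 4)
    y, indices = functions.max_pooling_2d(
        x, 2, cover_all=False, return_indices=True)
    # With increasing values, each 2x2 window's maximum is its bottom-right
    # element, so every raveled in-window index is 3.
    assert (indices == 3).all()
    up = functions.upsampling_2d(y, indices, ksize=2, outsize=(4, 4))
    assert up.shape == (1, 1, 4, 4)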
testing.run_module(__name__, __file__)
| 5,784
| 31.683616
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/pooling_tests/test_max_pooling_nd.py
|
import functools
from operator import mul
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import conv
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper
@testing.parameterize(*testing.product({
'in_dims': [(4,), (4, 3), (4, 3, 2), (1, 1, 1, 1)],
'cover_all': [True, False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestMaxPoolingND(testing.FunctionTestCase):
def setUp(self):
self.ndim = len(self.in_dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
if self.dtype == numpy.float16:
self.check_backward_options.update({
'atol': 1e-3, 'rtol': 1e-2})
self.check_double_backward_options.update({
'atol': 1e-3, 'rtol': 1e-2})
def generate_inputs(self):
x_shape = (2, 3) + self.in_dims
if self.test_name in ('test_backward', 'test_double_backward'):
x = numpy.arange(functools.reduce(mul, x_shape), dtype=self.dtype)
x = x.reshape(x_shape)
x = 2 * x / x.size - 1
else:
x = numpy.random.randn(*x_shape).astype(self.dtype, copy=False)
return x,
def forward(self, inputs, device):
ksize = self.ksize
stride = self.stride
pad = self.pad
cover_all = self.cover_all
x, = inputs
y = functions.max_pooling_nd(
x, ksize, stride=stride, pad=pad, cover_all=cover_all)
return y,
def _get_out_dims(self, in_dims):
out_dims = tuple(
conv.get_conv_outsize(d, k, s, p, self.cover_all)
for d, k, s, p
in six.moves.zip(in_dims, self.ksize, self.stride, self.pad))
return out_dims
def forward_expected(self, inputs):
in_dims = self.in_dims
ksize = self.ksize
stride = self.stride
pad = self.pad
cover_all = self.cover_all
patches = pooling_nd_helper.pooling_patches(
in_dims, ksize, stride, pad, cover_all)
x, = inputs
out_dims = self._get_out_dims(x.shape[2:])
y_shape = x.shape[:2] + out_dims
x = x.astype(numpy.float64)
y = numpy.empty(y_shape, numpy.float64)
for i in six.moves.range(2):
for c in six.moves.range(3):
d = numpy.array([x[i, c][idx].max() for idx in patches])
y[i, c, ...] = d.reshape(out_dims)
return y.astype(self.dtype),
@testing.parameterize(*testing.product({
'dims': [(4, 3, 2), (3, 2), (2,)],
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestMaxPoolingNDCudnnCall(unittest.TestCase):
def setUp(self):
self.ndim = len(self.dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
x_shape = (2, 3) + self.dims
self.x = cuda.cupy.arange(functools.reduce(mul, x_shape),
dtype=self.dtype).reshape(x_shape)
gy_shape = (2, 3) + tuple(
conv.get_conv_outsize(d, k, s, p)
for (d, k, s, p)
in six.moves.zip(self.dims, self.ksize, self.stride, self.pad))
self.gy = cuda.cupy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
def forward(self):
x = chainer.Variable(self.x)
return functions.max_pooling_nd(
x, self.ksize, self.stride, self.pad, cover_all=False)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cudnn.pooling_forward') as func:
self.forward()
self.assertEqual(func.called,
chainer.should_use_cudnn('>=auto') and
self.ndim > 1)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
expect = chainer.should_use_cudnn('>=auto') and self.ndim > 1
y = self.forward()
            # should behave consistently with forward regardless of use_cudnn
y.grad = self.gy
with testing.patch('cupy.cudnn.pooling_backward') as func:
y.backward()
self.assertEqual(func.called, expect)
class TestMaxPoolingNDWrappers(unittest.TestCase):
def _get_data(self, ndim):
x_shape = (2, 3) + (3,) * ndim
dtype = numpy.float32
x = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
ksize = (2,) * ndim
return x, ksize
def test_max_pooling_1d(self):
(x, ksize) = self._get_data(1)
testing.assert_allclose(
functions.max_pooling_nd(x, ksize).data,
functions.max_pooling_1d(x, ksize).data)
def test_max_pooling_1d_invalid(self):
(x, ksize) = self._get_data(2)
with self.assertRaises(ValueError):
functions.max_pooling_1d(x, ksize)
def test_max_pooling_3d(self):
(x, ksize) = self._get_data(3)
testing.assert_allclose(
functions.max_pooling_nd(x, ksize).data,
functions.max_pooling_3d(x, ksize).data)
def test_max_pooling_3d_invalid(self):
(x, ksize) = self._get_data(2)
with self.assertRaises(ValueError):
functions.max_pooling_3d(x, ksize)
class TestMaxPoolingNDIndices(unittest.TestCase):
def setUp(self):
self.x = numpy.arange(
2 * 3 * 4 * 4, dtype=numpy.float32).reshape(2, 3, 4, 4)
def _check(self, x):
out, indices = functions.max_pooling_nd(
x, 2, cover_all=False, return_indices=True)
assert isinstance(out, chainer.Variable)
assert isinstance(out.array, type(x))
assert isinstance(indices, type(x))
assert indices.shape == out.array.shape
        # Calculate expected indices: each entry is the raveled argmax
        # position within its 2x2 pooling window.
expect = numpy.zeros(indices.shape, dtype=indices.dtype)
for i in six.moves.range(2):
for c in six.moves.range(3):
xx = x[i, c]
expect[i, c] = numpy.array([
[xx[0:2, 0:2].ravel().argmax(),
xx[0:2, 2:4].ravel().argmax()],
[xx[2:4, 0:2].ravel().argmax(),
xx[2:4, 2:4].ravel().argmax()],
])
if out.xp is cuda.cupy:
expect = cuda.to_gpu(expect)
assert (expect == indices).all()
def test_cpu(self):
self._check(self.x)
@attr.gpu
@attr.cudnn
def test_gpu(self):
x = cuda.to_gpu(self.x)
with chainer.using_config('use_cudnn', 'never'):
self._check(x)
with chainer.using_config('use_cudnn', 'always'):
self._check(x)
testing.run_module(__name__, __file__)
| 7,481
| 32.401786
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/pooling_tests/test_average_pooling_2d.py
|
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'contiguous': [None, 'C'],
}))
@backend.inject_backend_tests(
None,
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
])
class TestAveragePooling2D(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_double_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, (2, 3, 4, 3)).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.average_pooling_2d(x, 3, stride=2, pad=1),
def forward_expected(self, inputs):
x, = inputs
y = numpy.empty((2, 3, 2, 2), dtype=self.dtype)
for k in six.moves.range(2):
for c in six.moves.range(3):
xx = x[k, c]
y[k, c] = numpy.array([
[xx[0:2, 0:2].sum(), xx[0:2, 1:3].sum()],
[xx[1:4, 0:2].sum(), xx[1:4, 1:3].sum()]]) / 9
return y,
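# --- Added illustrative sketch (not part of the original test; helper name
# and values are ours) ---
# The expected values above divide every window sum by 9 (= 3 * 3) even at
# the borders, because average_pooling_2d always divides by the full kernel
# size and therefore counts the zero padding in the average.
def _pad_in_divisor_sketch():
    x = numpy.ones((1, 1, 2, 2), dtype=numpy.float32)
    y = functions.average_pooling_2d(x, 3, stride=2, pad=1)
    assert y.shape == (1, 1, 1, 1)
    # Four ones and five padded zeros in the single 3x3 window.
    testing.assert_allclose(y.array, numpy.full((1, 1, 1, 1), 4.0 / 9.0))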
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestAveragePooling2DCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.arange(
2 * 3 * 4 * 3, dtype=self.dtype).reshape(2, 3, 4, 3)
self.gy = cuda.cupy.random.uniform(-1, 1,
(2, 3, 2, 2)).astype(self.dtype)
def forward(self):
x = chainer.Variable(self.x)
return functions.average_pooling_2d(x, 3, stride=2, pad=1)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cudnn.pooling_forward') as func:
self.forward()
self.assertEqual(func.called,
chainer.should_use_cudnn('>=auto'))
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
expect = chainer.should_use_cudnn('>=auto')
y = self.forward()
            # should behave consistently with forward regardless of use_cudnn
y.grad = self.gy
with testing.patch('cupy.cudnn.pooling_backward') as func:
y.backward()
self.assertEqual(func.called, expect)
testing.run_module(__name__, __file__)
| 3,177
| 31.10101
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/pooling_tests/test_roi_average_align_2d.py
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper
def _pair(x):
if isinstance(x, chainer.utils.collections_abc.Iterable):
return x
return x, x
@testing.parameterize(*testing.product({
'sampling_ratio': [
None, 1, 2, (None, 3), (1, 2),
(numpy.int32(1), numpy.int32(2)),
],
'outsize': [
5, 7, (5, 7),
(numpy.int32(5), numpy.int32(7)),
],
'spatial_scale': [
0.6, 1.0, 2.0, numpy.float32(0.6),
],
}))
class TestROIAlign2D(unittest.TestCase):
def setUp(self):
N = 3
n_channels = 3
self.x = pooling_nd_helper.shuffled_linspace(
(N, n_channels, 12, 8), numpy.float32)
self.rois = numpy.array([
[1, 1, 6, 6],
[2, 6, 11, 7],
[1, 3, 10, 5],
[3, 3, 3, 3],
[1.1, 2.2, 3.3, 4.4],
], dtype=numpy.float32)
self.roi_indices = numpy.array([0, 2, 1, 0, 2], dtype=numpy.int32)
n_rois = self.rois.shape[0]
outsize = _pair(self.outsize)
self.gy = numpy.random.uniform(
-1, 1, (n_rois, n_channels,
outsize[0], outsize[1])).astype(numpy.float32)
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
def check_forward(self, x_data, roi_data, roi_index_data):
x = chainer.Variable(x_data)
rois = chainer.Variable(roi_data)
roi_indices = chainer.Variable(roi_index_data)
y = functions.roi_average_align_2d(
x, rois, roi_indices, outsize=self.outsize,
spatial_scale=self.spatial_scale,
sampling_ratio=self.sampling_ratio,
)
self.assertEqual(y.data.dtype, numpy.float32)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, self.rois, self.roi_indices)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.roi_indices))
@attr.gpu
@condition.retry(3)
def test_forward_cpu_gpu_equal(self):
# cpu
x_cpu = chainer.Variable(self.x)
rois_cpu = chainer.Variable(self.rois)
roi_indices_cpu = chainer.Variable(self.roi_indices)
y_cpu = functions.roi_average_align_2d(
x_cpu, rois_cpu, roi_indices_cpu, outsize=self.outsize,
spatial_scale=self.spatial_scale,
sampling_ratio=self.sampling_ratio,
)
# gpu
x_gpu = chainer.Variable(cuda.to_gpu(self.x))
rois_gpu = chainer.Variable(cuda.to_gpu(self.rois))
roi_indices_gpu = chainer.Variable(cuda.to_gpu(self.roi_indices))
y_gpu = functions.roi_average_align_2d(
x_gpu, rois_gpu, roi_indices_gpu, outsize=self.outsize,
spatial_scale=self.spatial_scale,
sampling_ratio=self.sampling_ratio,
)
testing.assert_allclose(y_cpu.data, cuda.to_cpu(y_gpu.data))
def check_backward(self, x_data, roi_data, roi_index_data, y_grad):
def f(x, rois, roi_indices):
return functions.roi_average_align_2d(
x, rois, roi_indices, outsize=self.outsize,
spatial_scale=self.spatial_scale,
sampling_ratio=self.sampling_ratio)
gradient_check.check_backward(
f, (x_data, roi_data, roi_index_data), y_grad,
no_grads=[False, True, True], **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.rois, self.roi_indices, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.roi_indices), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| 4,229
| 31.790698
| 74
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/pooling_tests/test_upsampling_2d.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.utils import conv
@testing.parameterize(*testing.product({
'in_shape': [(4, 3, 6, 8), (4, 3, 5, 7)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestUpsampling2D(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.in_shape).astype(self.dtype)
self.ksize = 2
self.stride = 2
with chainer.using_config('use_cudnn', 'never'):
self.pooled_y, self.indices = F.max_pooling_2d(
self.x, ksize=self.ksize, stride=self.stride,
return_indices=True)
self.gy = numpy.random.uniform(
-1, 1, self.in_shape).astype(self.dtype)
self.ggx = numpy.random.uniform(
-1, 1, self.pooled_y.shape).astype(self.dtype)
self.check_backward_options = {}
self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
if self.dtype == numpy.float16:
self.check_double_backward_options = {'atol': 3e-3, 'rtol': 3e-2}
def check_forward(self, y):
y = F.upsampling_2d(
self.pooled_y, self.indices, ksize=self.ksize,
stride=self.stride, outsize=self.in_shape[2:])
if isinstance(y.array, numpy.ndarray):
y = conv.im2col_cpu(
y.array, self.ksize, self.ksize, self.stride, self.stride,
0, 0)
else:
y = conv.im2col_gpu(
y.array, self.ksize, self.ksize, self.stride, self.stride,
0, 0)
for i in numpy.ndindex(y.shape):
n, c, ky, kx, oy, ox = i
up_y = y[n, c, ky, kx, oy, ox]
if ky * y.shape[3] + kx == self.indices[n, c, oy, ox]:
in_y = self.pooled_y.array[n, c, oy, ox]
testing.assert_allclose(in_y, up_y)
else:
testing.assert_allclose(up_y, 0)
def test_forward_cpu(self):
self.pooled_y.to_cpu()
self.check_forward(self.pooled_y)
@attr.gpu
def test_forward_gpu(self):
self.pooled_y.to_gpu()
self.check_forward(self.pooled_y)
def check_backward(self, x_data, y_grad):
def f(x):
return F.upsampling_2d(
x, self.indices, ksize=self.ksize, stride=self.stride,
outsize=self.in_shape[2:])
gradient_check.check_backward(
f, x_data, y_grad, dtype='d', **self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.pooled_y.array, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(
self.pooled_y.array), cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, y_grad, x_grad_grad,
use_cudnn='always'):
def f(x):
y = F.upsampling_2d(
x, self.indices, ksize=self.ksize,
stride=self.stride, outsize=self.in_shape[2:])
return y * y
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_double_backward(
f, x_data, y_grad, x_grad_grad, dtype='d',
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(
self.pooled_y.array, self.gy, self.ggx, 'never')
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.pooled_y.array), cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx))
@attr.gpu
def test_double_backward_gpu_non_contiguous(self):
self.check_double_backward(
cuda.cupy.asfortranarray(cuda.to_gpu(self.pooled_y.array)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.ggx)))
@attr.gpu
def test_double_backward_gpu_no_cudnn(self):
self.check_double_backward(
cuda.to_gpu(self.pooled_y.array), cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx), 'never')
testing.run_module(__name__, __file__)
| 4,291
| 34.766667
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/pooling_tests/test_unpooling_2d.py
|
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper
@testing.parameterize(*testing.product_dict(
[
        # we assume the input size to be (2, 1)
        # standard output sizes, as estimated by the get_deconv_outsize
        # function (see the sketch after the class below)
{'cover_all': False, 'outsize': (4, 2)},
{'cover_all': True, 'outsize': (3, 1)},
{'cover_all': False, 'outsize': None, 'expected_outsize': (4, 2)},
{'cover_all': True, 'outsize': None, 'expected_outsize': (3, 1)},
        # other valid outsizes for the (2, 1) input
{'cover_all': False, 'outsize': (5, 2)},
{'cover_all': True, 'outsize': (4, 2)},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
class TestUnpooling2D(unittest.TestCase):
def setUp(self):
self.N = 2
self.n_channels = 3
inh, inw = 2, 1
self.x = pooling_nd_helper.shuffled_linspace(
(self.N, self.n_channels, inh, inw), self.dtype)
self.ksize = 2
outh, outw = self.outsize or self.expected_outsize
self.gy = numpy.random.uniform(
-1, 1, (self.N, self.n_channels, outh, outw)).astype(self.dtype)
self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_double_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options = {'atol': 2e-3, 'rtol': 2e-2}
self.check_double_backward_options = {'atol': 3e-3, 'rtol': 3e-2}
self.ggx = numpy.random.uniform(
-1, 1, self.x.shape).astype(self.dtype)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = functions.unpooling_2d(x, self.ksize, outsize=self.outsize,
cover_all=self.cover_all)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
for i in six.moves.range(self.N):
for c in six.moves.range(self.n_channels):
outsize = self.outsize or self.expected_outsize
assert y_data.shape[2:] == outsize
if outsize == (5, 2):
expect = numpy.zeros(outsize, dtype=self.dtype)
expect[:2, :] = self.x[i, c, 0, 0]
expect[2:4, :] = self.x[i, c, 1, 0]
elif outsize == (4, 2):
expect = numpy.array([
[self.x[i, c, 0, 0], self.x[i, c, 0, 0]],
[self.x[i, c, 0, 0], self.x[i, c, 0, 0]],
[self.x[i, c, 1, 0], self.x[i, c, 1, 0]],
[self.x[i, c, 1, 0], self.x[i, c, 1, 0]],
])
elif outsize == (3, 1):
expect = numpy.array([
[self.x[i, c, 0, 0]],
[self.x[i, c, 0, 0]],
[self.x[i, c, 1, 0]],
])
else:
raise ValueError('Unsupported outsize: {}'.format(outsize))
testing.assert_allclose(expect, y_data[i, c])
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
def f(x):
return functions.unpooling_2d(x, self.ksize, outsize=self.outsize,
cover_all=self.cover_all)
gradient_check.check_backward(
f, x_data, y_grad, dtype=numpy.float64,
**self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, y_grad, x_grad_grad,
use_cudnn='always'):
def f(x):
return functions.unpooling_2d(x, self.ksize, outsize=self.outsize,
cover_all=self.cover_all)
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_double_backward(
f, x_data, y_grad, x_grad_grad, dtype=numpy.float64,
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(
self.x, self.gy, self.ggx, 'never')
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
@attr.gpu
def test_double_backward_gpu_non_contiguous(self):
self.check_double_backward(
cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.ggx)))
@attr.gpu
def test_double_backward_gpu_no_cudnn(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),
'never')
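# --- Added illustrative sketch (not part of the original test; helper name
# and values are ours) ---
# How the default outsizes referenced in the parameterization above are
# obtained: for the assumed (2, 1) input with ksize=2 (stride defaults to
# ksize, pad=0), chainer.utils.conv.get_deconv_outsize yields (4, 2) for
# cover_all=False and (3, 1) for cover_all=True.
def _expected_outsize_sketch():
    from chainer.utils import conv
    assert conv.get_deconv_outsize(2, 2, 2, 0, cover_all=False) == 4
    assert conv.get_deconv_outsize(1, 2, 2, 0, cover_all=False) == 2
    assert conv.get_deconv_outsize(2, 2, 2, 0, cover_all=True) == 3
    assert conv.get_deconv_outsize(1, 2, 2, 0, cover_all=True) == 1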
@testing.parameterize(*testing.product_dict(
[
{'insize': (2, 1), 'outsize': (4, 2), 'ksize': 2, 'pad': 0},
{'insize': (4, 5), 'outsize': (4, 6), 'ksize': 2, 'pad': 2},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
class TestIntegerScaleUnpooling2D(unittest.TestCase):
def setUp(self):
self.N = 2
self.n_channels = 3
inh, inw = self.insize
self.x = pooling_nd_helper.shuffled_linspace(
(self.N, self.n_channels, inh, inw), self.dtype)
outh, outw = self.outsize or self.expected_outsize
self.gy = numpy.random.uniform(
-1, 1, (self.N, self.n_channels, outh, outw)).astype(self.dtype)
self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_double_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options = {'atol': 2e-3, 'rtol': 2e-2}
self.check_double_backward_options = {'atol': 3e-3, 'rtol': 3e-2}
self.ggx = numpy.random.uniform(
-1, 1, self.x.shape).astype(self.dtype)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = functions.unpooling_2d(
x, self.ksize, outsize=self.outsize, pad=self.pad)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
for i in six.moves.range(self.N):
for c in six.moves.range(self.n_channels):
outsize = self.outsize or self.expected_outsize
assert y_data.shape[2:] == outsize
if outsize == (4, 2):
expect = numpy.array([
[self.x[i, c, 0, 0], self.x[i, c, 0, 0]],
[self.x[i, c, 0, 0], self.x[i, c, 0, 0]],
[self.x[i, c, 1, 0], self.x[i, c, 1, 0]],
[self.x[i, c, 1, 0], self.x[i, c, 1, 0]],
])
elif outsize == (4, 6):
expect = numpy.array([
[self.x[i, c, 1, 1], self.x[i, c, 1, 1],
self.x[i, c, 1, 2], self.x[i, c, 1, 2],
self.x[i, c, 1, 3], self.x[i, c, 1, 3]],
[self.x[i, c, 1, 1], self.x[i, c, 1, 1],
self.x[i, c, 1, 2], self.x[i, c, 1, 2],
self.x[i, c, 1, 3], self.x[i, c, 1, 3]],
[self.x[i, c, 2, 1], self.x[i, c, 2, 1],
self.x[i, c, 2, 2], self.x[i, c, 2, 2],
self.x[i, c, 2, 3], self.x[i, c, 2, 3]],
[self.x[i, c, 2, 1], self.x[i, c, 2, 1],
self.x[i, c, 2, 2], self.x[i, c, 2, 2],
self.x[i, c, 2, 3], self.x[i, c, 2, 3]],
])
else:
raise ValueError('Unsupported outsize: {}'.format(outsize))
testing.assert_allclose(expect, y_data[i, c])
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
def f(x):
return functions.unpooling_2d(x, self.ksize, outsize=self.outsize,
pad=self.pad)
gradient_check.check_backward(
f, x_data, y_grad, dtype=numpy.float64,
**self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, y_grad, x_grad_grad,
use_cudnn='always'):
def f(x):
return functions.unpooling_2d(x, self.ksize, outsize=self.outsize,
pad=self.pad)
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_double_backward(
f, x_data, y_grad, x_grad_grad, dtype=numpy.float64,
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(
self.x, self.gy, self.ggx, 'never')
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
@attr.gpu
def test_double_backward_gpu_non_contiguous(self):
self.check_double_backward(
cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.ggx)))
@attr.gpu
def test_double_backward_gpu_no_cudnn(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),
'never')
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'h': [5],
'k': [3],
's': [3],
'p': [0],
'cover_all': [True, False],
}))
class TestMaxPoolingUnpooling(unittest.TestCase):
def check_left_inverse(self, xp, use_cudnn='never'):
x = xp.arange(self.h * self.h).reshape(
(1, 1, self.h, self.h)).astype(self.dtype)
with chainer.using_config('use_cudnn', use_cudnn):
y = chainer.functions.unpooling_2d(
x, self.k, self.s, self.p, None, self.cover_all)
x_ = chainer.functions.max_pooling_2d(
y, self.k, self.s, self.p, self.cover_all).data
self.assertEqual(x.shape, x_.shape)
self.assertEqual(x.dtype, x_.dtype)
chainer.testing.assert_allclose(x, x_)
def test_left_inverse_cpu(self):
self.check_left_inverse(numpy)
@attr.gpu
def test_left_inverse_cupy(self):
self.check_left_inverse(cuda.cupy)
@attr.gpu
def test_left_inverse_cudnn(self):
self.check_left_inverse(cuda.cupy, 'always')
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'h': [5],
'k': [3],
's': [3],
'p': [0],
}))
class TestAveragePoolingUnpooling(unittest.TestCase):
def check_left_inverse(self, xp, use_cudnn='never'):
x = xp.arange(self.h * self.h).reshape(
(1, 1, self.h, self.h)).astype(self.dtype)
with chainer.using_config('use_cudnn', use_cudnn):
            # average_pooling_2d does not have the cover_all option
            # that max_pooling_2d has (see the sketch after this class).
y = chainer.functions.unpooling_2d(
x, self.k, self.s, self.p, None, False)
x_ = chainer.functions.average_pooling_2d(
y, self.k, self.s, self.p).data
self.assertEqual(x.shape, x_.shape)
self.assertEqual(x.dtype, x_.dtype)
chainer.testing.assert_allclose(x, x_)
def test_left_inverse_cpu(self):
self.check_left_inverse(numpy)
@attr.gpu
def test_left_inverse_cupy(self):
self.check_left_inverse(cuda.cupy)
@attr.gpu
def test_left_inverse_cudnn(self):
self.check_left_inverse(cuda.cupy, 'always')
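# --- Added illustrative sketch (not part of the original tests; helper name
# and values are ours) ---
# Why the left-inverse property tested above holds: with a non-overlapping
# kernel (stride == ksize, pad == 0), unpooling copies each input value over
# a full k x k block, so pooling the result (the max or the average of a
# constant block) recovers the original array exactly.
def _left_inverse_sketch():
    x = numpy.arange(4, dtype=numpy.float32).reshape(1, 1, 2, 2)
    y = functions.unpooling_2d(x, 3, stride=3, pad=0, cover_all=False)
    assert y.shape == (1, 1, 6, 6)
    x_max = functions.max_pooling_2d(y, 3, stride=3, pad=0, cover_all=False)
    x_avg = functions.average_pooling_2d(y, 3, stride=3, pad=0)
    testing.assert_allclose(x_max.array, x)
    testing.assert_allclose(x_avg.array, x)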
testing.run_module(__name__, __file__)
| 12,806
| 36.778761
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/pooling_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/pooling_tests/test_roi_average_pooling_2d.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.utils import collections_abc
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper
def _pair(x):
if isinstance(x, collections_abc.Iterable):
return x
return x, x
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64],
'outsize': [
5, 7, (5, 7),
(numpy.int32(5), numpy.int32(7))],
'spatial_scale': [0.6, 1.0, 2.0, numpy.float32(0.6), numpy.int32(2)],
}))
class TestROIAveragePooling2D(unittest.TestCase):
def setUp(self):
N = 3
n_channels = 3
self.x = pooling_nd_helper.shuffled_linspace(
(N, n_channels, 12, 8), self.dtype)
self.rois = numpy.array([
[1, 1, 7, 7],
[2, 6, 12, 8],
[1, 3, 11, 6],
[3, 3, 4, 4]
], dtype=self.dtype)
self.roi_indices = numpy.array([0, 2, 1, 0], dtype=numpy.int32)
n_rois = self.rois.shape[0]
outsize = _pair(self.outsize)
self.gy = numpy.random.uniform(
-1, 1, (n_rois, n_channels,
outsize[0], outsize[1])).astype(self.dtype)
if self.dtype == numpy.float16:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
else:
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
def check_forward(self, x_data, roi_data, roi_index_data):
x = chainer.Variable(x_data)
rois = chainer.Variable(roi_data)
roi_indices = chainer.Variable(roi_index_data)
y = functions.roi_average_pooling_2d(
x, rois, roi_indices, outsize=self.outsize,
spatial_scale=self.spatial_scale)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
def test_forward_cpu(self):
self.check_forward(self.x, self.rois, self.roi_indices)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.roi_indices))
@attr.gpu
def test_forward_cpu_gpu_equal(self):
# cpu
x_cpu = chainer.Variable(self.x)
rois_cpu = chainer.Variable(self.rois)
roi_indices_cpu = chainer.Variable(self.roi_indices)
y_cpu = functions.roi_average_pooling_2d(
x_cpu, rois_cpu, roi_indices_cpu, outsize=self.outsize,
spatial_scale=self.spatial_scale)
# gpu
x_gpu = chainer.Variable(cuda.to_gpu(self.x))
rois_gpu = chainer.Variable(cuda.to_gpu(self.rois))
roi_indices_gpu = chainer.Variable(cuda.to_gpu(self.roi_indices))
y_gpu = functions.roi_average_pooling_2d(
x_gpu, rois_gpu, roi_indices_gpu, outsize=self.outsize,
spatial_scale=self.spatial_scale)
testing.assert_allclose(y_cpu.data, cuda.to_cpu(y_gpu.data))
def check_backward(self, x_data, roi_data, roi_index_data, y_grad):
def f(x, rois, roi_indices):
return functions.roi_average_pooling_2d(
x, rois, roi_indices, outsize=self.outsize,
spatial_scale=self.spatial_scale)
gradient_check.check_backward(
f, (x_data, roi_data, roi_index_data), y_grad,
no_grads=[False, True, True], **self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.rois, self.roi_indices, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.roi_indices), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| 3,947
| 33.631579
| 73
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/pooling_tests/test_roi_max_align_2d.py
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper
def _pair(x):
if isinstance(x, chainer.utils.collections_abc.Iterable):
return x
return x, x
@testing.parameterize(*testing.product({
'sampling_ratio': [
None, 1, 2, (None, 3), (1, 2),
(numpy.int32(1), numpy.int32(2)),
],
'outsize': [
5, 7, (5, 7),
(numpy.int32(5), numpy.int32(7)),
],
'spatial_scale': [
0.6, 1.0, 2.0, numpy.float32(0.6), numpy.int32(2),
],
}))
class TestROIMaxAlign2D(unittest.TestCase):
def setUp(self):
N = 3
n_channels = 3
self.x = pooling_nd_helper.shuffled_linspace(
(N, n_channels, 12, 8), numpy.float32)
self.rois = numpy.array([
[1, 1, 6, 6],
[6, 2, 7, 11],
[3, 1, 5, 10],
[3, 3, 3, 3],
[1.1, 2.2, 3.3, 4.4],
], dtype=numpy.float32)
self.roi_indices = numpy.array([0, 2, 1, 0, 2], dtype=numpy.int32)
n_rois = self.rois.shape[0]
outsize = _pair(self.outsize)
self.gy = numpy.random.uniform(
-1, 1, (n_rois, n_channels,
outsize[0], outsize[1])).astype(numpy.float32)
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
def check_forward(self, x_data, roi_data, roi_index_data):
x = chainer.Variable(x_data)
rois = chainer.Variable(roi_data)
roi_indices = chainer.Variable(roi_index_data)
y = functions.roi_max_align_2d(
x, rois, roi_indices, outsize=self.outsize,
spatial_scale=self.spatial_scale,
sampling_ratio=self.sampling_ratio,
)
self.assertEqual(y.data.dtype, numpy.float32)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, self.rois, self.roi_indices)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.roi_indices))
@attr.gpu
@condition.retry(3)
def test_forward_cpu_gpu_equal(self):
# cpu
x_cpu = chainer.Variable(self.x)
rois_cpu = chainer.Variable(self.rois)
roi_index_cpu = chainer.Variable(self.roi_indices)
y_cpu = functions.roi_max_align_2d(
x_cpu, rois_cpu, roi_index_cpu,
outsize=self.outsize, spatial_scale=self.spatial_scale,
sampling_ratio=self.sampling_ratio,
)
# gpu
x_gpu = chainer.Variable(cuda.to_gpu(self.x))
rois_gpu = chainer.Variable(cuda.to_gpu(self.rois))
roi_index_gpu = chainer.Variable(cuda.to_gpu(self.roi_indices))
y_gpu = functions.roi_max_align_2d(
x_gpu, rois_gpu, roi_index_gpu,
outsize=self.outsize, spatial_scale=self.spatial_scale,
sampling_ratio=self.sampling_ratio,
)
testing.assert_allclose(y_cpu.data, cuda.to_cpu(y_gpu.data))
def check_backward(self, x_data, roi_data, roi_index_data, y_grad):
def f(x, rois, roi_indices):
y = functions.roi_max_align_2d(
x, rois, roi_indices, outsize=self.outsize,
spatial_scale=self.spatial_scale,
sampling_ratio=self.sampling_ratio)
xp = chainer.backend.get_array_module(y)
y = functions.where(
xp.isinf(y.array), xp.zeros(y.shape, dtype=y.dtype), y)
return y
gradient_check.check_backward(
f, (x_data, roi_data, roi_index_data), y_grad,
no_grads=[False, True, True], **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.rois, self.roi_indices, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.roi_indices), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| 4,400
| 32.090226
| 74
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/connection_tests/test_convolution_2d.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
@testing.parameterize(*(testing.product({
'contiguous': ['C', None],
'cover_all': [True, False],
'x_dtype': [numpy.float32],
'W_dtype': [numpy.float32],
'dilate': [1],
'groups': [1, 2],
'nobias': [True, False],
}) + testing.product({
'contiguous': [None],
'cover_all': [False],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
'dilate': [1],
'groups': [1, 2],
'nobias': [True, False],
})))
@backend.inject_backend_tests(
None,
# ChainerX tests
testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
# CPU tests
+ testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product([
[{'use_cuda': True}],
# Without cuDNN
testing.product({
'use_cudnn': ['never'],
})
# With cuDNN
+ testing.product({
'use_cudnn': ['always'],
'cudnn_deterministic': [True, False],
'autotune': [True, False],
})]))
class TestConvolution2DFunction(testing.FunctionTestCase):
def setUp(self):
self.batches = 2
self.in_channels_a_group = 3
self.out_channels_a_group = 2
self.in_channels = self.in_channels_a_group * self.groups
self.out_channels = self.out_channels_a_group * self.groups
self.kh, self.kw = (3, 3)
self.stride = 2
self.pad = (
int(self.kh / 2) * self.dilate, int(self.kw / 2) * self.dilate)
self.check_forward_options.update({
'atol': 5e-4, 'rtol': 5e-3
})
self.check_backward_options.update({
'atol': 5e-4, 'rtol': 5e-3
})
self.check_double_backward_options.update({
'atol': 5e-4, 'rtol': 5e-3
})
self.old_numpy_fp16 = False
if numpy.float16 in (self.x_dtype, self.W_dtype):
            # Old NumPy versions have a bug in the fp16 conversion that
            # happens in the matrix multiplication used for the grouped
            # convolution.  The outputs are zeroed out below, but the
            # computations are still performed to detect other issues.
old_numpy = numpy.lib.NumpyVersion(numpy.__version__) < '1.17.0'
self.old_numpy_fp16 = (old_numpy
and self.groups == 2
and self.x_dtype == self.W_dtype)
self.check_forward_options.update({
'atol': 1e-3, 'rtol': 1e-2
})
self.check_backward_options.update({
'atol': 1e-3, 'rtol': 1e-3
})
self.check_double_backward_options.update({
'atol': 1e-2, 'rtol': 1e-2
})
def before_test(self, test_name):
# cuDNN 5 and 5.1 results suffer from precision issues
using_old_cudnn = (self.backend_config.xp is cuda.cupy
and self.backend_config.use_cudnn == 'always'
and cuda.cuda.cudnn.getVersion() < 6000)
if using_old_cudnn:
self.check_backward_options.update({
'atol': 1e-3, 'rtol': 1e-3})
self.check_double_backward_options.update({
'atol': 1e-2, 'rtol': 1e-2})
def generate_inputs(self):
W = numpy.random.normal(
0, numpy.sqrt(1. / (self.kh * self.kw * self.in_channels_a_group)),
(self.out_channels, self.in_channels_a_group, self.kh, self.kw)
).astype(self.W_dtype)
x = numpy.random.uniform(
-1, 1, (self.batches, self.in_channels, 4, 3)).astype(self.x_dtype)
if self.nobias:
return x, W
else:
b = numpy.random.uniform(
-1, 1, self.out_channels).astype(self.x_dtype)
return x, W, b
def forward_expected(self, inputs):
"""
Current forward_expected implementation depends on
F.convolution_2d itself and thus it's only capable
of checking consistency between backends, not absolute
correctness of computations
"""
if self.nobias:
x, W = inputs
b = None
else:
x, W, b = inputs
with chainer.using_config('use_ideep', 'never'):
y_expected = F.convolution_2d(
x, W, b, stride=self.stride, pad=self.pad,
cover_all=self.cover_all, dilate=self.dilate,
groups=self.groups)
if self.old_numpy_fp16:
return y_expected.array*0,
return y_expected.array,
def forward(self, inputs, device):
if self.nobias:
x, W = inputs
b = None
else:
x, W, b = inputs
out = F.convolution_2d(
x, W, b, stride=self.stride, pad=self.pad,
cover_all=self.cover_all, dilate=self.dilate,
groups=self.groups)
if self.old_numpy_fp16:
return out*0,
return out,
@testing.parameterize(*(testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'cudnn_deterministic': [False, True],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'dilate': [1],
'groups': [1, 2],
}) + testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'cudnn_deterministic': [False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'dilate': [2],
'groups': [1, 2],
})))
@attr.cudnn
class TestConvolution2DCudnnCall(unittest.TestCase):
def setUp(self):
batches = 2
in_channels_a_group = 3
out_channels_a_group = 2
in_channels = in_channels_a_group * self.groups
out_channels = out_channels_a_group * self.groups
kh, kw = (3, 3)
self.stride = 2
self.pad = (int(kh / 2) * self.dilate, int(kw / 2) * self.dilate)
self.x = cuda.cupy.random.uniform(
-1, 1, (batches, in_channels, 4, 3)).astype(self.dtype)
self.W = cuda.cupy.random.normal(
0, numpy.sqrt(1. / (kh * kw * in_channels_a_group)),
(out_channels, in_channels_a_group, kh, kw)).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(
-1, 1, (batches, out_channels, 2, 2)).astype(self.dtype)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.should_call_cudnn = chainer.should_use_cudnn('>=auto')
if self.dilate > 1 and cuda.cuda.cudnn.getVersion() < 6000:
self.should_call_cudnn = False
if self.groups > 1 and cuda.cuda.cudnn.getVersion() < 7000:
self.should_call_cudnn = False
def forward(self):
x = chainer.Variable(self.x)
W = chainer.Variable(self.W)
return F.convolution_2d(x, W, None, stride=self.stride, pad=self.pad,
dilate=self.dilate, groups=self.groups)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with chainer.using_config('cudnn_deterministic',
self.cudnn_deterministic):
with testing.patch('cupy.cudnn.convolution_forward') as func:
self.forward()
self.assertEqual(func.called, self.should_call_cudnn)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with chainer.using_config('cudnn_deterministic',
self.cudnn_deterministic):
y = self.forward()
y.grad = self.gy
name = 'cupy.cudnn.convolution_backward_data'
with testing.patch(name) as func:
y.backward()
self.assertEqual(func.called, self.should_call_cudnn)
@testing.parameterize(*testing.product({
'c_contiguous': [True, False],
'nobias': [True, False],
'groups': [1, 2],
}))
@attr.gpu
@attr.cudnn
class TestConvolution2DFunctionCudnnDeterministic(unittest.TestCase):
def setUp(self):
self.stride = 2
self.pad = 1
batch_sz = 2
in_channels_a_group = 64
out_channels_a_group = 64
in_channels = in_channels_a_group * self.groups
out_channels = out_channels_a_group * self.groups
kh, kw = (3, 3)
in_h, in_w = (32, 128)
out_h, out_w = (16, 64)
        # x and W must have the same dtype for the cuDNN test
x_dtype = numpy.float32
W_dtype = numpy.float32
self.W = numpy.random.normal(
0, numpy.sqrt(1. / (kh * kw * in_channels_a_group)),
(out_channels, in_channels_a_group, kh, kw)).astype(W_dtype)
self.b = numpy.random.uniform(-1, 1, out_channels).astype(x_dtype)
self.x = numpy.random.uniform(
-1, 1, (batch_sz, in_channels, in_h, in_w)).astype(x_dtype)
self.gy = numpy.random.uniform(
-1, 1, (batch_sz, out_channels, out_h, out_w)).astype(x_dtype)
self.should_call_cudnn = True
if self.groups > 1 and cuda.cuda.cudnn.getVersion() < 7000:
self.should_call_cudnn = False
def test_called(self):
with testing.patch(
'cupy.cudnn.convolution_backward_filter', autospec=True) as f:
            # cuDNN v3 and later support the `cudnn_deterministic` option
            # used in Convolution2DFunction.backward_gpu().
            self._run()
assert f.called == self.should_call_cudnn
def test_cudnn_deterministic(self):
x1, W1, b1, y1 = self._run()
x2, W2, b2, y2 = self._run()
cuda.cupy.testing.assert_array_equal(x1.grad, x2.grad)
cuda.cupy.testing.assert_array_equal(y1.data, y2.data)
cuda.cupy.testing.assert_array_equal(W1.grad, W2.grad)
def _contiguous(self, x_data, W_data, b_data, gy_data):
if not self.c_contiguous:
x_data = numpy.asfortranarray(x_data)
W_data = numpy.asfortranarray(W_data)
gy_data = numpy.asfortranarray(gy_data)
self.assertFalse(x_data.flags.c_contiguous)
self.assertFalse(W_data.flags.c_contiguous)
self.assertFalse(gy_data.flags.c_contiguous)
b = numpy.empty((len(b_data) * 2,), dtype=self.b.dtype)
b[::2] = b_data
b_data = b[::2]
self.assertFalse(b_data.flags.c_contiguous)
return x_data, W_data, b_data, gy_data
def _run(self):
with chainer.using_config('use_cudnn', 'always'):
with chainer.using_config('cudnn_deterministic', True):
                # set up the requested contiguity and move data to the GPU
                # (see the non-contiguous bias sketch after this class)
x_data, W_data, b_data, gy_data = tuple(
cuda.to_gpu(data) for data in self._contiguous(
self.x, self.W, self.b, self.gy))
x, W, b, y = self._run_forward(x_data, W_data, b_data)
y.grad = gy_data
y.backward()
return x, W, b, y
def _run_forward(self, x_data, W_data, b_data):
x = chainer.Variable(x_data)
W = chainer.Variable(W_data)
b = None if self.nobias else chainer.Variable(b_data)
y = F.convolution_2d(x, W, b, stride=self.stride, pad=self.pad,
cover_all=False, groups=self.groups)
return x, W, b, y
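# --- Added illustrative sketch (not part of the original test; helper name
# and values are ours) ---
# How _contiguous() above produces a non-contiguous bias: slicing a
# double-length buffer with a step of 2 yields a view whose stride is twice
# the item size, which NumPy reports as non-contiguous.
def _noncontiguous_bias_sketch():
    b = numpy.empty(8, dtype=numpy.float32)
    view = b[::2]
    assert view.shape == (4,)
    assert not view.flags.c_contiguous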
class TestConvolution2DBackwardNoncontiguousGradOutputs(unittest.TestCase):
    # NumPy raises an error when the inputs of a dot operation are not
    # contiguous. This test ensures this issue is correctly handled.
    # (https://github.com/chainer/chainer/issues/2744)
    # This test depends on the fact that backward() of F.sum generates
    # a non-contiguous array (see the sketch after this class).
def test_1(self):
n_batches = 2
in_channels = 3
out_channels = 1 # important
x_shape = (n_batches, in_channels, 10, 10)
w_shape = (out_channels, in_channels, 3, 3)
x = numpy.ones(x_shape, numpy.float32)
w = numpy.ones(w_shape, numpy.float32)
y = F.convolution_2d(x, chainer.Variable(w))
z = F.sum(y)
z.backward()
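# --- Added illustrative sketch (not part of the original test; helper name
# and values are ours) ---
# Why the gradient reaching the convolution above is non-contiguous: F.sum's
# backward broadcasts the output gradient back to the input shape, and such
# a broadcast is a zero-stride view that NumPy reports as non-contiguous.
def _noncontiguous_grad_sketch():
    gy = numpy.broadcast_to(numpy.float32(1), (2, 1, 8, 8))
    assert gy.strides == (0, 0, 0, 0)
    assert not gy.flags.c_contiguous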
class TestConvolution2DInvalidDilation(unittest.TestCase):
n_batches = 2
in_channels = 3
out_channels = 2
dilate = 0
x_shape = (n_batches, in_channels, 10, 10)
w_shape = (out_channels, in_channels, 3, 3)
def check_invalid_dilation(self, x_data, w_data):
x = chainer.Variable(x_data)
w = chainer.Variable(w_data)
F.convolution_2d(x, w, dilate=self.dilate)
def test_invalid_dilation_cpu(self):
x = numpy.ones(self.x_shape, numpy.float32)
w = numpy.ones(self.w_shape, numpy.float32)
with self.assertRaises(ValueError):
with chainer.using_config('use_ideep', 'never'):
self.check_invalid_dilation(x, w)
@attr.ideep
def test_invalid_dilation_cpu_ideep(self):
x = numpy.ones(self.x_shape, numpy.float32)
w = numpy.ones(self.w_shape, numpy.float32)
with self.assertRaises(ValueError):
with chainer.using_config('use_ideep', 'always'):
self.check_invalid_dilation(x, w)
@attr.gpu
def test_invalid_dilation_gpu(self):
x = cuda.cupy.ones(self.x_shape, numpy.float32)
w = cuda.cupy.ones(self.w_shape, numpy.float32)
with self.assertRaises(ValueError):
with chainer.using_config('use_cudnn', 'never'):
self.check_invalid_dilation(x, w)
@attr.cudnn
def test_invalid_dilation_gpu_cudnn(self):
x = cuda.cupy.ones(self.x_shape, numpy.float32)
w = cuda.cupy.ones(self.w_shape, numpy.float32)
with self.assertRaises(ValueError):
with chainer.using_config('use_cudnn', 'always'):
self.check_invalid_dilation(x, w)
testing.run_module(__name__, __file__)
| 14,148
| 36.136483
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/connection_tests/test_deformable_convolution_2d_sampler.py
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer.functions import convolution_2d
from chainer.functions import deformable_convolution_2d_sampler
from chainer import utils
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'params': [
(1, 1, 1, 1, 1, 1),
(2, 2, 2, 2, 2, 2),
(1, 2, 2, 1, 1, 2),
(1, 2, 3, 4, 1, 2),
(1, 2, 3, 4, 4, 5),
(3, 3, 2, 2, 1, 1),
],
'use_cudnn': ['always', 'never']
}))
class TestDeformableConvolution2DSamplerFunctionZeroOffset(unittest.TestCase):
def setUp(self):
in_channels = 3
out_channels = 2
batch_size = 2
h = 9
w = 9
kh, kw, sy, sx, ph, pw = self.params
self.stride = (sy, sx)
self.pad = (ph, pw)
self.W = numpy.random.normal(
size=(out_channels, in_channels, kh, kw)).astype(numpy.float32)
self.b = numpy.random.uniform(
size=(out_channels,)).astype(numpy.float32)
self.x = numpy.random.uniform(
size=(batch_size, in_channels, h, w)).astype(numpy.float32)
out_h = utils.conv.get_conv_outsize(h, kh, sy, ph)
out_w = utils.conv.get_conv_outsize(w, kw, sx, pw)
self.offset = numpy.zeros(
(batch_size, 2 * kh * kw, out_h, out_w), dtype=numpy.float32)
def check_forward(self, x, offset, W, b, stride, pad):
with chainer.using_config('use_cudnn', self.use_cudnn):
x = chainer.Variable(x)
offset = chainer.Variable(offset)
out = deformable_convolution_2d_sampler(
x, offset, W, b, stride, pad).data
            expected = convolution_2d(
                x, W, b, stride, pad).data
            testing.assert_allclose(out, expected)
def test_forward_cpu(self):
self.check_forward(
self.x, self.offset, self.W, self.b, self.stride, self.pad)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.x),
cuda.to_gpu(self.offset),
cuda.to_gpu(self.W),
cuda.to_gpu(self.b),
self.stride, self.pad)
@testing.parameterize(*testing.product({
'params': [
(1, 1, 1, 1, 1, 1),
(2, 2, 2, 2, 2, 2),
(1, 2, 2, 1, 1, 2),
(1, 2, 3, 4, 1, 2),
(1, 2, 3, 4, 4, 5),
(3, 3, 2, 2, 1, 1),
],
'use_cudnn': ['always', 'never']
}))
class TestDeformableConvolution2DSamplerFunctionLeftBottomOffset(
unittest.TestCase):
def setUp(self):
in_channels = 3
out_channels = 2
batch_size = 2
h = 9
w = 9
kh, kw, sy, sx, ph, pw = self.params
self.stride = (sy, sx)
self.pad = (ph, pw)
self.W = numpy.random.normal(
size=(out_channels, in_channels, kh, kw)).astype(numpy.float32)
self.b = numpy.random.uniform(
size=(out_channels,)).astype(numpy.float32)
self.x = numpy.random.uniform(
size=(batch_size, in_channels, h, w)).astype(numpy.float32)
out_h = utils.conv.get_conv_outsize(h, kh, sy, ph)
out_w = utils.conv.get_conv_outsize(w, kw, sx, pw)
self.offset = numpy.zeros(
(batch_size, 2 * kh * kw, out_h, out_w), dtype=numpy.float32)
def check_forward(self, x, offset, W, b, stride, pad):
with chainer.using_config('use_cudnn', self.use_cudnn):
_, _, h, w = x.shape
_, _, kh, kw = W.shape
offset[:, :kh * kw] = -1 * stride[1]
offset[:, kh * kw:] = 1 * stride[0]
x = chainer.Variable(x)
offset = chainer.Variable(offset)
out = deformable_convolution_2d_sampler(
x, offset, W, b, stride, pad).data
pad = (pad[0] + 1 * stride[0], pad[1] + 1 * stride[1])
            expected = convolution_2d(
                x, W, b, stride, pad).data
            expected = expected[:, :, 2:, :-2]
            testing.assert_allclose(out, expected)
def test_forward_cpu(self):
self.check_forward(
self.x, self.offset, self.W, self.b, self.stride, self.pad)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.x),
cuda.to_gpu(self.offset),
cuda.to_gpu(self.W),
cuda.to_gpu(self.b),
self.stride, self.pad)
testing.run_module(__name__, __file__)
| 4,512
| 29.493243
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/connection_tests/test_convolution_nd.py
|
import functools
from operator import mul
import unittest
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
from chainer.testing import attr
from chainer.utils import conv
@testing.parameterize(*(testing.product({
'dims': [(5,), (4, 3), (3, 4, 3)],
'dilate': [1, 2],
'groups': [1, 2],
'cover_all': [True, False],
'contiguous': ['C'],
'x_dtype': [numpy.float32],
'W_dtype': [numpy.float32],
'b_dtype': [numpy.float32],
'autotune': [True, False],
'nobias': [True, False],
}) + testing.product({
'dims': [(4,)],
'dilate': [1],
'groups': [1],
'cover_all': [False],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
'b_dtype': [numpy.float16, numpy.float32, numpy.float64],
'autotune': [False],
'nobias': [True, False],
})))
@testing.inject_backend_tests(
None,
# CPU tests
[{}]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
]
)
class TestConvolutionND(testing.FunctionTestCase):
def setUp(self):
self.N = 2
self.in_channels = 4
self.out_channels = 2
self.ndim = len(self.dims)
self.ksize = (2,) * self.ndim
self.stride = (1,) * self.ndim
self.pad = (1,) * self.ndim
self.dilate = (self.dilate,) * self.ndim
self.x_shape = (self.N, self.in_channels) + self.dims
self.W_shape = (
self.out_channels, self.in_channels // self.groups) + self.ksize
self.W_scale = numpy.sqrt(
1. / functools.reduce(mul, self.ksize, self.in_channels))
self.gy_shape = (self.N, self.out_channels) + tuple(
conv.get_conv_outsize(d, k, s, p, cover_all=self.cover_all, d=di)
for (d, k, s, p, di)
in zip(self.dims, self.ksize, self.stride, self.pad, self.dilate))
self.check_backward_options.update({'atol': 5e-5, 'rtol': 5e-4})
self.check_double_backward_options.update(
{'atol': 5e-4, 'rtol': 5e-3})
if self.x_dtype == numpy.float16 or self.W_dtype == numpy.float16:
self.check_forward_options.update({'atol': 5e-4, 'rtol': 5e-3})
self.check_backward_options.update({
'atol': 2 ** -4, 'rtol': 2 ** -4})
self.check_double_backward_options.update({
'atol': 2 ** -4, 'rtol': 2 ** -4})
def before_test(self, test_name):
        # Some of the test configurations do not support autotune,
        # so this hack (setting it on the backend config directly) is
        # necessary for the CI to work
self.backend_config.autotune = self.autotune
# cuDNN 5 and 5.1 results suffer from precision issues
using_old_cudnn = (self.backend_config.xp is cuda.cupy
and self.backend_config.use_cudnn == 'always'
and cuda.cuda.cudnn.getVersion() < 6000)
if using_old_cudnn:
self.check_backward_options.update({
'atol': 1e-3, 'rtol': 1e-3})
self.check_double_backward_options.update({
'atol': 1e-2, 'rtol': 1e-2})
def generate_inputs(self):
W = numpy.random.normal(
0, self.W_scale, self.W_shape).astype(self.W_dtype)
x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.x_dtype)
if self.nobias:
return x, W
else:
b = numpy.random.uniform(
-1, 1, self.out_channels).astype(self.x_dtype)
return x, W, b
def forward_expected(self, inputs):
"""
Current forward_expected implementation depends on
F.convolution_nd itself and thus it's only capable
of checking consistency between backends, not absolute
correctness of computations
"""
if self.nobias:
x, W = inputs
b = None
else:
x, W, b = inputs
y_expected = F.convolution_nd(
x, W, b, stride=self.stride, pad=self.pad,
cover_all=self.cover_all, dilate=self.dilate,
groups=self.groups)
return y_expected.array,
def forward(self, inputs, device):
if self.nobias:
x, W = inputs
b = None
else:
x, W, b = inputs
y = F.convolution_nd(
x, W, b, stride=self.stride, pad=self.pad,
cover_all=self.cover_all, dilate=self.dilate,
groups=self.groups)
return y,
def check_forward_consistency_regression(self, backend_config):
inputs = self.generate_inputs()
if self.nobias:
x, W = inputs
b = None
else:
x, W, b = inputs
x = chainer.Variable(backend_config.get_array(x))
W = chainer.Variable(backend_config.get_array(W))
if b is not None:
b = chainer.Variable(backend_config.get_array(b))
with chainer.using_config('use_cudnn', 'never'):
y_nd = F.convolution_nd(
x, W, b, stride=self.stride, pad=self.pad,
cover_all=self.cover_all, dilate=self.dilate,
groups=self.groups)
y_2d = F.convolution_2d(
x, W, b, stride=self.stride, pad=self.pad,
cover_all=self.cover_all, dilate=self.dilate,
groups=self.groups)
testing.assert_allclose(
y_nd.array, y_2d.array, **self.check_forward_options)
def test_consistency_regression_forward(self, backend_config):
        # Regression test against convolution_2d.
if len(self.dims) == 2:
self.check_forward_consistency_regression(backend_config)
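# --- Added illustrative sketch (not part of the original test; helper name
# and values are ours) ---
# The gy_shape computation in setUp() above relies on get_conv_outsize's
# dilation argument `d`: a dilated kernel behaves like one of effective
# size k + (k - 1) * (d - 1).
def _dilated_outsize_sketch():
    # input size 5, ksize 2, stride 1, pad 1
    assert conv.get_conv_outsize(5, 2, 1, 1, d=1) == 6
    assert conv.get_conv_outsize(5, 2, 1, 1, d=2) == 5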
@testing.parameterize(*testing.product({
'dims': [(10,), (10, 8), (10, 8, 6)],
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestConvolutionNDCudnnCall(unittest.TestCase):
def setUp(self):
in_channels = 3
out_channels = 2
ndim = len(self.dims)
ksize = (3,) * ndim
self.stride = (2,) * ndim
self.pad = (1,) * ndim
x_shape = (2, 3) + self.dims
self.x = cuda.cupy.random.uniform(-1, 1, x_shape).astype(self.dtype)
W_scale = numpy.sqrt(1. / functools.reduce(mul, ksize, in_channels))
W_shape = (out_channels, in_channels) + ksize
self.W = cuda.cupy.random.normal(
0, W_scale, W_shape).astype(self.dtype)
gy_shape = (2, 2) + tuple(
conv.get_conv_outsize(d, k, s, p) for (d, k, s, p) in zip(
self.dims, ksize, self.stride, self.pad))
self.gy = cuda.cupy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.expect = chainer.should_use_cudnn('>=auto') and ndim > 1
def forward(self):
x = chainer.Variable(cuda.to_gpu(self.x))
W = chainer.Variable(cuda.to_gpu(self.W))
return F.convolution_nd(
x, W, None, stride=self.stride, pad=self.pad)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cudnn.convolution_forward') as func:
self.forward()
self.assertEqual(func.called, self.expect)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
name = 'cupy.cudnn.convolution_backward_data'
with testing.patch(name) as func:
y.backward()
self.assertEqual(func.called, self.expect)
class TestConvolutionNDarraySupplied(unittest.TestCase):
def setUp(self):
N = 2
in_channels = 3
out_channels = 2
dtype = numpy.float32
x_shape = (N, in_channels, 3, 3, 3)
self.x_data = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
W_shape = (out_channels, in_channels, 1, 1, 1)
self.W_data = numpy.random.uniform(-1, 1, W_shape).astype(dtype)
self.b_data = numpy.random.uniform(-1, 1, out_channels).astype(dtype)
def check_array_supplied(self, x_ary, W_ary, b_ary):
y_ary = F.convolution_nd(x_ary, W_ary, b_ary)
x_var = chainer.Variable(x_ary)
W_var = chainer.Variable(W_ary)
b_var = chainer.Variable(b_ary)
y_var = F.convolution_nd(x_var, W_var, b_var)
testing.assert_allclose(y_ary.data, y_var.data)
def test_array_supplied_cpu(self):
self.check_array_supplied(self.x_data, self.W_data, self.b_data)
@attr.gpu
def test_array_supplied_gpu(self):
self.check_array_supplied(cuda.to_gpu(self.x_data),
cuda.to_gpu(self.W_data),
cuda.to_gpu(self.b_data))
class TestConvolutionNDBackwardNoncontiguousGradOutputs(unittest.TestCase):
    # NumPy raises an error when the inputs of a dot operation are not
    # contiguous. This test ensures this issue is correctly handled.
    # (https://github.com/chainer/chainer/issues/2744)
    # This test depends on the fact that backward() of F.sum generates
    # a non-contiguous array.
def test_1(self):
n_batches = 2
in_channels = 3
out_channels = 1 # important
x_shape = (n_batches, in_channels, 4)
w_shape = (out_channels, in_channels, 3)
x = numpy.ones(x_shape, numpy.float32)
w = numpy.ones(w_shape, numpy.float32)
y = F.convolution_nd(chainer.Variable(x), w)
z = F.sum(y)
z.backward()
def test_2(self):
n_batches = 2
in_channels = 3
out_channels = 1 # important
x_shape = (n_batches, in_channels, 4)
w_shape = (out_channels, in_channels, 3)
x = numpy.ones(x_shape, numpy.float32)
w = numpy.ones(w_shape, numpy.float32)
y = F.convolution_nd(x, chainer.Variable(w))
z = F.sum(y)
z.backward()
class TestConvolutionNDWrappers(unittest.TestCase):
def _get_data(self, ndim):
in_channels = 3
out_channels = 2
dtype = numpy.float32
x_shape = (2, in_channels) + (3,) * ndim
x = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
W_shape = (out_channels, in_channels) + (1,) * ndim
W = numpy.random.uniform(-1, 1, W_shape).astype(dtype)
b = numpy.random.uniform(-1, 1, out_channels).astype(dtype)
return x, W, b
def test_conv1d(self):
(x, W, b) = self._get_data(1)
testing.assert_allclose(
F.convolution_nd(x, W, b).data, F.convolution_1d(x, W, b).data)
def test_conv1d_invalid(self):
(x, W, b) = self._get_data(2)
with self.assertRaises(ValueError):
F.convolution_1d(x, W, b)
def test_conv3d(self):
(x, W, b) = self._get_data(3)
testing.assert_allclose(
F.convolution_nd(x, W, b).data, F.convolution_3d(x, W, b).data)
def test_conv3d_invalid(self):
(x, W, b) = self._get_data(2)
with self.assertRaises(ValueError):
F.convolution_3d(x, W, b)
testing.run_module(__name__, __file__)
| 11,475
| 34.310769
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/connection_tests/test_deconvolution_nd.py
|
import functools
from operator import mul
import unittest
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
from chainer.testing import attr
from chainer.testing import parameterize
from chainer.utils import conv
from chainer.utils import type_check
@parameterize(*testing.product({
'dims': [(4, 3, 2), (2,)],
'dilate': [1, 2],
'groups': [1, 2],
'nobias': [False],
'test_outsize': [False],
'contiguous': ['C'],
'b_dtype': [numpy.float32],
'x_dtype': [numpy.float32],
'W_dtype': [numpy.float32],
}) + testing.product({
'dims': [(3, 2)],
'dilate': [1, 2],
'groups': [1],
'nobias': [False],
'test_outsize': [False],
'contiguous': ['C'],
'b_dtype': [numpy.float16, numpy.float32, numpy.float64],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
}) + testing.product({
'dims': [(3, 2)],
'dilate': [1, 2],
'groups': [1],
'nobias': [True, False],
'test_outsize': [True, False],
'contiguous': ['C', None],
'b_dtype': [numpy.float32],
'x_dtype': [numpy.float32],
'W_dtype': [numpy.float32],
}))
@testing.inject_backend_tests(
None,
# ChainerX tests
testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
# CPU tests
+ testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product([
[{'use_cuda': True}],
# Without cuDNN
testing.product({
'use_cudnn': ['never'],
})
# With cuDNN
+ testing.product({
'use_cudnn': ['always'],
'autotune': [True, False],
})]))
class TestDeconvolutionND(testing.FunctionTestCase):
def setUp(self):
self.N = 2
self.in_channels = 4
self.out_channels = 2
self.ndim = len(self.dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
self.dilate = (self.dilate,) * self.ndim
self.W_scale = numpy.sqrt(1. / functools.reduce(mul, self.ksize,
self.in_channels))
self.W_shape = (self.in_channels,
self.out_channels // self.groups) + self.ksize
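        # Sketch of the output-size formula used below (cover_all=False):
        # out = s * (in - 1) + k_eff - 2 * p, where
        # k_eff = k + (k - 1) * (d - 1) is the dilated kernel extent.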
outs = tuple(
conv.get_deconv_outsize(d, k, s, p, d=di)
for (d, k, s, p, di)
in zip(self.dims, self.ksize, self.stride, self.pad, self.dilate))
self.outsize = outs if self.test_outsize else None
self.x_shape = (self.N, self.in_channels) + self.dims
self.gy_shape = (self.N, self.out_channels) + outs
self.check_backward_options.update({'atol': 3e-5, 'rtol': 3e-4})
self.check_double_backward_options.update({'atol': 5e-3, 'rtol': 5e-2})
if (self.x_dtype == numpy.float16 or self.W_dtype == numpy.float16
or self.b_dtype == numpy.float16):
self.check_forward_options.update({'atol': 5e-3, 'rtol': 5e-3})
self.check_backward_options.update({
'atol': 2 ** -4, 'rtol': 2 ** -4})
self.check_double_backward_options.update({
'atol': 2 ** -4, 'rtol': 2 ** -4})
def before_test(self, test_name):
# cuDNN 5 and 5.1 results suffer from precision issues
using_old_cudnn = (self.backend_config.xp is cuda.cupy
and self.backend_config.use_cudnn == 'always'
and cuda.cuda.cudnn.getVersion() < 6000)
if using_old_cudnn:
self.check_backward_options.update({
'atol': 1e-3, 'rtol': 1e-3})
self.check_double_backward_options.update({
'atol': 1e-2, 'rtol': 1e-2})
def generate_inputs(self):
W = numpy.random.normal(
0, self.W_scale, self.W_shape).astype(self.W_dtype)
x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.x_dtype)
if self.nobias:
return x, W
else:
b = numpy.random.uniform(
-1, 1, self.out_channels).astype(self.x_dtype)
return x, W, b
def forward_expected(self, inputs):
"""
Current forward_expected implementation depends on
F.deconvolution_nd itself and thus it's only capable
of checking consistency between backends, not absolute
correctness of computations
"""
if self.nobias:
x, W = inputs
b = None
else:
x, W, b = inputs
y_expected = F.deconvolution_nd(
x, W, b, stride=self.stride, pad=self.pad,
outsize=self.outsize, dilate=self.dilate,
groups=self.groups)
return y_expected.array,
def forward(self, inputs, device):
if self.nobias:
x, W = inputs
b = None
else:
x, W, b = inputs
y = F.deconvolution_nd(
x, W, b, stride=self.stride, pad=self.pad,
outsize=self.outsize, dilate=self.dilate,
groups=self.groups)
return y,
def check_forward_consistency_regression(self, backend_config):
inputs = self.generate_inputs()
if self.nobias:
x, W = inputs
b = None
else:
x, W, b = inputs
x = chainer.Variable(backend_config.get_array(x))
W = chainer.Variable(backend_config.get_array(W))
if b is not None:
b = chainer.Variable(backend_config.get_array(b))
use_cudnn = backend_config.use_cudnn
with chainer.using_config('use_cudnn', use_cudnn):
y_nd = F.deconvolution_nd(x, W, b, stride=self.stride,
pad=self.pad, outsize=self.outsize,
dilate=self.dilate)
y_2d = F.deconvolution_2d(x, W, b, stride=self.stride,
pad=self.pad, outsize=self.outsize,
dilate=self.dilate)
testing.assert_allclose(
y_nd.array, y_2d.array, **self.check_forward_options)
def test_consistency_regression_forward(self, backend_config):
        # Regression test against deconvolution_2d.
if len(self.dims) == 2:
self.check_forward_consistency_regression(backend_config)
@testing.parameterize(*testing.product({
'dims': [(5, 4, 3), (4, 3), (3,)],
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestDeconvolutionNDCudnnCall(unittest.TestCase):
def setUp(self):
in_channels = 3
out_channels = 2
ndim = len(self.dims)
ksize = (3,) * ndim
stride = (1,) * ndim
pad = (1,) * ndim
W_scale = numpy.sqrt(1. / functools.reduce(mul, ksize, in_channels))
W_shape = (in_channels, out_channels) + ksize
self.W = cuda.cupy.random.normal(
0, W_scale, W_shape).astype(self.dtype)
outs = tuple(
conv.get_deconv_outsize(d, k, s, p)
for (d, k, s, p) in zip(self.dims, ksize, stride, pad))
x_shape = (2, in_channels) + self.dims
self.x = cuda.cupy.random.uniform(-1, 1, x_shape).astype(self.dtype)
gy_shape = (2, out_channels) + outs
self.gy = cuda.cupy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.expected = chainer.should_use_cudnn('>=auto') and ndim > 1
def forward(self):
x = chainer.Variable(self.x)
W = chainer.Variable(self.W)
return F.deconvolution_nd(x, W, None, stride=1, pad=1)
def test_call_cudnn_forward(self):
name = 'cupy.cudnn.convolution_backward_data'
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch(name) as func:
self.forward()
self.assertEqual(func.called, self.expected)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
with testing.patch('cupy.cudnn.convolution_forward') as func:
y.backward()
self.assertEqual(func.called, self.expected)
class TestDeconvolutionNDarraySupplied(unittest.TestCase):
def setUp(self):
N = 2
in_channels = 3
out_channels = 2
dtype = numpy.float32
x_shape = (N, in_channels, 3, 3, 3)
self.x_data = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
W_shape = (in_channels, out_channels, 1, 1, 1)
self.W_data = numpy.random.uniform(-1, 1, W_shape).astype(dtype)
self.b_data = numpy.random.uniform(-1, 1, out_channels).astype(dtype)
def check_array_supplied(self, x_ary, W_ary, b_ary):
y_ary = F.deconvolution_nd(x_ary, W_ary, b_ary)
x_var = chainer.Variable(x_ary)
W_var = chainer.Variable(W_ary)
b_var = chainer.Variable(b_ary)
y_var = F.deconvolution_nd(x_var, W_var, b_var)
testing.assert_allclose(y_ary.data, y_var.data)
def test_array_supplied_cpu(self):
self.check_array_supplied(self.x_data, self.W_data, self.b_data)
@attr.gpu
def test_array_supplied_gpu(self):
self.check_array_supplied(cuda.to_gpu(self.x_data),
cuda.to_gpu(self.W_data),
cuda.to_gpu(self.b_data))
class TestDeconvolutionNDTypeCheck(unittest.TestCase):
def test_number_of_inputs(self):
# Too few inputs
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
F.connection.deconvolution_nd.DeconvolutionND(1).apply((x,))
        # Too many inputs
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.float32)
b = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
F.connection.deconvolution_nd.DeconvolutionND(1).apply(
(x, W, b, x))
def test_data_and_weight(self):
# dtype of data
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.int32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
F.deconvolution_nd(x, W)
# dtype of weight
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.int32)
with self.assertRaises(type_check.InvalidType):
F.deconvolution_nd(x, W)
# ndim of weight
x = numpy.random.uniform(-1, 1, (2, 3, 4, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
F.deconvolution_nd(x, W)
# shapes of data and weight
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (2, 2, 2)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
F.deconvolution_nd(x, W)
def test_supplied_outsize(self):
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.float32)
outsize = (10,)
with self.assertRaises(type_check.InvalidType):
F.deconvolution_nd(x, W, outsize=outsize)
def test_bias(self):
# dtype
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.float32)
b = numpy.random.uniform(-1, 1, (2,)).astype(numpy.int32)
with self.assertRaises(type_check.InvalidType):
F.deconvolution_nd(x, W, b=b)
# ndim
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.float32)
b = numpy.random.uniform(-1, 1, (2, 2)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
F.deconvolution_nd(x, W, b=b)
def test_estimated_outsize(self):
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(numpy.float32)
W = numpy.random.uniform(-1, 1, (3, 2, 2)).astype(numpy.float32)
stride = 1
pad = 10
with self.assertRaises(AssertionError):
F.deconvolution_nd(x, W, stride=stride, pad=pad)
class TestDeconvolutionNDWrappers(unittest.TestCase):
def _get_data(self, ndim):
in_channels = 3
out_channels = 2
dtype = numpy.float32
x_shape = (2, in_channels) + (3,) * ndim
x = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
W_shape = (in_channels, out_channels) + (1,) * ndim
W = numpy.random.uniform(-1, 1, W_shape).astype(dtype)
b = numpy.random.uniform(-1, 1, out_channels).astype(dtype)
return x, W, b
def test_deconv1d(self):
(x, W, b) = self._get_data(1)
testing.assert_allclose(
F.deconvolution_nd(x, W, b).data, F.deconvolution_1d(x, W, b).data)
def test_deconv1d_invalid(self):
(x, W, b) = self._get_data(2)
with self.assertRaises(ValueError):
F.deconvolution_1d(x, W, b)
def test_deconv3d(self):
(x, W, b) = self._get_data(3)
testing.assert_allclose(
F.deconvolution_nd(x, W, b).data, F.deconvolution_3d(x, W, b).data)
def test_deconv3d_invalid(self):
(x, W, b) = self._get_data(2)
with self.assertRaises(ValueError):
F.deconvolution_3d(x, W, b)
testing.run_module(__name__, __file__)
| 14,050
| 35.591146
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/connection_tests/test_bilinear.py
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
def _uniform(*shape):
return numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
@testing.parameterize(*testing.product({
'in_shapes': [((2,), (4,)), ((2, 1), (4, 2))],
'out_size': [3],
'batch_size': [2],
'test_partial': [True, False],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': ['never', 'always']},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestBilinearFunction(testing.FunctionTestCase):
def setUp(self):
self.e1_shape = (self.batch_size,) + self.in_shapes[0]
self.e2_shape = (self.batch_size,) + self.in_shapes[1]
self.e1_size = numpy.prod(self.in_shapes[0])
self.e2_size = numpy.prod(self.in_shapes[1])
self.check_backward_options = {
'atol': 1e-5, 'rtol': 1e-4}
self.check_double_backward_options = {
'atol': 1e-4, 'rtol': 1e-3}
def generate_inputs(self):
e1 = _uniform(*self.e1_shape)
e2 = _uniform(*self.e2_shape)
W = _uniform(self.e1_size, self.e2_size, self.out_size)
if self.test_partial:
return e1, e2, W
else:
V1 = _uniform(self.e1_size, self.out_size)
V2 = _uniform(self.e2_size, self.out_size)
b = _uniform(self.out_size)
return e1, e2, W, V1, V2, b
def forward_expected(self, inputs):
if self.test_partial:
e1, e2, W = inputs
V1 = None
V2 = None
b = None
else:
e1, e2, W, V1, V2, b = inputs
e1 = e1.reshape(e1.shape[0], -1)
e2 = e2.reshape(e2.shape[0], -1)
xp = backend.get_array_module(e1)
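        # Bilinear form:
        # y[i, l] = sum over j, k of e1[i, j] * e2[i, k] * W[j, k, l]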
y_expect = xp.einsum('ij,ik,jkl->il', e1, e2, W)
flags = V1 is None, V2 is None, b is None
if any(flags):
if not all(flags):
raise ValueError(
'Test either all or none of the optional parameters.')
else:
y_expect += e1.dot(V1)
y_expect += e2.dot(V2)
y_expect += b
return y_expect,
def forward(self, inputs, device):
if self.test_partial:
e1, e2, W = inputs
V1 = None
V2 = None
b = None
else:
e1, e2, W, V1, V2, b = inputs
flags = V1 is None, V2 is None, b is None
if any(flags):
if not all(flags):
raise ValueError(
'Test either all or none of the optional parameters.')
y = functions.bilinear(e1, e2, W)
else:
y = functions.bilinear(e1, e2, W, V1, V2, b)
return y,
@attr.slow
class TestBilinearFunctionLarge(unittest.TestCase):
def setUp(self):
self.e1 = _uniform(256, 256)
self.e2 = _uniform(256, 256)
self.w = _uniform(256, 256, 256)
self.v1 = _uniform(256, 256)
self.v2 = _uniform(256, 256)
self.b = _uniform(256)
def test_cpu(self):
chainer.functions.bilinear(
self.e1, self.e2, self.w, self.v1, self.v2, self.b)
@attr.gpu
def test_gpu(self):
chainer.functions.bilinear(*map(cuda.to_gpu, (
self.e1, self.e2, self.w, self.v1, self.v2, self.b)))
class TestBilinearFunctionInvalidArgument(unittest.TestCase):
def setUp(self):
e1 = _uniform(3, 2)
e2 = _uniform(3, 4)
W = _uniform(2, 4, 5)
V1 = _uniform(2, 5)
self.e1 = chainer.Variable(e1)
self.e2 = chainer.Variable(e2)
self.W = chainer.Variable(W)
self.V1 = chainer.Variable(V1)
def test_invalid_full_partial_ambiguous(self):
with self.assertRaises(ValueError):
functions.bilinear(self.e1, self.e2, self.W, self.V1)
testing.run_module(__name__, __file__)
| 4,269
| 27.278146
| 74
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/connection_tests/test_depthwise_convolution_2d.py
|
import unittest
import numpy
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*(testing.product({
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
})))
class TestDepthwiseConvolution2DFunction(unittest.TestCase):
def setUp(self):
in_channels = 3
channel_multiplier = 2
kh, kw = (3, 3)
self.stride = 2
self.pad = 1
self.W = numpy.random.normal(
0, numpy.sqrt(1. / (kh * kw * in_channels)),
(channel_multiplier, in_channels, kh, kw)).astype(self.W_dtype)
self.b = numpy.random.uniform(
-1, 1, in_channels * channel_multiplier).astype(self.x_dtype)
self.x = numpy.random.uniform(
-1, 1, (2, 3, 4, 3)).astype(self.x_dtype)
self.gy = numpy.random.uniform(
-1, 1, (2, 6, 2, 2)).astype(self.x_dtype)
self.check_forward_options = {}
self.check_backward_options = {'dtype': numpy.float64}
if self.x_dtype == numpy.float16 or self.W_dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-3, 'rtol': 5e-3}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 5e-3, 'rtol': 5e-3}
def check_forward(self, x_data, W_data, b_data):
args1 = (x_data, W_data)
args2 = (x_data, W_data)
if b_data is not None:
args1 = args1 + (b_data,)
b_data = sum(numpy.split(b_data, W_data.shape[1]))
args2 = args2 + (b_data,)
y1 = functions.depthwise_convolution_2d(
*args1, stride=self.stride, pad=self.pad)
arys = numpy.split(y1.array, self.W.shape[1], axis=1)
y1 = sum(arys)
y2 = functions.convolution_2d(
*args2, stride=self.stride, pad=self.pad).array
testing.assert_allclose(y1, y2, **self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(self.x, self.W, self.b)
def test_forward_cpu_nobias(self):
self.check_forward(self.x, self.W, None)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
cuda.to_gpu(self.b))
@attr.gpu
def test_forward_gpu_nobias(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.W), None)
def check_backward(self, x_data, W_data, b_data, y_grad):
args = (x_data, W_data)
if b_data is not None:
args = args + (b_data,)
gradient_check.check_backward(
lambda *inputs: functions.depthwise_convolution_2d(
*inputs, stride=self.stride, pad=self.pad),
args, y_grad, **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.W, self.b, self.gy)
@condition.retry(3)
def test_backward_cpu_nobias(self):
self.check_backward(self.x, self.W, None, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
cuda.to_gpu(self.b), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_backward_gpu_nobias(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
None, cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| 3,641
| 33.358491
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/connection_tests/test_dilated_convolution_2d.py
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*(testing.product({
'c_contiguous': [True],
'cover_all': [True, False],
'x_dtype': [numpy.float32],
'W_dtype': [numpy.float32],
}) + testing.product({
'c_contiguous': [False],
'cover_all': [False],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
})))
class TestDilatedConvolution2DFunction(unittest.TestCase):
def setUp(self):
in_channels = 3
out_channels = 2
kh, kw = (3, 3)
self.stride = 2
self.pad = 2
self.dilate = 2
self.use_cudnn = 'always'
self.W = numpy.random.normal(
0, numpy.sqrt(1. / (kh * kw * in_channels)),
(out_channels, in_channels, kh, kw)).astype(self.W_dtype)
self.b = numpy.random.uniform(
-1, 1, out_channels).astype(self.x_dtype)
self.x = numpy.random.uniform(
-1, 1, (2, 3, 4, 3)).astype(self.x_dtype)
if self.cover_all:
self.gy = numpy.random.uniform(-1, 1,
(2, 2, 3, 2)).astype(self.x_dtype)
else:
self.gy = numpy.random.uniform(
-1, 1, (2, 2, 2, 2)).astype(self.x_dtype)
self.check_forward_options = {}
self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
if self.x_dtype == numpy.float16 or self.W_dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
@attr.gpu
def test_forward_consistency(self, nobias=False):
x_cpu = chainer.Variable(self.x)
W_cpu = chainer.Variable(self.W)
b_cpu = None if nobias else chainer.Variable(self.b)
y_cpu = F.dilated_convolution_2d(
x_cpu, W_cpu, b_cpu, stride=self.stride, pad=self.pad,
dilate=self.dilate, cover_all=self.cover_all)
x_gpu = chainer.Variable(cuda.to_gpu(self.x))
W_gpu = chainer.Variable(cuda.to_gpu(self.W))
b_gpu = None if nobias else chainer.Variable(cuda.to_gpu(self.b))
with chainer.using_config('use_cudnn', self.use_cudnn):
y_gpu = F.dilated_convolution_2d(
x_gpu, W_gpu, b_gpu, stride=self.stride, pad=self.pad,
dilate=self.dilate, cover_all=self.cover_all)
testing.assert_allclose(
y_cpu.data, y_gpu.data.get(), **self.check_forward_options)
@attr.gpu
def test_forward_consistency_im2col(self):
self.use_cudnn = 'never'
self.test_forward_consistency()
@attr.gpu
def test_forward_consistency_im2col_nobias(self):
self.use_cudnn = 'never'
self.test_forward_consistency(nobias=True)
def check_backward(self, x_data, W_data, b_data, y_grad):
xp = backend.get_array_module(x_data)
if not self.c_contiguous:
x_data = xp.asfortranarray(x_data)
W_data = xp.asfortranarray(W_data)
y_grad = xp.asfortranarray(y_grad)
self.assertFalse(x_data.flags.c_contiguous)
self.assertFalse(W_data.flags.c_contiguous)
self.assertFalse(y_grad.flags.c_contiguous)
if b_data is not None:
b = xp.empty((len(b_data) * 2,), dtype=self.b.dtype)
b[::2] = b_data
b_data = b[::2]
self.assertFalse(b_data.flags.c_contiguous)
args = (x_data, W_data)
if b_data is not None:
args = args + (b_data,)
def f(*args):
return F.dilated_convolution_2d(*args, stride=self.stride,
pad=self.pad, dilate=self.dilate,
cover_all=self.cover_all)
with chainer.using_config('use_cudnn', self.use_cudnn):
gradient_check.check_backward(
f, args, y_grad, dtype=numpy.float64,
**self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.W, self.b, self.gy)
def test_backward_cpu_nobias(self):
self.check_backward(self.x, self.W, None, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
cuda.to_gpu(self.b), cuda.to_gpu(self.gy))
@attr.gpu
def test_backward_gpu_nobias(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
None, cuda.to_gpu(self.gy))
@attr.gpu
def test_backward_gpu_im2col(self):
self.use_cudnn = 'never'
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
cuda.to_gpu(self.b), cuda.to_gpu(self.gy))
@attr.gpu
def test_backward_gpu_im2col_nobias(self):
self.use_cudnn = 'never'
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
None, cuda.to_gpu(self.gy))
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestDilatedConvolution2DCudnnCall(unittest.TestCase):
def setUp(self):
in_channels = 3
out_channels = 2
kh, kw = (3, 3)
self.stride = 2
self.pad = 2
self.dilate = 2
self.x = cuda.cupy.random.uniform(
-1, 1, (2, 3, 4, 3)).astype(self.dtype)
self.W = cuda.cupy.random.normal(
0, numpy.sqrt(1. / (kh * kw * in_channels)),
(out_channels, in_channels, kh, kw)).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(
-1, 1, (2, 2, 2, 2)).astype(self.dtype)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.expect = chainer.should_use_cudnn('>=auto')
if cuda.cuda.cudnn.getVersion() < 6000:
self.expect = False
def forward(self):
x = chainer.Variable(self.x)
W = chainer.Variable(self.W)
return F.dilated_convolution_2d(
x, W, None, stride=self.stride, pad=self.pad, dilate=self.dilate)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cudnn.convolution_forward') as func:
self.forward()
self.assertEqual(func.called, self.expect)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
name = 'cupy.cudnn.convolution_backward_data'
with testing.patch(name) as func:
y.backward()
self.assertEqual(func.called, self.expect)
testing.run_module(__name__, __file__)
| 7,100
| 35.984375
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/connection_tests/test_embed_id.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer.functions.connection import embed_id
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
@testing.parameterize(*testing.product_dict(
[
{'x': (0, 1, 2), 'w_shape': (5, 3), 'ignore_label': 1},
{'x': (2, 1, 2), 'w_shape': (6, 3), 'ignore_label': None},
{'x': (3, 1), 'w_shape': (4, 3), 'ignore_label': 3},
], [
{'w_dtype': numpy.float16},
{'w_dtype': numpy.float32},
{'w_dtype': numpy.float64},
], [
{'x_dtype': numpy.int16},
{'x_dtype': numpy.int32},
{'x_dtype': numpy.int64},
]
))
@testing.fix_random()
@backend.inject_backend_tests(
None,
# ChainerX tests
testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
# CPU tests
+ testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product([
[{'use_cuda': True}],
# Without cuDNN
testing.product({
'use_cudnn': ['never'],
})
# With cuDNN
+ testing.product({
'use_cudnn': ['always'],
'cudnn_deterministic': [True, False],
'autotune': [True, False],
})]))
class TestEmbedID(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
def generate_inputs(self):
x = numpy.array(self.x).astype(self.x_dtype)
W = numpy.random.uniform(-1, 1, self.w_shape).astype(self.w_dtype)
return x, W,
def forward(self, inputs, device):
x, W = inputs
out = chainer.functions.embed_id(x, W, self.ignore_label)
return out,
def forward_expected(self, inputs):
x, W = inputs
if self.ignore_label is not None:
mask = (x == self.ignore_label)
return numpy.where(mask[..., None], 0, W[numpy.where(mask, 0, x)]),
return W[x],
@testing.parameterize(
{'x_data': [0, 1, 0], 'ignore_label': None},
{'x_data': [[0, 1, 0], [1, 0, 1]], 'ignore_label': None},
{'x_data': [0, 1, -1], 'ignore_label': -1},
{'x_data': [[0, 1, -1], [-1, 0, 1]], 'ignore_label': -1},
{'x_data': [0, 1, 2], 'ignore_label': 2},
{'x_data': [[0, 1, 0], [1, 0, 1]], 'ignore_label': 1},
)
class TestEmbedIdGrad(unittest.TestCase):
n_unit = (4,)
w_shape = (4, 2)
def setUp(self):
self.x = numpy.array(self.x_data, dtype='i')
self.gy = numpy.random.uniform(
-1, 1, self.x.shape + (2,)).astype('f')
self.ggW = numpy.random.uniform(-1, 1, self.w_shape).astype('f')
def check_backward(self, x, gy, ggW):
        # NOTE: this early return currently disables the gradient check below.
        return
def f(x, gy):
emb = embed_id.EmbedIDGrad(
self.w_shape, self.ignore_label)
return emb.apply((x, numpy.zeros(()), gy))[0]
gradient_check.check_backward(f, (x, gy), (ggW,))
def test_backward_cpu(self):
self.check_backward(self.x, self.gy, self.ggW)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggW))
testing.run_module(__name__, __file__)
| 3,449
| 27.991597
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/connection_tests/test_linear.py
|
import unittest
import numpy
import pytest
import chainer
from chainer import functions
from chainer import testing
from chainer.testing import backend
@testing.parameterize(*testing.product({
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
'x_shape': [{'n_batch_axes': 1, 'data_shape': (3,)},
{'n_batch_axes': 3, 'data_shape': (3, 5)},
],
'contiguous': ['C', None],
'nobias': [True, False],
}))
@backend.inject_backend_tests(
None,
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ [{
'use_cuda': True,
}]
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
]
)
class TestNonparameterizedLinear(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
if self.x_dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.n_batch_axes = self.x_shape['n_batch_axes']
def before_test(self, test_name):
# TODO(crcrpar): Remove this relaxation when
# a known issue in the reduction of ChainerX is resolved.
if test_name == 'test_forward':
if (self.x_dtype == numpy.float16 and
self.W_dtype == numpy.float16 and
self.n_batch_axes == 3 and
self.backend_config.use_chainerx and
self.backend_config.chainerx_device == 'native:0'):
self.check_forward_options['atol'] = 5e-3
def generate_inputs(self):
data_shape = self.x_shape['data_shape']
batch_shape = (4,) + (2,) * (self.n_batch_axes - 1)
x = numpy.random.uniform(
-1, 1, batch_shape + data_shape).astype(self.x_dtype)
input_size = numpy.prod(data_shape)
W = numpy.random.uniform(-1, 1, (2, input_size)).astype(self.W_dtype)
if self.nobias:
return x, W
else:
b = numpy.random.uniform(-1, 1, 2).astype(self.x_dtype)
return x, W, b
def forward_expected(self, inputs):
x, W = inputs[:2]
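        # Flatten all leading batch axes into a single batch dimension
        # before the matmul; the output is reshaped back below.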
if self.n_batch_axes > 1:
batch_shape = x.shape[:self.n_batch_axes]
batch_size = numpy.prod(batch_shape)
x = x.reshape(batch_size, -1)
y = x.dot(W.T)
if not self.nobias:
y += inputs[-1]
if self.n_batch_axes > 1:
y = y.reshape(batch_shape + (-1,))
return y.astype(self.x_dtype),
def forward(self, inputs, device):
if self.nobias:
x, W = inputs
b = None
else:
x, W, b = inputs
y = functions.linear(x, W, b, n_batch_axes=self.n_batch_axes)
return y,
class TestLinearBackwardNoncontiguousGradOutputs(unittest.TestCase):
    # NumPy raises an error when the inputs of the dot operation are not
    # contiguous. This test ensures this issue is correctly handled.
    # (https://github.com/chainer/chainer/issues/2744)
    # This test depends on the fact that backward() of F.sum generates
    # a non-contiguous array.
def test_1(self):
with chainer.using_config('use_ideep', 'never'):
n_batches = 1 # important
in_dims = (2, 2)
out_dim = 3
x_shape = (n_batches,) + in_dims
w_shape = (out_dim, numpy.prod(in_dims),)
x = numpy.ones(x_shape, numpy.float32)
w = numpy.ones(w_shape, numpy.float32)
y = functions.linear(chainer.Variable(x), w)
z = functions.sum(y)
z.backward()
class TestLinearNBatchAxesBoundaryCondition(unittest.TestCase):
def setUp(self):
self.W = numpy.random.uniform(
-1, 1, (2, 15)).astype(numpy.float32)
self.x = numpy.random.uniform(
-1, 1, (3, 3, 5)).astype(numpy.float32)
def test_negative(self):
n_batch_axes = -1
with pytest.raises(ValueError):
functions.linear(self.x, self.W, n_batch_axes=n_batch_axes)
def test_zero(self):
n_batch_axes = 0
with pytest.raises(ValueError):
functions.linear(self.x, self.W, n_batch_axes=n_batch_axes)
testing.run_module(__name__, __file__)
| 4,516
| 32.213235
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/connection_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/connection_tests/test_local_convolution_2d.py
|
import unittest
import numpy
from six import moves
from chainer import cuda
from chainer.functions.connection import convolution_2d
from chainer.functions.connection import local_convolution_2d
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*(testing.product({
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
})))
class TestLocalConvolution2DFunction(unittest.TestCase):
def setUp(self):
in_channels = 3
out_channels = 6
kh, kw = (3, 3)
oh, ow = (2, 2)
self.stride = 1
self.W = numpy.random.normal(
0, numpy.sqrt(1. / (kh * kw * in_channels)),
(out_channels, oh, ow, in_channels, kh, kw)).astype(self.W_dtype)
self.b = numpy.random.uniform(
-1, 1, (out_channels, oh, ow,)).astype(self.x_dtype)
self.x = numpy.random.uniform(
-1, 1, (2, in_channels, 4, 4)).astype(self.x_dtype)
self.gy = numpy.random.uniform(
-1, 1, (2, out_channels, oh, ow)).astype(self.x_dtype)
self.check_forward_options = {}
self.check_backward_options = {'dtype': numpy.float64}
if self.x_dtype == numpy.float16 or self.W_dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-3, 'rtol': 5e-3}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 5e-3, 'rtol': 5e-3}
def check_forward(self, x_data, W_data, b_data):
# If all the filters are the same,
# the operation is equivalent to convolution_2d
for i in moves.range(W_data.shape[1]):
for j in moves.range(W_data.shape[2]):
W_data[:, i, j, ...] = W_data[:, 0, 0, ...]
args1 = (x_data, W_data)
args2 = (x_data, W_data[:, 0, 0, ...])
if b_data is not None:
for i in moves.range(b_data.shape[1]):
for j in moves.range(b_data.shape[2]):
b_data[:, i, j, ] = b_data[:, 0, 0, ]
args1 = args1 + (b_data,)
b_data2 = b_data[:, 0, 0, ]
args2 = args2 + (b_data2,)
f1 = local_convolution_2d.LocalConvolution2DFunction(self.stride)
y1 = f1.apply(args1)[0].data
f2 = convolution_2d.Convolution2DFunction(self.stride, 0)
y2 = f2.apply(args2)[0].data
testing.assert_allclose(y1, y2, **self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(self.x, self.W, self.b)
def test_forward_cpu_nobias(self):
self.check_forward(self.x, self.W, None)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
cuda.to_gpu(self.b))
@attr.gpu
def test_forward_gpu_nobias(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.W), None)
def check_backward(self, x_data, W_data, b_data, y_grad):
args = (x_data, W_data)
if b_data is not None:
args = args + (b_data,)
gradient_check.check_backward(
local_convolution_2d.local_convolution_2d,
args, y_grad, **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.W, self.b, self.gy)
@condition.retry(3)
def test_backward_cpu_nobias(self):
self.check_backward(self.x, self.W, None, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
cuda.to_gpu(self.b), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_backward_gpu_nobias(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
None, cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| 4,022
| 34.60177
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/connection_tests/test_deconvolution_2d.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
from chainer.testing import array
from chainer.testing import attr
from chainer.testing import parameterize
from chainer.utils import conv
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
@parameterize(*(testing.product([
testing.product({
'contiguous': ['C'],
'test_outsize': [True, False],
'nobias': [True],
'stride': [1, 2],
'dilate': [1],
'x_dtype': [numpy.float32],
'W_dtype': [numpy.float32],
'groups': [1, 2],
})
+ testing.product({
'contiguous': [None],
'test_outsize': [True],
'nobias': [False],
'stride': [1, 2],
'dilate': [1, 2],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
'groups': [1, 2],
}),
])))
@testing.inject_backend_tests(
None,
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product([
[{'use_cuda': True}],
# Without cuDNN
testing.product({
'use_cudnn': ['never'],
})
# With cuDNN
+ testing.product({
'use_cudnn': ['always'],
'cudnn_deterministic': [True, False],
'autotune': [True, False],
})])
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
}))
class TestDeconvolution2DFunction(testing.FunctionTestCase):
def setUp(self):
self.N = 2
self.inh, self.inw = 4, 3
self.in_channels_a_group = 3
self.out_channels_a_group = 2
self.in_channels = self.in_channels_a_group * self.groups
self.out_channels = self.out_channels_a_group * self.groups
self.ksize = 3
self.pad = 1
self.kh, self.kw = _pair(self.ksize)
self.sh, self.sw = _pair(self.stride)
self.ph, self.pw = _pair(self.pad)
outh = conv.get_deconv_outsize(self.inh, self.kh, self.sh,
self.ph, d=self.dilate)
outw = conv.get_deconv_outsize(self.inw, self.kw, self.sw,
self.pw, d=self.dilate)
self.outsize = (outh, outw) if self.test_outsize else None
if self.x_dtype == numpy.float16:
self.check_forward_options.update(atol=5e-3, rtol=5e-2)
self.check_backward_options.update(atol=5e-3, rtol=5e-2)
self.check_double_backward_options.update(atol=5e-3, rtol=5e-2)
elif self.W_dtype == numpy.float16:
self.check_backward_options.update(atol=5e-3, rtol=5e-2)
self.check_double_backward_options.update(atol=5e-3, rtol=5e-2)
def before_test(self, test_name):
# cuDNN 5 and 5.1 results suffer from precision issues
using_old_cudnn = (self.backend_config.xp is cuda.cupy
and self.backend_config.use_cudnn == 'always'
and cuda.cuda.cudnn.getVersion() < 6000)
if using_old_cudnn:
self.check_backward_options.update({
'atol': 1e-3, 'rtol': 1e-3})
self.check_double_backward_options.update({
'atol': 1e-2, 'rtol': 1e-2})
def generate_inputs(self):
W = numpy.random.normal(
0, numpy.sqrt(1. / (self.kh * self.kw * self.in_channels_a_group)),
(self.in_channels, self.out_channels_a_group, self.kh, self.kw)
).astype(self.W_dtype)
x = numpy.random.uniform(
-1, 1, (self.N, self.in_channels,
self.inh, self.inw)).astype(self.x_dtype)
if self.nobias:
return x, W
else:
b = numpy.random.uniform(
-1, 1, self.out_channels).astype(self.x_dtype)
return x, W, b
def forward_expected(self, inputs):
"""
Current forward_expected implementation depends on
F.deconvolution_2d itself and thus it's only capable
of checking consistency between backends, not absolute
correctness of computations
"""
if self.nobias:
x, W = inputs
b = None
else:
x, W, b = inputs
y_expected = F.deconvolution_2d(
x, W, b, stride=self.stride, pad=self.pad,
outsize=self.outsize, dilate=self.dilate,
groups=self.groups)
return y_expected.array,
def forward(self, inputs, device):
if self.nobias:
x, W = inputs
b = None
else:
x, W, b = inputs
y = F.deconvolution_2d(
x, W, b, stride=self.stride, pad=self.pad,
outsize=self.outsize, dilate=self.dilate,
groups=self.groups)
return y,
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'cudnn_deterministic': [True, False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'groups': [1, 2],
}))
@attr.cudnn
class TestDeconvolution2DCudnnCall(unittest.TestCase):
def setUp(self):
in_channels_a_group = 3
out_channels_a_group = 2
self.in_channels = in_channels_a_group * self.groups
self.out_channels = out_channels_a_group * self.groups
kh, kw = _pair(3)
sh, sw = _pair(1)
ph, pw = _pair(1)
self.W = cuda.cupy.random.normal(
0, numpy.sqrt(1. / (kh * kw * in_channels_a_group)),
(self.in_channels, out_channels_a_group, kh, kw)
).astype(self.dtype)
N = 2
inh, inw = 4, 3
outh = conv.get_deconv_outsize(inh, kh, sh, ph)
outw = conv.get_deconv_outsize(inw, kw, sw, pw)
self.x = cuda.cupy.random.uniform(
-1, 1, (N, self.in_channels, inh, inw)).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(
-1, 1, (N, self.out_channels, outh, outw)).astype(self.dtype)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.should_call_cudnn = chainer.should_use_cudnn('>=auto')
if self.groups > 1 and cuda.cuda.cudnn.getVersion() < 7000:
self.should_call_cudnn = False
def forward(self):
x = chainer.Variable(self.x)
W = chainer.Variable(self.W)
return F.deconvolution_2d(x, W, None, stride=1, pad=1,
groups=self.groups)
def test_call_cudnn_forward(self):
name = 'cupy.cudnn.convolution_backward_data'
with chainer.using_config('use_cudnn', self.use_cudnn):
with chainer.using_config('cudnn_deterministic',
self.cudnn_deterministic):
with testing.patch(name) as func:
self.forward()
self.assertEqual(func.called, self.should_call_cudnn)
def test_call_cudnn_backward(self):
data_func_name = 'cupy.cudnn.convolution_forward'
filter_func_name = 'cupy.cudnn.convolution_backward_filter'
with chainer.using_config('use_cudnn', self.use_cudnn):
with chainer.using_config('cudnn_deterministic',
self.cudnn_deterministic):
y = self.forward()
y.grad = self.gy
with testing.patch(data_func_name) as data_func, \
testing.patch(filter_func_name) as filter_func:
y.backward()
self.assertEqual(
data_func.called, self.should_call_cudnn)
self.assertEqual(
filter_func.called, self.should_call_cudnn)
@testing.parameterize(*testing.product({
'c_contiguous': [True, False],
'cudnn_deterministic': [True, False],
'nobias': [True, False],
'groups': [1, 2],
}))
@attr.gpu
@attr.cudnn
class TestDeconvolution2DFunctionCudnnDeterministic(unittest.TestCase):
def setUp(self):
self.stride = 2
self.pad = 1
batch_sz = 2
in_channels_a_group = 64
out_channels_a_group = 64
in_channels = in_channels_a_group * self.groups
out_channels = out_channels_a_group * self.groups
kh, kw = (3, 3)
in_h, in_w = (32, 128)
out_h, out_w = (63, 255)
        # x and W must use the same dtype for the cuDNN test
x_dtype = numpy.float32
W_dtype = numpy.float32
self.W = numpy.random.normal(
0, numpy.sqrt(1. / (kh * kw * in_channels_a_group)),
(out_channels, in_channels_a_group, kh, kw)).astype(W_dtype)
self.b = numpy.random.uniform(-1, 1, out_channels).astype(x_dtype)
self.x = numpy.random.uniform(
-1, 1, (batch_sz, in_channels, in_h, in_w)).astype(x_dtype)
self.gy = numpy.random.uniform(
-1, 1, (batch_sz, out_channels, out_h, out_w)).astype(x_dtype)
def test_cudnn_deterministic(self):
x1, W1, b1, y1 = self._run()
x2, W2, b2, y2 = self._run()
cuda.cupy.testing.assert_array_equal(x1.grad, x2.grad)
cuda.cupy.testing.assert_array_equal(y1.data, y2.data)
cuda.cupy.testing.assert_array_equal(W1.grad, W2.grad)
def _contiguous(self, *inputs):
if self.c_contiguous:
return inputs
else:
return array._as_noncontiguous_array(inputs)
def _run(self):
with chainer.using_config('cudnn_deterministic', True):
            # arrange the configured data contiguity and move the data to GPU
x_data, W_data, b_data, gy_data = tuple(
cuda.to_gpu(data) for data in self._contiguous(
self.x, self.W, self.b, self.gy))
x, W, b, y = self._run_forward(x_data, W_data, b_data)
y.grad = gy_data
y.backward()
return x, W, b, y
def _run_forward(self, x_data, W_data, b_data):
x = chainer.Variable(x_data)
W = chainer.Variable(W_data)
b = None if self.nobias else chainer.Variable(b_data)
with chainer.using_config('use_cudnn', 'always'):
y = F.deconvolution_2d(x, W, b, stride=self.stride, pad=self.pad,
groups=self.groups)
return x, W, b, y
class TestDeconvolution2DInvalidDilation(unittest.TestCase):
n_batches = 2
in_channels = 3
out_channels = 2
dilate = 0
x_shape = (n_batches, in_channels, 10, 10)
w_shape = (in_channels, out_channels, 3, 3)
def check_invalid_dilation(self, x_data, w_data):
x = chainer.Variable(x_data)
w = chainer.Variable(w_data)
F.deconvolution_2d(x, w, dilate=self.dilate)
def test_invalid_dilation_cpu(self):
x = numpy.ones(self.x_shape, numpy.float32)
w = numpy.ones(self.w_shape, numpy.float32)
with self.assertRaises(ValueError):
with chainer.using_config('use_ideep', 'never'):
self.check_invalid_dilation(x, w)
@attr.ideep
def test_invalid_dilation_cpu_ideep(self):
x = numpy.ones(self.x_shape, numpy.float32)
w = numpy.ones(self.w_shape, numpy.float32)
with self.assertRaises(ValueError):
with chainer.using_config('use_ideep', 'always'):
self.check_invalid_dilation(x, w)
@attr.gpu
def test_invalid_dilation_gpu(self):
x = cuda.cupy.ones(self.x_shape, numpy.float32)
w = cuda.cupy.ones(self.w_shape, numpy.float32)
with self.assertRaises(ValueError):
with chainer.using_config('use_cudnn', 'never'):
self.check_invalid_dilation(x, w)
@attr.cudnn
def test_invalid_dilation_gpu_cudnn(self):
x = cuda.cupy.ones(self.x_shape, numpy.float32)
w = cuda.cupy.ones(self.w_shape, numpy.float32)
with self.assertRaises(ValueError):
with chainer.using_config('use_cudnn', 'cudnn'):
self.check_invalid_dilation(x, w)
@testing.parameterize(*(testing.product({
'n_batches': [2, 3],
'requires_x_grad': [True, False],
'requires_w_grad': [True, False],
'requires_b_grad': [True, False],
})))
class TestDeconvolution2DVariousGradTargets(unittest.TestCase):
in_channels = 3
out_channels = 2
def check_backward_succeed(self, x_data, w_data, b_data):
x = chainer.Variable(x_data, requires_grad=self.requires_x_grad)
w = chainer.Variable(w_data, requires_grad=self.requires_w_grad)
b = chainer.Variable(b_data, requires_grad=self.requires_b_grad)
y = F.deconvolution_2d(x, w, b)
F.sum(y).backward()
def test_backward_cpu(self):
x_shape = (self.n_batches, self.in_channels, 10, 10)
w_shape = (self.in_channels, self.out_channels, 5, 5)
x = numpy.ones(x_shape, numpy.float32)
w = numpy.ones(w_shape, numpy.float32)
b = numpy.ones(self.out_channels, numpy.float32)
with chainer.using_config('use_ideep', 'never'):
self.check_backward_succeed(x, w, b)
testing.run_module(__name__, __file__)
| 13,231
| 34.956522
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/connection_tests/test_shift.py
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.functions.connection import shift
@testing.parameterize(*(testing.product({
'shape': [(4, 3)],
'c_contiguous': [True, False],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'batches': [1, 2],
'ksize': [(3, 3), (3, 1), (1, 3), (1, 5), (5, 1)],
'n_channels': [9, 16, 25],
'dilate': [1],
}) + testing.product({
'shape': [(10, 8)],
'c_contiguous': [True, False],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'batches': [1, 2],
'ksize': [(5, 5)],
'n_channels': [25],
'dilate': [1],
}) + testing.product({
'shape': [(10, 8)],
'c_contiguous': [True, False],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'batches': [1, 2],
'ksize': [(7, 7), (7, 1), (1, 7)],
'n_channels': [100],
'dilate': [1],
}) + testing.product({
'shape': [(4, 3)],
'c_contiguous': [True, False],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'batches': [1, 2],
'ksize': [(3, 3), (3, 1), (1, 3), (1, 5), (5, 1)],
'n_channels': [16],
'dilate': [2, 3],
})))
class TestShiftFunction(unittest.TestCase):
def setUp(self):
h, w = self.shape
self.x = numpy.random.uniform(
-1, 1, (self.batches, self.n_channels, h, w)).astype(self.x_dtype)
self.gy = numpy.random.uniform(
-1, 1, (self.batches, self.n_channels, h, w)).astype(self.x_dtype)
@attr.gpu
def test_forward_consistency(self):
x_data = self.x
xp = backend.get_array_module(x_data)
if not self.c_contiguous:
x_data = xp.asfortranarray(x_data)
self.assertFalse(x_data.flags.c_contiguous)
x_cpu = chainer.Variable(x_data)
y_cpu = shift.shift(
x_cpu, ksize=self.ksize, dilate=self.dilate)
x_gpu = chainer.Variable(cuda.to_gpu(x_data))
y_gpu = shift.shift(
x_gpu, ksize=self.ksize, dilate=self.dilate)
testing.assert_allclose(
y_cpu.data, y_gpu.data.get(), atol=5e-4, rtol=5e-3)
def check_backward(self, x_data, y_grad):
xp = backend.get_array_module(x_data)
if not self.c_contiguous:
x_data = xp.asfortranarray(x_data)
y_grad = xp.asfortranarray(y_grad)
self.assertFalse(x_data.flags.c_contiguous)
self.assertFalse(y_grad.flags.c_contiguous)
gradient_check.check_backward(
lambda x: shift.shift(x, ksize=self.ksize, dilate=self.dilate),
x_data, y_grad, dtype='d', atol=5e-4, rtol=5e-3)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| 3,094
| 29.343137
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/evaluation_tests/test_classification_summary.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions as F
from chainer import testing
from chainer.testing import attr
def recall(preds, ts, dtype, label_num, ignore_label):
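    # Per-class recall: true positives / number of ground-truth samples of
    # that class, with positions labeled ignore_label excluded.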
tp = numpy.zeros((label_num,), dtype=numpy.int32)
support = numpy.zeros((label_num,), dtype=numpy.int32)
for p, t in zip(preds.ravel(), ts.ravel()):
if t == ignore_label:
continue
support[t] += 1
if p == t:
tp[t] += 1
return dtype(tp) / support
def precision(preds, ts, dtype, label_num, ignore_label):
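    # Per-class precision: true positives / number of predictions of that
    # class, with positions whose ground truth is ignore_label excluded.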
tp = numpy.zeros((label_num,), dtype=numpy.int32)
relevant = numpy.zeros((label_num,), dtype=numpy.int32)
for p, t in zip(preds.ravel(), ts.ravel()):
if t == ignore_label:
continue
relevant[p] += 1
if p == t:
tp[p] += 1
return dtype(tp) / relevant
def fbeta_score(precision, recall, beta=1.0):
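    # F_beta = (1 + beta^2) * P * R / (beta^2 * P + R); beta = 1 gives the
    # harmonic mean of precision and recall.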
beta_square = beta * beta
return ((1 + beta_square) * precision * recall /
(beta_square * precision + recall))
def support(ts, dtype, label_num, ignore_label):
ret = numpy.zeros((label_num,), dtype=numpy.int32)
for t in ts.ravel():
if t == ignore_label:
continue
ret[t] += 1
return ret
# Suppose label_num is 3, so that all valid labels are in [0, 3).
# Then the typical output of this function is as follows:
#
#   has_ignore_label \ ignore_label |       -1       |        0       |
#   yes                             | 0,1,-1,2,-1... | 0,1,2,2,0,1... |
#   no                              | 0,1,2,2,0,1... | 1,2,1,1,2,1... |
def make_ground_truth(label_num, shape, ignore_label, has_ignore_label, dtype):
if (ignore_label == -1) != (has_ignore_label): # xor
lower = 0
elif ignore_label == 0 and not has_ignore_label:
lower = 1
else:
lower = -1
t = numpy.random.randint(lower, label_num, shape)
return t.astype(dtype)
@testing.parameterize(*(
testing.product_dict(
[{'y_shape': (100, 3), 't_shape': (100,)},
{'y_shape': (100, 3, 5), 't_shape': (100, 5)}],
[{'dtype': numpy.float32}],
[{'beta': 1.0},
{'beta': 2.0}],
[{'label_num': 3},
{'label_num': None}],
[{'ignore_label': -1},
{'ignore_label': 0}],
[{'has_ignore_label': True},
{'has_ignore_label': False}],
[{'label_dtype': numpy.int32}]
) + testing.product_dict(
[{'y_shape': (100, 3), 't_shape': (100,)}],
[{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64}],
[{'beta': 1.0}],
[{'label_num': 3}],
[{'ignore_label': 0}],
[{'has_ignore_label': True}],
[{'label_dtype': numpy.int8},
{'label_dtype': numpy.int16},
{'label_dtype': numpy.int32},
{'label_dtype': numpy.int64}]
)
))
class TestClassificationSummary(unittest.TestCase):
def setUp(self):
t_upper = 3 if self.label_num is None else self.label_num
self.y = numpy.random.uniform(-1, 1, self.y_shape).astype(self.dtype)
self.t = make_ground_truth(t_upper, self.t_shape,
self.ignore_label, self.has_ignore_label,
self.label_dtype)
self.check_forward_options = {}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
def check_forward(self, xp):
y = chainer.Variable(xp.asarray(self.y))
t = chainer.Variable(xp.asarray(self.t))
pred = self.y.argmax(axis=1).reshape(self.t.shape)
with numpy.errstate(invalid='ignore'):
p_expect = precision(pred, self.t, self.dtype,
3, self.ignore_label)
r_expect = recall(pred, self.t, self.dtype,
3, self.ignore_label)
fbeta_expect = fbeta_score(p_expect, r_expect, self.beta)
s_expect = support(self.t, self.dtype,
3, self.ignore_label)
        # The results can include NaN values depending on the inputs.
        # In such cases, temporarily disable debug mode to avoid a NaN error.
        # TODO(niboshi): separate test cases which can cause NaN and remove
        # this trick.
include_nan = (numpy.isnan(p_expect).any()
or numpy.isnan(r_expect).any()
or numpy.isnan(fbeta_expect).any())
def forward():
return F.classification_summary(
y, t, self.label_num, self.beta, self.ignore_label)
if include_nan:
with chainer.using_config('debug', False), \
numpy.errstate(invalid='ignore'):
outputs = forward()
else:
outputs = forward()
p_actual, r_actual, fbeta_actual, s_actual = outputs
chainer.testing.assert_allclose(p_actual.data, p_expect,
**self.check_forward_options)
chainer.testing.assert_allclose(r_actual.data, r_expect,
**self.check_forward_options)
chainer.testing.assert_allclose(fbeta_actual.data, fbeta_expect,
**self.check_forward_options)
chainer.testing.assert_allclose(s_actual.data, s_expect,
**self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(numpy)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.cupy)
testing.run_module(__name__, __file__)
| 5,709
| 34.246914
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/evaluation_tests/test_accuracy.py
|
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import force_array
from chainer.utils import type_check
def accuracy(x, t, ignore_label):
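    # Reference accuracy: the class axis (axis 1) is rolled to the end, the
    # argmax prediction is compared with t element-wise, and positions equal
    # to ignore_label (if given) are excluded from both count and total.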
x_ = numpy.rollaxis(x, 1, x.ndim).reshape(t.size, -1)
t_ = t.ravel()
if ignore_label is not None:
count = 0
for i in six.moves.range(t_.size):
pred = x_[i].argmax()
if t_[i] != ignore_label and pred == t_[i]:
count += 1
total = (t_ != ignore_label).sum()
else:
count = 0
for i in six.moves.range(t_.size):
pred = x_[i].argmax()
if pred == t_[i]:
count += 1
total = t_.size
if total == 0:
return 0.0
else:
return float(count) / total
@testing.parameterize(
*testing.product_dict(
[{'x_shape': (10, 3), 't_shape': (10,)},
{'x_shape': (10, 3, 1), 't_shape': (10,)},
{'x_shape': (10, 3, 1, 1), 't_shape': (10,)},
{'x_shape': (10, 3, 5), 't_shape': (10, 5)},
{'x_shape': (10, 3, 5, 4), 't_shape': (10, 5, 4)},
{'x_shape': (10, 3, 5, 4, 1), 't_shape': (10, 5, 4)},
{'x_shape': (10, 3, 5, 4, 1, 1), 't_shape': (10, 5, 4)}],
[{'ignore_label': None, 't_data': 'randint'},
{'ignore_label': 0, 't_data': 'randint'},
{'ignore_label': 0, 't_data': 'zero'}],
[{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64}],
[{'label_dtype': numpy.int8},
{'label_dtype': numpy.int16},
{'label_dtype': numpy.int32},
{'label_dtype': numpy.int64}]
)
)
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestAccuracy(testing.FunctionTestCase):
def setUp(self):
self.skip_backward_test = True
self.skip_double_backward_test = True
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 1e-4, 'rtol': 1e-3})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
if self.t_data == 'randint':
t = numpy.random.randint(
3, size=self.t_shape).astype(self.label_dtype)
elif self.t_data == 'zero':
t = numpy.zeros(self.t_shape).astype(self.label_dtype)
return x, t
def forward(self, inputs, device):
x, t = inputs
return functions.accuracy(x, t, self.ignore_label),
def forward_expected(self, inputs):
x, t = inputs
expected = accuracy(x, t, self.ignore_label)
expected = force_array(expected, self.dtype)
return expected,
@testing.parameterize(
{'x_shape': (10, 3), 't_shape': (4,)},
{'x_shape': (10, 3, 2), 't_shape': (10,)},
{'x_shape': (10, 3, 1, 2), 't_shape': (10,)},
{'x_shape': (10, 3, 4), 't_shape': (10, 5)},
{'x_shape': (10, 3, 5, 2), 't_shape': (10, 5)},
{'x_shape': (10, 3, 5, 1, 2), 't_shape': (10, 5)},
)
class TestInvalidShape(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1,
self.x_shape).astype(numpy.float32)
self.t = numpy.random.randint(3, size=self.t_shape).astype(numpy.int32)
def check_invalid_shape(self, xp):
x = chainer.Variable(xp.asarray(self.x))
t = chainer.Variable(xp.asarray(self.t))
with self.assertRaises(type_check.InvalidType):
functions.accuracy(x, t)
def test_invalid_shape_cpu(self):
self.check_invalid_shape(numpy)
@attr.gpu
def test_invalid_shape_gpu(self):
self.check_invalid_shape(cuda.cupy)
testing.run_module(__name__, __file__)
| 4,114
| 28.818841
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/evaluation_tests/test_r2_score.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import force_array
from chainer.utils import type_check
def r2_score(pred, true, sample_weight=None, multioutput='uniform_average'):
SS_res = numpy.asarray(
numpy.sum((pred - true) ** 2, axis=0))
SS_tot = numpy.asarray(
numpy.sum((true - numpy.mean(true, axis=0)) ** 2, axis=0))
if multioutput == 'uniform_average':
if numpy.any(SS_tot == 0):
return 0.0
else:
return (1 - SS_res / SS_tot).mean()
elif multioutput == 'raw_values':
if numpy.any(SS_tot == 0):
# Assign dummy value to avoid zero-division
SS_tot_iszero = SS_tot == 0
SS_tot[SS_tot_iszero] = 1
return numpy.where(SS_tot_iszero, 0.0, 1 - SS_res / SS_tot)
else:
return 1 - SS_res / SS_tot
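# NumPy-only reference for F.r2_score used by forward_expected below:
#   R^2 = 1 - SS_res / SS_tot,
# with SS_res = sum((pred - true)**2) and
# SS_tot = sum((true - mean(true))**2) taken per output column.
# 'uniform_average' averages the per-output scores (returning 0.0 outright
# if any SS_tot is zero), while 'raw_values' returns them individually,
# substituting 0.0 for outputs whose SS_tot is zero to avoid dividing by
# zero.  When pred equals true exactly (and SS_tot is non-zero) the score
# is 1.0.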
@testing.parameterize(
*testing.product_dict(
[{'x_shape': (10,), 't_shape': (10,)},
{'x_shape': (10, 1), 't_shape': (10, 1)},
{'x_shape': (10, 5), 't_shape': (10, 5)},
{'x_shape': (10, 5, 4), 't_shape': (10, 5, 4)}],
[{'t_input': 'random'}, {'t_input': 'zero'}],
[{'multioutput': 'uniform_average'},
{'multioutput': 'raw_values'}],
[{'sample_weight': None}],
[{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64}]
)
)
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestR2Score(testing.FunctionTestCase):
def setUp(self):
self.skip_backward_test = True
self.skip_double_backward_test = True
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 1e-2, 'rtol': 1e-2})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
if self.t_input == 'random':
t = numpy.random.uniform(-1, 1, self.t_shape).astype(self.dtype)
else:
t = numpy.zeros(self.t_shape).astype(self.dtype)
return x, t
def forward(self, inputs, device):
x, t = inputs
y = functions.r2_score(x, t, self.sample_weight, self.multioutput)
return y,
def forward_expected(self, inputs):
x, t = inputs
expected = r2_score(x, t, sample_weight=self.sample_weight,
multioutput=self.multioutput)
expected = force_array(expected, self.dtype)
return expected,
@testing.parameterize(
{'x_shape': (10, 3), 't_shape': (4,)},
{'x_shape': (10, 3, 2), 't_shape': (10,)},
{'x_shape': (10, 3, 1, 2), 't_shape': (10,)},
{'x_shape': (10, 3, 4), 't_shape': (10, 5)},
{'x_shape': (10, 3, 5, 2), 't_shape': (10, 5)},
{'x_shape': (10, 3, 5, 1, 2), 't_shape': (10, 5)},
)
class TestInvalidShape(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1,
self.x_shape).astype(numpy.float32)
self.t = numpy.random.randint(3, size=self.t_shape).astype(numpy.int32)
def check_invalid_shape(self, xp):
x = chainer.Variable(xp.asarray(self.x))
t = chainer.Variable(xp.asarray(self.t))
with self.assertRaises(type_check.InvalidType):
            chainer.functions.r2_score(x, t)
def test_invalid_shape_cpu(self):
self.check_invalid_shape(numpy)
@attr.gpu
def test_invalid_shape_gpu(self):
self.check_invalid_shape(cuda.cupy)
testing.run_module(__name__, __file__)
| 3,942
| 29.565891
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/evaluation_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/evaluation_tests/test_binary_accuracy.py
|
import unittest
import numpy
import six
import chainer
from chainer import functions
from chainer import testing
from chainer.utils import force_array
from chainer.utils import type_check
@testing.parameterize(*testing.product({
'shape': [(9, 11), (99,)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'label_dtype': [numpy.int8, numpy.int16, numpy.int32, numpy.int64],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestBinaryAccuracy(testing.FunctionTestCase):
def setUp(self):
self.skip_backward_test = True
self.skip_double_backward_test = True
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 1e-4, 'rtol': 1e-3})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
t = numpy.random.randint(-1, 2, self.shape).astype(self.label_dtype)
return x, t
def forward(self, inputs, device):
x, t = inputs
return functions.binary_accuracy(x, t),
def forward_expected(self, inputs):
x, t = inputs
count = 0
correct = 0
x_flatten = x.ravel()
t_flatten = t.ravel()
for i in six.moves.range(t_flatten.size):
if t_flatten[i] == -1:
continue
pred = int(x_flatten[i] >= 0)
if pred == t_flatten[i]:
correct += 1
count += 1
expected = float(correct) / count
expected = force_array(expected, self.dtype)
return expected,
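# The expected value above follows F.binary_accuracy's convention: a score
# x >= 0 counts as the positive class (label 1), x < 0 as label 0, and
# elements whose label is -1 are ignored.  For example (hypothetical
# values), x = [0.3, -0.2, 1.5] with t = [1, 0, -1] yields 2 matches out of
# the 2 counted elements, i.e. an accuracy of 1.0.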
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestBinaryAccuracyIgnoreAll(testing.FunctionTestCase):
def setUp(self):
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
shape = (5, 4)
x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
t = -numpy.ones(shape).astype(numpy.int32)
return x, t
def forward(self, inputs, device):
x, t = inputs
return functions.binary_accuracy(x, t),
def forward_expected(self, inputs):
return force_array(0.0, self.dtype),
class TestBinaryAccuracyTypeError(unittest.TestCase):
def test_invalid_shape(self):
x = chainer.Variable(numpy.zeros((3, 2, 5), dtype=numpy.float32))
t = chainer.Variable(numpy.zeros((2, 3, 5), dtype=numpy.int32))
with self.assertRaises(type_check.InvalidType):
chainer.functions.binary_accuracy(x, t)
def test_invalid_type(self):
x = chainer.Variable(numpy.zeros((3, 2, 5), dtype=numpy.float32))
t = chainer.Variable(numpy.zeros((3, 2, 5), dtype=numpy.float32))
with self.assertRaises(type_check.InvalidType):
chainer.functions.binary_accuracy(x, t)
testing.run_module(__name__, __file__)
| 3,561
| 26.19084
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/util_tests/test_forget.py
|
import functools
import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer import variable
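# F.forget(func, *xs) evaluates ``func`` without retaining its intermediate
# results and recomputes them during the backward pass, trading extra
# computation for lower memory use.  The tests below check that its outputs
# and gradients agree with calling ``func`` directly.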
@testing.parameterize(*testing.product({
'out_len': [1, 2],
}))
class TestForget(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.y = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.gz0 = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.gz1 = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.ggx = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.ggy = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
def check_forward(self, x_data, y_data):
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
if self.out_len == 1:
z = functions.forget(lambda x, y: (x + y + x,), x, y)
testing.assert_allclose(x_data + y_data + x_data, z.data)
elif self.out_len == 2:
z = functions.forget(lambda x, y: (x + y + x, x * y), x, y)
testing.assert_allclose(x_data + y_data + x_data, z[0].data)
testing.assert_allclose(x_data * y_data, z[1].data)
def test_forward_cpu(self):
self.check_forward(self.x, self.y)
def check_backward(self, x_data, y_data, *gz_data):
def f(x, y):
if self.out_len == 1:
return functions.forget(lambda x, y: (x + y + x), x, y)
elif self.out_len == 2:
return functions.forget(lambda x, y: (x + y + x, x * y), x, y)
gradient_check.check_backward(
f, (x_data, y_data), gz_data, **self.check_backward_options)
def test_backward_cpu(self):
if self.out_len == 1:
self.check_backward(self.x, self.y, self.gz0)
elif self.out_len == 2:
self.check_backward(self.x, self.y, self.gz0, self.gz1)
@attr.gpu
def test_backward_gpu(self):
if self.out_len == 1:
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.y),
cuda.to_gpu(self.gz0))
elif self.out_len == 2:
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.y),
cuda.to_gpu(self.gz0), cuda.to_gpu(self.gz1))
class TestForgetError(unittest.TestCase):
def setUp(self):
self.v = chainer.Variable(numpy.zeros(1))
def test_not_callable(self):
with self.assertRaises(TypeError):
functions.forget(1)
def test_invalid_type(self):
with six.assertRaisesRegex(self, RuntimeError, 'int'):
functions.forget(lambda: 1)
def test_invalid_tuple_type_1st(self):
with six.assertRaisesRegex(self, RuntimeError, '1st.*int'):
functions.forget(lambda: (1,))
def test_invalid_tuple_type_2nd(self):
with six.assertRaisesRegex(self, RuntimeError, '2nd.*int'):
functions.forget(lambda: (self.v, 1))
def test_invalid_tuple_type_3rd(self):
with six.assertRaisesRegex(self, RuntimeError, '3rd.*int'):
functions.forget(lambda: (self.v, self.v, 1))
def test_invalid_tuple_type_4th(self):
with six.assertRaisesRegex(self, RuntimeError, '4th.*int'):
functions.forget(lambda: (self.v,) * 3 + (1,))
def test_invalid_tuple_type_11th(self):
with six.assertRaisesRegex(self, RuntimeError, '11th.*int'):
functions.forget(lambda: (self.v,) * 10 + (1,))
def test_invalid_tuple_type_12th(self):
with six.assertRaisesRegex(self, RuntimeError, '12th.*int'):
functions.forget(lambda: (self.v,) * 11 + (1,))
def test_invalid_tuple_type_13th(self):
with six.assertRaisesRegex(self, RuntimeError, '13th.*int'):
functions.forget(lambda: (self.v,) * 12 + (1,))
class TestForgetDoubleBackpropError(unittest.TestCase):
def setUp(self):
self.v = chainer.Variable(numpy.zeros(1))
def test_invalid_double_backprop(self):
with self.assertRaises(RuntimeError):
x = functions.forget(lambda v: v, self.v)
x.grad_var = variable.Variable(numpy.ones_like(x.data))
x.backward(enable_double_backprop=True)
class TestForgetGrad(unittest.TestCase):
def test_variable_grad(self):
x = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
x = variable.Variable(x)
w = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
w = variable.Variable(w)
y = functions.forget(lambda a, b: a + b, x, w)
y.grad_var = variable.Variable(numpy.ones_like(y.data))
y.backward()
assert isinstance(x.grad_var, variable.Variable)
assert isinstance(w.grad_var, variable.Variable)
def test_link_grad(self):
class Model(chainer.link.Chain):
def __init__(self):
super(Model, self).__init__()
with self.init_scope():
self.link = links.Linear(None, 10)
def forward(self, x):
return functions.forget(self.link, x)
model = Model()
model.cleargrads()
x = numpy.random.uniform(-1, 1, (64, 768)).astype(numpy.float32)
x = variable.Variable(x, requires_grad=True)
y = functions.sum(model(x))
y.backward()
assert isinstance(model.link.W.grad_var, variable.Variable)
assert isinstance(model.link.b.grad_var, variable.Variable)
@testing.parameterize(*testing.product({
'link_name': ['bn', 'brn', 'dbn'],
'finetune': [False, True],
}))
class TestBatchNormalization(unittest.TestCase):
def test_bn(self):
class Model(chainer.link.Chain):
def __init__(self, link_name, finetune, forget=False):
super(Model, self).__init__()
with self.init_scope():
if link_name == 'bn':
self.link = links.BatchNormalization(3)
elif link_name == 'brn':
                        # With the default rmax=1 and dmax=0,
                        # BatchRenormalization reduces to plain
                        # BatchNormalization, so pass non-trivial values
                        # here.
self.link = links.BatchRenormalization(
3, rmax=2.0, dmax=1.0)
elif link_name == 'dbn':
self.link = links.DecorrelatedBatchNormalization(
3, groups=3)
self.forget = forget
self.finetune = finetune
def forward(self, x):
if self.forget:
return functions.forget(
functools.partial(self.link, finetune=self.finetune),
x)
else:
return self.link(x, finetune=self.finetune)
x = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
model1 = Model(self.link_name, self.finetune, forget=False)
model2 = Model(self.link_name, self.finetune, forget=True)
# Update the models' internal statistics
x1 = chainer.Variable(x)
y = model1(x1)
y.grad = gy
y.backward()
x2 = chainer.Variable(x)
y = model2(x2)
y.grad = gy
y.backward()
numpy.testing.assert_almost_equal(x1.grad, x2.grad)
# Check if the outputs of the models are the same during test time
with chainer.using_config('train', False):
y1 = model1(x)
y2 = model2(x)
numpy.testing.assert_almost_equal(y1.data, y2.data)
if self.finetune:
assert model1.link.N == 1
assert model2.link.N == 1
testing.run_module(__name__, __file__)
| 8,097
| 34.517544
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/util_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/rnn_tests/test_function_n_step_rnn.py
|
import numpy
import chainer.functions as F
from chainer import testing
from chainer.testing import backend
def _relu(x):
expected = x.copy()
for i in numpy.ndindex(x.shape):
if x[i] < 0:
expected[i] = 0
return expected
def array(shape, dtype):
return numpy.random.uniform(-1, 1, shape).astype(dtype)
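# The reference in forward_expected below applies, per layer and time step,
#   h_t = act(x_t.dot(W_x.T) + h_prev.dot(W_h.T) + b_x + b_h)
# where ``act`` is tanh or ReLU; each layer contributes two weight matrices
# (w[0] for the input, w[1] for the recurrence) and two bias vectors,
# matching the flat input layout built by generate_inputs.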
@testing.parameterize(*testing.product_dict(
[
{'n_layers': 1, 'hidden_size': 2, 'input_size': 1,
'batches': (1, 1, 1), 'activation': 'relu'},
{'n_layers': 1, 'hidden_size': 2, 'input_size': 1,
'batches': (1, 1, 1), 'activation': 'tanh'},
{'n_layers': 2, 'hidden_size': 2, 'input_size': 3,
'batches': (3, 2, 1), 'activation': 'relu'},
{'n_layers': 2, 'hidden_size': 2, 'input_size': 3,
'batches': (3, 2, 1), 'activation': 'tanh'},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
]))
@testing.fix_random()
@backend.inject_backend_tests(
None,
# ChainerX tests
testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
# CPU tests
+ testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product([
[{'use_cuda': True}],
# Without cuDNN
testing.product({
'use_cudnn': ['never'],
})
+ testing.product({
'use_cudnn': ['always'],
'cudnn_deterministic': [True, False],
'autotune': [True, False],
})]))
class TestNStepRNN(testing.FunctionTestCase):
dodge_nondifferentiable = True
def setUp(self):
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
if self.dtype == numpy.float16:
self.check_forward_options.update({'rtol': 5e-2})
self.check_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers, self.batches[0], self.hidden_size)
dtype = self.dtype
h = array(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = [array((self.batches[b], in_size), dtype)
for b in range(len(self.batches))]
def w_in(i, j):
return in_size if i == 0 and j < 1 else out_size
inputs = []
inputs.append(h)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for i in range(2):
inputs.append(array((out_size, w_in(n, i)), dtype))
for i in range(2):
inputs.append(array((out_size,), dtype))
return tuple(inputs)
def process_inputs(self, inputs):
h = inputs[0]
xs = inputs[1: 1 + len(self.batches)]
ws = []
bs = []
index = 1 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 2])
bs.append(inputs[index + 2: index + 4])
index += 4
return h, ws, bs, xs
def forward(self, inputs, device):
h, ws, bs, xs = self.process_inputs(inputs)
out = F.n_step_rnn(self.n_layers, 0.0, h, ws,
bs, xs, self.activation)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
def forward_expected(self, inputs):
h, ws, bs, xs = self.process_inputs(inputs)
e_hy = h.copy()
ys = []
for ind in range(len(xs)):
x = xs[ind]
batch = x.shape[0]
for layer in range(self.n_layers):
w = ws[layer]
b = bs[layer]
h_prev = e_hy[layer, :batch]
if self.activation == 'tanh':
e_h = numpy.tanh(x.dot(w[0].T) +
h_prev.dot(w[1].T) + b[0] + b[1])
elif self.activation == 'relu':
e_h = _relu(x.dot(w[0].T) +
h_prev.dot(w[1].T) + b[0] + b[1])
e_hy[layer, :batch] = e_h
x = e_h
ys.append(x)
rets = []
rets.append(e_hy)
for i in range(len(ys)):
rets.append(ys[i])
return tuple(rets)
@testing.parameterize(*testing.product_dict(
[
{'n_layers': 1, 'hidden_size': 2, 'input_size': 1,
'batches': (1, 1, 1), 'activation': 'relu'},
{'n_layers': 1, 'hidden_size': 2, 'input_size': 1,
'batches': (1, 1, 1), 'activation': 'tanh'},
{'n_layers': 2, 'hidden_size': 2, 'input_size': 3,
'batches': (3, 2, 1), 'activation': 'relu'},
{'n_layers': 2, 'hidden_size': 2, 'input_size': 3,
'batches': (3, 2, 1), 'activation': 'tanh'},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
]))
@testing.fix_random()
@backend.inject_backend_tests(
None,
# ChainerX tests
testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
# CPU tests
+ testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product([
[{'use_cuda': True}],
testing.product({
'use_cudnn': ['always'],
'cudnn_deterministic': [True, False],
'autotune': [True, False],
})]))
class TestNStepBiRNN(testing.FunctionTestCase):
dodge_nondifferentiable = True
def setUp(self):
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers * 2, self.batches[0], self.hidden_size)
dtype = self.dtype
h = array(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = [array((self.batches[b], in_size), dtype)
for b in range(len(self.batches))]
def w_in(i, j):
if i == 0 and j < 1:
return in_size
elif i > 0 and j < 1:
return out_size * 2
else:
return out_size
inputs = []
inputs.append(h)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for direction in (0, 1):
for i in range(2):
inputs.append(array((out_size, w_in(n, i)), dtype))
for i in range(2):
inputs.append(array((out_size,), dtype))
return tuple(inputs)
def process_inputs(self, inputs):
h = inputs[0]
xs = inputs[1: 1 + len(self.batches)]
ws = []
bs = []
index = 1 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 2])
bs.append(inputs[index + 2: index + 4])
ws.append(inputs[index + 4: index + 6])
bs.append(inputs[index + 6: index + 8])
index += 8
return h, ws, bs, xs
def forward(self, inputs, device):
h, ws, bs, xs = self.process_inputs(inputs)
out = F.n_step_birnn(self.n_layers, 0.0, h, ws,
bs, xs, self.activation)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
def forward_expected(self, inputs):
h, ws, bs, xs = self.process_inputs(inputs)
xs_next = xs
e_hy = h.copy()
for layer in range(self.n_layers):
# forward
di = 0
xf = []
layer_idx = layer * 2 + di
w = ws[layer_idx]
b = bs[layer_idx]
for ind in range(len(xs)):
x = xs_next[ind]
batch = x.shape[0]
h_prev = e_hy[layer_idx, :batch]
if self.activation == 'tanh':
e_h = numpy.tanh(x.dot(w[0].T) +
h_prev.dot(w[1].T) + b[0] + b[1])
elif self.activation == 'relu':
e_h = _relu(x.dot(w[0].T) +
h_prev.dot(w[1].T) + b[0] + b[1])
e_hy[layer_idx, :batch] = e_h
xf.append(e_h)
# backward
di = 1
xb = []
layer_idx = layer * 2 + di
w = ws[layer_idx]
b = bs[layer_idx]
for ind in reversed(range(len(xs))):
x = xs_next[ind]
batch = x.shape[0]
h_prev = e_hy[layer_idx, :batch]
if self.activation == 'tanh':
e_h = numpy.tanh(x.dot(w[0].T) +
h_prev.dot(w[1].T) + b[0] + b[1])
elif self.activation == 'relu':
e_h = _relu(x.dot(w[0].T) +
h_prev.dot(w[1].T) + b[0] + b[1])
e_hy[layer_idx, :batch] = e_h
xb.append(e_h)
xb.reverse()
xs_next = [numpy.concatenate([hfi, hbi], axis=1)
for (hfi, hbi) in zip(xf, xb)]
rets = []
rets.append(e_hy)
for x in xs_next:
rets.append(x)
return tuple(rets)
testing.run_module(__name__, __file__)
| 9,839
| 30.741935
| 73
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/rnn_tests/test_function_lstm.py
|
import unittest
import numpy
import six
from chainer.backends import cuda
import chainer.functions as F
from chainer.functions.rnn import lstm
from chainer import gradient_check
from chainer import testing
from chainer.testing import backend
def sigmoid(x):
return numpy.tanh(x * 0.5) * 0.5 + 0.5
def _shaped_random(shape, dtype):
return numpy.random.uniform(-1, 1, shape).astype(dtype)
def inject_backend_tests(method_names):
decorator = backend.inject_backend_tests(
method_names,
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ [{'use_cuda': True}])
return decorator
class LSTMTestBase(object):
dodge_nondifferentiable = True
dtype = numpy.float32
grad_outputs = (True, True)
grad_grad_inputs = (True, True)
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
def generate_inputs(self):
c = _shaped_random(self.c_shape, self.dtype)
x = _shaped_random(self.x_shape, self.dtype)
return c, x,
def forward(self, inputs, device):
c, x = inputs
c, h = F.lstm(c, x)
return c, h,
def forward_expected(self, inputs):
c, x = inputs
batch = x.shape[0]
def _extract_gates(x):
r = x.reshape((len(x), x.shape[1] // 4, 4) + x.shape[2:])
return [r[:, :, i] for i in six.moves.range(4)]
a, i, f, o = _extract_gates(x)
a = numpy.tanh(a)
i = sigmoid(i)
f = sigmoid(f)
o = sigmoid(o)
c_exp = numpy.zeros_like(c)
c_exp[:batch] = a * i + f * c[:batch]
h_exp = o * numpy.tanh(c_exp[:batch])
c_exp[batch:] = c[batch:]
return c_exp, h_exp,
def generate_grad_outputs(self, outputs_template):
grad_out = []
c = outputs_template[0]
h = outputs_template[1]
c_shape = c.shape
h_shape = h.shape
if self.grad_outputs[0] is True:
grad_out.append(_shaped_random(c_shape, c.dtype))
else:
grad_out.append(None)
if self.grad_outputs[1] is True:
grad_out.append(_shaped_random(h_shape, h.dtype))
else:
grad_out.append(None)
return tuple(grad_out)
def generate_grad_grad_inputs(self, inputs_template):
grad_grad_in = []
c = inputs_template[0]
x = inputs_template[1]
c_shape = c.shape
x_shape = x.shape
if self.grad_grad_inputs[0] is True:
grad_grad_in.append(_shaped_random(c_shape, c.dtype))
else:
grad_grad_in.append(None)
if self.grad_grad_inputs[1] is True:
grad_grad_in.append(_shaped_random(x_shape, x.dtype))
else:
grad_grad_in.append(None)
return tuple(grad_grad_in)
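# LSTMTestBase.forward_expected mirrors F.lstm's packed-gate layout: for
# each unit, four consecutive channels of ``x`` hold the gates (a, i, f, o).
# The cell update is
#   c_new = tanh(a) * sigmoid(i) + sigmoid(f) * c_prev
#   h_new = sigmoid(o) * tanh(c_new)
# and when ``x`` has fewer rows than ``c`` only the leading ``batch`` rows
# of the cell state are updated; the remaining rows pass through unchanged.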
@testing.fix_random()
@backend.inject_backend_tests(
None,
# ChainerX tests
testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
# CPU tests
+ testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product([
[{'use_cuda': True}],
# Without cuDNN
testing.product({
'use_cudnn': ['never'],
})
# With cuDNN
+ testing.product({
'use_cudnn': ['always'],
'cudnn_deterministic': [True, False],
'autotune': [True, False],
})]))
@testing.parameterize(*testing.product_dict(
[
{'c_shape': (10, 3), 'x_shape': (10, 12)},
{'c_shape': (20, 32, 4), 'x_shape': (16, 128, 4)},
{'c_shape': (32, 100, 3, 5), 'x_shape': (32, 400, 3, 5)},
{'c_shape': (16, 20), 'x_shape': (2, 80)},
{'c_shape': (16, 20), 'x_shape': (0, 80)},
{'c_shape': (0, 0), 'x_shape': (0, 0)},
{'c_shape': (8, 0), 'x_shape': (2, 0)},
], [
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
]
))
class TestLSTM(LSTMTestBase, testing.FunctionTestCase):
pass
@testing.fix_random()
@backend.inject_backend_tests(
None,
# ChainerX tests
testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0'],
})
# CPU tests
+ testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
}))
@testing.parameterize(*testing.product_dict(
[
{'c_shape': (10, 3), 'x_shape': (10, 12)},
], [
{'grad_outputs': (True, True)},
{'grad_outputs': (True, False)},
{'grad_outputs': (False, True)},
], [
{'grad_grad_inputs': (True, True)},
{'grad_grad_inputs': (True, False)},
{'grad_grad_inputs': (False, True)},
],
))
class TestLSTMGradOutputs(LSTMTestBase, testing.FunctionTestCase):
pass
@testing.parameterize(*(testing.product({
'batch': [3, 2, 0],
'dtype': [numpy.float32],
}) + testing.product({
'batch': [3],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
})))
@testing.fix_random()
@inject_backend_tests(['test_backward'])
class TestLSTMGrad(unittest.TestCase):
def setUp(self):
hidden_shape = (3, 2, 4)
dtype = self.dtype
x_shape = (self.batch, 8, 4)
y_shape = (self.batch, 2, 4)
c_prev = numpy.random.uniform(
-1, 1, hidden_shape).astype(dtype)
x = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
c_next = numpy.random.uniform(-1, 1, hidden_shape).astype(dtype)
gc = numpy.random.uniform(-1, 1, hidden_shape).astype(dtype)
gh = numpy.random.uniform(-1, 1, y_shape).astype(dtype)
ggc_prev = numpy.random.uniform(-1, 1, hidden_shape).astype(dtype)
ggx = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
self.inputs = [c_prev, x, c_next, gc, gh]
self.grad_outputs = [ggc_prev, ggx]
self.check_backward_options = {'dtype': numpy.float64}
if self.dtype == numpy.float16:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-3, 'rtol': 1e-2}
def check_backward(self, inputs, grad_outputs, backend_config):
if backend_config.use_cuda:
inputs = cuda.to_gpu(inputs)
grad_outputs = cuda.to_gpu(grad_outputs)
with backend_config:
gradient_check.check_backward(
lstm.LSTMGrad(), inputs, grad_outputs,
**self.check_backward_options)
def test_backward(self, backend_config):
self.check_backward(self.inputs, self.grad_outputs, backend_config)
testing.run_module(__name__, __file__)
| 7,077
| 27.772358
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/rnn_tests/test_function_tree_lstm.py
|
import numpy
import six
from chainer import functions
from chainer import testing
from chainer.testing import backend
def _sigmoid(x):
half = x.dtype.type(0.5)
return numpy.tanh(x * half) * half + half
def _shaped_random_array(shape, dtype):
return numpy.random.uniform(-1, 1, shape).astype(dtype)
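# F.tree_lstm takes N child cell states c_1 ... c_N plus an input ``x`` that
# packs 3 + N gate blocks (a, i, o, f_1 ... f_N) along the channel axis.
# The reference below computes
#   c = tanh(a) * sigmoid(i) + sum_k sigmoid(f_k) * c_k
#   h = sigmoid(o) * tanh(c)
# which is what forward_expected checks the function output against.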
@testing.parameterize(*testing.product_dict(
[
{'c_dim': 2, 'num_c': 1, 'batch_size': 3},
{'c_dim': 0, 'num_c': 4, 'batch_size': 5},
{'c_dim': 12, 'num_c': 1, 'batch_size': 0},
], [
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
]
))
@testing.fix_random()
@backend.inject_backend_tests(
None,
# ChainerX tests
testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
# CPU tests
+ testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product([
[{'use_cuda': True}],
# Without cuDNN
testing.product({
'use_cudnn': ['never'],
})
# With cuDNN
+ testing.product({
'use_cudnn': ['always'],
'cudnn_deterministic': [True, False],
'autotune': [True, False],
})]))
class TestTreeLSTM(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 5e-3, 'atol': 5e-2})
self.skip_double_backward_test = True
def generate_inputs(self):
c_dim = self.c_dim
num_c = self.num_c
batch_size = self.batch_size
c_shape = (batch_size, c_dim)
input_shape = (batch_size, c_dim * (num_c + 3))
inputs = []
for i in range(num_c):
inputs.append(_shaped_random_array(c_shape, self.dtype))
inputs.append(_shaped_random_array(input_shape, self.dtype))
return tuple(inputs)
def forward(self, inputs, device):
out = functions.tree_lstm(*list(inputs))
return out
def forward_expected(self, inputs):
def _extract_gates(x, n_split=5):
r = x.reshape(
(x.shape[0], n_split, x.shape[1] // n_split) + x.shape[2:])
return [r[:, i, :] for i in six.moves.range(n_split)]
cs, x = inputs[:-1], inputs[-1]
n_ary = len(cs)
gates = _extract_gates(x, 3 + n_ary)
a, i, o = gates[:3]
fs = gates[3:]
a = numpy.tanh(a)
i = _sigmoid(i)
o = _sigmoid(o)
fs = [_sigmoid(f) for f in fs]
c = a * i + sum(f * c for f, c in zip(fs, cs))
h = o * numpy.tanh(c)
return c, h
testing.run_module(__name__, __file__)
| 2,970
| 27.295238
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/rnn_tests/test_function_n_step_lstm.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
from chainer.testing import condition
def rand_vector(shape):
# return cuda.cupy.random.randint(-2, 2, shape).astype('f')
return cuda.cupy.random.uniform(-1, 1, shape).astype('f')
def sigmoid(x):
return numpy.tanh(x * 0.5) * 0.5 + 0.5
def array(shape, dtype):
return numpy.random.uniform(-1, 1, shape).astype(dtype)
def _stack_weight(ws):
# TODO(unno): Input of the current LSTM implementation is shuffled
w = F.stack(ws, axis=1)
shape = w.shape
return F.reshape(w, (shape[0] * shape[1],) + shape[2:])
def count_close(x, y, atol=1e-4):
assert x.shape == y.shape
return int(sum(abs(x - y) / abs(x) < atol))
def lstm_without_dropout(n_layer, dropout, hx, cx, ws, bs, xs):
xws = [_stack_weight([w[2], w[0], w[1], w[3]]) for w in ws]
hws = [_stack_weight([w[6], w[4], w[5], w[7]]) for w in ws]
xbs = [_stack_weight([b[2], b[0], b[1], b[3]]) for b in bs]
hbs = [_stack_weight([b[6], b[4], b[5], b[7]]) for b in bs]
xs = [xs[i] for i in range(3)]
ys = []
for x in xs:
cx_next = []
hx_next = []
for layer in range(n_layer):
c = cx[layer]
h = hx[layer]
if layer != 0:
                # Apply only dropout's inverse-keep-probability scaling
                # (no random masking), so kept units match F.n_step_lstm.
x = x * (1 / (1.0 - dropout))
lstm_in = F.linear(x, xws[layer], xbs[layer]) + \
F.linear(h, hws[layer], hbs[layer])
c_new, h_new = F.lstm(c, lstm_in)
cx_next.append(c_new)
hx_next.append(h_new)
x = h_new
cx = cx_next
hx = hx_next
ys.append(x)
cy = F.stack(cx)
hy = F.stack(hx)
return hy, cy, ys
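# In F.n_step_lstm each layer carries eight weight matrices and eight bias
# vectors: w[0:4] act on the layer input and w[4:8] on the previous hidden
# state, in the per-gate order (input, forget, new-cell, output) used by
# forward_expected below.  lstm_without_dropout above reorders them with
# _stack_weight into the interleaved (a, i, f, o) layout that F.lstm
# expects, and scales inter-layer inputs by 1 / (1 - dropout) so that units
# which dropout happens to keep match F.n_step_lstm exactly; the dropout
# test then counts how many elements coincide.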
@testing.parameterize(*testing.product_dict(
[
{'n_layers': 1, 'hidden_size': 2,
'input_size': 1, 'batches': (1, 1, 1)},
{'n_layers': 2, 'hidden_size': 2,
'input_size': 3, 'batches': (3, 2, 1)},
{'n_layers': 4, 'hidden_size': 6,
'input_size': 3, 'batches': (5, 3, 1)},
{'n_layers': 5, 'hidden_size': 10,
'input_size': 6, 'batches': (6, 5, 3)},
]))
@testing.fix_random()
@backend.inject_backend_tests(
None,
# ChainerX tests
testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
# CPU tests
+ testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product([
[{'use_cuda': True}],
testing.product({
'use_cudnn': ['always'],
'cudnn_deterministic': [True, False],
'autotune': [True, False],
})]))
class TestNStepLSTM(testing.FunctionTestCase):
dodge_nondifferentiable = True
def setUp(self):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 5e-3, 'atol': 5e-2})
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers, self.batches[0], self.hidden_size)
dtype = numpy.float32
h = array(h_shape, dtype)
c = array(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = []
for b in range(len(self.batches)):
xs.append(array((self.batches[b], in_size), dtype))
def w_in(i, j):
return in_size if i == 0 and j < 4 else out_size
inputs = []
inputs.append(h)
inputs.append(c)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for i in range(8):
inputs.append(array((out_size, w_in(n, i)), dtype))
for i in range(8):
inputs.append(array((out_size,), dtype))
return tuple(inputs)
def process_inputs(self, inputs):
h = inputs[0]
c = inputs[1]
xs = inputs[2: 2 + len(self.batches)]
ws = []
bs = []
index = 2 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 8])
bs.append(inputs[index + 8: index + 16])
index += 16
return h, c, ws, bs, xs
def forward(self, inputs, device):
h, c, ws, bs, xs = self.process_inputs(inputs)
if h.array.dtype == numpy.float64:
with chainer.using_config('use_cudnn', 'never'):
out = F.n_step_lstm(self.n_layers, 0.0, h, c, ws, bs, xs)
else:
out = F.n_step_lstm(self.n_layers, 0.0, h, c, ws, bs, xs)
rets = []
rets.append(out[0])
rets.append(out[1])
for i in range(len(out[2])):
rets.append(out[2][i])
return tuple(rets)
def forward_expected(self, inputs):
h, c, ws, bs, xs = self.process_inputs(inputs)
e_hy = h.copy()
e_cy = c.copy()
ys = []
for ind in range(len(xs)):
x = xs[ind]
batch = x.shape[0]
for layer in range(self.n_layers):
w = ws[layer]
b = bs[layer]
h_prev = e_hy[layer, :batch]
c_prev = e_cy[layer, :batch]
i = sigmoid(x.dot(w[0].T) + h_prev.dot(w[4].T) + b[0] + b[4])
f = sigmoid(x.dot(w[1].T) + h_prev.dot(w[5].T) + b[1] + b[5])
c_bar = numpy.tanh(
x.dot(w[2].T) + h_prev.dot(w[6].T) + b[2] + b[6])
o = sigmoid(x.dot(w[3].T) + h_prev.dot(w[7].T) + b[3] + b[7])
e_c = (f * c_prev + i * c_bar)
e_h = o * numpy.tanh(e_c)
e_hy[layer, :batch] = e_h
e_cy[layer, :batch] = e_c
x = e_h
ys.append(x)
rets = []
rets.append(e_hy)
rets.append(e_cy)
for i in range(len(ys)):
rets.append(ys[i])
return tuple(rets)
@testing.parameterize(*testing.product_dict(
[
{'n_layers': 1, 'hidden_size': 2,
'input_size': 1, 'batches': (1, 1, 1)},
{'n_layers': 2, 'hidden_size': 2,
'input_size': 3, 'batches': (3, 2, 1)},
{'n_layers': 4, 'hidden_size': 6,
'input_size': 3, 'batches': (5, 3, 1)},
{'n_layers': 5, 'hidden_size': 10,
'input_size': 6, 'batches': (6, 5, 3)},
]))
@testing.fix_random()
@backend.inject_backend_tests(
None,
# ChainerX tests
testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
# CPU tests
+ testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product([
[{'use_cuda': True}],
testing.product({
'use_cudnn': ['always'],
'cudnn_deterministic': [True, False],
'autotune': [True, False],
})]))
class TestNStepBiLSTM(testing.FunctionTestCase):
dodge_nondifferentiable = True
def setUp(self):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 5e-3, 'atol': 5e-2})
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers * 2, self.batches[0], self.hidden_size)
dtype = numpy.float32
h = array(h_shape, dtype)
c = array(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = []
for b in range(len(self.batches)):
xs.append(array((self.batches[b], in_size), dtype))
def w_in(i, j):
if i == 0 and j < 4:
return in_size
elif i > 0 and j < 4:
return out_size * 2
else:
return out_size
inputs = []
inputs.append(h)
inputs.append(c)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for direction in (0, 1):
for i in range(8):
inputs.append(array((out_size, w_in(n, i)), dtype))
for i in range(8):
inputs.append(array((out_size,), dtype))
return tuple(inputs)
def process_inputs(self, inputs):
h = inputs[0]
c = inputs[1]
xs = inputs[2:2 + len(self.batches)]
ws = []
bs = []
index = 2 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 8])
bs.append(inputs[index + 8: index + 16])
ws.append(inputs[index + 16: index + 24])
bs.append(inputs[index + 24: index + 32])
index += 32
return h, c, ws, bs, xs
def forward(self, inputs, device):
h, c, ws, bs, xs = self.process_inputs(inputs)
if h.array.dtype == numpy.float64:
with chainer.using_config('use_cudnn', 'never'):
out = F.n_step_bilstm(self.n_layers, 0.0, h, c, ws, bs, xs)
else:
out = F.n_step_bilstm(self.n_layers, 0.0, h, c, ws, bs, xs)
rets = []
rets.append(out[0])
rets.append(out[1])
for i in range(len(out[2])):
rets.append(out[2][i])
return tuple(rets)
def forward_expected(self, inputs):
h, c, ws, bs, xs = self.process_inputs(inputs)
xs_next = xs
e_hy = h.copy()
e_cy = c.copy()
for layer in range(self.n_layers):
# forward
di = 0
xf = []
layer_idx = layer * 2 + di
w = ws[layer_idx]
b = bs[layer_idx]
for ind in range(len(xs)):
x = xs_next[ind]
batch = x.shape[0]
h_prev = e_hy[layer_idx, :batch]
c_prev = e_cy[layer_idx, :batch]
i = sigmoid(x.dot(w[0].T) + h_prev.dot(w[4].T) + b[0] + b[4])
f = sigmoid(x.dot(w[1].T) + h_prev.dot(w[5].T) + b[1] + b[5])
c_bar = numpy.tanh(
x.dot(w[2].T) + h_prev.dot(w[6].T) + b[2] + b[6])
o = sigmoid(x.dot(w[3].T) + h_prev.dot(w[7].T) + b[3] + b[7])
e_c = (f * c_prev + i * c_bar)
e_h = o * numpy.tanh(e_c)
e_hy[layer_idx, :batch] = e_h
e_cy[layer_idx, :batch] = e_c
xf.append(e_h)
# backward
di = 1
xb = []
layer_idx = layer * 2 + di
w = ws[layer_idx]
b = bs[layer_idx]
for ind in reversed(range(len(xs))):
x = xs_next[ind]
batch = x.shape[0]
h_prev = e_hy[layer_idx, :batch]
c_prev = e_cy[layer_idx, :batch]
i = sigmoid(x.dot(w[0].T) + h_prev.dot(w[4].T) + b[0] + b[4])
f = sigmoid(x.dot(w[1].T) + h_prev.dot(w[5].T) + b[1] + b[5])
c_bar = numpy.tanh(
x.dot(w[2].T) + h_prev.dot(w[6].T) + b[2] + b[6])
o = sigmoid(x.dot(w[3].T) + h_prev.dot(w[7].T) + b[3] + b[7])
e_c = (f * c_prev + i * c_bar)
e_h = o * numpy.tanh(e_c)
e_hy[layer_idx, :batch] = e_h
e_cy[layer_idx, :batch] = e_c
xb.append(e_h)
xb.reverse()
xs_next = [numpy.concatenate([hfi, hbi], axis=1) for (hfi, hbi) in
zip(xf, xb)]
rets = []
rets.append(e_hy)
rets.append(e_cy)
for x in xs_next:
rets.append(x)
return tuple(rets)
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
}))
@attr.cudnn
class TestNStepLSTMDropout(unittest.TestCase):
batch = 20
length = 3
in_size = 1
out_size = 1
n_layers = 2
dropout = 0.3
n_tests = 100
def setUp(self):
self.xs = [rand_vector((self.batch, self.in_size))
for _ in range(self.length)]
h_shape = (self.n_layers, self.batch, self.out_size)
self.cx = rand_vector(h_shape)
self.hx = rand_vector(h_shape)
self.ws = []
self.bs = []
for i in range(self.n_layers):
weights = []
biases = []
for j in range(8):
if i == 0 and j < 4:
w_in = self.in_size
else:
w_in = self.out_size
weights.append(rand_vector((self.out_size, w_in)))
biases.append(rand_vector((self.out_size,)))
self.ws.append(weights)
self.bs.append(biases)
def assert_count(self, actual, expect):
self.assertTrue(expect * 0.8 < actual < expect * 1.2)
@condition.retry(5)
def test_forward_dropout_count(self):
y_counts = [0] * self.length
h_counts = [0] * self.n_layers
c_counts = [0] * self.n_layers
for _ in range(self.n_tests):
hy1, cy1, ys1 = lstm_without_dropout(
self.n_layers, self.dropout, self.hx, self.cx, self.ws,
self.bs, self.xs)
with chainer.using_config('use_cudnn', self.use_cudnn):
hy2, cy2, ys2 = F.n_step_lstm(
self.n_layers, self.dropout, self.hx, self.cx, self.ws,
self.bs, self.xs)
for i in range(self.length):
y_counts[i] += count_close(ys1[i].data, ys2[i].data)
for i in range(self.n_layers):
h_counts[i] += count_close(hy1[i].data, hy2[i].data)
c_counts[i] += count_close(cy1[i].data, cy2[i].data)
total = self.batch * self.n_tests
for i in range(self.length):
self.assert_count(
y_counts[i],
total * (1 - self.dropout) ** ((self.n_layers - 1) * (i + 1)))
for i in range(self.n_layers):
self.assert_count(
h_counts[i], total * (1 - self.dropout) ** (self.length * i))
self.assert_count(
c_counts[i], total * (1 - self.dropout) ** (self.length * i))
testing.run_module(__name__, __file__)
| 14,662
| 31.657016
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/rnn_tests/test_function_n_step_gru.py
|
import numpy
import chainer
import chainer.functions as F
from chainer import testing
from chainer.testing import backend
def sigmoid(x):
return numpy.tanh(x * 0.5) * 0.5 + 0.5
def array(shape, dtype):
return numpy.random.uniform(-1, 1, shape).astype(dtype)
@testing.parameterize(*testing.product_dict(
[
{'n_layers': 1, 'hidden_size': 2,
'input_size': 1, 'batches': (1, 1, 1)},
{'n_layers': 2, 'hidden_size': 2,
'input_size': 3, 'batches': (3, 2, 1)},
{'n_layers': 4, 'hidden_size': 6,
'input_size': 3, 'batches': (5, 3, 1)},
{'n_layers': 5, 'hidden_size': 10,
'input_size': 6, 'batches': (6, 5, 3)},
]))
@testing.fix_random()
@backend.inject_backend_tests(
None,
# ChainerX tests
testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
# CPU tests
+ testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product([
[{'use_cuda': True}],
# Without cuDNN
testing.product({
'use_cudnn': ['never'],
})
# With cuDNN
+ testing.product({
'use_cudnn': ['always'],
'cudnn_deterministic': [True, False],
'autotune': [True, False],
})]))
class TestNStepGRU(testing.FunctionTestCase):
dodge_nondifferentiable = True
def setUp(self):
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers, self.batches[0], self.hidden_size)
dtype = numpy.float32
h = array(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = [array((self.batches[b], in_size), dtype)
for b in range(len(self.batches))]
def w_in(i, j):
return in_size if i == 0 and j < 3 else out_size
inputs = []
inputs.append(h)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for i in range(6):
inputs.append(array((out_size, w_in(n, i)), dtype))
for i in range(6):
inputs.append(array((out_size,), dtype))
return tuple(inputs)
def process_inputs(self, inputs):
h = inputs[0]
xs = inputs[1:1 + len(self.batches)]
ws = []
bs = []
index = 1 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 6])
bs.append(inputs[index + 6: index + 12])
index += 12
return h, ws, bs, xs
def forward(self, inputs, device):
h, ws, bs, xs = self.process_inputs(inputs)
if h.array.dtype == numpy.float64:
with chainer.using_config('use_cudnn', 'never'):
out = F.n_step_gru(self.n_layers, 0.0, h, ws, bs, xs)
else:
out = F.n_step_gru(self.n_layers, 0.0, h, ws, bs, xs)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
def forward_expected(self, inputs):
h, ws, bs, xs = self.process_inputs(inputs)
e_hy = h.copy()
ys = []
for ind in range(len(xs)):
x = xs[ind]
batch = x.shape[0]
for layer in range(self.n_layers):
w = ws[layer]
b = bs[layer]
h_prev = e_hy[layer, :batch]
# GRU
z = sigmoid(x.dot(w[1].T) + h_prev.dot(w[4].T) + b[1] + b[4])
r = sigmoid(x.dot(w[0].T) + h_prev.dot(w[3].T) + b[0] + b[3])
                h_bar = numpy.tanh(
                    x.dot(w[2].T) + r * (h_prev.dot(w[5].T) + b[5]) + b[2])
e_h = (1 - z) * h_bar + z * h_prev
e_hy[layer, :batch] = e_h
x = e_h
ys.append(x)
rets = []
rets.append(e_hy)
for i in range(len(ys)):
rets.append(ys[i])
return tuple(rets)
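# The GRU step reproduced in forward_expected above is
#   r = sigmoid(x.dot(W_r.T) + h.dot(U_r.T) + b_r + b'_r)
#   z = sigmoid(x.dot(W_z.T) + h.dot(U_z.T) + b_z + b'_z)
#   h_bar = tanh(x.dot(W.T) + r * (h.dot(U.T) + b') + b)
#   h_new = (1 - z) * h_bar + z * h
# with w[0]/w[3] as the reset-gate weights, w[1]/w[4] as the update-gate
# weights and w[2]/w[5] producing the candidate state, following the index
# layout used above.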
@testing.parameterize(*testing.product_dict(
[
{'n_layers': 1, 'hidden_size': 2,
'input_size': 1, 'batches': (1, 1, 1)},
{'n_layers': 2, 'hidden_size': 2,
'input_size': 3, 'batches': (3, 2, 1)},
{'n_layers': 4, 'hidden_size': 6,
'input_size': 3, 'batches': (5, 3, 1)},
{'n_layers': 5, 'hidden_size': 10,
'input_size': 6, 'batches': (6, 5, 3)},
]))
@testing.fix_random()
@backend.inject_backend_tests(
None,
# ChainerX tests
testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
# CPU tests
+ testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product([
[{'use_cuda': True}],
# Without cuDNN
testing.product({
'use_cudnn': ['never'],
})
# With cuDNN
+ testing.product({
'use_cudnn': ['always'],
'cudnn_deterministic': [True, False],
'autotune': [True, False],
})]))
class TestNStepBiGRU(testing.FunctionTestCase):
dodge_nondifferentiable = True
def setUp(self):
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers * 2, self.batches[0], self.hidden_size)
dtype = numpy.float32
h = array(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = [array((self.batches[b], in_size), dtype)
for b in range(len(self.batches))]
def w_in(i, j):
if i == 0 and j < 3:
return in_size
elif i > 0 and j < 3:
return out_size * 2
else:
return out_size
inputs = []
inputs.append(h)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for direction in (0, 1):
for i in range(6):
inputs.append(array((out_size, w_in(n, i)), dtype))
for i in range(6):
inputs.append(array((out_size,), dtype))
return tuple(inputs)
def process_inputs(self, inputs):
h = inputs[0]
xs = inputs[1:1 + len(self.batches)]
ws = []
bs = []
index = 1 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 6])
bs.append(inputs[index + 6: index + 12])
ws.append(inputs[index + 12: index + 18])
bs.append(inputs[index + 18: index + 24])
index += 24
return h, ws, bs, xs
def forward(self, inputs, device):
h, ws, bs, xs = self.process_inputs(inputs)
if h.array.dtype == numpy.float64:
with chainer.using_config('use_cudnn', 'never'):
out = F.n_step_bigru(self.n_layers, 0.0, h, ws, bs, xs)
else:
out = F.n_step_bigru(self.n_layers, 0.0, h, ws, bs, xs)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
def forward_expected(self, inputs):
h, ws, bs, xs = self.process_inputs(inputs)
xs_next = xs
e_hy = h.copy()
for layer in range(self.n_layers):
# forward
di = 0
xf = []
layer_idx = layer * 2 + di
w = ws[layer_idx]
b = bs[layer_idx]
for ind in range(len(xs)):
x = xs_next[ind]
batch = x.shape[0]
h_prev = e_hy[layer_idx, :batch]
# GRU
z = sigmoid(x.dot(w[1].T) + h_prev.dot(w[4].T) + b[1] + b[4])
r = sigmoid(x.dot(w[0].T) + h_prev.dot(w[3].T) + b[0] + b[3])
                h_bar = numpy.tanh(
                    x.dot(w[2].T) + r * (h_prev.dot(w[5].T) + b[5]) + b[2])
e_h = (1 - z) * h_bar + z * h_prev
e_hy[layer_idx, :batch] = e_h
xf.append(e_h)
# backward
di = 1
xb = []
layer_idx = layer * 2 + di
w = ws[layer_idx]
b = bs[layer_idx]
for ind in reversed(range(len(xs))):
x = xs_next[ind]
batch = x.shape[0]
h_prev = e_hy[layer_idx, :batch]
# GRU
z = sigmoid(x.dot(w[1].T) + h_prev.dot(w[4].T) + b[1] + b[4])
r = sigmoid(x.dot(w[0].T) + h_prev.dot(w[3].T) + b[0] + b[3])
                h_bar = numpy.tanh(
                    x.dot(w[2].T) + r * (h_prev.dot(w[5].T) + b[5]) + b[2])
e_h = (1 - z) * h_bar + z * h_prev
e_hy[layer_idx, :batch] = e_h
xb.append(e_h)
xb.reverse()
xs_next = [numpy.concatenate([hfi, hbi], axis=1) for (hfi, hbi) in
zip(xf, xb)]
rets = []
rets.append(e_hy)
for x in xs_next:
rets.append(x)
return tuple(rets)
testing.run_module(__name__, __file__)
| 9,476
| 30.380795
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/rnn_tests/test_function_slstm.py
|
import unittest
import numpy
import six
from chainer.backends import cuda
from chainer import functions
from chainer.functions.rnn import slstm
from chainer import gradient_check
from chainer import testing
from chainer.testing import backend
def _sigmoid(x):
half = x.dtype.type(0.5)
return numpy.tanh(x * half) * half + half
def inject_backend_tests(method_names):
decorator = backend.inject_backend_tests(
method_names,
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
}) +
# GPU tests
[{'use_cuda': True}])
return decorator
@testing.parameterize(*testing.product_dict(
[
{'shape': (5, 6, 2)},
{'shape': (8, 9, 4, 5)},
{'shape': (1, 0, 5)},
], [
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
], [
{'grad_outputs': (True, True)},
{'grad_outputs': (True, False)},
{'grad_outputs': (False, True)},
], [
{'flat': True},
{'flat': False},
]
))
@testing.fix_random()
@backend.inject_backend_tests(
None,
# ChainerX tests
testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
# CPU tests
+ testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product([
[{'use_cuda': True}],
# Without cuDNN
testing.product({
'use_cudnn': ['never'],
})
# With cuDNN
+ testing.product({
'use_cudnn': ['always'],
'cudnn_deterministic': [True, False],
'autotune': [True, False],
})]))
class TestSLSTM(testing.FunctionTestCase):
dodge_nondifferentiable = True
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
def generate_inputs(self):
x_shape = []
x_shape.append(self.shape[0])
x_shape.append(4 * self.shape[1])
for i in range(2, len(self.shape)):
x_shape.append(self.shape[i])
x_shape = tuple(x_shape)
c1 = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
c2 = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
x1 = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
x2 = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
if self.flat:
return c1[..., 0], c2[..., 0], x1[..., 0], x2[..., 0],
else:
return c1, c2, x1, x2,
def forward(self, inputs, device):
c1, c2, x1, x2 = inputs
out = functions.slstm(c1, c2, x1, x2)
return out
def forward_expected(self, inputs):
c_prev1, c_prev2, x1, x2 = inputs
def _extract_gates(x):
r = x.reshape((x.shape[0], x.shape[1] // 4, 4) + x.shape[2:])
return (r[:, :, i] for i in six.moves.range(4))
a1_in, i1_in, f1_in, o1_in = _extract_gates(x1)
a2_in, i2_in, f2_in, o2_in = _extract_gates(x2)
c_expect = _sigmoid(i1_in) * numpy.tanh(a1_in) + \
_sigmoid(i2_in) * numpy.tanh(a2_in) + \
_sigmoid(f1_in) * c_prev1 + \
_sigmoid(f2_in) * c_prev2
h_expect = _sigmoid(o1_in + o2_in) * numpy.tanh(c_expect)
return c_expect, h_expect
def generate_grad_outputs(self, outputs_template):
grad_out = []
c = outputs_template[0]
h = outputs_template[1]
c_shape = c.shape
h_shape = h.shape
if self.grad_outputs[0] is True:
grad_out.append(numpy.random.uniform(-1, 1,
h_shape).astype(h.dtype))
else:
grad_out.append(None)
if self.grad_outputs[1] is True:
grad_out.append(numpy.random.uniform(-1, 1,
c_shape).astype(c.dtype))
else:
grad_out.append(None)
return tuple(grad_out)
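# F.slstm merges two child cells: each of x1 and x2 packs four gates
# (a, i, f, o) for its branch, and the reference above computes
#   c = sigmoid(i1) * tanh(a1) + sigmoid(i2) * tanh(a2)
#       + sigmoid(f1) * c_prev1 + sigmoid(f2) * c_prev2
#   h = sigmoid(o1 + o2) * tanh(c)
# so each child contributes its own input and forget gates while the two
# output gates are summed into a single shared gate.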
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@inject_backend_tests(['test_backward'])
class TestSLSTMGrad(unittest.TestCase):
def setUp(self):
c_prev1 = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
c_prev2 = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
x1 = numpy.random.uniform(-1, 1, (3, 8, 4)).astype(self.dtype)
x2 = numpy.random.uniform(-1, 1, (3, 8, 4)).astype(self.dtype)
c_next = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
gc = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
gh = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
ggc_prev1 = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
ggc_prev2 = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
ggx1 = numpy.random.uniform(-1, 1, (3, 8, 4)).astype(self.dtype)
ggx2 = numpy.random.uniform(-1, 1, (3, 8, 4)).astype(self.dtype)
self.inputs = [c_prev1, c_prev2, x1, x2, c_next, gc, gh]
self.grad_outputs = [ggc_prev1, ggc_prev2, ggx1, ggx2]
self.check_backward_options = {'dtype': numpy.float64}
if self.dtype == numpy.float16:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-3, 'rtol': 1e-2}
def check_backward(self, inputs, grad_outputs, backend_config):
if backend_config.use_cuda:
inputs = cuda.to_gpu(inputs)
grad_outputs = cuda.to_gpu(grad_outputs)
with backend_config:
gradient_check.check_backward(
slstm.SLSTMGrad(), inputs, grad_outputs,
**self.check_backward_options)
def test_backward(self, backend_config):
self.check_backward(self.inputs, self.grad_outputs, backend_config)
testing.run_module(__name__, __file__)
| 6,206
| 31.328125
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_floor.py
|
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestFloor(testing.FunctionTestCase):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
while True:
x = numpy.random.uniform(
-10.0, 10.0, self.shape).astype(self.dtype)
if (numpy.abs(x - numpy.round(x)) > 1e-2).all():
return x,
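    # Rejection-sample inputs so that every element is at least 1e-2 away
    # from an integer; values sitting right on a rounding boundary could
    # otherwise floor differently after dtype/device conversion and make
    # the forward comparison flaky.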
def forward(self, inputs, device):
x, = inputs
y = functions.floor(x)
return y,
def forward_expected(self, inputs):
x, = inputs
expected = numpy.floor(x)
expected = numpy.asarray(expected)
return expected,
testing.run_module(__name__, __file__)
| 1,223
| 21.666667
| 60
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_sqrt.py
|
import unittest
import numpy
import chainer.functions as F
from chainer import testing
# sqrt
def make_data(shape, dtype):
x = numpy.random.uniform(0.1, 5, shape).astype(dtype)
gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
ggx = numpy.random.uniform(-1, 1, shape).astype(dtype)
return x, gy, ggx
@testing.unary_math_function_unittest(
F.sqrt,
make_data=make_data,
backward_options={'eps': 1e-3},
double_backward_options={'eps': 1e-3},
)
class TestSqrt(unittest.TestCase):
pass
# rsqrt
def rsqrt(x, dtype):
return numpy.reciprocal(numpy.sqrt(x, dtype=dtype), dtype=dtype)
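# Reference for F.rsqrt: rsqrt(x) = 1 / sqrt(x), evaluated in the requested
# dtype so the reference is computed at the same precision as the function
# under test.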
@testing.unary_math_function_unittest(
F.rsqrt,
func_expected=rsqrt,
make_data=make_data,
forward_options={'atol': 1e-2},
backward_options={'eps': 1e-2, 'atol': 1e-2, 'rtol': 1e-2},
double_backward_options={'eps': 1e-2, 'atol': 1e-1, 'rtol': 1e-1},
)
class TestRsqrt(unittest.TestCase):
pass
testing.run_module(__name__, __file__)
| 992
| 20.12766
| 70
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_average.py
|
import unittest
import numpy
import six
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer import utils
@testing.parameterize(*(
testing.product({
'shape': [(3, 2, 4)],
'axis': [None, 0, 1, 2, -1, (0, 1), (1, -1)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_weights': [True, False],
'keepdims': [True, False],
'use_variable_method': [True, False],
}) +
testing.product({
'shape': [()],
'axis': [None],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_weights': [True, False],
'keepdims': [True, False],
'use_variable_method': [True, False],
})))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
}))
class TestAverage(testing.FunctionTestCase):
def setUp(self):
self.skip_double_backward_test = True
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 5e-3, 'rtol': 5e-3})
self.check_backward_options.update({'atol': 1e-2, 'rtol': 1e-1})
else:
self.check_backward_options.update({'atol': 1e-2, 'rtol': 1e-2})
def before_test(self, test_name):
if self.use_weights and isinstance(self.axis, tuple):
# This condition is not supported
raise unittest.SkipTest(
'Tuple axis is not supported when weights is given')
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
if self.axis is None:
w_shape = self.shape
elif isinstance(self.axis, int):
axis = self.axis
if axis < 0:
ndim = len(self.shape)
axis += ndim
w_shape = self.shape[axis],
else:
w_shape = tuple(self.shape[a] for a in self.axis)
# Sample weights. Weights should not sum to 0.
while True:
w = numpy.random.uniform(-2, 2, w_shape).astype(self.dtype)
w_sum_eps = 1.0 if self.dtype == numpy.float16 else 5e-2
if abs(w.sum()) > w_sum_eps:
break
return x, w
def forward(self, inputs, device):
x, w = inputs
if not self.use_weights:
w = None
if self.use_variable_method:
y = x.mean(axis=self.axis, weights=w, keepdims=self.keepdims)
else:
y = functions.average(
x, axis=self.axis, weights=w, keepdims=self.keepdims)
return y,
def forward_expected(self, inputs):
x, w = inputs
if not self.use_weights:
w = None
y_expect = numpy.average(x, axis=self.axis, weights=w)
if self.keepdims:
# numpy.average does not support keepdims
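            # so emulate it by reshaping the result with the reduced axes
            # restored as singleton (length-1) dimensions.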
axis = self.axis
if axis is None:
axis = list(six.moves.range(x.ndim))
elif isinstance(axis, int):
axis = axis,
shape = list(x.shape)
for i in six.moves.range(len(shape)):
if i in axis or i - len(shape) in axis:
shape[i] = 1
y_expect = y_expect.reshape(shape)
y_expect = utils.force_array(y_expect, dtype=self.dtype)
return y_expect,
@testing.parameterize(*(
testing.product({
'shape': [(30, 20, 40)],
'axis': [None, 0, 1, 2, -1, (0, 1), (1, -1)],
'dtype': [numpy.float16],
'use_weights': [False], # np.average overflows when `weights` is used
'keepdims': [True, False],
})
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
}))
@attr.slow
@testing.with_requires('numpy>=1.12') # NumPy #8222
class TestAverageOverflowingSum(testing.FunctionTestCase):
def setUp(self):
self.check_forward_options.update({'atol': 1e-2, 'rtol': 2e-3})
self.check_backward_options.update({'atol': 1e-2, 'rtol': 1e-2})
self.check_double_backward_options.update({'atol': 1e-2, 'rtol': 1e-2})
def generate_inputs(self):
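        # Values of this magnitude make the intermediate sums exceed the
        # float16 maximum (65504); forward_expected therefore accumulates
        # in float64 before casting back.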
x = numpy.random.uniform(3000, 7000, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
y = functions.average(
x, self.axis, keepdims=self.keepdims)
return y,
def forward_expected(self, inputs):
x, = inputs
y_expect = numpy.mean(
x.astype(numpy.float64), self.axis, keepdims=self.keepdims
).astype(self.dtype)
return utils.force_array(y_expect),
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestAverageDuplicateValueInAxis(unittest.TestCase):
def test_duplicate_value(self):
x = numpy.random.uniform(-1, 1, 24).reshape(2, 3, 4).astype(self.dtype)
with self.assertRaises(ValueError):
functions.average(x, axis=(0, 0))
def test_duplicate_value_negative(self):
x = numpy.random.uniform(-1, 1, 24).reshape(2, 3, 4).astype(self.dtype)
with self.assertRaises(ValueError):
functions.average(x, axis=(1, -2))
def test_weights_and_axis(self):
x = numpy.random.uniform(-1, 1, 24).reshape(2, 3, 4).astype(self.dtype)
w = numpy.random.uniform(-1, 1, 6).reshape(2, 3).astype(self.dtype)
with self.assertRaises(ValueError):
functions.average(x, axis=(0, 1), weights=w)
testing.run_module(__name__, __file__)
| 6,055
| 30.378238
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_minimum.py
|
import unittest
import numpy
import chainer
from chainer import functions
from chainer import testing
from chainer.utils import type_check
@testing.parameterize(*testing.product({
'shape': [
# x1, x2, y
((3, 2), (3, 2), (3, 2)),
((), (), ()),
((3, 2), (3, 1), (3, 2)),
((2,), (3, 2), (3, 2)),
],
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestMinimum(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
eps = 1e-2
self.check_forward_options.update({'atol': 1e-4, 'rtol': 1e-3})
self.check_backward_options.update({
'atol': 1e-2, 'rtol': 1e-2})
self.check_double_backward_options.update({
'atol': 1e-2, 'rtol': 1e-2})
else:
eps = 1e-3
self.check_backward_options['eps'] = eps
self.check_double_backward_options['eps'] = eps
def generate_inputs(self):
x1_shape, x2_shape, y_shape = self.shape
x1 = numpy.random.uniform(-1, 1, x1_shape).astype(self.dtype)
x2 = numpy.random.uniform(-1, 1, x2_shape).astype(self.dtype)
return x1, x2
def forward(self, inputs, device):
x1, x2 = inputs
return functions.minimum(x1, x2),
def forward_expected(self, inputs):
x1, x2 = inputs
expected = numpy.minimum(x1, x2)
expected = numpy.asarray(expected)
return expected.astype(self.dtype),
@testing.parameterize(*testing.product({
'dtype1': [numpy.float16, numpy.float32, numpy.float64],
'dtype2': [numpy.float16, numpy.float32, numpy.float64]
}))
class TestMinimumInconsistentTypes(unittest.TestCase):
def test_minimum_inconsistent_types(self):
if self.dtype1 == self.dtype2:
return
x1_data = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype1)
x2_data = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype2)
x1 = chainer.Variable(x1_data)
x2 = chainer.Variable(x2_data)
with self.assertRaises(type_check.InvalidType):
functions.minimum(x1, x2)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
class TestMinimumInconsistentShapes(unittest.TestCase):
def test_minimum_inconsistent_shapes(self):
x1_data = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype)
x2_data = numpy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
x1 = chainer.Variable(x1_data)
x2 = chainer.Variable(x2_data)
with self.assertRaises(type_check.InvalidType):
functions.minimum(x1, x2)
testing.run_module(__name__, __file__)
| 3,220
| 29.67619
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_sign.py
|
import numpy
from chainer import functions
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestSign(testing.FunctionTestCase):
skip_backward_test = True
skip_double_backward_test = True
def setUp(self):
self.check_forward_options.update({'atol': 1e-7, 'rtol': 1e-7})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
# Avoid non-differentiable point
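        # sign is discontinuous at zero, so keep inputs away from it to make
        # the expected output insensitive to tiny numerical differences.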
x[(abs(x) < 1e-2)] = 1
return x,
def forward(self, inputs, device):
x, = inputs
y = functions.sign(x)
return y,
def forward_expected(self, inputs):
x, = inputs
expected = numpy.sign(x)
expected = utils.force_array(expected)
return expected,
testing.run_module(__name__, __file__)
| 1,375
| 22.724138
| 71
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_exponential_m1.py
|
import numpy
from chainer import functions
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
'dtype': [numpy.float32],
'shape': [(), (3, 2)],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class Expm1FunctionTest(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options.update(
{'atol': 1e-3, 'rtol': 1e-2})
self.check_double_backward_options.update(
{'atol': 1e-3, 'rtol': 1e-2})
def generate_inputs(self):
x = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.expm1(x),
def forward_expected(self, inputs):
x, = inputs
expected = numpy.expm1(x)
expected = utils.force_array(expected)
return expected,
testing.run_module(__name__, __file__)
| 1,234
| 21.87037
| 70
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_bias.py
|
import unittest
import numpy
import chainer
from chainer import functions
from chainer import testing
from chainer import utils
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestBias(testing.FunctionTestCase):
skip_double_backward_test = True
def generate_inputs(self):
x1 = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
x2 = numpy.random.uniform(-1, 1, (2)).astype(numpy.float32)
return x1, x2
def forward(self, inputs, device):
x1, x2 = inputs
axis = 1
return functions.bias(x1, x2, axis),
def forward_expected(self, inputs):
x1, x2 = inputs
expected = numpy.copy(x1)
for i, j, k in numpy.ndindex(expected.shape):
expected[i, j, k] += x2[j]
expected = utils.force_array(expected)
return expected,
class TestBiasInvalidShape(unittest.TestCase):
def test_bias_invalid_shape(self):
x1 = chainer.Variable(numpy.zeros((3, 2, 3), numpy.float32))
x2 = chainer.Variable(numpy.zeros((2), numpy.float32))
axis = 0
with chainer.using_config('debug', True):
with self.assertRaises(AssertionError):
functions.bias(x1, x2, axis)
testing.run_module(__name__, __file__)
| 1,580
| 23.703125
| 73
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_logarithm_1p.py
|
import numpy
from chainer import functions
from chainer import testing
from chainer.utils import force_array
@testing.parameterize(*testing.product({
'shape': [(), (3, 2)],
'dtype': [numpy.float32]
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class Log1pFunctionTest(testing.FunctionTestCase):
def generate_inputs(self):
x = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.log1p(x),
def forward_expected(self, inputs):
x, = inputs
expected = numpy.log1p(x)
expected = force_array(expected)
return expected,
testing.run_module(__name__, __file__)
| 1,038
| 20.645833
| 70
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_minmax.py
|
import unittest
import numpy
import chainer
from chainer import functions
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
'function_name': ['max', 'min'],
'shape': [(3, 2, 4)],
'dtype': [numpy.float32],
'axis': [
None,
0, 1, 2, # axis
-1, # negative_axis
(0, 1), # multi_axis
(1, 0), # multi_axis_invert
(0, -1), # negative_multi_axis
(-2, 0), # negative_multi_axis_invert
],
'keepdims': [True, False],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestMinMax(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options.update({
'eps': 1e-5, 'atol': 1e-3, 'rtol': 1e-2})
self.check_double_backward_options.update({
'eps': 1e-5, 'atol': 1e-3, 'rtol': 1e-2})
def generate_inputs(self):
eps = 1e-5
# Sample x with single maximum/minimum value
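        # If the extremum were attained by more than one element (within
        # twice the numerical-gradient step), the gradient would be
        # ambiguous and the backward checks would fail, so resample until
        # it is unique along every reduced slice.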
while True:
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
if self.function_name == 'max':
y = x.max(axis=self.axis, keepdims=True)
if not numpy.all((x > y - 2 * eps).sum(axis=self.axis) == 1):
continue
elif self.function_name == 'min':
y = x.min(axis=self.axis, keepdims=True)
if not numpy.all((x < y + 2 * eps).sum(axis=self.axis) == 1):
continue
return x,
def forward(self, inputs, device):
x, = inputs
function = getattr(functions, self.function_name)
y = function(x, axis=self.axis, keepdims=self.keepdims)
return y,
def forward_expected(self, inputs):
x, = inputs
function = getattr(numpy, 'a' + self.function_name)
expected = function(x, axis=self.axis, keepdims=self.keepdims)
expected = utils.force_array(expected)
return expected,
@testing.parameterize(*testing.product({
'function_name': ['max', 'min'],
}))
class TestMinMaxInvalid(unittest.TestCase):
def setUp(self):
self.function = getattr(functions, self.function_name)
self.x = numpy.array([1], dtype=numpy.float32)
def test_invalid_axis_type(self):
with self.assertRaises(TypeError):
self.function(self.x, [0])
def test_invalid_axis_type_in_tuple(self):
with self.assertRaises(TypeError):
self.function(self.x, (1, 'x'))
def test_duplicate_axis(self):
with self.assertRaises(ValueError):
self.function(self.x, (0, 0))
def test_pos_neg_duplicate_axis(self):
x_data = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(numpy.float32)
x = chainer.Variable(x_data)
with self.assertRaises(ValueError):
self.function(x, axis=(1, -2))
@testing.parameterize(*testing.product({
'function_name': ['argmax', 'argmin'],
'axis': [None, 0, 1, 2, -1, -2, -3],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'shape': [(3, 2, 4)],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestArgMinMax(testing.FunctionTestCase):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
function = getattr(functions, self.function_name)
y = function(x, axis=self.axis)
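        # Cast to int64 so the comparison with NumPy's expected result does
        # not depend on the backend's index dtype.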
y = functions.cast(y, numpy.int64)
return y,
def forward_expected(self, inputs):
x, = inputs
function = getattr(numpy, self.function_name)
expected = function(x, axis=self.axis)
expected = utils.force_array(expected)
return expected,
@testing.parameterize(*testing.product({
'function_name': ['argmax', 'argmin'],
}))
class TestArgMinMaxInvalid(unittest.TestCase):
def setUp(self):
self.function = getattr(functions, self.function_name)
self.x = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(numpy.float32)
def test_invalid_axis_type(self):
with self.assertRaises(TypeError):
self.function(self.x, [0])
def test_invalid_axis_type_in_tuple(self):
with self.assertRaises(TypeError):
self.function(self.x, (1, 'x'))
testing.run_module(__name__, __file__)
| 5,027
| 27.247191
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_digamma.py
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
def _digamma_cpu(x, dtype):
from scipy import special
return numpy.vectorize(special.digamma, otypes=[dtype])(x)
def _digamma_gpu(x, dtype):
return cuda.to_gpu(_digamma_cpu(cuda.to_cpu(x), dtype))
def _digamma_expected(x, dtype):
if backend.get_array_module(x) is numpy:
return _digamma_cpu(x, dtype)
else:
return _digamma_gpu(x, dtype)
def make_data(shape, dtype):
x = numpy.random.uniform(1., 10., shape).astype(dtype)
gy = numpy.random.uniform(-1., 1., shape).astype(dtype)
ggx = numpy.random.uniform(-1., 1., shape).astype(dtype)
return x, gy, ggx
@testing.unary_math_function_unittest(
F.digamma,
func_expected=_digamma_expected,
make_data=make_data,
backward_options={'eps': 1e-3, 'atol': 5e-2, 'rtol': 1e-4,
'dtype': numpy.float64},
double_backward_options={'eps': 1e-3, 'atol': 5e-2, 'rtol': 1e-4,
'dtype': numpy.float64}
)
@testing.with_requires('scipy')
class TestDiGamma(unittest.TestCase):
pass
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
@testing.without_requires('scipy')
class TestDiGammaExceptions(unittest.TestCase):
def setUp(self):
self.x, self.gy, self.ggx = make_data(self.shape, self.dtype)
self.func = F.digamma
def check_forward(self, x_data):
x = chainer.Variable(x_data)
with self.assertRaises(ImportError):
self.func(x)
def test_forward_cpu(self):
self.check_forward(self.x)
testing.run_module(__name__, __file__)
| 1,801
| 25.115942
| 69
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_sparse_matmul.py
|
import unittest
import numpy
import chainer
from chainer import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer import utils
from chainer.utils import type_check
_scipy_available = True
try:
from scipy import sparse # NOQA
except ImportError:
_scipy_available = False
def _setup_tensor(_min, _max, shape, dtype, threshold=None):
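    # Random tensor whose entries below `threshold` (when given) are zeroed,
    # producing a sparse pattern for the COO matmul tests.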
y = numpy.random.uniform(_min, _max, shape).astype(dtype)
if threshold is not None:
y[y < threshold] = 0
return y
@testing.parameterize(*testing.product_dict(
[
{'m': 2, 'n': 3, 'k': 4},
{'m': 3, 'n': 4, 'k': 2},
],
[
{'transa': False}, {'transa': True},
],
[
{'transb': False}, {'transb': True},
],
[
{'nbatch': 0}, {'nbatch': 1}, {'nbatch': 4},
],
[
{'a_dtype': numpy.float16},
{'a_dtype': numpy.float32},
{'a_dtype': numpy.float64},
],
[
{'b_dtype': numpy.float16},
{'b_dtype': numpy.float32},
{'b_dtype': numpy.float64},
]
))
class TestCooMatMul(unittest.TestCase):
def setUp(self):
a_shape = self._set_shape([self.m, self.k], self.transa)
b_shape = self._set_shape([self.k, self.n], self.transb)
c_shape = self._set_shape([self.m, self.n], False)
self.c_dtype = numpy.result_type(self.a_dtype, self.b_dtype)
self.a = _setup_tensor(.5, 1, a_shape, self.a_dtype, .75)
self.b = _setup_tensor(.5, 1, b_shape, self.b_dtype, .75)
self.gc = _setup_tensor(-1, 1, c_shape, self.c_dtype)
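        # The second-order upstream gradients must share the sparsity
        # pattern of a and b so that their COO value arrays line up with
        # sp_a / sp_b in the double-backward checks.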
self.gga = _setup_tensor(.5, 1, a_shape, self.a_dtype)
self.gga[numpy.where(self.a < .75)] = 0
self.ggb = _setup_tensor(.5, 1, b_shape, self.b_dtype)
self.ggb[numpy.where(self.b < .75)] = 0
self.forward_answer = self._matmul(self.a, self.b)
def _set_shape(self, shape, trans):
if trans:
shape = [shape[1], shape[0]]
if self.nbatch > 0:
shape = [self.nbatch, shape[0], shape[1]]
return shape
def _matmul(self, a, b):
if self.transa:
a = a.swapaxes(-1, -2)
if self.transb:
b = b.swapaxes(-1, -2)
if hasattr(numpy, 'matmul'):
return numpy.matmul(a, b)
elif a.ndim == 2:
return numpy.dot(a, b)
else:
return numpy.einsum('...ij,...jk->...ik', a, b)
#
# SPDN: sparse A * dense B
#
def check_SPDN_forward(self, a_data, b_data, atol=1e-4, rtol=1e-5):
sp_a = utils.to_coo(a_data, requires_grad=True)
b = chainer.Variable(b_data)
c = F.sparse_matmul(sp_a, b, transa=self.transa, transb=self.transb)
testing.assert_allclose(self.forward_answer, c.data, atol, rtol)
def test_SPDN_sparse_matmul_forward_cpu(self):
if not _scipy_available:
return
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_SPDN_forward(self.a, self.b, atol=1e-3, rtol=1e-3)
else:
self.check_SPDN_forward(self.a, self.b)
@attr.gpu
def test_SPDN_sparse_matmul_forward_gpu(self):
a = cuda.to_gpu(self.a)
b = cuda.to_gpu(self.b)
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_SPDN_forward(a, b, atol=1e-3, rtol=1e-3)
else:
self.check_SPDN_forward(a, b)
def check_SPDN_backward(self, a_data, b_data, c_grad, atol, rtol):
sp_a = utils.to_coo(a_data)
func = F.math.sparse_matmul.CooMatMul(
sp_a.row, sp_a.col, sp_a.shape, sp_a.order,
transa=self.transa, transb=self.transb, transc=False)
def op(a, b):
return func.apply((a, b))[0]
gradient_check.check_backward(
op, (sp_a.data.data, b_data), c_grad, atol=atol, rtol=rtol,
dtype=numpy.float32)
def test_SPDN_sparse_matmul_backward_cpu(self):
if not _scipy_available:
return
self.check_SPDN_backward(
self.a, self.b, self.gc, atol=1e-2, rtol=1e-2)
@attr.gpu
def test_SPDN_sparse_matmul_backward_gpu(self):
self.check_SPDN_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.b),
cuda.to_gpu(self.gc), atol=1e-2, rtol=1e-2)
def check_SPDN_double_backward(
self, a_data, b_data, c_grad, a_grad_grad, b_grad_grad,
atol, rtol):
sp_a = utils.to_coo(a_data)
sp_gga = utils.to_coo(a_grad_grad)
func = F.math.sparse_matmul.CooMatMul(
sp_a.row, sp_a.col, sp_a.shape, sp_a.order,
transa=self.transa, transb=self.transb, transc=False)
def op(a, b):
return func.apply((a, b))[0]
gradient_check.check_double_backward(
op, (sp_a.data.data, b_data),
c_grad, (sp_gga.data.data, b_grad_grad),
atol=atol, rtol=rtol, dtype=numpy.float32)
def test_SPDN_sparse_matmul_double_backward_cpu(self):
if not _scipy_available:
return
self.check_SPDN_double_backward(
self.a, self.b, self.gc, self.gga, self.ggb,
atol=1e-2, rtol=1e-2)
@attr.gpu
def test_SPDN_sparse_matmul_double_backward_gpu(self):
self.check_SPDN_double_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.b),
cuda.to_gpu(self.gc), cuda.to_gpu(self.gga),
cuda.to_gpu(self.ggb), atol=1e-2, rtol=1e-2)
#
# DNSP: dense A * sparse B
#
def check_DNSP_forward(self, a_data, b_data, atol=1e-4, rtol=1e-5):
a = chainer.Variable(a_data)
sp_b = utils.to_coo(b_data, requires_grad=True)
c = F.sparse_matmul(a, sp_b, transa=self.transa, transb=self.transb)
testing.assert_allclose(self.forward_answer, c.data, atol, rtol)
def test_DNSP_sparse_matmul_forward_cpu(self):
if not _scipy_available:
return
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_DNSP_forward(self.a, self.b, atol=1e-3, rtol=1e-3)
else:
self.check_DNSP_forward(self.a, self.b)
@attr.gpu
def test_DNSP_sparse_matmul_forward_gpu(self):
a = cuda.to_gpu(self.a)
b = cuda.to_gpu(self.b)
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_DNSP_forward(a, b, atol=1e-3, rtol=1e-3)
else:
self.check_DNSP_forward(a, b)
def check_DNSP_backward(self, a_data, b_data, c_grad, atol, rtol):
sp_b = utils.to_coo(b_data)
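        # Dense x sparse is evaluated through the sparse-first kernel via
        # (A @ B) = (B.T @ A.T).T: swap the operands, flip both transpose
        # flags and transpose the output (transc=True).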
func = F.math.sparse_matmul.CooMatMul(
sp_b.row, sp_b.col, sp_b.shape, sp_b.order,
transa=not self.transb, transb=not self.transa, transc=True)
def op(b, a):
return func.apply((b, a))[0]
gradient_check.check_backward(
op, (sp_b.data.data, a_data), c_grad, atol=atol, rtol=rtol,
dtype=numpy.float32)
    def test_DNSP_sparse_matmul_backward_cpu(self):
if not _scipy_available:
return
self.check_DNSP_backward(
self.a, self.b, self.gc, atol=1e-2, rtol=1e-2)
@attr.gpu
    def test_DNSP_sparse_matmul_backward_gpu(self):
self.check_DNSP_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.b),
cuda.to_gpu(self.gc), atol=1e-2, rtol=1e-2)
def check_DNSP_double_backward(
self, a_data, b_data, c_grad, a_grad_grad, b_grad_grad,
atol, rtol):
sp_b = utils.to_coo(b_data)
sp_ggb = utils.to_coo(b_grad_grad)
func = F.math.sparse_matmul.CooMatMul(
sp_b.row, sp_b.col, sp_b.shape, sp_b.order,
transa=not self.transb, transb=not self.transa, transc=True)
def op(b, a):
return func.apply((b, a))[0]
gradient_check.check_double_backward(
op, (sp_b.data.data, a_data),
c_grad, (sp_ggb.data.data, a_grad_grad),
atol=atol, rtol=rtol, dtype=numpy.float32)
def test_DNSP_sparse_matmul_double_backward_cpu(self):
if not _scipy_available:
return
self.check_DNSP_double_backward(
self.a, self.b, self.gc, self.gga, self.ggb,
atol=1e-2, rtol=1e-2)
@attr.gpu
def test_DNSP_sparse_matmul_double_backward_gpu(self):
self.check_DNSP_double_backward(
cuda.to_gpu(self.a), cuda.to_gpu(self.b),
cuda.to_gpu(self.gc), cuda.to_gpu(self.gga),
cuda.to_gpu(self.ggb), atol=1e-2, rtol=1e-2)
@testing.parameterize(*testing.product_dict(
[
{'transa': False}, {'transa': True},
],
[
{'transb': False}, {'transb': True},
],
))
class TestCooMatMulInvalid(unittest.TestCase):
def test_invalid_ndim(self):
a = _setup_tensor(.5, 1, (2, 3, 3), numpy.float32, .75)
b = _setup_tensor(.5, 1, (3, 3), numpy.float32, .75)
sp_a = utils.to_coo(a)
sp_b = utils.to_coo(b)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(sp_a, b, self.transa, self.transb)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(a, sp_b, self.transa, self.transb)
def test_invalid_nbatch(self):
a = _setup_tensor(.5, 1, (2, 3, 3), numpy.float32, .75)
b = _setup_tensor(.5, 1, (3, 3, 3), numpy.float32, .75)
sp_a = utils.to_coo(a)
sp_b = utils.to_coo(b)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(sp_a, b, self.transa, self.transb)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(a, sp_b, self.transa, self.transb)
def test_invalid_shape(self):
a = _setup_tensor(.5, 1, (1, 2, 3), numpy.float32, .75)
b = _setup_tensor(.5, 1, (1, 4, 5), numpy.float32, .75)
sp_a = utils.to_coo(a)
sp_b = utils.to_coo(b)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(sp_a, b, self.transa, self.transb)
with self.assertRaises(type_check.InvalidType):
F.sparse_matmul(a, sp_b, self.transa, self.transb)
def test_invalid_inputs(self):
a = _setup_tensor(.5, 1, (1, 3, 3), numpy.float32, .75)
b = _setup_tensor(.5, 1, (1, 3, 3), numpy.float32, .75)
sp_a = utils.to_coo(a)
sp_b = utils.to_coo(b)
with self.assertRaises(ValueError):
F.sparse_matmul(sp_a, sp_b, self.transa, self.transb)
with self.assertRaises(ValueError):
F.sparse_matmul(a, b, self.transa, self.transb)
testing.run_module(__name__, __file__)
| 10,693
| 34.528239
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_trigonometric.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'func_name': ['cos', 'sin', 'tan'],
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TrigonometricFunctionsTest(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
self.func = getattr(F, self.func_name)
camel_name = self.func_name[0].upper() + self.func_name[1:]
self.func_class = getattr(
chainer.functions.math.trigonometric, camel_name)
self.np_func = getattr(numpy, self.func_name)
if self.dtype == numpy.float16:
self.backward_options = {'eps': 1e-3, 'atol': 1e-2, 'rtol': 1e-2}
self.double_backward_options = {
'eps': 1e-3, 'atol': 1e-2, 'rtol': 1e-2}
else:
self.backward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.double_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = self.func(x)
testing.assert_allclose(
self.np_func(self.x), y.data, atol=1e-4, rtol=1e-4)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.func, x_data, y_grad, dtype='d', **self.backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, y_grad, x_grad_grad):
gradient_check.check_double_backward(
self.func, x_data, y_grad, x_grad_grad, dtype='d',
**self.double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(cuda.to_gpu(
self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
def test_label(self):
self.assertEqual(self.func_class().label, self.func_name)
def make_data(shape, dtype):
x = numpy.random.uniform(-.9, .9, shape).astype(dtype)
gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
ggx = numpy.random.uniform(-.9, .9, shape).astype(dtype)
return x, gy, ggx
@testing.unary_math_function_unittest(
F.arcsin,
make_data=make_data,
forward_options={'atol': 1e-3, 'rtol': 1e-3},
double_backward_options={'eps': 1e-3},
)
class TestArcsin(unittest.TestCase):
pass
@testing.unary_math_function_unittest(
F.arccos,
make_data=make_data,
forward_options={'atol': 1e-3, 'rtol': 1e-3},
double_backward_options={'eps': 1e-3},
)
class TestArccos(unittest.TestCase):
pass
@testing.unary_math_function_unittest(F.arctan, make_data=make_data)
class TestArctan(unittest.TestCase):
pass
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestArctan2(unittest.TestCase):
def setUp(self):
self.x1 = numpy.random.uniform(
-10.0, 10.0, self.shape).astype(self.dtype)
self.x2 = numpy.random.uniform(
-10.0, 10.0, self.shape).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.ggx1 = numpy.random.uniform(
-10.0, 10.0, self.shape).astype(self.dtype)
self.ggx2 = numpy.random.uniform(
-10.0, 10.0, self.shape).astype(self.dtype)
if self.dtype == numpy.float16:
self.backward_options = {
'eps': 1e-3, 'atol': 2 ** -4, 'rtol': 2 ** -4}
self.double_backward_options = {
'eps': 1e-3, 'atol': 2 ** -4, 'rtol': 2 ** -4}
else:
self.backward_options = {
'atol': 1e-3, 'rtol': 1e-3}
self.double_backward_options = {
'atol': 1e-3, 'rtol': 1e-3}
# Avoid non-differentiable point
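        # arctan2 jumps between -pi and pi across the negative x2 axis, so
        # move x1 away from zero there to keep numerical gradients finite.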
self.x1[(abs(self.x1) < 1e-2) & (self.x2 < 0)] = 1
self.ggx1[(abs(self.ggx1) < 1e-2) & (self.ggx2 < 0)] = 1
def check_forward(self, x1_data, x2_data):
y = F.arctan2(x1_data, x2_data)
numpy.testing.assert_array_less(
cuda.to_cpu(y.data),
numpy.full(y.shape, numpy.pi))
numpy.testing.assert_array_less(
numpy.full(y.shape, -numpy.pi),
cuda.to_cpu(y.data))
testing.assert_allclose(
numpy.arctan2(self.x1, self.x2), y.data, atol=1e-4, rtol=1e-4)
def test_forward_cpu(self):
self.check_forward(self.x1, self.x2)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x1), cuda.to_gpu(self.x2))
def check_backward(self, x1_data, x2_data, y_grad):
gradient_check.check_backward(
F.arctan2, (x1_data, x2_data), y_grad, dtype='d',
**self.backward_options)
def test_backward_cpu(self):
self.check_backward(self.x1, self.x2, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x1),
cuda.to_gpu(self.x2),
cuda.to_gpu(self.gy))
def check_double_backward(
self, x1_data, x2_data, y_grad, x1_grad_grad, x2_grad_grad):
gradient_check.check_double_backward(
F.arctan2, (x1_data, x2_data), y_grad,
(x1_grad_grad, x2_grad_grad), dtype='d',
**self.double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(
self.x1, self.x2, self.gy, self.ggx1, self.ggx2)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x1),
cuda.to_gpu(self.x2),
cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx1),
cuda.to_gpu(self.ggx2))
testing.run_module(__name__, __file__)
| 6,529
| 32.316327
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_square.py
|
import unittest
import chainer.functions as F
from chainer import testing
@testing.unary_math_function_unittest(F.square)
class TestSquare(unittest.TestCase):
pass
testing.run_module(__name__, __file__)
| 212
| 15.384615
| 47
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_cholesky.py
|
import numpy
import chainer.functions as F
from chainer import testing
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64],
'shape': [(5, 5), (1, 1)]
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# TODO(niboshi): Add ChainerX tests
)
class TestCholesky(testing.FunctionTestCase):
def random_matrix(self, shape, dtype, scale, sym=False):
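        # Builds a random matrix whose singular values lie within `scale`;
        # with sym=True it returns u @ diag(s) @ u.T, i.e. a symmetric
        # positive definite matrix suitable for Cholesky decomposition.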
m, n = shape[-2:]
dtype = numpy.dtype(dtype)
assert dtype.kind in 'iufc'
low_s, high_s = scale
bias = None
if dtype.kind in 'iu':
err = numpy.sqrt(m * n) / 2.
low_s += err
high_s -= err
if dtype.kind in 'u':
assert sym, (
'generating nonsymmetric matrix with uint cells is not'
' supported')
# (singular value of numpy.ones((m, n))) <= \sqrt{mn}
high_s = bias = high_s / (1 + numpy.sqrt(m * n))
assert low_s <= high_s
a = numpy.random.standard_normal(shape)
u, s, vh = numpy.linalg.svd(a)
new_s = numpy.random.uniform(low_s, high_s, s.shape)
if sym:
assert m == n
new_a = numpy.einsum('...ij,...j,...kj', u, new_s, u)
else:
new_a = numpy.einsum('...ij,...j,...jk', u, new_s, vh)
if bias is not None:
new_a += bias
if dtype.kind in 'iu':
new_a = numpy.rint(new_a)
return new_a.astype(dtype)
def setUp(self):
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-3}
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-3, 'eps': 1e-4}
self.check_double_backward_options = {
'atol': 1e-3, 'rtol': 1e-3, 'eps': 1e-4}
def generate_inputs(self):
a = self.random_matrix(self.shape, self.dtype, scale=(1e-2, 2.0),
sym=True)
return a,
def forward_expected(self, inputs):
a, = inputs
a = 0.5 * (a + a.T)
y_expect = numpy.linalg.cholesky(a)
return y_expect.astype(self.dtype),
def forward(self, inputs, device):
a, = inputs
a = 0.5 * (a + a.T)
y = F.cholesky(a)
return y,
testing.run_module(__name__, __file__)
| 2,420
| 28.52439
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_fix.py
|
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestFix(testing.FunctionTestCase):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
while True:
x = numpy.random.uniform(
-10.0, 10.0, self.shape).astype(self.dtype)
if (numpy.abs(x - numpy.round(x)) > 1e-2).all():
return x,
def forward(self, inputs, device):
x, = inputs
y = functions.fix(x)
return y,
def forward_expected(self, inputs):
x, = inputs
expected = numpy.fix(x)
expected = numpy.asarray(expected)
return expected,
testing.run_module(__name__, __file__)
| 1,217
| 21.555556
| 60
|
py
|