| repo (string, 2–99 chars) | file (string, 13–225 chars) | code (string, 0–18.3M chars) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (string, 1 value) |
|---|---|---|---|---|---|---|
| chainer | chainer-master/tests/chainer_tests/optimizers_tests/__init__.py |  | 0 | 0 | 0 | py |
| chainer | chainer-master/tests/chainer_tests/optimizers_tests/test_optimizers_by_linear_model.py |
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer.backends import intel64
import chainer.functions as F
from chainer import initializers
import chainer.links as L
from chainer import optimizers
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
from chainer.testing import condition
# TODO(niboshi): This is a temporary workaround because skipping tests
# does not work together with testing.condition.
# See: https://github.com/chainer/chainer/issues/4272
class Skipped(Exception):
pass
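# ``LinearModel`` trains a two-class linear classifier on synthetic data
# whose labels are produced by a fixed random hyperplane (``self.w``,
# ``self.b``); an optimizer passes when the trained model classifies a
# fresh batch with accuracy above 0.9 (see ``OptimizerTestBase`` below).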
class LinearModel(object):
UNIT_NUM = 10
BATCH_SIZE = 32
EPOCH = 100
def __init__(self, optimizer, dtype, use_placeholder):
self.dtype = dtype
weight = initializers.HeNormal(1 / numpy.sqrt(2), dtype)
bias = initializers.Constant(0, dtype)
in_size = None if use_placeholder else self.UNIT_NUM
self.model = L.Linear(in_size, 2, initialW=weight, initial_bias=bias)
self.optimizer = optimizer
# true parameters
self.w = numpy.random.uniform(
-1, 1, (self.UNIT_NUM, 1)).astype(dtype)
self.b = numpy.random.uniform(-1, 1, (1, )).astype(dtype)
def _train_linear_classifier(self, model, optimizer, backend_config):
def _make_label(x):
a = (numpy.dot(x, self.w) + self.b).reshape((self.BATCH_SIZE, ))
t = numpy.empty_like(a).astype(numpy.int32)
t[a >= 0] = 0
t[a < 0] = 1
return t
def _make_dataset(batch_size, unit_num, dtype):
x_data = numpy.random.uniform(
-1, 1, (batch_size, unit_num)).astype(dtype)
t_data = _make_label(x_data)
x_data = backend_config.get_array(x_data)
t_data = backend_config.get_array(t_data)
x = chainer.Variable(x_data)
t = chainer.Variable(t_data, requires_grad=False)
return x, t
for _ in six.moves.range(self.EPOCH):
x, t = _make_dataset(self.BATCH_SIZE, self.UNIT_NUM, self.dtype)
model.cleargrads()
y = model(x)
loss = F.softmax_cross_entropy(y, t)
loss.backward()
optimizer.update()
x_test, t_test = _make_dataset(
self.BATCH_SIZE, self.UNIT_NUM, self.dtype)
y_test = model(x_test)
return F.accuracy(y_test, t_test)
def accuracy(self, backend_config, loss_scaling=False):
model = self.model
optimizer = self.optimizer
optimizer.setup(model)
_optimizer_loss_scaling(optimizer, loss_scaling)
if backend_config.use_ideep == 'always':
if not intel64.is_ideep_available():
# TODO(niboshi): This is temporary workaround.
# See the comment on Skipped.
raise Skipped('ideep is required to run this test.')
model.to_device(backend_config.device)
with chainer.using_device(backend_config.device):
return self._train_linear_classifier(
model, optimizer, backend_config)
def _optimizer_loss_scaling(optimizer, loss_scaling):
if loss_scaling not in [False, 'dynamic', 'static']:
msg = 'loss_scaling must be False, \'dynamic\' or \'static\'.'
raise ValueError(msg)
if loss_scaling == 'dynamic':
optimizer.loss_scaling()
elif loss_scaling == 'static':
optimizer.loss_scaling(scale=10.0)
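# Loss scaling multiplies the loss, and hence every gradient, to keep
# float16 gradients from underflowing. 'static' uses the fixed scale
# passed above, while 'dynamic' is expected to let the optimizer adjust
# the scale automatically when overflow is detected.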
_inject_backend_tests = (
backend.inject_backend_tests(
['test_linear_model'],
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ [{'use_cuda': True}]
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
]))
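# ``inject_backend_tests`` runs each decorated ``test_linear_model`` once
# per entry above, passing a ``backend_config`` that describes the target
# device: plain NumPy, iDeep-accelerated CPU, CUDA, or ChainerX on its
# native and cuda backends.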
class OptimizerTestBase(object):
loss_scaling = False
def create(self):
raise NotImplementedError()
def setUp(self):
self.model = LinearModel(self.create(), self.dtype,
self.use_placeholder)
def skip_loss_scaling(self, backend_config=None):
if self.loss_scaling is not False:
if self.dtype != numpy.float16:
                msg = 'loss_scaling is tested only when dtype is float16.'
return True, msg
if backend_config is not None and not backend_config.use_cuda:
                msg = 'loss_scaling is tested only when use_cuda is True.'
return True, msg
return False, None
@condition.retry(10)
def test_linear_model(self, backend_config):
skip, msg = self.skip_loss_scaling(backend_config)
if skip:
            raise unittest.SkipTest(msg)
try:
accuracy = self.model.accuracy(backend_config,
self.loss_scaling)
except Skipped:
# TODO(niboshi): This is temporary workaround.
# See the comment on Skipped.
return
with backend_config:
assert accuracy.data > 0.9
@attr.multi_gpu(2)
@condition.retry(10)
def test_linear_model_multi_gpu(self):
backend_config = backend.BackendConfig(
{'use_cuda': True, 'cuda_device': 1})
skip, msg = self.skip_loss_scaling(backend_config)
if skip:
            raise unittest.SkipTest(msg)
with cuda.Device(0):
accuracy = self.model.accuracy(backend_config)
self.assertGreater(cuda.to_cpu(accuracy.data), 0.9)
@attr.multi_gpu(2)
def test_model_setup_multi_gpu(self):
skip, msg = self.skip_loss_scaling()
if skip:
            raise unittest.SkipTest(msg)
with cuda.Device(0):
model = self.model.model
optimizer = self.model.optimizer
with testing.assert_warns(DeprecationWarning):
model.to_gpu(1)
optimizer.setup(model)
_optimizer_loss_scaling(optimizer, self.loss_scaling)
# Initialize the optimizer state by running an update
for param in optimizer.target.params(False):
param.cleargrad()
param.update()
for v in six.itervalues(param.update_rule.state):
self.assertEqual(int(param.data.device), int(v.device))
def test_initialize(self):
skip, msg = self.skip_loss_scaling()
if skip:
            raise unittest.SkipTest(msg)
model = self.model.model
assert isinstance(model, chainer.Link)
optimizer = self.create()
optimizer.setup(model)
_optimizer_loss_scaling(optimizer, self.loss_scaling)
msg = 'optimization target must be a link'
with six.assertRaisesRegex(self, TypeError, msg):
optimizer.setup('xxx')
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_placeholder': [False, True],
'loss_scaling': [False, 'static', 'dynamic'],
}))
@_inject_backend_tests
class TestAdaDelta(OptimizerTestBase, unittest.TestCase):
def create(self):
return optimizers.AdaDelta(eps=1e-5)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_placeholder': [False, True],
'loss_scaling': [False, 'static', 'dynamic'],
}))
@_inject_backend_tests
class TestAdaGrad(OptimizerTestBase, unittest.TestCase):
def create(self):
return optimizers.AdaGrad(0.1)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_placeholder': [False, True],
'loss_scaling': [False, 'static', 'dynamic'],
'amsgrad': [False, True],
'adabound': [False, True],
}))
@_inject_backend_tests
class TestAdam(OptimizerTestBase, unittest.TestCase):
def create(self):
kwargs = {
'amsgrad': self.amsgrad,
'adabound': self.adabound,
}
return optimizers.Adam(0.05, **kwargs)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_placeholder': [False, True],
'loss_scaling': [False, 'static', 'dynamic'],
}))
@_inject_backend_tests
class TestCorrectedMomentumSGD(OptimizerTestBase, unittest.TestCase):
def create(self):
return optimizers.CorrectedMomentumSGD(0.1)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_placeholder': [False, True],
'loss_scaling': [False, 'static', 'dynamic'],
}))
@_inject_backend_tests
class TestMomentumSGD(OptimizerTestBase, unittest.TestCase):
def create(self):
return optimizers.MomentumSGD(0.1)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_placeholder': [False, True],
'loss_scaling': [False, 'static', 'dynamic'],
}))
@_inject_backend_tests
class TestMSVAG(OptimizerTestBase, unittest.TestCase):
def create(self):
return optimizers.MSVAG(0.1)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_placeholder': [False, True],
'loss_scaling': [False, 'static', 'dynamic'],
}))
@_inject_backend_tests
class TestNesterovAG(OptimizerTestBase, unittest.TestCase):
def create(self):
return optimizers.NesterovAG(0.1)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_placeholder': [False, True],
'eps_inside_sqrt': [False, True],
'loss_scaling': [False, 'static', 'dynamic'],
}))
@_inject_backend_tests
class TestRMSprop(OptimizerTestBase, unittest.TestCase):
def create(self):
kwargs = {'eps_inside_sqrt': self.eps_inside_sqrt}
if self.dtype == numpy.float16:
kwargs['eps'] = 1e-6
return optimizers.RMSprop(0.1, **kwargs)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_placeholder': [False, True],
'loss_scaling': [False, 'static', 'dynamic'],
}))
@_inject_backend_tests
class TestRMSpropGraves(OptimizerTestBase, unittest.TestCase):
def create(self):
return optimizers.RMSpropGraves(0.1)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_placeholder': [False, True],
'loss_scaling': [False, 'static', 'dynamic'],
}))
@_inject_backend_tests
class TestSGD(OptimizerTestBase, unittest.TestCase):
def create(self):
return optimizers.SGD(0.1)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_placeholder': [False, True],
'loss_scaling': [False, 'static', 'dynamic'],
}))
@_inject_backend_tests
class TestSMORMS3(OptimizerTestBase, unittest.TestCase):
def create(self):
return optimizers.SMORMS3(0.1)
testing.run_module(__name__, __file__)
| 11,047 | 30.838617 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/optimizers_tests/test_optimizers.py |
import pickle
import unittest
import numpy as np
import six
import chainer
from chainer import functions as F
from chainer import optimizers
from chainer import testing
_all_optimizers = [
'AdaDelta',
'AdaGrad',
'Adam',
'AdamW',
'AMSGrad',
'AdaBound',
'AMSBound',
'CorrectedMomentumSGD',
'MomentumSGD',
'MSVAG',
'NesterovAG',
'RMSprop',
'RMSpropGraves',
'SGD',
'SMORMS3',
]
_parameterize_optimizers = testing.parameterize(*testing.product({
'optimizer_impl': [getattr(chainer.optimizers, o) for o in _all_optimizers]
}))
class SimpleChain(chainer.Chain):
def __init__(self, shape=()):
super(SimpleChain, self).__init__()
w_np = np.asarray(np.random.randn(*shape)).astype(np.float32)
with self.init_scope():
self.w = chainer.Parameter(w_np, name='w')
def __call__(self, x):
return F.sum((x - self.w) ** 2)
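# The loss ``sum((x - w) ** 2)`` is minimized at ``w == x``, which gives
# every optimizer a well-defined gradient to follow; the smoke tests
# below only check that an update step runs on each backend.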
class TestAllOptimizersCoverage(unittest.TestCase):
# Checks _all_optimizers covers all the built-in optimizers.
def test_all_optimizers_coverage(self):
module = chainer.optimizers
module_optimizers = []
for name in dir(module):
obj = getattr(module, name)
if (isinstance(obj, type) and issubclass(obj, chainer.Optimizer)):
module_optimizers.append(name)
assert sorted(_all_optimizers) == sorted(module_optimizers)
@testing.backend.inject_backend_tests(
None,
[
# CPU
{},
# Intel
{'use_ideep': True},
# CUDA
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
@testing.parameterize(*(
# Optimizers constructed with default arguments
[
{
'optimizer': o,
'kwargs': {}
}
for o in _all_optimizers]
    # https://github.com/chainer/chainer/issues/7424
+ [
{
'optimizer': 'Adam',
'kwargs': {'weight_decay_rate': 0.5},
}]
))
@testing.parameterize(*testing.product(
{'shape': [(2, 3), (), (1, 0, 2)]}
))
class TestOptimizer(unittest.TestCase):
def test_optimizer(self, backend_config):
device = backend_config.device
target = SimpleChain(self.shape)
target.to_device(device)
optimizer_cls = getattr(chainer.optimizers, self.optimizer)
optimizer = optimizer_cls(**self.kwargs)
optimizer.setup(target)
x_np = np.asarray(np.random.randn(*self.shape)).astype(np.float32)
x = chainer.Variable(device.send(x_np))
# Just ensures no error occurs. No numerical check is performed.
optimizer.update(target, x)
@_parameterize_optimizers
class TestOptimizerHyperparameter(unittest.TestCase):
def setUp(self):
self.target = chainer.Link()
with self.target.init_scope():
self.target.w = chainer.Parameter()
def create(self, *args, **kwargs):
self.optimizer = self.optimizer_impl(*args, **kwargs)
self.optimizer.setup(self.target)
def get_hyperparam(self, name):
return getattr(self.target.w.update_rule.hyperparam, name)
def test_hyperparams(self):
        # TODO(niboshi): The following optimizers do not pass this test
        # because their ``__init__`` methods do not accept some
        # hyperparameters. The test should be fixed.
if self.optimizer_impl in (
chainer.optimizers.AdamW,
chainer.optimizers.AMSGrad,
chainer.optimizers.AdaBound,
chainer.optimizers.AMSBound,
):
raise unittest.SkipTest(
'The optimizer is incompatible with this test')
self.create()
default = self.optimizer.hyperparam.get_dict()
for name, default_value in six.iteritems(default):
self.create()
self.assertEqual(self.get_hyperparam(name), default_value)
new_value = default_value + 0.1
self.create(**{name: new_value})
self.assertEqual(self.get_hyperparam(name), new_value)
class WeightSaveHook(object):
name = 'WeightSaveHook'
call_for_each_param = True
def __init__(self):
self.value = None
def __call__(self, rule, param):
p, g = param.data, param.grad
if p is None or g is None:
return
self.value = np.copy(p)
@_parameterize_optimizers
class TestOptimizerHooks(unittest.TestCase):
def setUp(self):
self.target = SimpleChain()
def create(self, *args, **kwargs):
self.optimizer = self.optimizer_impl(*args, **kwargs)
self.optimizer.setup(self.target)
def get_hyperparam(self, name):
return getattr(self.target.w.update_rule.hyperparam, name)
def test_hooks(self):
w_pre = np.copy(self.target.w.data)
h_pre = WeightSaveHook()
h_post = WeightSaveHook()
self.create()
self.optimizer.add_hook(h_pre, timing='pre')
self.optimizer.add_hook(h_post, name='WeightSaveHookPost',
timing='post')
x = chainer.Variable(np.array(5., dtype=np.float32))
self.optimizer.update(self.target, x)
w_post = np.copy(self.target.w.data)
self.assertEqual(w_pre, h_pre.value)
self.assertEqual(w_post, h_post.value)
self.assertNotEqual(h_pre.value, h_post.value)
def test_hooks_auto(self):
w_pre = np.copy(self.target.w.data)
h_pre = WeightSaveHook()
h_pre.timing = 'pre'
h_post = WeightSaveHook()
h_post.timing = 'post'
self.create()
self.optimizer.add_hook(h_pre, timing='auto')
self.optimizer.add_hook(h_post, name='WeightSaveHookPost',
timing='auto')
x = chainer.Variable(np.array(5., dtype=np.float32))
self.optimizer.update(self.target, x)
w_post = np.copy(self.target.w.data)
self.assertEqual(w_pre, h_pre.value)
self.assertEqual(w_post, h_post.value)
self.assertNotEqual(h_pre.value, h_post.value)
@_parameterize_optimizers
class TestOptimizerPickable(unittest.TestCase):
def setUp(self):
self.target = SimpleChain()
def create(self, *args, **kwargs):
self.optimizer = self.optimizer_impl(*args, **kwargs)
self.optimizer.setup(self.target)
def get_hyperparam(self, name):
return getattr(self.target.w.update_rule.hyperparam, name)
def test_new_pickle(self):
self.create()
pickled_opt = pickle.dumps(self.optimizer)
x = chainer.Variable(np.array(5., dtype=np.float32))
self.optimizer.update(self.target, x)
w_post = np.copy(self.target.w.data)
# Pickle has saved a copy of the target
opt = pickle.loads(pickled_opt)
opt.update(opt.target, x)
pickled_w_post = np.copy(opt.target.w.data)
self.assertEqual(w_post, pickled_w_post)
def test_updated_pickle(self):
self.create()
x = chainer.Variable(np.array(5., dtype=np.float32))
self.optimizer.update(self.target, x)
pickled_opt = pickle.dumps(self.optimizer)
self.optimizer.update(self.target, x)
w_post = np.copy(self.target.w.data)
# Pickle has saved a copy of the target
opt = pickle.loads(pickled_opt)
opt.update(opt.target, x)
pickled_w_post = np.copy(opt.target.w.data)
self.assertEqual(w_post, pickled_w_post)
@_parameterize_optimizers
class TestOptimizerLossScaling(unittest.TestCase):
def setUp(self):
self.target = SimpleChain()
def create(self, *args, **kwargs):
self.optimizer = self.optimizer_impl(*args, **kwargs)
self.optimizer.setup(self.target)
def test_invalid_configs(self):
self.create()
with self.assertRaises(ValueError):
self.optimizer.loss_scaling(interval=0)
with self.assertRaises(ValueError):
self.optimizer.loss_scaling(scale=-1)
@testing.backend.inject_backend_tests(
None,
[
# CPU
{},
# Intel
{'use_ideep': True},
# CUDA
{'use_cuda': True, 'cuda_device': 0},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
]
)
class TestAdamW(unittest.TestCase):
def test_adam_w(self, backend_config):
xp = backend_config.xp
device = backend_config.device
link = chainer.Link(x=(1,))
link.to_device(device)
opt = optimizers.Adam(eta=0.5, weight_decay_rate=0.1)
opt.setup(link)
link.x.data.fill(1)
link.x.grad = device.send(xp.ones_like(link.x.data))
opt.update()
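        # With x = 1, grad = 1 and Adam's default alpha (0.001), the first
        # AdamW step is approximately
        #   x - eta * (alpha * 1 + weight_decay_rate * x)
        #   = 1 - 0.5 * (0.001 + 0.1) = 0.9495
        # (the eps term only perturbs this below the tolerance).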
# compare against the value computed with v5 impl
testing.assert_allclose(link.x.data, np.array([0.9495]),
atol=1e-7, rtol=1e-7)
@testing.backend.inject_backend_tests(
None,
[
# CPU
{},
# Intel
{'use_ideep': True},
# CUDA
{'use_cuda': True, 'cuda_device': 0},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
]
)
class TestAMSGrad(unittest.TestCase):
def test_amsgrad(self, backend_config):
device = backend_config.device
link = chainer.Link(x=(4,))
x = link.x
x.data.fill(0)
link.to_device(device)
opt = optimizers.Adam(alpha=0.01, beta2=0.7, amsgrad=True)
opt.setup(link)
x.grad = device.send(np.array([1, -1, 10, -10], np.float32))
opt.update()
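        # With beta2 = 0.7 the first second-moment estimate is
        # v = (1 - beta2) * grad ** 2 = 0.3 * [1, 1, 100, 100], and the
        # bias-corrected first step reduces to -alpha * sign(grad), so
        # every weight moves by 0.01 opposite to its gradient sign.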
testing.assert_allclose(
x.update_rule.state['v'],
[0.3, 0.3, 30, 30],
atol=1e-7, rtol=1e-7)
testing.assert_allclose(
x.data,
[-0.01, 0.01, -0.01, 0.01],
atol=1e-7, rtol=1e-7)
x.grad = device.send(np.array([-10, -10, -1, -1], np.float32))
opt.update()
testing.assert_allclose(
x.update_rule.state['v'],
[30.21, 30.21, 21.3, 21.3],
atol=1e-7, rtol=1e-7)
testing.assert_allclose(
x.update_rule.state['vhat'],
[30.21, 30.21, 30, 30],
atol=1e-7, rtol=1e-7)
testing.assert_allclose(
x.data,
# result with NumPy
[-0.00377703, 0.01745388, -0.01548985, 0.01686232],
atol=1e-7, rtol=1e-7)
testing.run_module(__name__, __file__)
| 10,781 | 28.140541 | 79 | py |
| chainer | chainer-master/tests/chainer_tests/links_tests/__init__.py |  | 0 | 0 | 0 | py |
| chainer | chainer-master/tests/chainer_tests/links_tests/caffe_tests/test_caffe_function.py |
import os
import tempfile
import unittest
import warnings
import mock
import numpy
import six
import chainer
from chainer import links
from chainer import testing
# The caffe submodule relies on protobuf which under protobuf==3.7.0 and
# Python 3.7 raises a DeprecationWarning from the collections module.
with warnings.catch_warnings():
warnings.filterwarnings(action='ignore', category=DeprecationWarning)
from chainer.links import caffe
from chainer.links.caffe.caffe_function import caffe_pb
def _iter_init(param, data):
if isinstance(data, list):
for d in data:
if hasattr(param, 'add'):
param.add()
if isinstance(d, (list, dict)):
_iter_init(param[-1], d)
else:
param[-1] = d
else:
param.append(d)
elif isinstance(data, dict):
for k, d in data.items():
if isinstance(d, (list, dict)):
_iter_init(getattr(param, k), d)
else:
setattr(param, k, d)
    else:
        # A bare scalar cannot be assigned without a field name;
        # ``_make_param`` always passes a dict at the top level, so
        # anything else indicates a malformed spec.
        raise TypeError('scalar data must appear inside a dict or list')
def _make_param(data):
param = caffe_pb.NetParameter()
_iter_init(param, data)
return param
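# ``_make_param`` lets each test describe a Caffe network as plain dicts
# and lists; e.g. a minimal spec such as
# ``_make_param({'layer': [{'name': 'l1', 'type': 'ReLU'}]})`` yields a
# ``NetParameter`` protobuf that ``CaffeFunction`` can deserialize.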
class TestCaffeFunctionBase(unittest.TestCase):
def setUp(self):
param = _make_param(self.data)
        # On Windows the file cannot be opened by name a second time while
        # the NamedTemporaryFile handle is still open, so ``delete=False``
        # is used and the file is removed manually in tearDown.
with tempfile.NamedTemporaryFile(delete=False) as f:
self.temp_file_path = f.name
f.write(param.SerializeToString())
def tearDown(self):
os.remove(self.temp_file_path)
def init_func(self):
self.func = caffe.CaffeFunction(self.temp_file_path)
class TestCaffeFunctionBaseMock(TestCaffeFunctionBase):
def setUp(self):
outs = []
for shape in self.out_shapes:
out_data = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
outs.append(chainer.Variable(out_data))
self.outputs = tuple(outs)
ret_value = outs[0] if len(outs) == 1 else tuple(outs)
m = mock.MagicMock(name=self.func_name, return_value=ret_value)
self.patch = mock.patch(self.func_name, m)
self.mock = self.patch.start()
super(TestCaffeFunctionBaseMock, self).setUp()
def tearDown(self):
super(TestCaffeFunctionBaseMock, self).tearDown()
self.patch.stop()
def call(self, inputs, outputs):
invars = []
for shape in self.in_shapes:
data = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
invars.append(chainer.Variable(data))
self.inputs = invars
with chainer.using_config('train', False):
out = self.func(inputs=dict(zip(inputs, invars)),
outputs=outputs)
self.assertEqual(len(out), len(self.outputs))
for actual, expect in zip(out, self.outputs):
self.assertIs(actual, expect)
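# Each layer test below patches the chainer function or link named by
# ``func_name`` with a mock, loads a one-layer network through
# ``CaffeFunction``, and asserts that the Caffe layer was translated into
# the expected call with the expected converted arguments.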
class TestConcat(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.concat'
in_shapes = [(3, 2, 3), (3, 2, 3)]
out_shapes = [(3, 2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Concat',
'bottom': ['x', 'y'],
'top': ['z'],
'concat_param': {
'axis': 2
}
}
]
}
def test_concat(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x', 'y'], ['z'])
self.mock.assert_called_once_with(
(self.inputs[0], self.inputs[1]), axis=2)
class TestConvolution(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.Convolution2D.forward'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Convolution',
'bottom': ['x'],
'top': ['y'],
'convolution_param': {
'kernel_size': [2],
'stride': [3],
'pad': [4],
'group': 3,
'bias_term': True,
},
'blobs': [
{
'num': 6,
'channels': 4,
'data': list(range(96))
},
{
'data': list(range(6))
}
]
}
]
}
def test_convolution(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
f = self.func.l1
self.assertIsInstance(f, links.Convolution2D)
for i in range(3): # 3 == group
in_slice = slice(i * 4, (i + 1) * 4) # 4 == channels
out_slice = slice(i * 2, (i + 1) * 2) # 2 == num / group
w = f.W.data[out_slice, in_slice]
numpy.testing.assert_array_equal(
w.flatten(), range(i * 32, (i + 1) * 32))
numpy.testing.assert_array_equal(
f.b.data, range(6))
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestDeconvolution(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.Deconvolution2D.__call__'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Deconvolution',
'bottom': ['x'],
'top': ['y'],
'convolution_param': {
'kernel_size': [2],
'stride': [3],
'pad': [4],
'group': 3,
'bias_term': True,
},
'blobs': [
{
'num': 6,
'channels': 4,
'data': list(range(96))
},
{
'data': list(range(12))
}
]
}
]
}
def test_deconvolution(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
f = self.func.l1
self.assertIsInstance(f, links.Deconvolution2D)
for i in range(3): # 3 == group
in_slice = slice(i * 4, (i + 1) * 4) # 4 == channels
out_slice = slice(i * 2, (i + 1) * 2) # 2 == num / group
w = f.W.data[out_slice, in_slice]
numpy.testing.assert_array_equal(
w.flatten(), range(i * 32, (i + 1) * 32))
numpy.testing.assert_array_equal(
f.b.data, range(12))
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestData(TestCaffeFunctionBase):
data = {
'layer': [
{
'name': 'l1',
'type': 'Data',
}
]
}
def test_data(self):
self.init_func()
self.assertEqual(len(self.func.layers), 0)
class TestDropout(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.dropout'
in_shapes = [(3, 2, 3)]
out_shapes = [(3, 2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Dropout',
'bottom': ['x'],
'top': ['y'],
'dropout_param': {
'dropout_ratio': 0.25
}
}
]
}
def test_dropout(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(
self.inputs[0], ratio=0.25)
class TestInnerProduct(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.Linear.forward'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'InnerProduct',
'bottom': ['x'],
'top': ['y'],
'inner_product_param': {
'bias_term': True,
'axis': 1
},
'blobs': [
# weight
{
'shape': {
'dim': [2, 3]
},
'data': list(range(6)),
},
# bias
{
'shape': {
'dim': [2]
},
'data': list(range(2)),
}
]
}
]
}
def test_linear(self):
self.init_func()
f = self.func.l1
self.assertIsInstance(f, links.Linear)
numpy.testing.assert_array_equal(
f.W.data, numpy.array([[0, 1, 2], [3, 4, 5]], dtype=numpy.float32))
numpy.testing.assert_array_equal(
f.b.data, numpy.array([0, 1], dtype=numpy.float32))
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestInnerProductDim4(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.Linear.forward'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'InnerProduct',
'bottom': ['x'],
'top': ['y'],
'inner_product_param': {
'bias_term': False,
'axis': 1
},
'blobs': [
# weight
{
'shape': {
'dim': [4, 5, 2, 3]
},
                        # when ``ndim`` == 4, only shape[2] x shape[3]
                        # elements are stored in ``data``
'data': list(range(6)),
}
]
}
]
}
def test_linear(self):
self.init_func()
f = self.func.l1
self.assertIsInstance(f, links.Linear)
numpy.testing.assert_array_equal(
f.W.data, numpy.array([[0, 1, 2], [3, 4, 5]], dtype=numpy.float32))
self.assertIsNone(f.b)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestInnerProductInvalidDim(TestCaffeFunctionBase):
data = {
'layer': [
{
'name': 'l1',
'type': 'InnerProduct',
'blobs': [
{
'shape': {
'dim': [2, 3, 4, 5, 6] # 5-dim is not supported
},
},
]
}
]
}
def test_linear(self):
with self.assertRaises(RuntimeError):
self.init_func()
class TestInnerProductNonDefaultAxis(TestCaffeFunctionBase):
data = {
'layer': [
{
'name': 'l1',
'type': 'InnerProduct',
'inner_product_param': {
'axis': 0 # non-default axis
}
}
]
}
def test_linear(self):
with self.assertRaises(RuntimeError):
self.init_func()
class TestLRN(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.local_response_normalization'
in_shapes = [(3, 2, 3)]
out_shapes = [(3, 2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'LRN',
'bottom': ['x'],
'top': ['y'],
'lrn_param': {
'local_size': 4,
'alpha': 0.5,
'beta': 0.25,
'norm_region': 0, # ACROSS_CHANNELS
'k': 0.5
},
}
]
}
def test_lrn(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(
self.inputs[0], n=4, k=0.5, alpha=0.5 / 4, beta=0.25)
class TestLRNWithinChannel(TestCaffeFunctionBase):
data = {
'layer': [
{
'name': 'l1',
'type': 'LRN',
'lrn_param': {
'norm_region': 1, # WITHIN_CHANNELS is not supported
},
}
]
}
def test_lrn(self):
with self.assertRaises(RuntimeError):
self.init_func()
class TestMaxPooling(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.max_pooling_2d'
in_shapes = [(3, 2, 3)]
out_shapes = [(3, 2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Pooling',
'bottom': ['x'],
'top': ['y'],
'pooling_param': {
'pool': 0, # MAX
'kernel_h': 2,
'kernel_w': 3,
'stride_h': 4,
'stride_w': 5,
'pad_h': 6,
'pad_w': 7,
}
}
]
}
def test_max_pooling(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(
self.inputs[0], (2, 3), stride=(4, 5), pad=(6, 7))
class TestAveragePooling(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.average_pooling_2d'
in_shapes = [(3, 2, 3)]
out_shapes = [(3, 2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Pooling',
'bottom': ['x'],
'top': ['y'],
'pooling_param': {
'pool': 1, # AVE
'kernel_size': 2,
'stride': 4,
'pad': 6,
}
}
]
}
    def test_average_pooling(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(
self.inputs[0], 2, stride=4, pad=6)
class TestGlobalPooling(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.max_pooling_2d'
in_shapes = [(3, 2, 3, 4)]
out_shapes = [(3, 2, 3, 4)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Pooling',
'bottom': ['x'],
'top': ['y'],
'pooling_param': {
'pool': 0, # MAX
'global_pooling': True,
}
}
]
}
def test_global_pooling(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(
self.inputs[0], (3, 4), stride=1, pad=0)
class TestStochasticPooling(TestCaffeFunctionBase):
data = {
'layer': [
{
'name': 'l1',
'type': 'Pooling',
'pooling_param': {
'pool': 2, # STOCHASTIC is not supported
}
}
]
}
def test_stochastic_pooling(self):
with self.assertRaises(RuntimeError):
self.init_func()
class TestReLU(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.relu'
in_shapes = [(3, 2, 3)]
out_shapes = [(3, 2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'ReLU',
'bottom': ['x'],
'top': ['y'],
'relu_param': {
'negative_slope': 0
}
}
]
}
    def test_relu(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestLeakyReLU(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.leaky_relu'
in_shapes = [(3, 2, 3)]
out_shapes = [(3, 2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'ReLU',
'bottom': ['x'],
'top': ['y'],
'relu_param': {
'negative_slope': 0.5
}
}
]
}
    def test_leaky_relu(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0], slope=0.5)
class TestReshape(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.reshape'
in_shapes = [(3, 2, 3)]
out_shapes = [(3, 6)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Reshape',
'bottom': ['x'],
'top': ['y'],
'reshape_param': {
'shape': {
'dim': [3, 6]
}
}
}
]
}
def test_reshape(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0], shape=[3, 6])
class TestBatchNorm(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.BatchNormalization.forward'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'BatchNorm',
'bottom': ['x'],
'top': ['y'],
'blobs': [
# For average mean.
{
'shape': {
'dim': [3],
},
'data': list(six.moves.range(3)),
},
# For average variance.
{
'shape': {
'dim': [3],
},
'data': list(six.moves.range(3)),
},
],
'batch_norm_param': {
'use_global_stats': False,
}
}
]
}
def test_batchnorm(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0], finetune=False)
class TestBatchNormUsingGlobalStats(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.BatchNormalization.forward'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'BatchNorm',
'bottom': ['x'],
'top': ['y'],
'blobs': [
# For average mean.
{
'shape': {
'dim': [3],
},
'data': list(six.moves.range(3)),
},
# For average variance.
{
'shape': {
'dim': [3],
},
'data': list(six.moves.range(3)),
},
],
'batch_norm_param': {
'use_global_stats': True,
}
}
]
}
def test_batchnorm(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0], finetune=False)
class TestEltwiseProd(TestCaffeFunctionBaseMock):
func_name = 'chainer.variable.Variable.__mul__'
in_shapes = [(2, 3), (2, 3), (2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Eltwise',
'bottom': ['x1', 'x2', 'x3'],
'top': ['y'],
'eltwise_param': {
'operation': 0, # PROD
},
}
]
}
def test_eltwise_prod(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x1', 'x2', 'x3'], ['y'])
self.mock.assert_has_calls([mock.call(self.inputs[1]),
mock.call(self.inputs[2])])
class TestEltwiseSum(TestCaffeFunctionBaseMock):
func_name = 'chainer.variable.Variable.__add__'
in_shapes = [(2, 3), (2, 3), (2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Eltwise',
'bottom': ['x1', 'x2', 'x3'],
'top': ['y'],
'eltwise_param': {
'operation': 1, # SUM
},
}
]
}
def test_eltwise_sum(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x1', 'x2', 'x3'], ['y'])
self.mock.assert_has_calls([mock.call(self.inputs[1]),
mock.call(self.inputs[2])])
class TestEltwiseSumCoeff(TestCaffeFunctionBaseMock):
func_name = 'chainer.variable.Variable.__add__'
in_shapes = [(2, 3), (2, 3), (2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Eltwise',
'bottom': ['x1', 'x2', 'x3'],
'top': ['y'],
'eltwise_param': {
'operation': 1, # SUM
'coeff': list(six.moves.range(3)),
},
}
]
}
def test_eltwise_sum(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x1', 'x2', 'x3'], ['y'])
self.assertEqual(self.mock.call_count, 2)
class TestEltwiseSumInvalidCoeff(TestCaffeFunctionBaseMock):
func_name = 'chainer.variable.Variable.__add__'
in_shapes = [(2, 3), (2, 3), (2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Eltwise',
'bottom': ['x1', 'x2', 'x3'],
'top': ['y'],
'eltwise_param': {
'operation': 1, # SUM
# not same as number of bottoms
'coeff': list(six.moves.range(2)),
},
}
]
}
def test_eltwise_sum(self):
self.init_func()
with self.assertRaises(AssertionError):
self.call(['x1', 'x2', 'x3'], ['y'])
class TestEltwiseMax(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.maximum'
in_shapes = [(2, 3), (2, 3), (2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Eltwise',
'bottom': ['x1', 'x2', 'x3'],
'top': ['y'],
'eltwise_param': {
'operation': 2, # MAX
},
}
]
}
def test_eltwise_max(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x1', 'x2', 'x3'], ['y'])
self.mock.assert_has_calls(
[mock.call(self.inputs[0], self.inputs[1]),
mock.call(self.outputs[0], self.inputs[2])])
class TestScale(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.Scale.forward'
in_shapes = [(2, 3), (2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Scale',
'bottom': ['x', 'y'],
'top': ['z'],
'scale_param': {
'axis': 0,
}
}
]
}
def test_scale(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x', 'y'], ['z'])
self.mock.assert_called_once_with(self.inputs[0], self.inputs[1])
class TestScaleOneBottom(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.Scale.forward'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Scale',
'bottom': ['x'],
'top': ['y'],
'blobs': [
{
'shape': {
'dim': [2, 3],
},
'data': list(six.moves.range(6)),
}
],
'scale_param': {
'axis': 0,
}
}
]
}
def test_scale_one_bottom(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestScaleWithBias(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.Scale.forward'
in_shapes = [(2, 3), (2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Scale',
'bottom': ['x', 'y'],
'top': ['z'],
'blobs': [
{
'shape': {
'dim': [2, 3],
},
'data': list(six.moves.range(6)),
}
],
'scale_param': {
'axis': 0,
'bias_term': True,
}
}
]
}
def test_scale_with_bias(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.assertTrue(hasattr(self.func.l1, 'bias'))
self.call(['x', 'y'], ['z'])
self.mock.assert_called_once_with(self.inputs[0], self.inputs[1])
class TestScaleOneBottomWithBias(TestCaffeFunctionBaseMock):
func_name = 'chainer.links.Scale.forward'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Scale',
'bottom': ['x'],
'top': ['y'],
'blobs': [
# For W parameter.
{
'shape': {
'dim': [2, 3],
},
'data': list(six.moves.range(6)),
},
# For bias.
{
'shape': {
'dim': [2, 3],
},
'data': list(six.moves.range(6)),
}
],
'scale_param': {
'axis': 0,
'bias_term': True,
}
}
]
}
def test_scale_one_bottom_with_bias(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.assertTrue(hasattr(self.func.l1, 'bias'))
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestSlice(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.split_axis'
in_shapes = [(3, 4, 3)]
out_shapes = [(3, 2, 3), (3, 2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Slice',
'bottom': ['x'],
'top': ['y1', 'y2'],
'slice_param': {
'axis': 1
}
}
]
}
def test_slice(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y1', 'y2'])
self.mock.assert_called_once_with(
self.inputs[0],
indices_or_sections=2,
axis=1
)
class TestSliceNoAxis(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.split_axis'
in_shapes = [(4, 6, 4)]
out_shapes = [(2, 6, 4), (2, 6, 4)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Slice',
'bottom': ['x'],
'top': ['y1', 'y2'],
'slice_param': {
'slice_dim': 0
}
}
]
}
def test_slice_no_axis(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y1', 'y2'])
self.mock.assert_called_once_with(
self.inputs[0],
indices_or_sections=2,
axis=0
)
class TestSliceNoAxisNoSliceDim(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.split_axis'
in_shapes = [(4, 6, 4)]
out_shapes = [(4, 3, 4), (4, 3, 4)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Slice',
'bottom': ['x'],
'top': ['y1', 'y2'],
}
]
}
def test_slice_no_axis_no_slice_dim(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y1', 'y2'])
self.mock.assert_called_once_with(
self.inputs[0],
indices_or_sections=2,
axis=1
)
class TestSliceSlicePoint(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.split_axis'
in_shapes = [(4, 8, 6)]
out_shapes = [(4, 3, 6), (4, 2, 6), (4, 3, 6)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Slice',
'bottom': ['x'],
'top': ['y1', 'y2', 'y3'],
'slice_param': {
'axis': 1,
'slice_point': [3, 5]
}
}
]
}
def test_slice_slice_point(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y1', 'y2', 'y3'])
self.mock.assert_called_once_with(
self.inputs[0],
indices_or_sections=[3, 5],
axis=1
)
class TestSigmoid(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.sigmoid'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Sigmoid',
'bottom': ['x'],
'top': ['y'],
}
]
}
def test_sigmoid(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestSoftmax(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.softmax'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Softmax',
'bottom': ['x'],
'top': ['y'],
}
]
}
def test_softmax(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestSoftmaxCaffeEngine(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.softmax'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Softmax',
'softmax_param': {
'engine': 1, # CAFFE
},
'bottom': ['x'],
'top': ['y'],
}
]
}
def test_softmax_caffe_engine(self):
# TODO(beam2d): Check if the mock is called with
# chainer.config.use_cudnn == False
self.init_func()
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestSoftmaxcuDnnEngine(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.softmax'
in_shapes = [(2, 3)]
out_shapes = [(2, 3)]
data = {
'layer': [
{
'name': 'l1',
'type': 'Softmax',
'softmax_param': {
'engine': 2, # CUDNN
},
'bottom': ['x'],
'top': ['y'],
}
]
}
def test_softmax_cuDNN_engine(self):
# TODO(beam2d): Check if the mock is called with
# chainer.config.use_cudnn == True
self.init_func()
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestSoftmaxInvalidAxis(TestCaffeFunctionBase):
data = {
'layer': [
{
'name': 'l1',
'type': 'Softmax',
'softmax_param': {
'axis': 0, # invalid axis
}
}
]
}
def test_softmax_invalid_axis(self):
with self.assertRaises(RuntimeError):
self.init_func()
class TestSoftmaxWithLoss(TestCaffeFunctionBaseMock):
func_name = 'chainer.functions.softmax_cross_entropy'
in_shapes = [(3, 2, 3)]
out_shapes = [()]
data = {
'layer': [
{
'name': 'l1',
'type': 'SoftmaxWithLoss',
'bottom': ['x'],
'top': ['y'],
}
]
}
def test_softmax_with_loss(self):
self.init_func()
self.assertEqual(len(self.func.layers), 1)
self.call(['x'], ['y'])
self.mock.assert_called_once_with(self.inputs[0])
class TestSoftmaxWithLossInvalidAxis(TestCaffeFunctionBase):
data = {
'layer': [
{
'name': 'l1',
'type': 'SoftmaxWithLoss',
'softmax_param': {
'axis': 0, # invalid axis
}
}
]
}
def test_softmax_with_loss_invalid_axis(self):
with self.assertRaises(RuntimeError):
self.init_func()
class TestSplit(TestCaffeFunctionBase):
data = {
'layer': [
{
'name': 'l1',
'type': 'Split',
'bottom': ['x'],
'top': ['y', 'z'],
}
]
}
def test_split(self):
self.init_func()
self.assertEqual(self.func.split_map, {'y': 'x', 'z': 'x'})
testing.run_module(__name__, __file__)
| 35,121 | 25.151899 | 79 | py |
| chainer | chainer-master/tests/chainer_tests/links_tests/caffe_tests/__init__.py |  | 0 | 0 | 0 | py |
| chainer | chainer-master/tests/chainer_tests/links_tests/activation_tests/test_simplified_dropconnect.py |
import os
import tempfile
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer.serializers import npz
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
def gen_mask(ratio, shape):
return numpy.random.rand(*shape) >= ratio
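# Each mask entry keeps its weight with probability ``1 - ratio``; the
# expected outputs below rescale the kept weights by ``1 / (1 - ratio)``
# so that they match the dropconnect forward pass.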
@testing.parameterize(*testing.product({
'in_shape': [(3,), (3, 2, 2)],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_batchwise_mask': [True, False],
}))
class TestSimplifiedDropconnect(unittest.TestCase):
out_size = 2
ratio = 0.5
def setUp(self):
in_size = numpy.prod(self.in_shape)
self.link = links.SimplifiedDropconnect(
in_size, self.out_size,
initialW=chainer.initializers.Normal(1, self.W_dtype),
initial_bias=chainer.initializers.Normal(1, self.x_dtype))
self.link.cleargrads()
x_shape = (4,) + self.in_shape
self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.x_dtype)
self.gy = numpy.random.uniform(
-1, 1, (4, self.out_size)).astype(self.x_dtype)
W = self.link.W.data
b = self.link.b.data
if self.use_batchwise_mask:
mask_shape = (4,) + self.link.W.shape
else:
mask_shape = self.link.W.shape
self.mask = gen_mask(self.ratio, mask_shape)
W = (W * self.mask) * (1. / (1 - self.ratio))
x = self.x.reshape(4, -1)
# numpy 1.9 does not support matmul.
# So we use numpy.einsum instead of numpy.matmul.
if self.use_batchwise_mask:
self.y_expect = numpy.einsum('ijk,ikl->ijl',
W, x[:, :, None]).reshape(4, -1) + b
else:
self.y_expect = numpy.einsum('jk,ikl->ijl',
W, x[:, :, None]).reshape(4, -1) + b
self.check_forward_options = {}
self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
if self.x_dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_backward_options = {'atol': 1e-2, 'rtol': 5e-2}
elif self.W_dtype == numpy.float16:
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
def check_forward(self, x_data, mask):
x = chainer.Variable(x_data)
y = self.link(x, train=True, mask=mask,
use_batchwise_mask=self.use_batchwise_mask)
self.assertEqual(y.data.dtype, self.x_dtype)
testing.assert_allclose(self.y_expect, y.data,
**self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(self.x, self.mask)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.mask))
def link_wrapper(self, *data):
return self.link(x=data[0], train=True, mask=data[1],
use_batchwise_mask=self.use_batchwise_mask)
def check_backward(self, x_data, y_grad, mask):
gradient_check.check_backward(
self.link_wrapper, (x_data, mask), y_grad,
(self.link.W, self.link.b),
dtype='d', **self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy, self.mask)
@attr.gpu
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
cuda.to_gpu(self.mask))
class TestSimplifiedDropconnectParameterShapePlaceholder(unittest.TestCase):
in_size = 3
in_shape = (in_size,)
out_size = 2
in_size_or_none = None
ratio = 0.5
def setUp(self):
self.link = links.SimplifiedDropconnect(self.in_size_or_none,
self.out_size)
temp_x = numpy.random.uniform(-1, 1,
(4, self.in_size)).astype(numpy.float32)
self.link(chainer.Variable(temp_x))
W = self.link.W.data
W[...] = numpy.random.uniform(-1, 1, W.shape)
b = self.link.b.data
b[...] = numpy.random.uniform(-1, 1, b.shape)
self.link.cleargrads()
mask_shape = (4, self.out_size, self.in_size)
self.mask = gen_mask(self.ratio, mask_shape)
x_shape = (4,) + self.in_shape
self.x = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
self.gy = numpy.random.uniform(
-1, 1, (4, self.out_size)).astype(numpy.float32)
W = (W * self.mask) * (1. / (1 - self.ratio))
# numpy 1.9 does not support matmul.
# So we use numpy.einsum instead of numpy.matmul.
self.y_expect = numpy.einsum('ijk,ikl->ijl',
W, self.x[:, :, None]).reshape(4, -1) + b
def check_forward(self, x_data, mask):
x = chainer.Variable(x_data)
y = self.link(x, train=True, mask=mask, use_batchwise_mask=True)
self.assertEqual(y.data.dtype, numpy.float32)
testing.assert_allclose(self.y_expect, y.data)
def test_forward_cpu(self):
self.check_forward(self.x, self.mask)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.mask))
def link_wrapper(self, *data):
return self.link(x=data[0], train=True, mask=data[1],
use_batchwise_mask=True)
def check_backward(self, x_data, y_grad, mask):
gradient_check.check_backward(
self.link_wrapper, (x_data, mask), y_grad,
(self.link.W, self.link.b), dtype='d',
atol=1e-4, rtol=1e-3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy, self.mask)
@attr.gpu
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
cuda.to_gpu(self.mask))
def test_serialization(self):
lin1 = links.SimplifiedDropconnect(None, self.out_size)
x = chainer.Variable(self.x)
# Must call the link to initialize weights.
lin1(x)
w1 = lin1.W.data
fd, temp_file_path = tempfile.mkstemp()
os.close(fd)
npz.save_npz(temp_file_path, lin1)
lin2 = links.SimplifiedDropconnect(None, self.out_size)
npz.load_npz(temp_file_path, lin2)
w2 = lin2.W.data
self.assertEqual((w1 == w2).all(), True)
class TestSimplifiedDropconnectNotBatchwiseMask(unittest.TestCase):
in_shape = (3,)
out_size = 2
ratio = 0.5
def setUp(self):
in_size = numpy.prod(self.in_shape)
self.link = links.SimplifiedDropconnect(
in_size, self.out_size,
initialW=chainer.initializers.Normal(1, numpy.float32),
initial_bias=chainer.initializers.Normal(1, numpy.float32))
self.link.cleargrads()
x_shape = (4,) + self.in_shape
self.x = numpy.ones(x_shape).astype(numpy.float32)
self.W = self.link.W.data
self.b = self.link.b.data
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x, train=True, use_batchwise_mask=False)
# check mask equality here.
testing.assert_allclose(y.data[0], y.data[1])
testing.assert_allclose(y.data[0], y.data[2])
testing.assert_allclose(y.data[0], y.data[3])
mask = y.creator.mask
mask = cuda.to_cpu(mask)
y_expect = self.x.dot(self.W.T * mask.T) * (1. / (1 - self.ratio))
y_expect += self.b
testing.assert_allclose(y_expect, y.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
class TestInvalidSimplifiedDropconnect(unittest.TestCase):
def test_invalid_input_size(self):
link = links.SimplifiedDropconnect(3, 2)
x = numpy.random.uniform(-1, 1, (4, 1, 2)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
link(chainer.Variable(x))
def test_invalid_mask_size(self):
link = links.SimplifiedDropconnect(3, 2)
x = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
mask = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
link(chainer.Variable(x), use_batchwise_mask=True, mask=mask)
def test_invalid_mask_size2(self):
link = links.SimplifiedDropconnect(3, 2)
x = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
mask = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(numpy.float32)
with self.assertRaises(type_check.InvalidType):
link(chainer.Variable(x), use_batchwise_mask=False, mask=mask)
testing.run_module(__name__, __file__)
| 9,437 | 34.615094 | 78 | py |
| chainer | chainer-master/tests/chainer_tests/links_tests/activation_tests/test_prelu.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
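# PReLU computes ``y = x`` where ``x >= 0`` and ``y = W * x`` elsewhere,
# with the slope ``W`` learned, either shared (scalar shape) or per
# channel; the expected outputs below replay that rule element-wise.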
class TestPReLUSingle(unittest.TestCase):
def setUp(self):
self.link = links.PReLU()
W = self.link.W.data
W[...] = numpy.random.uniform(-1, 1, W.shape)
self.link.cleargrads()
self.W = W.copy() # fixed on CPU
        # Avoid instability of the numerical gradient
self.x = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(numpy.float32)
for i in numpy.ndindex(self.x.shape):
if -0.01 < self.x[i] < 0.01:
self.x[i] = 0.5
self.gy = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(numpy.float32)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x)
self.assertEqual(y.data.dtype, numpy.float32)
y_expect = self.x.copy()
for i in numpy.ndindex(self.x.shape):
if self.x[i] < 0:
y_expect[i] *= self.W
testing.assert_allclose(y_expect, y.data)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.link, x_data, y_grad, self.link.W, atol=1e-4)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestPReLUMulti(TestPReLUSingle):
def setUp(self):
self.link = links.PReLU(shape=(3,))
W = self.link.W.data
W[...] = numpy.random.uniform(-1, 1, W.shape)
self.link.cleargrads()
self.W = W.copy() # fixed on CPU
        # Avoid instability of the numerical gradient
self.x = numpy.random.uniform(.5, 1, (4, 3, 2)).astype(numpy.float32)
self.x *= numpy.random.randint(2, size=(4, 3, 2)) * 2 - 1
self.gy = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(numpy.float32)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x)
y_expect = self.x.copy()
for i in numpy.ndindex(self.x.shape):
if self.x[i] < 0:
y_expect[i] *= self.W[i[1]]
testing.assert_allclose(y_expect, y.data)
testing.run_module(__name__, __file__)
| 2,839 | 27.979592 | 78 | py |
| chainer | chainer-master/tests/chainer_tests/links_tests/activation_tests/test_swish.py |
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
def _sigmoid(x):
xp = backend.get_array_module(x)
half = x.dtype.type(0.5)
return xp.tanh(x * half) * half + half
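# Reference sigmoid via the identity sigmoid(x) = 0.5 * tanh(x / 2) + 0.5,
# which avoids evaluating exp(-x) directly for inputs of large magnitude.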
class TestSwishSingle(unittest.TestCase):
def setUp(self):
self.x_shape = (4, 3, 2)
self.dtype = numpy.float32
self.link = links.Swish(())
beta = self.link.beta.data
beta[...] = numpy.random.uniform(-1, 1, beta.shape)
self.link.cleargrads()
self.x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x)
self.assertEqual(y.data.dtype, self.dtype)
beta_data = self.link.beta.data
y_expect = x_data * _sigmoid(beta_data * x_data)
testing.assert_allclose(y_expect, y.data)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, gy_data):
gradient_check.check_backward(
self.link, x_data, gy_data, self.link.beta, atol=1e-4)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestSwishFull(TestSwishSingle):
def setUp(self):
self.x_shape = (4, 3, 2)
self.dtype = numpy.float32
self.link = links.Swish(None)
self.link.cleargrads()
self.x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x)
self.assertEqual(y.data.dtype, self.dtype)
self.assertEqual(self.link.beta.shape, self.x_shape[1:])
beta_data = self.link.beta.data
y_expect = x_data * _sigmoid(beta_data * x_data)
testing.assert_allclose(y_expect, y.data)
testing.run_module(__name__, __file__)
| 2,698 | 26.824742 | 78 | py |
| chainer | chainer-master/tests/chainer_tests/links_tests/activation_tests/__init__.py |  | 0 | 0 | 0 | py |
| chainer | chainer-master/tests/chainer_tests/links_tests/activation_tests/test_maxout.py |
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer.initializers import constant
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import type_check
def _as_mat(x):
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
def _maxout(x, W, b):
W_r = numpy.rollaxis(W, 2)
y = numpy.tensordot(_as_mat(x), W_r, axes=1)
if b is not None:
y += b
return numpy.max(y, axis=2)
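# Reference maxout: with W of shape (out_size, pool_size, in_size),
# ``tensordot`` yields a (batch, out_size, pool_size) array of pool
# candidates, and the maximum over the last axis gives
# y[i, j] = max_k (W[j, k, :] . x[i] + b[j, k]).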
@testing.parameterize(
*testing.product(
{'in_shape': [(2, ), (2, 5)],
'pool_size': [3],
'out_size': [4],
'initial_bias': ['random', 'scalar', None],
'batchsize': [7]}
)
)
class TestMaxout(unittest.TestCase):
def setUp(self):
        # x, W, and b are set so that the result of forward propagation
        # is stable, meaning that small perturbations of them do not
        # change :math:`argmax_{j} W_{ij\cdot} x + b_{ij}`.
x_shape = (self.batchsize, ) + self.in_shape
self.x = numpy.random.uniform(
-0.05, 0.05, x_shape).astype(numpy.float32) + 1
self.gy = numpy.random.uniform(
-0.05, 0.05, (self.batchsize, self.out_size)
).astype(numpy.float32)
in_size = numpy.prod(self.in_shape)
initialW = numpy.random.uniform(
-0.05, 0.05, (self.out_size, self.pool_size, in_size)
).astype(numpy.float32)
for o in six.moves.range(self.out_size):
w = numpy.arange(in_size, dtype=numpy.float32) + 1
for c in six.moves.range(self.pool_size):
initialW[o, c, :] += w * c
if self.initial_bias == 'random':
initial_bias = numpy.random.uniform(
-0.05, 0.05, (self.out_size, self.pool_size))
elif self.initial_bias == 'scalar':
initial_bias = numpy.full(
(self.out_size, self.pool_size), 5, dtype=numpy.float32)
elif self.initial_bias is None:
initial_bias = None
self.link = links.Maxout(in_size, self.out_size, self.pool_size,
initialW, initial_bias)
self.y = _maxout(self.x, initialW, initial_bias)
self.link.cleargrads()
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x)
self.assertEqual(y.data.dtype, numpy.float32)
testing.assert_allclose(self.y, y.data)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
params = [self.link.linear.W]
if self.initial_bias is not None:
params.append(self.link.linear.b)
gradient_check.check_backward(
self.link, x_data, y_grad, params, atol=1e-2)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestInvalidMaxout(unittest.TestCase):
def setUp(self):
self.link = links.Maxout(2, 3, 4)
self.x = numpy.random.uniform(
-1, 1, (10, 7)).astype(numpy.float32)
def test_invalid_size(self):
with self.assertRaises(type_check.InvalidType):
self.link(chainer.Variable(self.x))
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'initializer': ['Initializer', 'scalar', 'ndarray', 'callable'],
}))
class TestInitialization(unittest.TestCase):
def setUp(self):
self.in_size = 2
self.out_size = 3
self.pool_size = 4
if self.initializer == 'Initializer':
self.initialW = constant.Constant(1.0)
self.initial_bias = constant.Constant(2.0)
elif self.initializer == 'scalar':
self.initialW = 1.0
self.initial_bias = 2.0
elif self.initializer == 'ndarray':
self.initialW = numpy.random.uniform(
-1, 1, (self.out_size, self.pool_size, self.in_size)
).astype(self.dtype)
self.initial_bias = numpy.random.uniform(
-1, 1, (self.out_size, self.pool_size)
).astype(self.dtype)
elif self.initializer == 'callable':
def callable_initialW(array):
assert array.dtype == self.dtype
assert array.shape == (
self.out_size, self.pool_size, self.in_size)
array.fill(1.0)
self.initialW = callable_initialW
def callable_initial_bias(array):
assert array.dtype == self.dtype
assert array.shape == (self.out_size, self.pool_size)
array.fill(2.0)
self.initial_bias = callable_initial_bias
else:
raise ValueError('invalid parameter')
with chainer.using_config('dtype', self.dtype):
self.link = links.Maxout(
self.in_size, self.out_size, self.pool_size,
initialW=self.initialW, initial_bias=self.initial_bias)
def check_param(self):
link = self.link
dtype = self.dtype
assert link.linear.W.dtype == dtype
assert link.linear.b.dtype == dtype
linear_out_size = self.out_size * self.pool_size
        if self.initializer in ('Initializer', 'callable'):
W = numpy.empty(
(self.out_size, self.pool_size, self.in_size), dtype=dtype)
self.initialW(W)
bias = numpy.empty((self.out_size, self.pool_size), dtype=dtype)
self.initial_bias(bias)
elif self.initializer == 'scalar':
W = numpy.full((self.out_size, self.pool_size, self.in_size),
self.initialW, dtype=dtype)
bias = numpy.full((self.out_size, self.pool_size),
self.initial_bias, dtype=dtype)
elif self.initializer == 'ndarray':
W = self.initialW
bias = self.initial_bias
else:
raise ValueError('invalid parameter')
W = W.reshape(linear_out_size, self.in_size)
bias = bias.reshape(linear_out_size)
testing.assert_allclose(W, link.linear.W.data)
testing.assert_allclose(bias, link.linear.b.data)
def test_param_cpu(self):
self.check_param()
@attr.gpu
def test_param_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_param()
class TestInvalidInitialization(unittest.TestCase):
def setUp(self):
self.in_size = 2
self.out_size = 3
self.pool_size = 4
def test_invalid_initialW_ndarray(self):
invalid_dim = 1
initialW = numpy.random.uniform(
-1, 1, (self.out_size, self.pool_size, self.in_size, invalid_dim)
).astype(numpy.float32)
with self.assertRaises(ValueError):
links.Maxout(
self.in_size, self.out_size, self.pool_size, initialW=initialW)
def test_invalid_initial_bias_ndarray(self):
invalid_dim = 1
        initial_bias = numpy.random.uniform(
-1, 1, (self.out_size, self.pool_size, invalid_dim)
).astype(numpy.float32)
with self.assertRaises(ValueError):
links.Maxout(self.in_size, self.out_size, self.pool_size,
initial_bias=initial_bias)
testing.run_module(__name__, __file__)
| 7,997
| 32.889831
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/loss_tests/test_hierarchical_softmax.py
|
import copy
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class TestHuffmanTree(unittest.TestCase):
def test_empty(self):
with self.assertRaises(ValueError):
links.BinaryHierarchicalSoftmax.create_huffman_tree({})
def test_simple(self):
tree = links.BinaryHierarchicalSoftmax.create_huffman_tree(
{'x': 8, 'y': 6, 'z': 5, 'w': 4, 'v': 3})
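        # Greedy merges of the two smallest counts:
        # v(3)+w(4) -> 7, z(5)+y(6) -> 11, (v,w)(7)+x(8) -> 15,
        # and finally (z,y) + ((v,w),x) gives the expected tree.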
expect = (('z', 'y'), (('v', 'w'), 'x'))
self.assertEqual(expect, tree)
def test_same_count(self):
tree = links.BinaryHierarchicalSoftmax.create_huffman_tree(
{'x': 1, 'y': 2, 'z': 3})
        # The order of items with the same count is not defined.
self.assertTrue((('x', 'y'), 'z') == tree or
('z', ('x', 'y')) == tree)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestBinaryHierarchicalSoftmax(unittest.TestCase):
def setUp(self):
self._config_user = chainer.using_config('dtype', self.dtype)
self._config_user.__enter__()
tree = ((0, 1), ((2, 3), 4))
self.link = links.BinaryHierarchicalSoftmax(3, tree)
self.link.cleargrads()
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
self.t = numpy.array([0, 2]).astype(numpy.int32)
self.gy = numpy.random.uniform(-1, 1, ()).astype(self.dtype)
self.W = self.link.W.data.copy()
if self.dtype == numpy.float16:
self.check_sum_options = {'delta': 1e-3}
self.test_forward_options = {'atol': 0.005}
self.check_backward_options = {'dtype': numpy.float64}
else:
self.check_sum_options = {'delta': 1e-5}
self.test_forward_options = {}
self.check_backward_options = {}
def tearDown(self):
self._config_user.__exit__(None, None, None)
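    # A well-formed hierarchical softmax assigns probabilities over the
    # five leaves that sum to one; check_sum verifies this by
    # accumulating exp(-loss) over every possible target.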
def check_sum(self, x, gpu=False):
total = 0
for i in range(5):
t = numpy.array([i], dtype=numpy.int32)
if gpu:
t = cuda.to_gpu(t)
loss = self.link(chainer.Variable(x), chainer.Variable(t)).data
self.assertEqual(loss.dtype, self.dtype)
self.assertEqual(loss.shape, ())
total += numpy.exp(-cuda.to_cpu(loss))
self.assertAlmostEqual(1.0, float(total), **self.check_sum_options)
@condition.retry(3)
def test_sum_cpu(self):
x = numpy.array([[1.0, 2.0, 3.0]], self.dtype)
self.check_sum(x)
@attr.gpu
@condition.retry(3)
def test_sum_gpu(self):
x = numpy.array([[1.0, 2.0, 3.0]], self.dtype)
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_sum(cuda.to_gpu(x), gpu=True)
@attr.gpu
def test_forward(self):
# TODO(unno): We need to test return values of forward function.
cpu_loss = self.link(chainer.Variable(self.x),
chainer.Variable(self.t)).data
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
gpu_loss = self.link(chainer.Variable(cuda.to_gpu(self.x)),
chainer.Variable(cuda.to_gpu(self.t))).data
testing.assert_allclose(
cpu_loss, cuda.to_cpu(gpu_loss), **self.test_forward_options)
def check_backward(self, x_data, t_data, y_grad):
def f(x, t):
            # Forcibly cast the dtype of an internal state so that we can
            # compute the numerical gradient in higher precision.
if self.dtype == numpy.float16 and x.dtype == numpy.float64:
self.link._func.codes = self.link._func.codes.astype(x.dtype)
return self.link(x, t)
gradient_check.check_backward(
f, (x_data, t_data), y_grad, self.link.W,
atol=1e-4, rtol=1e-3, **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.t, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x),
cuda.to_gpu(self.t),
cuda.to_gpu(self.gy))
@attr.gpu
def test_to_cpu(self):
f = copy.deepcopy(self.link)._func
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
with testing.assert_warns(DeprecationWarning):
self.link.to_cpu()
g = self.link._func
self.assertTrue((f.begins == g.begins).all())
self.assertTrue((f.paths == g.paths).all())
self.assertTrue((f.codes == g.codes).all())
testing.run_module(__name__, __file__)
| 4,988
| 33.888112
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/loss_tests/test_crf1d.py
|
import itertools
import unittest
import numpy
from six import moves
import chainer
from chainer.backends import cuda
from chainer import initializers
from chainer import links
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'initial_cost': ['random', None],
'transpose': [True, False],
}))
class TestCRF1d(unittest.TestCase):
def _calc_score(self, batch, ys):
cost = self.link.cost.array
        return (sum(x[batch, y] for x, y in zip(self.xs, ys)) +
                sum(cost[y1, y2] for y1, y2 in zip(ys[:-1], ys[1:])))
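    # _crf1d computes a reference negative log-likelihood by brute force:
    # z[b] is the partition function (the sum of exp-scores over all
    # label sequences), and the loss is the batch mean of
    # -(score(ys) - log(z)).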
def _crf1d(self, cost_data, xs_data, ys_data):
z = numpy.zeros((self.batches[0],), numpy.float32)
for b, length in enumerate(self.lengths):
for ys in itertools.product(range(self.n_label), repeat=length):
                z[b] += numpy.exp(cuda.to_cpu(self._calc_score(b, ys)))
score = numpy.zeros((self.batches[0],), numpy.float32)
for b, length in enumerate(self.lengths):
ys = [self.ys[i][b] for i in range(length)]
score[b] = self._calc_score(b, ys)
loss = -(score - numpy.log(z))
return numpy.sum(loss) / self.batches[0]
def setUp(self):
self._config_user = chainer.using_config('dtype', self.dtype)
self._config_user.__enter__()
self.n_label = 3
self.lengths = [3, 3]
self.batches = [2, 2, 2]
self.xs = [numpy.random.uniform(-1, 1, (b, 3)).astype(self.dtype)
for b in self.batches]
self.ys = [numpy.random.randint(
0, self.n_label, (b,)).astype(numpy.int32)
for b in self.batches]
self.link = links.CRF1d(n_label=self.n_label)
self.cost_shape = (self.n_label, self.n_label)
if self.dtype == numpy.float16:
self.check_forward_options = {'rtol': 5e-3, 'atol': 1e-2}
else:
self.check_forward_options = {'atol': 1e-4}
def tearDown(self):
self._config_user.__exit__(None, None, None)
def check_forward(self, x_data, t_data):
if self.transpose:
# Make transposed arrays manually
            xs = [self.link.xp.empty((length, 3), dtype=self.dtype)
                  for length in self.lengths]
            ts = [self.link.xp.empty((length,), dtype=numpy.int32)
                  for length in self.lengths]
for i, batch in enumerate(self.batches):
for j in moves.range(batch):
xs[j][i] = x_data[i][j]
ts[j][i] = t_data[i][j]
else:
xs = x_data
ts = t_data
x = self.link(xs, ts, transpose=self.transpose)
t = self._crf1d(self.link.cost.array, x_data, t_data)
testing.assert_allclose(x.array, t,
**self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(self.xs, self.ys)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.xs), cuda.to_gpu(self.ys))
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'initializer': ['random', None]
}))
class TestInitialization(unittest.TestCase):
def setUp(self):
self.n_label = 3
self.initial_cost = numpy.empty((self.n_label, self.n_label),
dtype=self.dtype)
if self.initializer is None:
initializer = initializers.constant.Zero()
elif self.initializer == 'random':
initializer = initializers.GlorotUniform()
initializer(self.initial_cost)
with chainer.using_config('dtype', self.dtype):
self.link = links.CRF1d(self.n_label,
initial_cost=self.initial_cost)
def check_param(self):
link = self.link
dtype = self.dtype
assert link.cost.dtype == dtype
testing.assert_allclose(link.cost.array,
self.initial_cost,
atol=0, rtol=0)
def test_param_cpu(self):
self.check_param()
@attr.gpu
def test_param_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_param()
testing.run_module(__name__, __file__)
| 4,491
| 32.029412
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/loss_tests/test_negative_sampling.py
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backend import CpuDevice
from chainer import links
from chainer import testing
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
't': [[0, 2], [-1, 1, 2]],
'reduce': ['sum', 'no'],
}))
@testing.inject_backend_tests(
None,
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestNegativeSampling(unittest.TestCase):
in_size = 3
sample_size = 2
def setUp(self):
self._config_user = chainer.using_config('dtype', self.dtype)
self._config_user.__enter__()
batch = len(self.t)
x_shape = (batch, self.in_size)
self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.t = numpy.array(self.t).astype(numpy.int32)
if self.reduce == 'no':
g_shape = self.t.shape
elif self.reduce == 'sum':
g_shape = ()
self.gy = numpy.random.uniform(-1, 1, g_shape).astype(self.dtype)
if self.dtype == numpy.float16:
self.test_forward_options = {'atol': 1e-2}
self.test_backward_options = {'atol': 5e-3}
else:
self.test_forward_options = {}
self.test_backward_options = {'atol': 1e-4}
def tearDown(self):
self._config_user.__exit__(None, None, None)
def create_link(self, rng=None):
if rng is None:
rng = numpy.random.RandomState()
link = links.NegativeSampling(
self.in_size, [10, 5, 2, 5, 2], self.sample_size)
link.cleargrads()
        # W is initialized with zeros. Inject random values to make the
        # test meaningful.
link.W.array[:] = rng.uniform(-1, 1, link.W.shape)
return link
def call_link_with_samples(self, samples, func):
# Call the link with given `samples` array.
# `func` is a function in which the link is called.
# mock sampler that returns the saved samples
def mock_sample(shape):
assert samples.shape == shape
return samples.copy()
# Wrap F.negative_sampling to replace sampler with the mock
orig_negative_sampling = chainer.functions.negative_sampling
def wrap_negative_sampling(*args, **kwargs):
args = args[:3] + (mock_sample,) + args[4:]
return orig_negative_sampling(*args, **kwargs)
with testing.patch(
'chainer.functions.loss.negative_sampling.negative_sampling',
wraps=wrap_negative_sampling) as m:
ret = func()
assert m.call_count == 1
return ret
def test_forward(self, backend_config):
x_data = backend_config.get_array(self.x)
t_data = backend_config.get_array(self.t)
x = chainer.Variable(x_data)
t = chainer.Variable(t_data, requires_grad=False)
link = self.create_link()
link.to_device(backend_config.device)
y, samples = link(x, t, reduce=self.reduce, return_samples=True)
self.assertEqual(y.shape, self.gy.shape)
cpu_device = CpuDevice()
W = cpu_device.send(link.W.data)
samples = cpu_device.send(samples)
loss = numpy.empty((len(self.x),), self.dtype)
for i in range(len(self.x)):
ix = self.x[i]
it = self.t[i]
if it == -1:
loss[i] = 0
else:
w = W[samples[i]]
f = w.dot(ix)
                # the first sample is the positive example
f[0] *= -1
loss[i] = numpy.logaddexp(f, 0).sum()
if self.reduce == 'sum':
loss = loss.sum()
testing.assert_allclose(y.data, loss, **self.test_forward_options)
def test_to_cpu(self, backend_config):
link = self.create_link()
link.to_device(backend_config.device)
self.assertEqual(link.sampler.device, backend_config.device)
with testing.assert_warns(DeprecationWarning):
link.to_cpu()
self.assertEqual(link.sampler.device, backend.CpuDevice())
def test_return_samples(self, backend_config):
batch_size = self.t.shape[0]
link = self.create_link()
link.to_device(backend_config.device)
x_data = backend_config.get_array(self.x)
t_data = backend_config.get_array(self.t)
x = chainer.Variable(x_data)
t = chainer.Variable(t_data, requires_grad=False)
# return_samples=True
y, samples = link(x, t, reduce=self.reduce, return_samples=True)
assert isinstance(samples, backend_config.xp.ndarray)
assert samples.shape == (batch_size, self.sample_size + 1)
assert samples.dtype == numpy.int32
# return_samples=False, with saved samples
y_ = self.call_link_with_samples(
samples,
lambda: link(x, t, reduce=self.reduce))
# y and y_ should equal
cpu_device = CpuDevice()
numpy.testing.assert_array_equal(
cpu_device.send(y.array), cpu_device.send(y_.array))
def test_backward_compare_with_numpy(self, backend_config):
        # This test compares gradients with those of NumPy mode.
rng = numpy.random.RandomState()
rng_state = rng.get_state()
# Call NumPy mode link and save samples
x = chainer.Variable(self.x)
t = chainer.Variable(self.t, requires_grad=False)
link = self.create_link(rng)
y, samples = link(x, t, return_samples=True)
y.backward()
assert t.grad is None
gw_cpu = link.W.grad
gx_cpu = x.grad
# Call GPU mode link
rng.set_state(rng_state)
link = self.create_link(rng)
link.to_device(backend_config.device)
x = chainer.Variable(backend_config.get_array(self.x))
t = chainer.Variable(
backend_config.get_array(self.t), requires_grad=False)
samples = backend_config.get_array(samples)
y = self.call_link_with_samples(samples, lambda: link(x, t))
y.backward()
assert t.grad is None
gw_gpu = link.W.grad
gx_gpu = x.grad
# Compare gradients from CPU and GPU modes
testing.assert_allclose(gx_cpu, gx_gpu, **self.test_backward_options)
testing.assert_allclose(gw_cpu, gw_gpu, **self.test_backward_options)
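# A minimal sketch (not part of the original tests) of the per-example
# loss formula verified in test_forward: with scores s_k = w_k . x and
# the positive score entering with opposite sign, the loss is
# sum_k log(1 + exp(s_k)) via numpy.logaddexp. Values are illustrative.
def _negative_sampling_loss_sketch():
    x = numpy.array([1.0, 0.0, -1.0], dtype=numpy.float32)
    W = numpy.array([[0.5, 0.0, 0.0],    # row 0: the positive sample
                     [0.0, 0.0, 2.0]],   # row 1: one negative sample
                    dtype=numpy.float32)
    f = W.dot(x)
    f[0] *= -1  # the positive score is negated
    loss = numpy.logaddexp(f, 0).sum()
    expected = numpy.log1p(numpy.exp(-0.5)) + numpy.log1p(numpy.exp(-2.0))
    assert numpy.isclose(loss, expected)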
testing.run_module(__name__, __file__)
| 6,720
| 32.108374
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/loss_tests/test_black_out.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import links
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
class TestBlackOut(unittest.TestCase):
batch_size = 5
in_size = 4
count = [3, 2, 1]
n_samples = 7
def setUp(self):
self._config_user = chainer.using_config('dtype', self.dtype)
self._config_user.__enter__()
x_shape = (self.batch_size, self.in_size)
self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.t = numpy.random.randint(
len(self.count), size=self.batch_size).astype(numpy.int32)
self.link = links.BlackOut(self.in_size, self.count, self.n_samples)
self.w = numpy.random.uniform(-1, 1, self.link.W.data.shape)
self.link.W.data[:] = self.w
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-3}
else:
self.check_forward_options = {'atol': 1e-4}
def tearDown(self):
self._config_user.__exit__(None, None, None)
def check_forward(self, x_data, t_data):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data, requires_grad=False)
self.link.sample_data = self.link.sampler.sample(
(self.batch_size, self.n_samples))
y = self.link(x, t)
        expect_y = numpy.empty((self.batch_size,), dtype=self.dtype)
samples = cuda.to_cpu(self.link.sample_data)
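        # The expected per-example objective below is
        #     log p(t_b) + sum_s log(1 - p(s)),
        # where p(w) = exp(w . x_b) / z and z sums exp-scores over the
        # target and all drawn samples; the final loss negates its mean.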
for b in range(self.batch_size):
z = 0
for i in range(self.n_samples):
w = samples[b, i]
z += numpy.exp(self.w[w].dot(self.x[b]))
y0 = self.w[self.t[b]].dot(self.x[b])
z += numpy.exp(y0)
            log_p = y0 - numpy.log(z)
            for i in range(self.n_samples):
                w = samples[b, i]
                log_p += numpy.log(
                    1 - numpy.exp(self.w[w].dot(self.x[b])) / z)
            expect_y[b] = log_p
loss = -numpy.sum(expect_y) / self.batch_size
testing.assert_allclose(y.data, loss, **self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(self.x, self.t)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@attr.chainerx
def test_forward_chainerx_native(self):
device = chainer.get_device('native:0')
self.link.to_device(device)
self.check_forward(device.send(self.x), device.send(self.t))
@attr.chainerx
@attr.gpu
def test_forward_chainerx_cuda(self):
device = chainer.get_device('cuda:0')
self.link.to_device(device)
self.check_forward(device.send(self.x), device.send(self.t))
testing.run_module(__name__, __file__)
| 2,964
| 30.542553
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/loss_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_convolution_2d.py
|
import unittest
import numpy
import pytest
import six.moves.cPickle as pickle
import chainer
from chainer.backends import cuda
from chainer import functions as F
from chainer import links
from chainer import memory_layouts
from chainer import testing
from chainer.testing import attr
from chainer.utils import conv
@testing.parameterize(*testing.product({
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[{}]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestConvolution2D(testing.LinkTestCase):
param_names = ('W', 'b')
skip_double_backward_test = True
def setUp(self):
self.N = 2
self.in_channels = 3
self.out_channels = 2
self.ksize = 3
self.stride = 2
self.pad = 1
if self.x_dtype == numpy.float16 or self.W_dtype == numpy.float16:
self.check_forward_options.update({'atol': 5e-3, 'rtol': 5e-2})
self.check_backward_options.update({'atol': 3e-2, 'rtol': 5e-2})
def before_test(self, test_name):
# cuDNN 5 and 5.1 results suffer from precision issues
using_old_cudnn = (self.backend_config.xp is cuda.cupy
and self.backend_config.use_cudnn == 'always'
and cuda.cuda.cudnn.getVersion() < 6000)
if using_old_cudnn:
self.check_backward_options.update({'atol': 3e-2, 'rtol': 5e-2})
def generate_params(self):
initialW = chainer.initializers.Normal(1, self.W_dtype)
initial_bias = chainer.initializers.Normal(1, self.x_dtype)
return initialW, initial_bias
def create_link(self, initializers):
initialW, initial_bias = initializers
link = links.Convolution2D(
self.in_channels, self.out_channels, self.ksize,
stride=self.stride, pad=self.pad,
initialW=initialW,
initial_bias=initial_bias)
return link
def generate_inputs(self):
h, w = 4, 3
x = numpy.random.uniform(-1, 1,
(self.N, self.in_channels,
h, w)).astype(self.x_dtype)
return x,
def forward_expected(self, link, inputs):
x, = inputs
W = link.W
b = link.b
y = F.convolution_2d(
x, W, b,
pad=self.pad,
stride=self.stride)
return y.array,
def test_pickling(self, backend_config):
x_data, = self.generate_inputs()
link = self.create_link(self.generate_params())
link.to_device(backend_config.device)
x = chainer.Variable(x_data)
x.to_device(backend_config.device)
y = link(x)
y_data1 = y.data
del x, y
pickled = pickle.dumps(link, -1)
del link
link = pickle.loads(pickled)
x = chainer.Variable(x_data)
x.to_device(backend_config.device)
y = link(x)
y_data2 = y.data
testing.assert_allclose(y_data1, y_data2, atol=0, rtol=0)
def test_from_params(self, backend_config):
if (
(backend_config.use_cuda and
backend_config.cuda_device == 1) or
(backend_config.use_chainerx and
'cuda' in backend_config.chainerx_device)):
raise unittest.SkipTest()
link1 = self.create_link(self.generate_params())
link1.to_device(backend_config.device)
link2 = links.Convolution2D.from_params(
link1.W, link1.b, stride=self.stride, pad=self.pad)
assert link2.W.shape == link1.W.shape
        assert link2.b.shape == link1.b.shape
assert link2.stride == link1.stride
assert link2.pad == link1.pad
@testing.parameterize(*testing.product({
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestConvolution2DIm2ColConsistency(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1,
(2, 3, 4, 3)).astype(self.x_dtype)
@attr.gpu
def test_im2col_consistency(self):
col_cpu = conv.im2col_cpu(self.x, 3, 3, 2, 2, 1, 1)
col_gpu = conv.im2col_gpu(cuda.to_gpu(self.x), 3, 3, 2, 2, 1, 1)
testing.assert_allclose(col_cpu, col_gpu.get(), atol=0, rtol=0)
@attr.gpu
def test_col2im_consistency(self):
col = conv.im2col_cpu(self.x, 3, 3, 2, 2, 1, 1)
h, w = self.x.shape[2:]
im_cpu = conv.col2im_cpu(col, 2, 2, 1, 1, h, w)
im_gpu = conv.col2im_gpu(cuda.to_gpu(col), 2, 2, 1, 1, h, w)
testing.assert_allclose(im_cpu, im_gpu.get())
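# A minimal sketch (not part of the original tests): for the (2, 3, 4, 3)
# input above with ksize=3, stride=2 and pad=1, im2col_cpu yields patches
# of shape (N, C, kh, kw, out_h, out_w), where each spatial output size is
# (in + 2 * pad - k) // stride + 1 = 2.
def _im2col_shape_sketch():
    x = numpy.zeros((2, 3, 4, 3), dtype=numpy.float32)
    col = conv.im2col_cpu(x, 3, 3, 2, 2, 1, 1)
    assert col.shape == (2, 3, 3, 3, 2, 2)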
@testing.parameterize(*testing.product({
'conv_args': [((None, 2, 3, 2, 1), {}),
((2, 3), {'stride': 2, 'pad': 1})],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[{}]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestConvolution2DParameterShapePlaceholder(testing.LinkTestCase):
param_names = ('W', 'b')
skip_double_backward_test = True
def before_test(self, test_name):
# cuDNN 5 and 5.1 results suffer from precision issues
using_old_cudnn = (self.backend_config.xp is cuda.cupy
and self.backend_config.use_cudnn == 'always'
and cuda.cuda.cudnn.getVersion() < 6000)
if using_old_cudnn:
self.check_backward_options.update({'atol': 3e-2, 'rtol': 5e-2})
def generate_params(self):
return ()
def create_link(self, initializers):
args, kwargs = self.conv_args
link = links.Convolution2D(*args, **kwargs)
b = link.b.data
b[...] = numpy.random.uniform(-1, 1, b.shape)
return link
def generate_inputs(self):
x = numpy.random.uniform(-1, 1,
(2, 3, 4, 3)).astype(numpy.float32)
return x,
def forward_expected(self, link, inputs):
x, = inputs
y = link(x).array
return y,
def test_pickling(self, backend_config):
x_data, = self.generate_inputs()
link = self.create_link(self.generate_params())
link.to_device(backend_config.device)
x = chainer.Variable(x_data)
x.to_device(backend_config.device)
y = link(x)
y_data1 = y.data
del x, y
pickled = pickle.dumps(link, -1)
del link
link = pickle.loads(pickled)
x = chainer.Variable(x_data)
x.to_device(backend_config.device)
y = link(x)
y_data2 = y.data
testing.assert_allclose(y_data1, y_data2, atol=0, rtol=0)
class Convolution2DMemoryLayoutsTestBase(object):
batch = 2
in_channels = 5
out_channels = 7
height = 13
width = 11
kernel_height = 5
kernel_width = 4
strides_height = 3
strides_width = 2
dtype = numpy.float32
def create_link(self):
link = links.Convolution2D(
self.in_channels,
self.out_channels,
(self.kernel_height, self.kernel_width),
(self.strides_height, self.strides_width))
return link
def create_input_array(self, xp):
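        # Channels-last (NHWC) input, matching the CUDNN_CHANNEL_LAST_X
        # layout the tests below pass to chainer.Variable.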
x_shape = (self.batch, self.height, self.width, self.in_channels)
x = xp.ones(x_shape, self.dtype)
return x
@testing.inject_backend_tests(
[
'test_param_layout_to_device',
'test_forward',
],
# GPU tests
[{'use_cuda': True, 'use_cudnn': 'always'}])
class TestConvolution2DMemoryLayouts(unittest.TestCase,
Convolution2DMemoryLayoutsTestBase):
def test_param_layout(self):
with chainer.using_config('compute_mode', 'cudnn_fast'):
link = self.create_link()
assert link.W.layout == memory_layouts.CUDNN_CHANNEL_LAST_W
def test_param_layout_to_device(self, backend_config):
with chainer.using_config('compute_mode', 'cudnn_fast'):
link = self.create_link()
assert link.W.device == chainer.get_device('@numpy')
link.to_device(backend_config.device)
assert link.W.device == backend_config.device
assert link.W.layout == memory_layouts.CUDNN_CHANNEL_LAST_W
def test_forward(self, backend_config):
with chainer.using_config('compute_mode', 'cudnn_fast'):
link = self.create_link()
link.to_device(backend_config.device)
x = self.create_input_array(backend_config.xp)
x = chainer.Variable(x, layout=memory_layouts.CUDNN_CHANNEL_LAST_X)
x.to_device(backend_config.device)
with backend_config:
y = link(x)
assert link.W.device == backend_config.device
assert y.layout == memory_layouts.CUDNN_CHANNEL_LAST_X
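        # With no padding: out_h = (13 - 5 + 1) // 3 = 3 and
        # out_w = (11 - 4 + 1) // 2 = 4 for the class defaults above.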
assert y.shape == (
self.batch,
self.out_channels,
(self.height - self.kernel_height + 1) // self.strides_height,
(self.width - self.kernel_width + 1) // self.strides_width)
@testing.inject_backend_tests(
[
'test_forward',
],
# CPU tests
[{},
# GPU tests
{'use_cuda': True, 'use_cudnn': 'never'}])
class TestConvolution2DInvalidComputeMode(unittest.TestCase,
Convolution2DMemoryLayoutsTestBase):
def test_forward(self, backend_config):
with chainer.using_config('compute_mode', 'cudnn_fast'):
link = self.create_link()
link.to_device(backend_config.device)
x = self.create_input_array(backend_config.xp)
x = chainer.Variable(x, layout=memory_layouts.CUDNN_CHANNEL_LAST_X)
x.to_device(backend_config.device)
with backend_config:
with pytest.raises(RuntimeError):
link(x)
testing.run_module(__name__, __file__)
| 10,531
| 30.818731
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_bias.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(
{'learn_b': True},
{'learn_b': False}
)
class TestBias(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
        self.b = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)
self.y_expected = numpy.copy(self.x)
for i, j, k in numpy.ndindex(self.y_expected.shape):
self.y_expected[i, j, k] += self.b[j]
self.gy = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
axis = 1
if self.learn_b:
self.link = links.Bias(axis, self.b.shape)
self.link.b.data = self.b
else:
self.link = links.Bias(axis, None)
self.link.cleargrads()
def test_attribute_presence(self):
self.assertEqual(self.learn_b, hasattr(self.link, 'b'))
def check_forward(self, x_data, b_data, y_expected):
x = chainer.Variable(x_data)
if b_data is None:
y = self.link(x)
testing.assert_allclose(y_expected, y.data)
else:
b = chainer.Variable(b_data)
y = self.link(x, b)
testing.assert_allclose(y_expected, y.data)
def test_forward_cpu(self):
if self.learn_b:
b = None
else:
b = self.b
self.check_forward(self.x, b, self.y_expected)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
if self.learn_b:
b = None
else:
b = cuda.to_gpu(self.b)
self.check_forward(x, b, self.y_expected)
def check_backward(self, x_data, b_data, y_grad):
if b_data is None:
params = [self.link.b]
gradient_check.check_backward(
self.link, x_data, y_grad, params, atol=1e-2)
else:
gradient_check.check_backward(
self.link, (x_data, b_data), y_grad, atol=1e-2)
@condition.retry(3)
def test_backward_cpu(self):
if self.learn_b:
b = None
else:
b = self.b
self.check_backward(self.x, b, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
if self.learn_b:
b = None
else:
b = cuda.to_gpu(self.b)
gy = cuda.to_gpu(self.gy)
self.check_backward(x, b, gy)
class TestBiasInvalidArgc(unittest.TestCase):
def setUp(self):
x_data = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
        b_data = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)
self.axis = 1
self.x = chainer.Variable(x_data)
self.b = chainer.Variable(b_data)
def test_bias_invalid_argc1(self):
func = links.Bias(self.axis, self.b.data.shape)
with chainer.using_config('debug', True):
with self.assertRaises(AssertionError):
func(self.x, self.b)
def test_bias_invalid_argc2(self):
func = links.Bias(self.axis, None)
with chainer.using_config('debug', True):
with self.assertRaises(AssertionError):
func(self.x)
testing.run_module(__name__, __file__)
| 3,625
| 28.966942
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_inception.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import links
from chainer import testing
from chainer.testing import attr
class TestInception(unittest.TestCase):
in_channels = 3
out1, proj3, out3, proj5, out5, proj_pool = 3, 2, 3, 2, 3, 3
def setUp(self):
self.x = numpy.random.uniform(
-1, 1, (10, self.in_channels, 5, 5)
).astype(numpy.float32)
out = self.out1 + self.out3 + self.out5 + self.proj_pool
self.gy = numpy.random.uniform(
-1, 1, (10, out, 5, 5)).astype(numpy.float32)
self.l = links.Inception(
self.in_channels, self.out1, self.proj3, self.out3,
self.proj5, self.out5, self.proj_pool)
def check_backward(self, x_data, y_grad):
x = chainer.Variable(x_data)
y = self.l(x)
y.grad = y_grad
y.backward()
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.l.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| 1,221
| 26.155556
| 70
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_convolution_nd.py
|
import unittest
import numpy
import six.moves.cPickle as pickle
import chainer
from chainer.backends import cuda
from chainer import functions as F
from chainer import initializers
from chainer.links.connection import convolution_nd
from chainer import testing
from chainer.testing import attr
from chainer.utils import conv_nd
@testing.parameterize(*(testing.product({
'dims': [(3, 4), (3, 4, 3)],
'dtype': [numpy.float32],
'in_channels': [4, None, 'omit'],
'groups': [1, 2],
}) + testing.product({
'dims': [(5,)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'in_channels': [4, None, 'omit'],
'groups': [1, 2],
})))
@testing.inject_backend_tests(
None,
# CPU tests
[{}]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestConvolutionND(testing.LinkTestCase):
param_names = ('W', 'b')
def setUp(self):
self.ndim = len(self.dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
self.x_shape = (2, 4) + self.dims
self.check_backward_options.update({'eps': 1e-2,
'atol': 1e-3, 'rtol': 1e-3})
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 5e-3, 'rtol': 5e-2})
self.check_backward_options.update({
'eps': 2 ** -4, 'atol': 2 ** -4, 'rtol': 2 ** -4})
def before_test(self, test_name):
# cuDNN 5 and 5.1 results suffer from precision issues
using_old_cudnn = (self.backend_config.xp is cuda.cupy
and self.backend_config.use_cudnn == 'always'
and cuda.cuda.cudnn.getVersion() < 6000)
if using_old_cudnn:
self.check_backward_options.update({
'eps': 2 ** -4, 'atol': 2 ** -4, 'rtol': 2 ** -4})
def generate_params(self):
initial_bias = initializers.Uniform(scale=1., dtype=self.dtype)
return initial_bias,
def create_link(self, initializers):
initial_bias, = initializers
if self.in_channels == 'omit':
link = convolution_nd.ConvolutionND(
self.ndim, 2, self.ksize, stride=self.stride,
pad=self.pad, groups=self.groups,
initial_bias=initial_bias)
else:
link = convolution_nd.ConvolutionND(
self.ndim, self.in_channels, 2, self.ksize, stride=self.stride,
pad=self.pad, groups=self.groups,
initial_bias=initial_bias)
return link
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
return x,
def forward_expected(self, link, inputs):
x, = inputs
W = link.W
b = link.b
y = F.convolution_nd(
x, W, b,
pad=self.pad,
groups=self.groups,
stride=self.stride)
return y.array,
def test_pickling(self, backend_config):
x_data, = self.generate_inputs()
link = self.create_link(self.generate_params())
link.to_device(backend_config.device)
x = chainer.Variable(x_data)
x.to_device(backend_config.device)
y = link(x)
y_data1 = y.data
del x, y
pickled = pickle.dumps(link, -1)
del link
link = pickle.loads(pickled)
x = chainer.Variable(x_data)
x.to_device(backend_config.device)
y = link(x)
y_data2 = y.data
testing.assert_allclose(y_data1, y_data2, atol=0, rtol=0)
def test_from_params(self, backend_config):
if (
(backend_config.use_cuda and
backend_config.cuda_device == 1) or
(backend_config.use_chainerx and
'cuda' in backend_config.chainerx_device)):
raise unittest.SkipTest()
link1 = self.create_link(self.generate_params())
link1.to_device(backend_config.device)
if self.in_channels in (None, 'omit'):
link1._initialize_params(self.x_shape[1])
link2 = convolution_nd.ConvolutionND.from_params(
link1.W, link1.b,
stride=self.stride, pad=self.pad, groups=self.groups)
assert link2.W.shape == link1.W.shape
assert link2.b.shape == link1.b.shape
assert link2.stride == link1.stride
assert link2.pad == link1.pad
@testing.parameterize(*(testing.product({
'dims': [(3, 4), (3, 4, 3)],
'dtype': [numpy.float32],
}) + testing.product({
'dims': [(5,)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
})))
class TestConvolutionNDIm2ColConsistency(unittest.TestCase):
def setUp(self):
self.ndim = len(self.dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
self.x_shape = (2, 4) + self.dims
self.x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
@attr.gpu
def test_im2col_consistency(self):
col_cpu = conv_nd.im2col_nd_cpu(
self.x, self.ksize, self.stride, self.pad)
col_gpu = conv_nd.im2col_nd_gpu(
cuda.to_gpu(self.x), self.ksize, self.stride, self.pad)
testing.assert_allclose(col_cpu, col_gpu.get(), atol=0, rtol=0)
@attr.gpu
def test_col2im_consistency(self):
col = conv_nd.im2col_nd_cpu(self.x, self.ksize, self.stride, self.pad)
im_cpu = conv_nd.col2im_nd_cpu(col, self.stride, self.pad, self.dims)
im_gpu = conv_nd.col2im_nd_gpu(
cuda.to_gpu(col), self.stride, self.pad, self.dims)
testing.assert_allclose(im_cpu, im_gpu.get())
class TestConvolutionNDNoInitialBias(unittest.TestCase):
def test_no_initial_bias(self):
ndim = 3
ksize = 3
link = convolution_nd.ConvolutionND(
ndim, 3, 2, ksize, nobias=True)
self.assertIsNone(link.b)
class TestConvolutionNDWrappers(unittest.TestCase):
def _get_data(self, ndim):
in_channels = 3
out_channels = 2
dtype = numpy.float32
x_shape = (2, in_channels) + (3,) * ndim
x = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
return in_channels, out_channels, x
def test_conv1d(self):
in_c, out_c, x = self._get_data(1)
link_nd = convolution_nd.ConvolutionND(1, in_c, out_c, 2, initialW=1)
link_1d = convolution_nd.Convolution1D(in_c, out_c, 2, initialW=1)
testing.assert_allclose(link_nd(x).data, link_1d(x).data)
def test_conv3d(self):
in_c, out_c, x = self._get_data(3)
link_nd = convolution_nd.ConvolutionND(3, in_c, out_c, 2, initialW=1)
link_3d = convolution_nd.Convolution3D(in_c, out_c, 2, initialW=1)
testing.assert_allclose(link_nd(x).data, link_3d(x).data)
testing.run_module(__name__, __file__)
| 7,238
| 31.755656
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_deconvolution_nd.py
|
import unittest
import numpy
from chainer.backends import cuda
import chainer.functions as F
from chainer import initializers
from chainer.links.connection import deconvolution_nd
from chainer import testing
from chainer.testing import parameterize
from chainer.utils import conv
@parameterize(*testing.product({
'dims': [(3, 2), (2,)],
'nobias': [True, False],
'dtype': [numpy.float32],
'used_outsize': ['case1', 'case2', 'None'],
'in_channels': [4, None, 'omit'],
'groups': [1, 2],
}) + testing.product({
'dims': [(4, 3, 2)],
'nobias': [False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'used_outsize': ['None'],
'in_channels': [4, None, 'omit'],
'groups': [1, 2],
}))
@testing.inject_backend_tests(
['test_forward', 'test_backward'],
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX is not supported
# TODO(ecastill) chainerx support for case2
)
class TestDeconvolutionND(testing.LinkTestCase):
def setUp(self):
self.N = 2
self.out_channels = 2
self.ndim = len(self.dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
if self.nobias:
self.param_names = ('W',)
else:
self.param_names = ('W', 'b')
if self.used_outsize == 'case1' or self.used_outsize == 'None':
# Use output size determined with get_deconv_outsize.
outs = tuple(
conv.get_deconv_outsize(d, k, s, p)
for (d, k, s, p) in zip(self.dims, self.ksize,
self.stride, self.pad))
elif self.used_outsize == 'case2':
# Use possible output size other than the one determined with
# get_deconv_outsize.
outs = tuple(
conv.get_deconv_outsize(d, k, s, p) + 1
for (d, k, s, p) in zip(self.dims, self.ksize,
self.stride, self.pad))
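        # For reference, get_deconv_outsize(d, k, s, p) = s * (d - 1)
        # + k - 2 * p; e.g. d=3, k=3, s=2, p=1 gives 5.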
if self.used_outsize != 'None':
self.outsize = outs
else:
self.outsize = None
self.x_shape = (self.N, 4) + self.dims
self.check_backward_options.update({
'eps': 1e-2, 'atol': 1e-4, 'rtol': 1e-3})
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 5e-3, 'rtol': 5e-2})
self.check_backward_options.update({
'eps': 2 ** -3, 'atol': 1e-2, 'rtol': 1e-1})
def before_test(self, test_name):
# cuDNN 5 and 5.1 results suffer from precision issues
using_old_cudnn = (self.backend_config.xp is cuda.cupy
and self.backend_config.use_cudnn == 'always'
and cuda.cuda.cudnn.getVersion() < 6000)
if using_old_cudnn:
self.check_backward_options.update({
'eps': 2 ** -3, 'atol': 1e-2, 'rtol': 1e-1})
def generate_params(self):
initial_bias = initializers.Uniform(scale=1., dtype=self.dtype)
return initial_bias,
def create_link(self, initializers):
initial_bias, = initializers
if self.in_channels == 'omit':
link = deconvolution_nd.DeconvolutionND(
self.ndim, self.out_channels, self.ksize, stride=self.stride,
pad=self.pad, outsize=self.outsize, initial_bias=initial_bias,
nobias=self.nobias, groups=self.groups)
else:
link = deconvolution_nd.DeconvolutionND(
self.ndim, self.in_channels, self.out_channels, self.ksize,
stride=self.stride, pad=self.pad, outsize=self.outsize,
initial_bias=initial_bias, nobias=self.nobias,
groups=self.groups)
return link
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
return x,
def forward_expected(self, link, inputs):
x, = inputs
W = link.W
b = link.b
y = F.deconvolution_nd(
x, W, b, outsize=self.outsize,
stride=self.stride, pad=self.pad,
groups=self.groups)
return y.array,
class TestDeconvolutionNDNoInitialBias(unittest.TestCase):
def test_no_initial_bias(self):
ndim = 3
ksize = 3
link = deconvolution_nd.DeconvolutionND(
ndim, 3, 2, ksize, nobias=True)
self.assertIsNone(link.b)
class TestDeconvolutionNDWrappers(unittest.TestCase):
def _get_data(self, ndim):
in_channels = 3
out_channels = 2
dtype = numpy.float32
x_shape = (2, in_channels) + (3,) * ndim
x = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
return in_channels, out_channels, x
def test_deconv1d(self):
in_c, out_c, x = self._get_data(1)
link_nd = deconvolution_nd.DeconvolutionND(
1, in_c, out_c, 2, initialW=1)
link_1d = deconvolution_nd.Deconvolution1D(
in_c, out_c, 2, initialW=1)
testing.assert_allclose(link_nd(x).data, link_1d(x).data)
def test_deconv3d(self):
in_c, out_c, x = self._get_data(3)
link_nd = deconvolution_nd.DeconvolutionND(
3, in_c, out_c, 2, initialW=1)
link_3d = deconvolution_nd.Deconvolution3D(
in_c, out_c, 2, initialW=1)
testing.assert_allclose(link_nd(x).data, link_3d(x).data)
testing.run_module(__name__, __file__)
| 5,640
| 32.182353
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_mlp_convolution_2d.py
|
import unittest
import mock
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import links
from chainer import testing
from chainer.testing import attr
@testing.parameterize(
{'use_cudnn': 'always'},
{'use_cudnn': 'never'},
)
class TestMLPConvolution2D(unittest.TestCase):
def setUp(self):
self.mlp = links.MLPConvolution2D(
3, (96, 96, 96), 11, activation=functions.sigmoid)
self.x = numpy.zeros((10, 3, 20, 20), dtype=numpy.float32)
def test_init(self):
self.assertIs(self.mlp.activation, functions.sigmoid)
self.assertEqual(len(self.mlp), 3)
for i, conv in enumerate(self.mlp):
self.assertIsInstance(conv, links.Convolution2D)
if i == 0:
self.assertEqual(conv.W.data.shape, (96, 3, 11, 11))
else:
self.assertEqual(conv.W.data.shape, (96, 96, 1, 1))
def check_call(self, x_data):
with chainer.using_config('use_cudnn', self.use_cudnn):
x = chainer.Variable(x_data)
actual = self.mlp(x)
act = functions.sigmoid
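            # MLPConvolution2D applies the activation between stacked
            # convolutions but not after the last one.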
expect = self.mlp[2](act(self.mlp[1](act(self.mlp[0](x)))))
numpy.testing.assert_array_equal(
cuda.to_cpu(expect.data), cuda.to_cpu(actual.data))
def test_call_cpu(self):
self.check_call(self.x)
@attr.gpu
def test_call_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.mlp.to_gpu()
self.check_call(cuda.to_gpu(self.x))
@testing.parameterize(
{'use_cudnn': 'always'},
{'use_cudnn': 'never'},
)
@attr.cudnn
class TestMLPConvolution2DCudnnCall(unittest.TestCase):
def setUp(self):
self.mlp = links.MLPConvolution2D(
3, (96, 96, 96), 11, activation=functions.sigmoid)
with testing.assert_warns(DeprecationWarning):
self.mlp.to_gpu()
self.x = cuda.cupy.zeros((10, 3, 20, 20), dtype=numpy.float32)
self.gy = cuda.cupy.zeros((10, 96, 10, 10), dtype=numpy.float32)
def forward(self):
x = chainer.Variable(self.x)
return self.mlp(x)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with mock.patch('cupy.cudnn.convolution_forward') as func:
self.forward()
self.assertEqual(func.called,
chainer.should_use_cudnn('>=auto'))
    def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
patch = 'cupy.cudnn.convolution_backward_data'
with mock.patch(patch) as func:
y.backward()
self.assertEqual(func.called,
chainer.should_use_cudnn('>=auto'))
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'never'],
'mlpconv_args': [
((None, (96, 96, 96), 11), {'activation': functions.sigmoid}),
(((96, 96, 96), 11), {'activation': functions.sigmoid})
]
}))
class TestMLPConvolution2DShapePlaceholder(unittest.TestCase):
def setUp(self):
args, kwargs = self.mlpconv_args
self.mlp = links.MLPConvolution2D(*args, **kwargs)
self.x = numpy.zeros((10, 3, 20, 20), dtype=numpy.float32)
def test_init(self):
self.assertIs(self.mlp.activation, functions.sigmoid)
self.assertEqual(len(self.mlp), 3)
def check_call(self, x_data):
with chainer.using_config('use_cudnn', self.use_cudnn):
x = chainer.Variable(x_data)
actual = self.mlp(x)
act = functions.sigmoid
expect = self.mlp[2](act(self.mlp[1](act(self.mlp[0](x)))))
numpy.testing.assert_array_equal(
cuda.to_cpu(expect.data), cuda.to_cpu(actual.data))
for i, conv in enumerate(self.mlp):
self.assertIsInstance(conv, links.Convolution2D)
if i == 0:
self.assertEqual(conv.W.data.shape, (96, 3, 11, 11))
else:
self.assertEqual(conv.W.data.shape, (96, 96, 1, 1))
def test_call_cpu(self):
self.check_call(self.x)
@attr.gpu
def test_call_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.mlp.to_gpu()
self.check_call(cuda.to_gpu(self.x))
class TestInitArgumentForv2(unittest.TestCase):
in_channels = 10
out_channels = (15, 20)
ksize = 3
stride = 1
pad = 0
def test_valid_instantiation_ksize_is_not_none(self):
l = links.MLPConvolution2D(
self.in_channels, self.out_channels, self.ksize, self.stride,
self.pad, functions.relu, conv_init=None, bias_init=None)
self.assertEqual(len(l), 2)
self.assertEqual(l[0].W.shape,
(self.out_channels[0], self.in_channels,
self.ksize, self.ksize))
self.assertEqual(l[1].W.shape,
(self.out_channels[1], self.out_channels[0], 1, 1))
def test_valid_instantiation_ksize_is_none(self):
l = links.MLPConvolution2D(self.out_channels, self.ksize, None,
self.stride, self.pad, functions.relu,
conv_init=None, bias_init=None)
x = numpy.random.uniform(
-1, 1, (10, self.in_channels, 10, 10)).astype(numpy.float32)
        l(x)  # initialize the convolution weight tensors via a forward pass
self.assertEqual(len(l), 2)
self.assertEqual(l[0].W.shape,
(self.out_channels[0], self.in_channels,
self.ksize, self.ksize))
self.assertEqual(l[1].W.shape,
(self.out_channels[1], self.out_channels[0], 1, 1))
def test_valid_instantiation_in_channels_is_omitted(self):
l = links.MLPConvolution2D(
self.out_channels, self.ksize, stride=self.stride, pad=self.pad,
activation=functions.relu, conv_init=None, bias_init=None)
x = numpy.random.uniform(
-1, 1, (10, self.in_channels, 10, 10)).astype(numpy.float32)
        l(x)  # initialize the convolution weight tensors via a forward pass
self.assertEqual(len(l), 2)
self.assertEqual(l[0].W.shape,
(self.out_channels[0], self.in_channels,
self.ksize, self.ksize))
self.assertEqual(l[1].W.shape,
(self.out_channels[1], self.out_channels[0], 1, 1))
def test_forbid_wscale_as_a_positional_argument(self):
with self.assertRaises(TypeError):
# 7th positional argument was wscale in v1
links.MLPConvolution2D(self.in_channels, self.out_channels, None,
self.stride, self.pad, functions.relu, 1)
def test_forbid_wscale_as_a_keyword_argument(self):
with self.assertRaises(ValueError):
links.MLPConvolution2D(
self.in_channels, self.out_channels, wscale=1)
testing.run_module(__name__, __file__)
| 7,175
| 35.426396
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_deformable_convolution_2d.py
|
import unittest
import numpy
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer import utils
@testing.parameterize(*testing.product({
'nobias': [True, False],
'initialization': ['explicit', 'placeholder']
}))
class TestDeformableConvolution2D(unittest.TestCase):
def setUp(self):
in_channels = 3
out_channels = 2
batch_size = 2
h = 9
w = 9
ksize = 3
stride = 2
pad = 1
out_h = utils.conv.get_conv_outsize(h, ksize, stride, pad)
out_w = utils.conv.get_conv_outsize(w, ksize, stride, pad)
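        # (9 + 2 * 1 - 3) // 2 + 1 = 5, so the output feature map is 5x5.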
self.x = numpy.random.uniform(
-1, 1, (batch_size, in_channels, h, w)).astype(numpy.float32)
self.gy = numpy.random.uniform(
-1, 1,
(batch_size, out_channels, out_h, out_w)).astype(numpy.float32)
if self.initialization == 'explicit':
pass
elif self.initialization == 'placeholder':
in_channels = None
self.link = links.DeformableConvolution2D(
in_channels, out_channels, ksize, stride=stride, pad=pad,
offset_nobias=self.nobias, deform_nobias=self.nobias)
def check_backward(self, x_data, y_grad):
if self.nobias:
            params = (self.link.deform_conv.W,)
else:
params = (self.link.deform_conv.W, self.link.deform_conv.b)
gradient_check.check_backward(
self.link, x_data,
y_grad, params,
eps=2 ** -3, atol=1e-3, rtol=1e-3)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x),
cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| 2,074
| 27.819444
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_scale.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(
{'learn_W': True, 'bias_term': False, 'bias_shape': None},
{'learn_W': True, 'bias_term': True, 'bias_shape': None},
{'learn_W': False, 'bias_term': False, 'bias_shape': None},
{'learn_W': False, 'bias_term': True, 'bias_shape': (2,)}
)
class TestScale(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
        self.W = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)
        self.b = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)
self.y_expected = numpy.copy(self.x)
for i, j, k in numpy.ndindex(self.y_expected.shape):
self.y_expected[i, j, k] *= self.W[j]
if self.bias_term:
self.y_expected[i, j, k] += self.b[j]
self.gy = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
bias_term = self.bias_term
bias_shape = self.bias_shape
axis = 1
if self.learn_W:
self.link = links.Scale(
axis, self.W.shape, bias_term, bias_shape)
self.link.W.data = self.W
if bias_term:
self.link.bias.b.data = self.b
else:
self.link = links.Scale(
axis, None, bias_term, bias_shape)
if bias_term:
self.link.bias.b.data = self.b
self.link.cleargrads()
def test_attribute_presence(self):
self.assertEqual(self.learn_W, hasattr(self.link, 'W'))
self.assertEqual(self.bias_term, hasattr(self.link, 'bias'))
def check_forward(self, x_data, W_data, y_expected):
x = chainer.Variable(x_data)
if W_data is None:
y = self.link(x)
testing.assert_allclose(y_expected, y.data)
else:
W = chainer.Variable(W_data)
y = self.link(x, W)
testing.assert_allclose(y_expected, y.data)
def test_forward_cpu(self):
if self.learn_W:
W = None
else:
W = self.W
self.check_forward(self.x, W, self.y_expected)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
if self.learn_W:
W = None
else:
W = cuda.to_gpu(self.W)
self.check_forward(x, W, self.y_expected)
def check_backward(self, x_data, W_data, y_grad):
if W_data is None:
params = [self.link.W]
gradient_check.check_backward(
self.link, x_data, y_grad, params, atol=1e-2)
else:
gradient_check.check_backward(
self.link, (x_data, W_data), y_grad, atol=1e-2)
@condition.retry(3)
def test_backward_cpu(self):
if self.learn_W:
W = None
else:
W = self.W
self.check_backward(self.x, W, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
if self.learn_W:
W = None
else:
W = cuda.to_gpu(self.W)
gy = cuda.to_gpu(self.gy)
self.check_backward(x, W, gy)
class TestScaleInvalidArgc(unittest.TestCase):
def setUp(self):
x_data = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
        W_data = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)
self.axis = 1
self.x = chainer.Variable(x_data)
self.W = chainer.Variable(W_data)
def test_scale_invalid_argc1(self):
func = links.Scale(self.axis, self.W.data.shape)
with chainer.using_config('debug', True):
with self.assertRaises(AssertionError):
func(self.x, self.W)
def test_scale_invalid_argc2(self):
func = links.Scale(self.axis, None)
with chainer.using_config('debug', True):
with self.assertRaises(AssertionError):
func(self.x)
class TestScaleNoBiasShape(unittest.TestCase):
def test_scale_no_bias_shape(self):
axis = 1
with self.assertRaises(ValueError):
links.Scale(axis, None, True, None)
testing.run_module(__name__, __file__)
| 4,561
| 30.902098
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_bilinear.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
def _check_forward(e1, e2, f, y_expect):
e1 = chainer.Variable(e1)
e2 = chainer.Variable(e2)
y = f(e1, e2)
testing.assert_allclose(y_expect, y.data)
def _check_backward(e1, e2, y_grad, link, bias):
params = [link.W]
if bias:
params.append(link.b)
gradient_check.check_backward(
link, (e1, e2), y_grad, params, eps=1e-2, rtol=1e-3)
def _batch_to_gpu(*xs):
return tuple(cuda.to_gpu(x) for x in xs)
def _uniform(*shape):
return numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
def _as_mat(x):
return x.reshape(len(x), -1)
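# A minimal sketch (not part of the original tests) of the bilinear form
# the expected outputs below are built from:
# y[i, l] = sum_{j, k} e1[i, j] * e2[i, k] * W[j, k, l], plus the linear
# terms e1.V1 and e2.V2 and the bias b. Shapes here are illustrative.
def _bilinear_form_sketch():
    e1 = _uniform(5, 2)
    e2 = _uniform(5, 3)
    W = _uniform(2, 3, 4)
    y = numpy.einsum('ij,ik,jkl->il', e1, e2, W)
    assert y.shape == (5, 4)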
class TestBilinear(unittest.TestCase):
in_shape = (3, 4)
out_size = 4
batch_size = 10
def setUp(self):
self.f = links.Bilinear(
self.in_shape[0], self.in_shape[1], self.out_size)
self.f.W.data[...] = _uniform(*self.f.W.data.shape)
self.f.V1.data[...] = _uniform(*self.f.V1.data.shape)
self.f.V2.data[...] = _uniform(*self.f.V2.data.shape)
self.f.b.data[...] = _uniform(*self.f.b.data.shape)
self.f.cleargrads()
self.W = self.f.W.data.copy()
self.V1 = self.f.V1.data.copy()
self.V2 = self.f.V2.data.copy()
self.b = self.f.b.data.copy()
self.e1 = _uniform(self.batch_size, self.in_shape[0])
self.e2 = _uniform(self.batch_size, self.in_shape[1])
self.gy = _uniform(self.batch_size, self.out_size)
self.y = (
numpy.einsum('ij,ik,jkl->il', self.e1, self.e2, self.W) +
self.e1.dot(self.V1) + self.e2.dot(self.V2) + self.b)
@condition.retry(3)
def test_forward_cpu(self):
_check_forward(self.e1, self.e2, self.f, self.y)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.f.to_gpu()
_check_forward(cuda.to_gpu(self.e1),
cuda.to_gpu(self.e2),
self.f, self.y)
@condition.retry(3)
def test_backward_cpu(self):
_check_backward(self.e1, self.e2, self.gy, self.f, True)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.f.to_gpu()
_check_backward(cuda.to_gpu(self.e1),
cuda.to_gpu(self.e2),
cuda.to_gpu(self.gy),
self.f, True)
class TestBilinear2(TestBilinear):
def setUp(self):
super(TestBilinear2, self).setUp()
assert self.in_shape[1] % 2 == 0
self.e1 = _uniform(self.batch_size, 1, self.in_shape[0])
self.e2 = _uniform(self.batch_size, self.in_shape[1] // 2, 2)
self.gy = _uniform(self.batch_size, self.out_size)
e1 = _as_mat(self.e1)
e2 = _as_mat(self.e2)
self.y = (
numpy.einsum('ij,ik,jkl->il', e1, e2, self.W) +
e1.dot(self.V1) + e2.dot(self.V2) + self.b)
class TestBilinear3(TestBilinear):
out_size = 1
class TestBilinear4(TestBilinear):
in_shape = (1, 2)
class TestBilinear5(TestBilinear):
in_shape = (2, 1)
class TestBilinear6(TestBilinear):
in_shape = (1, 1)
class TestBilinear7(TestBilinear):
in_shape = (1, 2)
out_size = 1
class TestBilinear8(TestBilinear):
in_shape = (2, 1)
out_size = 1
class TestBilinear9(TestBilinear):
in_shape = (1, 1)
out_size = 1
class TestBilinearWOBias(TestBilinear):
def setUp(self):
self.f = links.Bilinear(
self.in_shape[0], self.in_shape[1], self.out_size, True)
W = self.f.W.data
W[...] = numpy.random.uniform(-1, 1, W.shape)
self.f.cleargrads()
self.W = W.copy()
self.e1 = _uniform(self.batch_size, self.in_shape[0])
self.e2 = _uniform(self.batch_size, self.in_shape[1])
self.gy = _uniform(self.batch_size, self.out_size)
self.y = numpy.einsum('ij,ik,jkl->il', self.e1, self.e2, self.W)
@condition.retry(3)
def test_backward_cpu(self):
_check_backward(self.e1, self.e2, self.gy, self.f, False)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.f.to_gpu()
_check_backward(cuda.to_gpu(self.e1), cuda.to_gpu(self.e2),
cuda.to_gpu(self.gy), self.f, False)
class TestBilinearWOBias2(TestBilinearWOBias):
def setUp(self):
super(TestBilinearWOBias2, self).setUp()
assert self.in_shape[1] % 2 == 0
self.e1 = _uniform(self.batch_size, 1, self.in_shape[0])
self.e2 = _uniform(self.batch_size, 2, self.in_shape[1] // 2)
self.gy = _uniform(self.batch_size, self.out_size)
self.y = numpy.einsum(
'ij,ik,jkl->il', _as_mat(self.e1), _as_mat(self.e2), self.W)
class TestBilinearWOBias3(TestBilinearWOBias):
out_size = 1
class TestBilinearWOBias4(TestBilinearWOBias):
in_shape = (1, 2)
class TestBilinearWOBias5(TestBilinearWOBias):
in_shape = (2, 1)
class TestBilinearWOBias6(TestBilinearWOBias):
in_shape = (1, 1)
class TestBilinearWOBias7(TestBilinearWOBias):
in_shape = (1, 2)
out_size = 1
class TestBilinearWOBias8(TestBilinearWOBias):
in_shape = (2, 1)
out_size = 1
class TestBilinearWOBias9(TestBilinearWOBias):
in_shape = (1, 1)
out_size = 1
class InitByInitialParameter(unittest.TestCase):
in_shape = (2, 3)
out_size = 4
batch_size = 10
def setUp(self):
self.W = _uniform(self.in_shape[0], self.in_shape[1], self.out_size)
self.V1 = _uniform(self.in_shape[0], self.out_size)
self.V2 = _uniform(self.in_shape[1], self.out_size)
self.b = _uniform(self.out_size,)
class NormalInitialParameter(InitByInitialParameter):
def check_normal(self, initialW, initial_bias, nobias):
links.Bilinear(
self.in_shape[0], self.in_shape[1], self.out_size, nobias,
initialW, initial_bias)
def test_normal_cpu_bias(self):
self.check_normal(self.W, (self.V1, self.V2, self.b), False)
class InvalidInitialParameter(InitByInitialParameter):
def setUp(self):
super(InvalidInitialParameter, self).setUp()
self.invalidW = _uniform(self.in_shape[0] + 1, self.in_shape[1],
self.out_size)
self.invalidV1 = _uniform(self.in_shape[0] + 1, self.out_size)
self.invalidV2 = _uniform(self.in_shape[1] + 1, self.out_size)
self.invalidb = _uniform(self.out_size + 1,)
def check_invalid(self, initialW, initial_bias, nobias):
with self.assertRaises(AssertionError):
links.Bilinear(
self.in_shape[0], self.in_shape[1], self.out_size, nobias,
initialW, initial_bias)
def test_invalidW_cpu(self):
self.check_invalid(self.invalidW, (self.V1, self.V2, self.b), False)
def test_invalidW_cpu2(self):
self.check_invalid(self.invalidW, None, True)
def test_invalidV1_cpu(self):
self.check_invalid(self.W, (self.invalidV1, self.V2, self.b), False)
def test_invalidV2_cpu(self):
self.check_invalid(self.W, (self.V1, self.invalidV2, self.b), False)
def test_invalidb_cpu(self):
self.check_invalid(self.W, (self.V1, self.V2, self.invalidb), False)
testing.run_module(__name__, __file__)
| 7,655
| 25.129693
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_depthwise_convolution_2d.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*testing.product({
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
'nobias': [True, False],
}))
class TestDepthwiseConvolution2D(unittest.TestCase):
def setUp(self):
self.link = links.DepthwiseConvolution2D(
3, 2, 3, stride=2, pad=1,
initialW=chainer.initializers.Normal(1, self.W_dtype),
initial_bias=chainer.initializers.Normal(1, self.x_dtype))
self.link.cleargrads()
self.x = numpy.random.uniform(-1, 1,
(2, 3, 4, 3)).astype(self.x_dtype)
self.gy = numpy.random.uniform(-1, 1,
(2, 6, 2, 2)).astype(self.x_dtype)
self.check_backward_options = {}
if self.x_dtype == numpy.float16 or self.W_dtype == numpy.float16:
self.check_backward_options = {'atol': 3e-2, 'rtol': 5e-2}
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.link, x_data, y_grad, (self.link.W, self.link.b), eps=2 ** -3,
**self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestDepthWiseConvolution2DParameterShapePlaceholder(unittest.TestCase):
def setUp(self):
in_channels = None
self.link = links.DepthwiseConvolution2D(in_channels, 2, 3,
stride=2, pad=1)
self.x = numpy.random.uniform(-1, 1,
(2, 3, 4, 3)).astype(numpy.float32)
self.link(chainer.Variable(self.x))
b = self.link.b.data
b[...] = numpy.random.uniform(-1, 1, b.shape)
self.link.cleargrads()
self.gy = numpy.random.uniform(-1, 1,
(2, 6, 2, 2)).astype(numpy.float32)
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.link, x_data, y_grad, (self.link.W, self.link.b), eps=1e-2)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| 2,938
| 33.576471
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_zoneoutlstm.py
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
def _sigmoid(x):
xp = backend.get_array_module(x)
half = x.dtype.type(0.5)
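    # sigmoid(x) = 0.5 * tanh(x / 2) + 0.5; going through tanh works on
    # both NumPy and CuPy arrays and saturates gracefully for large |x|.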
return xp.tanh(x * half) * half + half
def _zoneoutlstm(func, c, h, x, c_creator, h_creator):
device = backend.get_device_from_array(x)
with chainer.using_device(device):
xp = device.xp
lstm_in = x.dot(func.upward.W.data.T)
lstm_in += h.dot(func.lateral.W.data.T)
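        # The fused projection packs the four LSTM gates interleaved along
        # the feature axis in the order (a, i, f, o); reshape to
        # (batch, out_size, 4) and split on the last axis to separate them.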
lstm_in = xp.reshape(lstm_in, (len(lstm_in),
lstm_in.shape[1] // 4,
4))
a, i, f, o = xp.split(lstm_in, 4, 2)
a = xp.reshape(a, (len(a), a.shape[1]))
i = xp.reshape(i, (len(i), i.shape[1]))
f = xp.reshape(f, (len(f), f.shape[1]))
o = xp.reshape(o, (len(o), o.shape[1]))
c_tmp = xp.tanh(a) * _sigmoid(i) + _sigmoid(f) * c
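        # Zoneout: per unit, flag_h keeps the previous state and flag_x
        # takes the freshly computed one; the masks are read back from the
        # creator nodes recorded during the link's forward pass.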
c_next = c * c_creator.flag_h + c_tmp * c_creator.flag_x
h_next = h * h_creator.flag_h + \
(_sigmoid(o) * xp.tanh(c_tmp)) * h_creator.flag_x
return c_next, h_next
@testing.parameterize(
{'in_size': 10, 'out_size': 10, 'c_ratio': 0.5, 'h_ratio': 0.25},
{'in_size': 10, 'out_size': 40, 'c_ratio': 0.25, 'h_ratio': 0.5},
{'in_size': 10, 'out_size': 10, 'c_ratio': 0.3, 'h_ratio': 0.3},
{'in_size': 10, 'out_size': 10, 'c_ratio': 1.0, 'h_ratio': 1.0},
{'in_size': 10, 'out_size': 40, 'c_ratio': 0.0, 'h_ratio': 0.0},
)
class TestZoneoutlstm(unittest.TestCase):
def setUp(self):
self.link = links.StatefulZoneoutLSTM(self.in_size, self.out_size,
c_ratio=self.c_ratio,
h_ratio=self.h_ratio)
upward = self.link.upward.W.data
upward[...] = numpy.random.uniform(-1, 1, upward.shape)
lateral = self.link.lateral.W.data
lateral[...] = numpy.random.uniform(-1, 1, lateral.shape)
c_shape = (4, self.out_size)
h_shape = (4, self.out_size)
x_shape = (4, self.in_size)
gy_shape = (4, self.out_size)
self.c = numpy.zeros(c_shape).astype(numpy.float32)
self.h = numpy.zeros(h_shape).astype(numpy.float32)
self.x = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(numpy.float32)
def _forward(self, link, x):
return link(x)
def check_forward(self, c_data, h_data, x_data):
x = chainer.Variable(x_data)
h1 = self.link(x)
c1 = self.link.c
c1_expect, h1_expect = _zoneoutlstm(self.link, c_data, h_data,
x_data, c1.creator, h1.creator)
testing.assert_allclose(h1.data, h1_expect)
testing.assert_allclose(self.link.c.data, c1_expect)
testing.assert_allclose(self.link.h.data, h1_expect)
h2 = self.link(x)
c2 = self.link.c
c2_expect, h2_expect = _zoneoutlstm(self.link, c1_expect, h1_expect,
x_data, c2.creator, h2.creator)
testing.assert_allclose(h2.data, h2_expect)
testing.assert_allclose(self.link.c.data, c2_expect)
testing.assert_allclose(self.link.h.data, h2_expect)
def test_forward_cpu(self):
self.check_forward(self.c, self.h, self.x)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.c),
cuda.to_gpu(self.h),
cuda.to_gpu(self.x))
@attr.multi_gpu(2)
def test_forward_gpu_multi(self):
with cuda.get_device_from_id(0):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
c = cuda.to_gpu(self.c)
h = cuda.to_gpu(self.h)
x = cuda.to_gpu(self.x)
with cuda.get_device_from_id(1):
self.check_forward(c, h, x)
def check_backward(self, c_data, h_data, x_data, y_grad):
x = chainer.Variable(x_data)
y = self._forward(self.link, x)
c = self.link.c
d = {'c_creator': c.creator, 'y_creator': y.creator}
y.grad = y_grad
y.backward()
def f():
c_creator = d['c_creator']
y_creator = d['y_creator']
c, y = _zoneoutlstm(self.link, c_data, h_data,
x_data, c_creator, y_creator)
return y,
gx, = gradient_check.numerical_grad(f, (x.data,), (y_grad,))
testing.assert_allclose(gx, x.grad, atol=1e-3)
def test_backward_cpu(self):
self.check_backward(self.c, self.h, self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.c),
cuda.to_gpu(self.h),
cuda.to_gpu(self.x),
cuda.to_gpu(self.gy))
class TestZoneoutState(unittest.TestCase):
def setUp(self):
in_size, out_size = 10, 8
self.link = links.StatefulZoneoutLSTM(in_size, out_size)
def check_reset_state(self):
self.link.reset_state()
self.assertIsNone(self.link.c)
self.assertIsNone(self.link.h)
def test_reset_state_cpu(self):
self.check_reset_state()
@attr.gpu
def test_reset_state_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_reset_state()
class TestZoneoutToCPUToGPU(unittest.TestCase):
def setUp(self):
in_size, out_size = 10, 8
self.link = links.StatefulZoneoutLSTM(in_size, out_size)
self.c = chainer.Variable(
numpy.random.uniform(-1, 1, (1, out_size)).astype(numpy.float32))
self.h = chainer.Variable(
numpy.random.uniform(-1, 1, (1, out_size)).astype(numpy.float32))
def check_to_cpu(self, c, h):
self.link.c = c
self.link.h = h
with testing.assert_warns(DeprecationWarning):
self.link.to_cpu()
self.assertIs(self.link.xp, numpy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
with testing.assert_warns(DeprecationWarning):
self.link.to_cpu()
self.assertIs(self.link.xp, numpy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
def test_to_cpu_cpu(self):
self.check_to_cpu(self.c, self.h)
@attr.gpu
def test_to_cpu_gpu(self):
self.c.to_gpu()
self.h.to_gpu()
self.check_to_cpu(self.c, self.h)
def check_to_cpu_to_gpu(self, c, h):
self.link.c = c
self.link.h = h
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.assertIs(self.link.xp, cuda.cupy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.assertIs(self.link.xp, cuda.cupy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
with testing.assert_warns(DeprecationWarning):
self.link.to_cpu()
self.assertIs(self.link.xp, numpy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.assertIs(self.link.xp, cuda.cupy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
@attr.gpu
def test_to_cpu_to_gpu_cpu(self):
self.check_to_cpu_to_gpu(self.c, self.h)
@attr.gpu
def test_to_cpu_to_gpu_gpu(self):
self.c.to_gpu()
self.h.to_gpu()
self.check_to_cpu_to_gpu(self.c, self.h)
testing.run_module(__name__, __file__)
| 8,509
| 35.367521
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_dilated_convolution_2d.py
|
import unittest
import numpy
import six.moves.cPickle as pickle
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.utils import conv
class TestDilatedConvolution2D(unittest.TestCase):
def setUp(self):
self.link = links.DilatedConvolution2D(
3, 2, 3, stride=2, pad=2, dilate=2)
b = self.link.b.data
b[...] = numpy.random.uniform(-1, 1, b.shape)
self.link.cleargrads()
self.x = numpy.random.uniform(-1, 1,
(2, 3, 4, 3)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1,
(2, 2, 2, 2)).astype(numpy.float32)
@attr.gpu
def test_im2col_consistency(self):
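        # dy=dx=2 makes im2col sample taps two pixels apart (dilation 2);
        # the CPU and GPU paths should agree bit-exactly (atol=0, rtol=0).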
col_cpu = conv.im2col_cpu(self.x, 3, 3, 2, 2, 2, 2, dy=2, dx=2)
col_gpu = conv.im2col_gpu(
cuda.to_gpu(self.x), 3, 3, 2, 2, 2, 2, dy=2, dx=2)
testing.assert_allclose(col_cpu, col_gpu.get(), atol=0, rtol=0)
@attr.gpu
def test_col2im_consistency(self):
col = conv.im2col_cpu(self.x, 3, 3, 2, 2, 2, 2, dy=2, dx=2)
h, w = self.x.shape[2:]
im_cpu = conv.col2im_cpu(col, 2, 2, 2, 2, h, w, dy=2, dx=2)
im_gpu = conv.col2im_gpu(
cuda.to_gpu(col), 2, 2, 2, 2, h, w, dy=2, dx=2)
testing.assert_allclose(im_cpu, im_gpu.get())
def check_forward_consistency(self):
x_cpu = chainer.Variable(self.x)
y_cpu = self.link(x_cpu)
self.assertEqual(y_cpu.data.dtype, numpy.float32)
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x_gpu = chainer.Variable(cuda.to_gpu(self.x))
y_gpu = self.link(x_gpu)
self.assertEqual(y_gpu.data.dtype, numpy.float32)
testing.assert_allclose(y_cpu.data, y_gpu.data.get())
@attr.gpu
def test_forward_consistency(self):
self.check_forward_consistency()
@attr.gpu
def test_forward_consistency_im2col(self):
with chainer.using_config('use_cudnn', 'never'):
self.check_forward_consistency()
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.link, x_data, y_grad, (self.link.W, self.link.b), eps=1e-2,
atol=5e-5, rtol=5e-4)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
def test_backward_gpu_im2col(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
with chainer.using_config('use_cudnn', 'never'):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_pickling(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x)
y_data1 = y.data
del x, y
pickled = pickle.dumps(self.link, -1)
del self.link
self.link = pickle.loads(pickled)
x = chainer.Variable(x_data)
y = self.link(x)
y_data2 = y.data
testing.assert_allclose(y_data1, y_data2, atol=0, rtol=0)
def test_pickling_cpu(self):
self.check_pickling(self.x)
@attr.gpu
def test_pickling_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_pickling(cuda.to_gpu(self.x))
@testing.parameterize(
{'args': (2, 3), 'kwargs': {'stride': 2, 'pad': 2, 'dilate': 2}},
{'args': (None, 2, 3), 'kwargs': {'stride': 2, 'pad': 2, 'dilate': 2}})
class TestDilatedConvolution2DParameterShapePlaceholder(unittest.TestCase):
def setUp(self):
self.link = links.DilatedConvolution2D(*self.args, **self.kwargs)
self.x = numpy.random.uniform(-1, 1,
(2, 3, 4, 3)).astype(numpy.float32)
self.link(chainer.Variable(self.x))
b = self.link.b.data
b[...] = numpy.random.uniform(-1, 1, b.shape)
self.link.cleargrads()
self.gy = numpy.random.uniform(-1, 1,
(2, 2, 2, 2)).astype(numpy.float32)
@attr.gpu
def test_im2col_consistency(self):
col_cpu = conv.im2col_cpu(self.x, 3, 3, 2, 2, 2, 2, dy=2, dx=2)
col_gpu = conv.im2col_gpu(
cuda.to_gpu(self.x), 3, 3, 2, 2, 2, 2, dy=2, dx=2)
testing.assert_allclose(col_cpu, col_gpu.get(), atol=0, rtol=0)
@attr.gpu
def test_col2im_consistency(self):
col = conv.im2col_cpu(self.x, 3, 3, 2, 2, 2, 2, dy=2, dx=2)
h, w = self.x.shape[2:]
im_cpu = conv.col2im_cpu(col, 2, 2, 2, 2, h, w, dy=2, dx=2)
im_gpu = conv.col2im_gpu(
cuda.to_gpu(col), 2, 2, 2, 2, h, w, dy=2, dx=2)
testing.assert_allclose(im_cpu, im_gpu.get())
def check_forward_consistency(self):
x_cpu = chainer.Variable(self.x)
y_cpu = self.link(x_cpu)
self.assertEqual(y_cpu.data.dtype, numpy.float32)
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x_gpu = chainer.Variable(cuda.to_gpu(self.x))
y_gpu = self.link(x_gpu)
self.assertEqual(y_gpu.data.dtype, numpy.float32)
testing.assert_allclose(y_cpu.data, y_gpu.data.get())
@attr.gpu
def test_forward_consistency(self):
self.check_forward_consistency()
@attr.gpu
def test_forward_consistency_im2col(self):
with chainer.using_config('use_cudnn', 'never'):
self.check_forward_consistency()
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.link, x_data, y_grad, (self.link.W, self.link.b), eps=1e-2,
atol=5e-5, rtol=5e-4)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
def test_backward_gpu_im2col(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
with chainer.using_config('use_cudnn', 'never'):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_pickling(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x)
y_data1 = y.data
del x, y
pickled = pickle.dumps(self.link, -1)
del self.link
self.link = pickle.loads(pickled)
x = chainer.Variable(x_data)
y = self.link(x)
y_data2 = y.data
testing.assert_allclose(y_data1, y_data2, atol=0, rtol=0)
def test_pickling_cpu(self):
self.check_pickling(self.x)
@attr.gpu
def test_pickling_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_pickling(cuda.to_gpu(self.x))
testing.run_module(__name__, __file__)
| 7,200
| 32.03211
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_highway.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
class TestHighway(unittest.TestCase):
in_out_size = 3
def setUp(self):
self.x = numpy.random.uniform(
-1, 1, (5, self.in_out_size)).astype(numpy.float32)
self.gy = numpy.random.uniform(
-1, 1, (5, self.in_out_size)).astype(numpy.float32)
self.link = links.Highway(
self.in_out_size, activate=functions.tanh)
Wh = self.link.plain.W.data
Wh[...] = numpy.random.uniform(-1, 1, Wh.shape)
bh = self.link.plain.b.data
bh[...] = numpy.random.uniform(-1, 1, bh.shape)
Wt = self.link.transform.W.data
Wt[...] = numpy.random.uniform(-1, 1, Wt.shape)
bt = self.link.transform.b.data
bt[...] = numpy.random.uniform(-1, 1, bt.shape)
self.link.cleargrads()
self.Wh = Wh.copy() # fixed on CPU
self.bh = bh.copy() # fixed on CPU
self.Wt = Wt.copy() # fixed on CPU
self.bt = bt.copy() # fixed on CPU
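        # Highway reference: y = H(x) * T(x) + x * (1 - T(x)), where
        # H(x) = tanh(x Wh^T + bh) is the plain path and
        # T(x) = sigmoid(x Wt^T + bt) is the transform gate.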
a = numpy.tanh(self.x.dot(Wh.T) + bh)
b = self.sigmoid(self.x.dot(Wt.T) + bt)
self.y = (a * b +
self.x * (numpy.ones_like(self.x) - b))
def sigmoid(self, x):
half = x.dtype.type(0.5)
return numpy.tanh(x * half) * half + half
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x)
self.assertEqual(y.data.dtype, numpy.float32)
testing.assert_allclose(self.y, y.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.link, x_data, y_grad,
(self.link.plain.W, self.link.plain.b,
self.link.transform.W, self.link.transform.b),
eps=1e-2, atol=3.2e-3, rtol=1e-2)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| 2,545
| 29.309524
| 70
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_embed_id.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(
{'x_data': [0, 1, 0], 'ignore_label': None},
{'x_data': [[0, 1, 0], [1, 0, 1]], 'ignore_label': None},
{'x_data': [0, 1, -1], 'ignore_label': -1},
{'x_data': [[0, 1, -1], [-1, 0, 1]], 'ignore_label': -1},
)
class TestEmbedID(unittest.TestCase):
def setUp(self):
        self.link = links.EmbedID(3, 2, ignore_label=self.ignore_label)
        self.link.cleargrads()
self.W = self.link.W.data.copy() # fixed on CPU
self.x = numpy.array(self.x_data, dtype=numpy.int32)
y_shape = self.x.shape + (2,)
self.gy = numpy.random.uniform(-1, 1, y_shape).astype(numpy.float32)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x)
self.assertEqual(y.data.dtype, numpy.float32)
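        # Rows whose id is -1 (the ignore_label cases) are expected to be
        # zero vectors; every other row copies the matching row of W.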
y_expect = numpy.empty_like(self.gy)
for i in numpy.ndindex(self.x.shape):
if self.x[i] == -1:
y_expect[i] = 0
else:
y_expect[i] = self.W[int(self.x[i])]
testing.assert_allclose(y_expect, y.data, atol=0, rtol=0)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
@attr.gpu
def test_forward_mixed_cpu_gpu_1(self):
# self.link is not sent to gpu
with self.assertRaises(TypeError):
self.check_forward(cuda.to_gpu(self.x))
@attr.gpu
def test_forward_mixed_cpu_gpu_2(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
with self.assertRaises(TypeError):
# self.x is not sent to gpu
self.check_forward(self.x)
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.link, x_data, y_grad, self.link.W, atol=1e-4, rtol=1e-3)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@testing.parameterize(
{'t_value': -1, 'valid': False, 'ignore_label': None},
{'t_value': 3, 'valid': False, 'ignore_label': None},
{'t_value': 0, 'valid': True, 'ignore_label': None},
{'t_value': -1, 'valid': True, 'ignore_label': -1},
{'t_value': 3, 'valid': False, 'ignore_label': -1},
{'t_value': 0, 'valid': True, 'ignore_label': -1},
)
class TestEmbedIDValueCheck(unittest.TestCase):
def setUp(self):
self.link = links.EmbedID(2, 2, ignore_label=self.ignore_label)
self.t = numpy.array([self.t_value], dtype=numpy.int32)
self.original_debug = chainer.is_debug()
chainer.set_debug(True)
def tearDown(self):
chainer.set_debug(self.original_debug)
def check_value_check(self, t_data):
t = chainer.Variable(t_data)
if self.valid:
            # Should not raise
self.link(t)
else:
with self.assertRaises(ValueError):
self.link(t)
def test_value_check_cpu(self):
self.check_value_check(self.t)
@attr.gpu
def test_value_check_gpu(self):
self.check_value_check(self.t)
class TestEmbedIDUnpickleOldFile(unittest.TestCase):
def test_old_unpickle(self):
embed = links.EmbedID(3, 4)
# To emulate an old pickled file
delattr(embed, 'ignore_label')
x = chainer.Variable(numpy.arange(2, dtype=numpy.int32))
y = embed(x)
self.assertEqual(y.data.shape, (2, 4))
class TestEmbedIDFromParams(unittest.TestCase):
def setUp(self):
self.in_size, self.out_size = 10, 5
def test_from_params(self):
link1 = links.EmbedID(self.in_size, self.out_size)
link2 = links.EmbedID.from_params(link1.W)
assert link2.W.shape == link1.W.shape
testing.run_module(__name__, __file__)
| 4,458
| 29.128378
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_linear.py
|
import os
import tempfile
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer.serializers import npz
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import type_check
@testing.parameterize(*testing.product({
'in_shape': [(3,), (3, 2, 2)],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestLinear(unittest.TestCase):
out_size = 2
def setUp(self):
in_size = numpy.prod(self.in_shape)
self.link = links.Linear(
in_size, self.out_size,
initialW=chainer.initializers.Normal(1, self.W_dtype),
initial_bias=chainer.initializers.Normal(1, self.x_dtype))
W = self.link.W.data
b = self.link.b.data
self.link.cleargrads()
x_shape = (4,) + self.in_shape
self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.x_dtype)
self.gy = numpy.random.uniform(
-1, 1, (4, self.out_size)).astype(self.x_dtype)
self.y = self.x.reshape(4, -1).dot(W.T) + b
self.check_forward_options = {}
self.check_backward_options = {}
if self.x_dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_backward_options = {'atol': 1e-2, 'rtol': 5e-2}
elif self.W_dtype == numpy.float16:
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x)
self.assertEqual(y.data.dtype, self.x_dtype)
testing.assert_allclose(self.y, y.data, **self.check_forward_options)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.link, x_data, y_grad, (self.link.W, self.link.b),
dtype='d', **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@testing.parameterize(*testing.product({
'input_variable': [True, False],
'linear_args': [(None, 2), (2,)],
}))
class TestLinearParameterShapePlaceholder(unittest.TestCase):
in_size = 3
in_shape = (in_size,)
def setUp(self):
self.link = links.Linear(*self.linear_args)
self.out_size = self.linear_args[-1]
temp_x = numpy.random.uniform(
-1, 1, (self.out_size, self.in_size)).astype(numpy.float32)
if self.input_variable:
self.link(chainer.Variable(temp_x))
else:
self.link(temp_x)
W = self.link.W.data
W[...] = numpy.random.uniform(-1, 1, W.shape)
b = self.link.b.data
b[...] = numpy.random.uniform(-1, 1, b.shape)
self.link.cleargrads()
x_shape = (4,) + self.in_shape
self.x = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
self.gy = numpy.random.uniform(
-1, 1, (4, self.out_size)).astype(numpy.float32)
self.y = self.x.reshape(4, -1).dot(W.T) + b
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = self.link(x)
self.assertEqual(y.data.dtype, numpy.float32)
testing.assert_allclose(self.y, y.data)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.link, x_data, y_grad, (self.link.W, self.link.b), eps=1e-2)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def test_serialization(self):
lin1 = links.Linear(self.out_size)
x = chainer.Variable(self.x)
# Must call the link to initialize weights.
lin1(x)
w1 = lin1.W.data
fd, temp_file_path = tempfile.mkstemp()
os.close(fd)
npz.save_npz(temp_file_path, lin1)
lin2 = links.Linear(self.out_size)
npz.load_npz(temp_file_path, lin2)
w2 = lin2.W.data
self.assertEqual((w1 == w2).all(), True)
class TestEmptyBatchInitialize(unittest.TestCase):
def setUp(self):
self.link = links.Linear(4)
self.x = numpy.random.uniform(-1, 1, (0, 3)).astype(numpy.float32)
def test_empty_batch_dim(self):
y = self.link(chainer.Variable(self.x))
assert y.shape == (0, 4)
class TestNBatchAxesInitialize(unittest.TestCase):
def setUp(self):
self.link = links.Linear(4)
self.x = numpy.random.uniform(-1, 1, (2, 5, 3)).astype(numpy.float32)
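    # n_batch_axes=2 treats the leading two axes (2, 5) as batch dimensions,
    # so the linear transform is applied to the last axis only.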
def test_init_n_batch_axes(self):
y = self.link(chainer.Variable(self.x), n_batch_axes=2)
assert y.shape == (2, 5, 4)
class TestInvalidLinear(unittest.TestCase):
def setUp(self):
self.link = links.Linear(3, 2)
self.x = numpy.random.uniform(-1, 1, (4, 1, 2)).astype(numpy.float32)
def test_invalid_size(self):
with self.assertRaises(type_check.InvalidType):
self.link(chainer.Variable(self.x))
@testing.parameterize(*testing.product({
'nobias': [True, False],
}))
class TestLinearFromParams(unittest.TestCase):
def setUp(self):
self.out_size = 10
self.in_size = 5
def test_from_params(self):
link1 = links.Linear(
self.in_size, self.out_size, nobias=self.nobias)
link2 = links.Linear.from_params(link1.W, link1.b, nobias=self.nobias)
assert link1.W.shape == link2.W.shape
assert (link2.b is None) == self.nobias
if not self.nobias:
assert link2.b.shape == link1.b.shape
testing.run_module(__name__, __file__)
| 6,766
| 30.474419
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_inceptionbn.py
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import links
from chainer import testing
from chainer.testing import attr
class TestInceptionBNBase(unittest.TestCase):
in_channels = 3
out1, proj3, out3, proj33, out33, proj_pool = 3, 2, 3, 2, 3, 3
pooltype = 'max'
stride = 1
batchsize = 10
insize = 10
def setup_data(self):
dtype = chainer.get_dtype()
self.x = numpy.random.uniform(
-1, 1, (10, self.in_channels, 5, 5)
).astype(dtype)
self.l = links.InceptionBN(
self.in_channels, self.out1, self.proj3, self.out3,
self.proj33, self.out33, self.pooltype, self.proj_pool,
self.stride)
def check_backward(self, x_data):
xp = backend.get_array_module(x_data)
x = chainer.Variable(x_data)
y = self.l(x)
y.grad = xp.random.randn(*y.data.shape).astype('f')
y.backward()
class TestInceptionBN(TestInceptionBNBase):
def setUp(self):
self.setup_data()
def test_backward_cpu(self):
self.check_backward(self.x)
@attr.gpu
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.l.to_gpu()
self.check_backward(cuda.to_gpu(self.x))
class TestInceptionBN2(TestInceptionBN):
pooltype = 'avg'
class TestInceptionBN3(TestInceptionBN):
out1 = 0
class TestInceptionBN4(TestInceptionBN):
out1 = 0
pooltype = 'avg'
class TestInceptionBN5(TestInceptionBN):
out1 = 0
proj_pool = None
class TestInceptionBN6(TestInceptionBN):
out1 = 0
pooltype = 'avg'
proj_pool = None
class TestInceptionBN7(TestInceptionBN):
out1 = 0
stride = 2
class TestInceptionBN8(TestInceptionBN):
out1 = 0
stride = 2
proj_pool = None
class TestInceptionBN9(TestInceptionBN):
out1 = 0
stride = 2
pooltype = 'avg'
class TestInceptionBN10(TestInceptionBN):
out1 = 0
stride = 2
pooltype = 'avg'
proj_pool = None
class TestInceptionBNInvalidCall(TestInceptionBNBase):
proj_pool = None
def test_invalid_architecture(self):
with self.assertRaises(AssertionError):
self.setup_data()
class TestInceptionBNInvalidCall2(TestInceptionBNInvalidCall):
pooltype = 'avg'
proj_pool = None
class TestInceptionBNInvalidCall3(TestInceptionBNInvalidCall):
stride = 2
class TestInceptionBNInvalidCall4(TestInceptionBNInvalidCall):
stride = 2
pooltype = 'avg'
class TestInceptionBNInvalidPoolType(TestInceptionBNBase):
pooltype = 'invalid_pooltype'
def test_invalid_pooltype(self):
with self.assertRaises(NotImplementedError):
self.setup_data()
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float16],
}))
class TestInceptionBnDtype(TestInceptionBNBase):
def setUp(self):
with chainer.using_config('dtype', self.dtype):
self.setup_data()
def test_dtype(self):
link = self.l
# Check the dtype of batch normalization layers.
assert link.proj3n.beta.dtype == self.dtype
assert link.conv3n.beta.dtype == self.dtype
assert link.proj33n.beta.dtype == self.dtype
assert link.conv33an.beta.dtype == self.dtype
assert link.conv33bn.beta.dtype == self.dtype
assert link.conv1n.beta.dtype == self.dtype
assert link.poolpn.beta.dtype == self.dtype
testing.run_module(__name__, __file__)
| 3,560
| 20.071006
| 67
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_local_convolution_2d.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*testing.product({
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
'nobias': [True, False],
}))
class TestLocalConvolution2D(unittest.TestCase):
def setUp(self):
self.link = links.LocalConvolution2D(
3, 2, in_size=4, ksize=3, stride=1,
initialW=chainer.initializers.Normal(1, self.W_dtype),
initial_bias=chainer.initializers.Normal(1, self.x_dtype))
self.link.cleargrads()
self.x = numpy.random.uniform(-1, 1,
(2, 3, 4, 4)).astype(self.x_dtype)
self.gy = numpy.random.uniform(-1, 1,
(2, 2, 2, 2)).astype(self.x_dtype)
self.check_backward_options = {}
if self.x_dtype == numpy.float16 or self.W_dtype == numpy.float16:
self.check_backward_options = {'atol': 3e-2, 'rtol': 5e-2}
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.link, x_data, y_grad, (self.link.W, self.link.b), eps=2 ** -3,
**self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestLocalConvolution2DParameterShapePlaceholder(unittest.TestCase):
def setUp(self):
in_channels = None
self.link = links.LocalConvolution2D(in_channels, 2, ksize=3,
stride=1)
self.x = numpy.random.uniform(-1, 1,
(2, 3, 4, 4)).astype(numpy.float32)
self.link(chainer.Variable(self.x))
b = self.link.b.data
b[...] = numpy.random.uniform(-1, 1, b.shape)
self.link.cleargrads()
self.gy = numpy.random.uniform(-1, 1,
(2, 2, 2, 2)).astype(numpy.float32)
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.link, x_data, y_grad, (self.link.W, self.link.b), eps=1e-2)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| 2,918
| 33.341176
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/connection_tests/test_deconvolution_2d.py
|
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import links as L
from chainer import testing
from chainer.testing import parameterize
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
@parameterize(
*testing.product({
'nobias': [True, False],
'dilate': [1, 2],
'groups': [1, 3],
'x_dtype': [numpy.float32],
'W_dtype': [numpy.float32],
})
)
@testing.inject_backend_tests(
['test_forward', 'test_backward'],
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestDeconvolution2D(testing.LinkTestCase):
def setUp(self):
self.in_channels = 3
self.out_channels = 6
self.ksize = 3
self.stride = 2
self.pad = 1
        if self.nobias:
            self.param_names = ('W',)
        else:
            self.param_names = ('W', 'b')
self.check_backward_options.update({'atol': 1e-3, 'rtol': 1e-2})
def before_test(self, test_name):
# cuDNN 5 and 5.1 results suffer from precision issues
using_old_cudnn = (self.backend_config.xp is cuda.cupy
and self.backend_config.use_cudnn == 'always'
and cuda.cuda.cudnn.getVersion() < 6000)
if using_old_cudnn:
self.check_backward_options.update({'atol': 3e-2, 'rtol': 5e-2})
def generate_inputs(self):
N = 2
h, w = 3, 2
x = numpy.random.uniform(
-1, 1, (N, self.in_channels, h, w)).astype(self.x_dtype)
return x,
def generate_params(self):
initialW = chainer.initializers.Normal(1, self.W_dtype)
initial_bias = chainer.initializers.Normal(1, self.x_dtype)
return initialW, initial_bias
def create_link(self, initializers):
initialW, initial_bias = initializers
if self.nobias:
link = L.Deconvolution2D(
self.in_channels, self.out_channels, self.ksize,
stride=self.stride, pad=self.pad, nobias=self.nobias,
dilate=self.dilate, groups=self.groups,
initialW=initialW)
else:
link = L.Deconvolution2D(
self.in_channels, self.out_channels, self.ksize,
stride=self.stride, pad=self.pad, nobias=self.nobias,
dilate=self.dilate, groups=self.groups,
initialW=initialW,
initial_bias=initial_bias)
return link
def forward_expected(self, link, inputs):
x, = inputs
W = link.W
if self.nobias:
y = F.deconvolution_2d(
x, W,
stride=self.stride, pad=self.pad,
dilate=self.dilate, groups=self.groups)
else:
b = link.b
y = F.deconvolution_2d(
x, W, b,
stride=self.stride, pad=self.pad,
dilate=self.dilate, groups=self.groups)
return y.array,
@parameterize(
*testing.product({
'nobias': [True, False],
'use_cudnn': ['always', 'never'],
'deconv_args': [((3, 2, 3), {}), ((2, 3), {}), ((None, 2, 3), {}),
((2, 3), {'stride': 2, 'pad': 1}),
((None, 2, 3, 2, 1), {})]
})
)
@testing.inject_backend_tests(
['test_forward', 'test_backward'],
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestDeconvolution2DParameterShapePlaceholder(testing.LinkTestCase):
def setUp(self):
if self.nobias:
self.param_names = ('W',)
else:
self.param_names = ('W', 'b')
self.check_backward_options.update({'atol': 1e-4, 'rtol': 1e-3})
def before_test(self, test_name):
# cuDNN 5 and 5.1 results suffer from precision issues
using_old_cudnn = (self.backend_config.xp is cuda.cupy
and self.backend_config.use_cudnn == 'always'
and cuda.cuda.cudnn.getVersion() < 6000)
if using_old_cudnn:
self.check_backward_options.update({'atol': 3e-2, 'rtol': 5e-2})
def generate_inputs(self):
N = 2
h, w = 3, 2
x = numpy.random.uniform(
-1, 1, (N, 3, h, w)).astype(numpy.float32)
return x,
def generate_params(self):
return []
def create_link(self, initializers):
args, kwargs = self.deconv_args
kwargs['nobias'] = self.nobias
link = L.Deconvolution2D(*args, **kwargs)
if not self.nobias:
link.b.data[...] = numpy.random.uniform(
-1, 1, link.b.data.shape).astype(numpy.float32)
return link
def forward_expected(self, link, inputs):
x, = inputs
y = link(x).array
return y,
testing.run_module(__name__, __file__)
| 5,624
| 29.080214
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/rnn_tests/test_link_n_step_lstm.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
def sigmoid(x):
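    # sigmoid(x) expressed through tanh: 0.5 * tanh(x / 2) + 0.5.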
return numpy.tanh(x * 0.5) * 0.5 + 0.5
@testing.parameterize(*testing.product({
'hidden_none': [True, False],
}))
class TestNStepLSTM(unittest.TestCase):
lengths = [3, 1, 2]
n_layers = 2
in_size = 3
out_size = 2
dropout = 0.0
def setUp(self):
shape = (self.n_layers, len(self.lengths), self.out_size)
if self.hidden_none:
self.h = self.c = numpy.zeros(shape, 'f')
else:
self.h = numpy.random.uniform(-1, 1, shape).astype('f')
self.c = numpy.random.uniform(-1, 1, shape).astype('f')
self.xs = [
numpy.random.uniform(-1, 1, (l, self.in_size)).astype('f')
for l in self.lengths]
self.gh = numpy.random.uniform(-1, 1, shape).astype('f')
self.gc = numpy.random.uniform(-1, 1, shape).astype('f')
self.gys = [
numpy.random.uniform(-1, 1, (l, self.out_size)).astype('f')
for l in self.lengths]
self.rnn = links.NStepLSTM(
self.n_layers, self.in_size, self.out_size, self.dropout)
for layer in self.rnn:
for p in layer.params():
p.array[...] = numpy.random.uniform(-1, 1, p.shape)
self.rnn.cleargrads()
def check_forward(self, h_data, c_data, xs_data):
if self.hidden_none:
h = c = None
else:
h = chainer.Variable(h_data)
c = chainer.Variable(c_data)
xs = [chainer.Variable(x) for x in xs_data]
hy, cy, ys = self.rnn(h, c, xs)
assert hy.shape == h_data.shape
assert cy.shape == c_data.shape
assert len(xs) == len(ys)
for x, y in zip(xs, ys):
assert len(x) == len(y)
assert y.shape[1] == self.out_size
with testing.assert_warns(DeprecationWarning):
self.rnn.to_cpu()
for batch, seq in enumerate(self.xs):
for layer in range(self.n_layers):
p = self.rnn[layer]
h_prev = self.h[layer, batch]
c_prev = self.c[layer, batch]
hs = []
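                # Reference LSTM step: w0..w3 act on the input and w4..w7
                # on the recurrent state, one pair per gate in the order
                # (input, forget, cell candidate, output).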
for x in seq:
i = sigmoid(
x.dot(p.w0.array.T) + h_prev.dot(p.w4.array.T) +
p.b0.array + p.b4.array)
f = sigmoid(
x.dot(p.w1.array.T) + h_prev.dot(p.w5.array.T) +
p.b1.array + p.b5.array)
c_bar = numpy.tanh(
x.dot(p.w2.array.T) + h_prev.dot(p.w6.array.T) +
p.b2.array + p.b6.array)
o = sigmoid(
x.dot(p.w3.array.T) + h_prev.dot(p.w7.array.T) +
p.b3.array + p.b7.array)
e_c = (f * c_prev + i * c_bar)
e_h = o * numpy.tanh(e_c)
h_prev = e_h
c_prev = e_c
hs.append(e_h)
seq = hs
testing.assert_allclose(hy.array[layer, batch], h_prev)
testing.assert_allclose(cy.array[layer, batch], c_prev)
for y, ey in zip(ys[batch].array, seq):
testing.assert_allclose(y, ey)
def test_forward_cpu_train(self):
with chainer.using_config('train', True):
self.check_forward(self.h, self.c, self.xs)
@attr.gpu
def test_forward_gpu_train(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'always'), \
chainer.using_config('train', True):
self.check_forward(
cuda.to_gpu(self.h),
cuda.to_gpu(self.c),
[cuda.to_gpu(x) for x in self.xs])
def test_forward_cpu_test(self):
with chainer.using_config('train', False):
self.check_forward(self.h, self.c, self.xs)
@attr.gpu
def test_forward_gpu_test(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'always'), \
chainer.using_config('train', False):
self.check_forward(
cuda.to_gpu(self.h),
cuda.to_gpu(self.c),
[cuda.to_gpu(x) for x in self.xs])
@attr.multi_gpu(2)
def test_forward_nonzero_gpu_test(self):
# Issue #5347
# to_gpu should work without setting the current device
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu(1)
with chainer.using_config('use_cudnn', 'always'), \
chainer.using_config('train', False):
self.check_forward(
cuda.to_gpu(self.h, 1),
cuda.to_gpu(self.c, 1),
[cuda.to_gpu(x, 1) for x in self.xs])
def check_multi_gpu_forward(self, train=True):
# See chainer/chainer#6262
        # NStepLSTM w/ cudnn & dropout should work on a non-current device
msg = None
rnn = self.rnn.copy('copy')
rnn.dropout = .5
with cuda.get_device_from_id(1):
if self.hidden_none:
h = None
else:
h = cuda.to_gpu(self.h)
c = cuda.to_gpu(self.c)
xs = [cuda.to_gpu(x) for x in self.xs]
with testing.assert_warns(DeprecationWarning):
rnn = rnn.to_gpu()
with cuda.get_device_from_id(0),\
chainer.using_config('train', train),\
chainer.using_config('use_cudnn', 'always'):
try:
rnn(h, c, xs)
except Exception as e:
msg = e
assert msg is None
@attr.cudnn
@attr.multi_gpu(2)
def test_multi_gpu_forward_training(self):
self.check_multi_gpu_forward(True)
@attr.cudnn
@attr.multi_gpu(2)
def test_multi_gpu_forward_test(self):
self.check_multi_gpu_forward(False)
def check_backward(
self, h_data, c_data, xs_data, gh_data, gc_data, gys_data):
def fun(*args):
if self.hidden_none:
h = c = None
xs = args
else:
h, c = args[:2]
xs = args[2:]
hy, cy, ys = self.rnn(h, c, xs)
return tuple([hy, cy] + list(ys))
params = []
for layer in self.rnn:
for p in layer.params():
params.append(p)
if self.hidden_none:
in_data = xs_data
else:
in_data = [h_data, c_data] + xs_data
gradient_check.check_backward(
fun, tuple(in_data),
tuple([gh_data, gc_data] + gys_data),
tuple(params), eps=1e-2, rtol=1e-3, atol=1e-3)
def test_backward_cpu(self):
self.check_backward(
self.h, self.c, self.xs, self.gh, self.gc, self.gys)
@attr.gpu
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'always'):
self.check_backward(
cuda.to_gpu(self.h),
cuda.to_gpu(self.c),
[cuda.to_gpu(x) for x in self.xs],
cuda.to_gpu(self.gh),
cuda.to_gpu(self.gc),
[cuda.to_gpu(gy) for gy in self.gys])
    def test_n_cells(self):
        assert self.rnn.n_cells == 2
@testing.parameterize(*testing.product({
'hidden_none': [True, False],
}))
class TestNStepBiLSTM(unittest.TestCase):
lengths = [3, 1, 2]
n_layers = 2
in_size = 3
out_size = 2
dropout = 0.0
def setUp(self):
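        # A bidirectional LSTM keeps separate forward and backward states
        # per layer, hence the leading axis of size n_layers * 2.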
shape = (self.n_layers * 2, len(self.lengths), self.out_size)
if self.hidden_none:
self.h = self.c = numpy.zeros(shape, 'f')
else:
self.h = numpy.random.uniform(-1, 1, shape).astype('f')
self.c = numpy.random.uniform(-1, 1, shape).astype('f')
self.xs = [
numpy.random.uniform(-1, 1, (l, self.in_size)).astype('f')
for l in self.lengths]
self.gh = numpy.random.uniform(-1, 1, shape).astype('f')
self.gc = numpy.random.uniform(-1, 1, shape).astype('f')
self.gys = [
numpy.random.uniform(-1, 1, (l, self.out_size * 2)).astype('f')
for l in self.lengths]
self.rnn = links.NStepBiLSTM(
self.n_layers, self.in_size, self.out_size, self.dropout)
for layer in self.rnn:
for p in layer.params():
p.array[...] = numpy.random.uniform(-1, 1, p.shape)
self.rnn.cleargrads()
def check_forward(self, h_data, c_data, xs_data):
if self.hidden_none:
h = c = None
else:
h = chainer.Variable(h_data)
c = chainer.Variable(c_data)
xs = [chainer.Variable(x) for x in xs_data]
hy, cy, ys = self.rnn(h, c, xs)
assert hy.shape == h_data.shape
assert cy.shape == c_data.shape
assert len(xs) == len(ys)
for x, y in zip(xs, ys):
assert len(x) == len(y)
assert y.shape[1] == self.out_size * 2
with testing.assert_warns(DeprecationWarning):
self.rnn.to_cpu()
for batch, seq in enumerate(self.xs):
for layer in range(self.n_layers):
# forward
di = 0
layer_idx = layer * 2 + di
p = self.rnn[layer_idx]
h_prev = self.h[layer_idx, batch]
c_prev = self.c[layer_idx, batch]
hs_f = []
for x in seq:
i = sigmoid(x.dot(p.w0.array.T) +
h_prev.dot(p.w4.array.T) +
p.b0.array + p.b4.array)
f = sigmoid(x.dot(p.w1.array.T) +
h_prev.dot(p.w5.array.T) +
p.b1.array + p.b5.array)
c_bar = numpy.tanh(x.dot(p.w2.array.T) +
h_prev.dot(p.w6.array.T) +
p.b2.array + p.b6.array)
o = sigmoid(
x.dot(p.w3.array.T) + h_prev.dot(p.w7.array.T) +
p.b3.array + p.b7.array)
e_c = (f * c_prev + i * c_bar)
e_h = o * numpy.tanh(e_c)
h_prev = e_h
c_prev = e_c
hs_f.append(e_h)
testing.assert_allclose(hy.array[layer_idx, batch], h_prev)
testing.assert_allclose(cy.array[layer_idx, batch], c_prev)
# backward
di = 1
layer_idx = layer * 2 + di
p = self.rnn[layer_idx]
h_prev = self.h[layer_idx, batch]
c_prev = self.c[layer_idx, batch]
hs_b = []
for x in reversed(seq):
i = sigmoid(x.dot(p.w0.array.T) +
h_prev.dot(p.w4.array.T) +
p.b0.array + p.b4.array)
f = sigmoid(x.dot(p.w1.array.T) +
h_prev.dot(p.w5.array.T) +
p.b1.array + p.b5.array)
c_bar = numpy.tanh(x.dot(p.w2.array.T) +
h_prev.dot(p.w6.array.T) +
p.b2.array + p.b6.array)
o = sigmoid(
x.dot(p.w3.array.T) + h_prev.dot(p.w7.array.T) +
p.b3.array + p.b7.array)
e_c = (f * c_prev + i * c_bar)
e_h = o * numpy.tanh(e_c)
h_prev = e_h
c_prev = e_c
hs_b.append(e_h)
testing.assert_allclose(hy.array[layer_idx, batch], h_prev)
testing.assert_allclose(cy.array[layer_idx, batch], c_prev)
hs_b.reverse()
seq = [numpy.concatenate([hfi, hbi], axis=0) for (hfi, hbi)
in zip(hs_f, hs_b)]
for y, ey in zip(ys[batch].array, seq):
testing.assert_allclose(y, ey)
def test_forward_cpu_train(self):
with chainer.using_config('train', True):
self.check_forward(self.h, self.c, self.xs)
@attr.gpu
def test_forward_gpu_train(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'always'), \
chainer.using_config('train', True):
self.check_forward(
cuda.to_gpu(self.h),
cuda.to_gpu(self.c),
[cuda.to_gpu(x) for x in self.xs])
def test_forward_cpu_test(self):
with chainer.using_config('train', False):
self.check_forward(self.h, self.c, self.xs)
@attr.gpu
def test_forward_gpu_test(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'always'), \
chainer.using_config('train', False):
self.check_forward(
cuda.to_gpu(self.h),
cuda.to_gpu(self.c),
[cuda.to_gpu(x) for x in self.xs])
def check_multi_gpu_forward(self, train=True):
# See chainer/chainer#6262
        # NStepBiLSTM w/ cudnn & dropout should work on a non-current device
msg = None
rnn = self.rnn.copy('copy')
rnn.dropout = .5
with cuda.get_device_from_id(1):
if self.hidden_none:
h = None
else:
h = cuda.to_gpu(self.h)
c = cuda.to_gpu(self.c)
xs = [cuda.to_gpu(x) for x in self.xs]
with testing.assert_warns(DeprecationWarning):
rnn = rnn.to_gpu()
with cuda.get_device_from_id(0),\
chainer.using_config('train', train),\
chainer.using_config('use_cudnn', 'always'):
try:
rnn(h, c, xs)
except Exception as e:
msg = e
assert msg is None
@attr.gpu
@attr.multi_gpu(2)
def test_multi_gpu_forward_training(self):
self.check_multi_gpu_forward(True)
@attr.gpu
@attr.multi_gpu(2)
def test_multi_gpu_forward_test(self):
self.check_multi_gpu_forward(False)
def check_backward(
self, h_data, c_data, xs_data, gh_data, gc_data, gys_data):
def fun(*args):
if self.hidden_none:
h = c = None
xs = args
else:
h, c = args[:2]
xs = args[2:]
hy, cy, ys = self.rnn(h, c, xs)
return tuple([hy, cy] + list(ys))
params = []
for layer in self.rnn:
for p in layer.params():
params.append(p)
if self.hidden_none:
in_data = xs_data
else:
in_data = [h_data, c_data] + xs_data
gradient_check.check_backward(
fun, tuple(in_data),
tuple([gh_data, gc_data] + gys_data),
tuple(params), eps=1e-2, rtol=1e-3, atol=1e-3)
def test_backward_cpu(self):
self.check_backward(
self.h, self.c, self.xs, self.gh, self.gc, self.gys)
@attr.gpu
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'auto'):
self.check_backward(
cuda.to_gpu(self.h),
cuda.to_gpu(self.c),
[cuda.to_gpu(x) for x in self.xs],
cuda.to_gpu(self.gh),
cuda.to_gpu(self.gc),
[cuda.to_gpu(gy) for gy in self.gys])
def test_n_cells(self):
assert self.rnn.n_cells == 2
testing.run_module(__name__, __file__)
| 16,320
| 34.023605
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/rnn_tests/test_link_gru.py
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
def _sigmoid(x):
xp = backend.get_array_module(x)
half = x.dtype.type(0.5)
return xp.tanh(x * half) * half + half
def _gru(func, h, x):
xp = backend.get_array_module(h, x)
r = _sigmoid(x.dot(func.W_r.W.data.T) + h.dot(func.U_r.W.data.T))
z = _sigmoid(x.dot(func.W_z.W.data.T) + h.dot(func.U_z.W.data.T))
h_bar = xp.tanh(x.dot(func.W.W.data.T) + (r * h).dot(func.U.W.data.T))
y = (1 - z) * h + z * h_bar
return y
@testing.parameterize(
{'gru': links.StatelessGRU, 'state': 'random',
'in_size': 3, 'out_size': 5},
{'gru': links.StatelessGRU, 'state': 'random', 'out_size': 5},
{'gru': links.GRU, 'state': 'random', 'in_size': 3, 'out_size': 5},
{'gru': links.GRU, 'state': 'zero', 'in_size': 3, 'out_size': 5},
{'gru': links.StatefulGRU, 'state': 'random', 'in_size': 3, 'out_size': 5},
{'gru': links.StatefulGRU, 'state': 'zero', 'in_size': 3, 'out_size': 5},
)
class TestGRU(unittest.TestCase):
def setUp(self):
if self.gru == links.StatelessGRU:
if hasattr(self, 'in_size'):
self.link = self.gru(self.in_size, self.out_size)
else:
self.link = self.gru(None, self.out_size)
self.in_size = self.out_size
elif self.gru == links.StatefulGRU or self.gru == links.GRU:
self.link = self.gru(self.in_size, self.out_size)
else:
            self.fail('Unsupported link (only GRU, StatelessGRU and '
                      'StatefulGRU are supported): {}'.format(self.gru))
self.x = numpy.random.uniform(
-1, 1, (3, self.in_size)).astype(numpy.float32)
if self.state == 'random':
self.h = numpy.random.uniform(
-1, 1, (3, self.out_size)).astype(numpy.float32)
elif self.state == 'zero':
self.h = numpy.zeros((3, self.out_size), dtype=numpy.float32)
else:
            self.fail('Unsupported state initialization: {}'.format(self.state))
self.gy = numpy.random.uniform(
-1, 1, (3, self.out_size)).astype(numpy.float32)
def _forward(self, link, h, x):
if isinstance(link, links.StatelessGRU):
return link(h, x)
else:
if self.state != 'zero':
link.set_state(h)
return link(x)
def check_forward(self, h_data, x_data):
h = chainer.Variable(h_data)
x = chainer.Variable(x_data)
y = self._forward(self.link, h, x)
self.assertEqual(y.data.dtype, numpy.float32)
y_expect = _gru(self.link, h_data, x_data)
testing.assert_allclose(y_expect, y.data)
if isinstance(self.link, links.StatefulGRU):
testing.assert_allclose(self.link.h.data, y.data)
def test_forward_cpu(self):
self.check_forward(self.h, self.x)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.h),
cuda.to_gpu(self.x))
def check_backward(self, h_data, x_data, y_grad):
h = chainer.Variable(h_data)
x = chainer.Variable(x_data)
y = self._forward(self.link, h, x)
y.grad = y_grad
y.backward()
def f():
return _gru(self.link, h_data, x_data),
gx, = gradient_check.numerical_grad(f, (x.data,), (y_grad,))
testing.assert_allclose(gx, x.grad, atol=1e-3)
if isinstance(self.link, links.StatelessGRU):
gh, = gradient_check.numerical_grad(f, (h.data,), (y_grad,))
testing.assert_allclose(gh, h.grad, atol=1e-3)
def test_backward_cpu(self):
self.check_backward(self.h, self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.h),
cuda.to_gpu(self.x),
cuda.to_gpu(self.gy))
@testing.parameterize(
*testing.product({
'link_array_module': ['to_cpu', 'to_gpu'],
'state_array_module': ['to_cpu', 'to_gpu'],
'gru': [links.GRU, links.StatefulGRU]
}))
class TestGRUState(unittest.TestCase):
def setUp(self):
in_size, out_size = 10, 8
self.link = self.gru(in_size, out_size)
self.h = chainer.Variable(
numpy.random.uniform(-1, 1, (3, out_size)).astype(numpy.float32))
def check_set_state(self, h):
self.link.set_state(h)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
def test_set_state_cpu(self):
self.check_set_state(self.h)
@attr.gpu
def test_set_state_gpu(self):
with testing.assert_warns(DeprecationWarning):
getattr(self.link, self.link_array_module)()
getattr(self.h, self.state_array_module)()
self.check_set_state(self.h)
def check_reset_state(self):
self.link.reset_state()
self.assertIsNone(self.link.h)
def test_reset_state_cpu(self):
self.check_reset_state()
@attr.gpu
def test_reset_state_gpu(self):
with testing.assert_warns(DeprecationWarning):
getattr(self.link, self.link_array_module)()
self.check_reset_state()
@testing.parameterize(
{'gru': links.GRU},
{'gru': links.StatefulGRU}
)
class TestGRUToCPUToGPU(unittest.TestCase):
def setUp(self):
in_size, out_size = 10, 8
        self.link = self.gru(in_size, out_size)
self.h = chainer.Variable(
numpy.random.uniform(-1, 1, (3, out_size)).astype(numpy.float32))
def check_to_cpu(self, h):
self.link.set_state(h)
with testing.assert_warns(DeprecationWarning):
self.link.to_cpu()
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
with testing.assert_warns(DeprecationWarning):
self.link.to_cpu()
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
def test_to_cpu_cpu(self):
self.check_to_cpu(self.h)
@attr.gpu
def test_to_cpu_gpu(self):
self.h.to_gpu()
self.check_to_cpu(self.h)
def check_to_cpu_to_gpu(self, h):
self.link.set_state(h)
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
with testing.assert_warns(DeprecationWarning):
self.link.to_cpu()
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
@attr.gpu
def test_to_cpu_to_gpu_cpu(self):
self.check_to_cpu_to_gpu(self.h)
@attr.gpu
def test_to_cpu_to_gpu_gpu(self):
self.h.to_gpu()
self.check_to_cpu_to_gpu(self.h)
class InvalidCallOfGRU(unittest.TestCase):
def setUp(self):
self.gru = links.GRU(10, 10)
def test_no_argument(self):
with self.assertRaises(ValueError):
self.gru()
def test_too_many_argument_1(self):
x = numpy.random.uniform(-1, 1, (5, 10))
h = numpy.random.uniform(-1, 1, (5, 10))
with self.assertRaises(ValueError):
self.gru(x, h)
def test_too_many_argument_2(self):
x = numpy.random.uniform(-1, 1, (5, 10))
h = numpy.random.uniform(-1, 1, (5, 10))
z = numpy.random.uniform(-1, 1, (5, 10))
with self.assertRaises(ValueError):
self.gru(x, h, z)
testing.run_module(__name__, __file__)
| 8,066
| 32.197531
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/rnn_tests/test_link_lstm.py
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import functions
from chainer import links
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product_dict(
[
{'in_size': 10, 'out_size': 10},
{'in_size': 10, 'out_size': 40},
],
[
{'input_none': False},
{'input_none': True, 'input_omit': True},
{'input_none': True, 'input_omit': False},
],
[
{'input_variable': False},
{'input_variable': True},
]
))
class TestLSTM(unittest.TestCase):
def setUp(self):
if self.input_none:
if self.input_omit:
self.link = links.LSTM(self.out_size)
else:
self.link = links.LSTM(None, self.out_size)
else:
self.link = links.LSTM(self.in_size, self.out_size)
self.link.cleargrads()
x1_shape = (4, self.in_size)
self.x1 = numpy.random.uniform(-1, 1, x1_shape).astype(numpy.float32)
x2_shape = (3, self.in_size)
self.x2 = numpy.random.uniform(-1, 1, x2_shape).astype(numpy.float32)
x3_shape = (0, self.in_size)
self.x3 = numpy.random.uniform(-1, 1, x3_shape).astype(numpy.float32)
def check_forward(self, x1_data, x2_data, x3_data):
xp = self.link.xp
x1 = chainer.Variable(x1_data) if self.input_variable else x1_data
h1 = self.link(x1)
device = backend.get_device_from_array(x1_data)
with chainer.using_device(device):
c0 = chainer.Variable(xp.zeros((len(self.x1), self.out_size),
dtype=self.x1.dtype))
c1_expect, h1_expect = functions.lstm(c0, self.link.upward(x1))
testing.assert_allclose(h1.data, h1_expect.data)
testing.assert_allclose(self.link.h.data, h1_expect.data)
testing.assert_allclose(self.link.c.data, c1_expect.data)
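        # Feed a smaller batch next: only the first `batch` rows of the
        # hidden state should be updated, while the remaining rows are
        # carried over unchanged.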
batch = len(x2_data)
x2 = chainer.Variable(x2_data) if self.input_variable else x2_data
h1_in, h1_rest = functions.split_axis(
self.link.h.data, [batch], axis=0)
y2 = self.link(x2)
        device = backend.get_device_from_array(x1_data)
with chainer.using_device(device):
c2_expect, y2_expect = \
functions.lstm(c1_expect,
self.link.upward(x2) + self.link.lateral(h1_in))
testing.assert_allclose(y2.data, y2_expect.data)
testing.assert_allclose(self.link.h.data[:batch], y2_expect.data)
testing.assert_allclose(self.link.h.data[batch:], h1_rest.data)
x3 = chainer.Variable(x3_data) if self.input_variable else x3_data
h2_rest = self.link.h
y3 = self.link(x3)
c3_expect, y3_expect = \
functions.lstm(c2_expect, self.link.upward(x3))
testing.assert_allclose(y3.data, y3_expect.data)
testing.assert_allclose(self.link.h.data, h2_rest.data)
def test_forward_cpu(self):
self.check_forward(self.x1, self.x2, self.x3)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x1), cuda.to_gpu(self.x2),
cuda.to_gpu(self.x3))
@attr.multi_gpu(2)
def test_forward_gpu_multi(self):
with cuda.get_device_from_id(0):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x1 = cuda.to_gpu(self.x1)
x2 = cuda.to_gpu(self.x2)
x3 = cuda.to_gpu(self.x3)
with cuda.get_device_from_id(1):
self.check_forward(x1, x2, x3)
class TestLSTMState(unittest.TestCase):
def setUp(self):
self.link = links.LSTM(5, 7)
self.x = chainer.Variable(
numpy.random.uniform(-1, 1, (3, 5)).astype(numpy.float32))
self.c = chainer.Variable(
numpy.random.uniform(-1, 1, (3, 5)).astype(numpy.float32))
self.h = chainer.Variable(
numpy.random.uniform(-1, 1, (3, 5)).astype(numpy.float32))
def check_state(self):
self.assertIsNone(self.link.c)
self.assertIsNone(self.link.h)
self.link(self.x)
self.assertIsNotNone(self.link.c)
self.assertIsNotNone(self.link.h)
def test_state_cpu(self):
self.check_state()
@attr.gpu
def test_state_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.x.to_gpu()
self.check_state()
def check_set_state(self, c, h):
self.link.set_state(c, h)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
testing.assert_allclose(c.data, self.link.c.data)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
testing.assert_allclose(h.data, self.link.h.data)
def test_set_state_cpu(self):
self.check_set_state(self.c, self.h)
@attr.gpu
def test_set_state_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_set_state(self.c, self.h)
def check_reset_state(self):
self.link(self.x)
self.link.reset_state()
self.assertIsNone(self.link.c)
self.assertIsNone(self.link.h)
def test_reset_state_cpu(self):
self.check_reset_state()
@attr.gpu
def test_reset_state_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.x.to_gpu()
self.check_reset_state()
class TestLSTMToCPUToGPU(unittest.TestCase):
def setUp(self):
self.link = links.LSTM(5, 7)
self.x = chainer.Variable(
numpy.random.uniform(-1, 1, (3, 5)).astype(numpy.float32))
def check_to_cpu(self, s):
with testing.assert_warns(DeprecationWarning):
self.link.to_cpu()
self.assertIsInstance(s.data, self.link.xp.ndarray)
with testing.assert_warns(DeprecationWarning):
self.link.to_cpu()
self.assertIsInstance(s.data, self.link.xp.ndarray)
def test_to_cpu_cpu(self):
self.link(self.x)
self.check_to_cpu(self.link.c)
self.check_to_cpu(self.link.h)
@attr.gpu
def test_to_cpu_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.x.to_gpu()
self.link(self.x)
self.check_to_cpu(self.link.c)
self.check_to_cpu(self.link.h)
def check_to_cpu_to_gpu(self, s):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.assertIsInstance(s.data, self.link.xp.ndarray)
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.assertIsInstance(s.data, self.link.xp.ndarray)
with testing.assert_warns(DeprecationWarning):
self.link.to_cpu()
self.assertIsInstance(s.data, self.link.xp.ndarray)
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.assertIsInstance(s.data, self.link.xp.ndarray)
@attr.gpu
def test_to_cpu_to_gpu_cpu(self):
self.link(self.x)
self.check_to_cpu_to_gpu(self.link.c)
self.check_to_cpu_to_gpu(self.link.h)
@attr.gpu
def test_to_cpu_to_gpu_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.x.to_gpu()
self.link(self.x)
self.check_to_cpu_to_gpu(self.link.c)
self.check_to_cpu_to_gpu(self.link.h)
class TestLSTMInvalidSize(unittest.TestCase):
in_size = 10
out_size = 20
def setUp(self):
self.link = links.LSTM(self.in_size, self.out_size)
upward = self.link.upward.W.data
upward[...] = numpy.random.uniform(-1, 1, upward.shape)
lateral = self.link.lateral.W.data
lateral[...] = numpy.random.uniform(-1, 1, lateral.shape)
x1_shape = (4, self.in_size)
self.x1 = numpy.random.uniform(-1, 1, x1_shape).astype(numpy.float32)
x2_shape = (5, self.in_size)
self.x2 = numpy.random.uniform(-1, 1, x2_shape).astype(numpy.float32)
def check_forward_invalid_size(self, x1_data, x2_data):
x1 = chainer.Variable(x1_data)
x2 = chainer.Variable(x2_data)
self.link(x1)
with self.assertRaises(TypeError):
self.link(x2)
def test_forward_invalid_size_cpu(self):
self.check_forward_invalid_size(self.x1, self.x2)
@attr.gpu
def test_forward_invalid_size_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward_invalid_size(cuda.to_gpu(self.x1),
cuda.to_gpu(self.x2))
class TestLSTMInitialize(unittest.TestCase):
def test_initialize_bias_default(self):
link = links.LSTM(2, 3)
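        # The upward bias packs the four gates as interleaved (a, i, f, o)
        # entries per output unit, and the forget-gate bias is initialized
        # to 1 by default, so the third entry of each group of four is 1.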
numpy.testing.assert_array_equal(
link.upward.b.data,
numpy.array([0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0]))
class TestLSTMEmptyBatchInitialize(unittest.TestCase):
def setUp(self):
self.link = links.LSTM(4)
self.x = numpy.random.uniform(-1, 1, (0, 3)).astype(numpy.float32)
def test_empty_batch_dim(self):
y = self.link(chainer.Variable(self.x))
assert y.shape == (0, 4)
@testing.parameterize(*testing.product_dict(
[
{'in_size': 10, 'out_size': 10},
{'in_size': 10, 'out_size': 40},
],
[
{'input_none': False},
{'input_none': True, 'input_omit': True},
{'input_none': True, 'input_omit': False},
],
[
{'input_variable': False},
{'input_variable': True},
]
))
class TestStatelessLSTM(unittest.TestCase):
def setUp(self):
if self.input_none:
if self.input_omit:
self.link = links.StatelessLSTM(self.out_size)
else:
self.link = links.StatelessLSTM(None, self.out_size)
else:
self.link = links.StatelessLSTM(self.in_size, self.out_size)
self.link.cleargrads()
x_shape = (4, self.in_size)
self.x = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
def check_forward(self, x_data):
xp = self.link.xp
x = chainer.Variable(x_data) if self.input_variable else x_data
c1, h1 = self.link(None, None, x)
device = backend.get_device_from_array(x_data)
with chainer.using_device(device):
c0 = chainer.Variable(xp.zeros((len(self.x), self.out_size),
dtype=self.x.dtype))
c1_expect, h1_expect = functions.lstm(c0, self.link.upward(x))
testing.assert_allclose(h1.data, h1_expect.data)
testing.assert_allclose(c1.data, c1_expect.data)
c2, h2 = self.link(c1, h1, x)
c2_expect, h2_expect = \
functions.lstm(c1_expect,
self.link.upward(x) + self.link.lateral(h1))
testing.assert_allclose(h2.data, h2_expect.data)
testing.assert_allclose(c2.data, c2_expect.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
@attr.multi_gpu(2)
def test_forward_gpu_multi(self):
with cuda.get_device_from_id(0):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
with cuda.get_device_from_id(1):
self.check_forward(x)
testing.run_module(__name__, __file__)
| 11,811
| 32.942529
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/rnn_tests/test_link_peephole.py
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
def _sigmoid(x):
xp = backend.get_array_module(x)
half = x.dtype.type(0.5)
return xp.tanh(x * half) * half + half
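# Reference peephole LSTM step in NumPy/CuPy: the input and forget gates
# receive peephole connections from the previous cell state (peep_i, peep_f),
# while the output gate peeks at the updated cell state (peep_o) before
# gating tanh(c_next).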
def _peephole(func, c, h, x):
device = backend.get_device_from_array(x)
with chainer.using_device(device):
xp = device.xp
lstm_in = x.dot(func.upward.W.data.T)
lstm_in += h.dot(func.lateral.W.data.T)
lstm_in = xp.reshape(lstm_in, (len(lstm_in),
lstm_in.shape[1] // 4,
4))
a, i, f, o = xp.split(lstm_in, 4, 2)
a = xp.reshape(a, (len(a), a.shape[1]))
i = xp.reshape(i, (len(i), i.shape[1]))
f = xp.reshape(f, (len(f), f.shape[1]))
o = xp.reshape(o, (len(o), o.shape[1]))
peep_in_i = c.dot(func.peep_i.W.data.T)
peep_in_f = c.dot(func.peep_f.W.data.T)
a = xp.tanh(a)
i = _sigmoid(i + peep_in_i)
f = _sigmoid(f + peep_in_f)
c_next = a * i + f * c
peep_in_o = c_next.dot(func.peep_o.W.data.T)
o = _sigmoid(o + peep_in_o)
y = o * xp.tanh(c_next)
return c_next, y
@testing.parameterize(
{'in_size': 10, 'out_size': 10},
{'in_size': 10, 'out_size': 40},
)
class TestPeephole(unittest.TestCase):
def setUp(self):
self.link = links.StatefulPeepholeLSTM(self.in_size, self.out_size)
upward = self.link.upward.W.data
upward[...] = numpy.random.uniform(-1, 1, upward.shape)
lateral = self.link.lateral.W.data
lateral[...] = numpy.random.uniform(-1, 1, lateral.shape)
peep_i = self.link.peep_i.W.data
peep_i[...] = numpy.random.uniform(-1, 1, peep_i.shape)
peep_f = self.link.peep_f.W.data
peep_f[...] = numpy.random.uniform(-1, 1, peep_f.shape)
peep_o = self.link.peep_o.W.data
peep_o[...] = numpy.random.uniform(-1, 1, peep_o.shape)
c_shape = (1, self.out_size)
h_shape = (1, self.out_size)
x_shape = (4, self.in_size)
gy_shape = (4, self.out_size)
self.c = numpy.zeros(c_shape).astype(numpy.float32)
self.h = numpy.zeros(h_shape).astype(numpy.float32)
self.x = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(numpy.float32)
def _forward(self, link, x):
return link(x)
def check_forward(self, c_data, h_data, x_data):
x = chainer.Variable(x_data)
h1 = self.link(x)
c1_expect, h1_expect = _peephole(self.link, c_data, h_data, x_data)
testing.assert_allclose(h1.data, h1_expect)
testing.assert_allclose(self.link.c.data, c1_expect)
testing.assert_allclose(self.link.h.data, h1_expect)
h2 = self.link(x)
c2_expect, h2_expect = _peephole(self.link,
c1_expect, h1_expect, x_data)
testing.assert_allclose(h2.data, h2_expect)
testing.assert_allclose(self.link.c.data, c2_expect)
testing.assert_allclose(self.link.h.data, h2_expect)
def test_forward_cpu(self):
self.check_forward(self.c, self.h, self.x)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.c),
cuda.to_gpu(self.h),
cuda.to_gpu(self.x))
@attr.multi_gpu(2)
def test_forward_gpu_multi(self):
with cuda.get_device_from_id(0):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
c = cuda.to_gpu(self.c)
h = cuda.to_gpu(self.h)
x = cuda.to_gpu(self.x)
with cuda.get_device_from_id(1):
self.check_forward(c, h, x)
def check_backward(self, c_data, h_data, x_data, y_grad):
x = chainer.Variable(x_data)
y = self._forward(self.link, x)
y.grad = y_grad
y.backward()
def f():
c, y = _peephole(self.link, c_data, h_data, x_data)
return y,
gx, = gradient_check.numerical_grad(f, (x.data,), (y_grad,))
testing.assert_allclose(gx, x.grad, atol=1e-3)
def test_backward_cpu(self):
self.check_backward(self.c, self.h, self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.c),
cuda.to_gpu(self.h),
cuda.to_gpu(self.x),
cuda.to_gpu(self.gy))
class TestPeepholeState(unittest.TestCase):
def setUp(self):
in_size, out_size = 10, 8
self.link = links.StatefulPeepholeLSTM(in_size, out_size)
def check_reset_state(self):
self.link.reset_state()
self.assertIsNone(self.link.c)
self.assertIsNone(self.link.h)
def test_reset_state_cpu(self):
self.check_reset_state()
@attr.gpu
def test_reset_state_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_reset_state()
class TestPeepholeToCPUToGPU(unittest.TestCase):
def setUp(self):
in_size, out_size = 10, 8
self.link = links.StatefulPeepholeLSTM(in_size, out_size)
self.c = chainer.Variable(
numpy.random.uniform(-1, 1, (1, out_size)).astype(numpy.float32))
self.h = chainer.Variable(
numpy.random.uniform(-1, 1, (1, out_size)).astype(numpy.float32))
def check_to_cpu(self, c, h):
self.link.c = c
self.link.h = h
with testing.assert_warns(DeprecationWarning):
self.link.to_cpu()
self.assertIs(self.link.xp, numpy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
with testing.assert_warns(DeprecationWarning):
self.link.to_cpu()
self.assertIs(self.link.xp, numpy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
def test_to_cpu_cpu(self):
self.check_to_cpu(self.c, self.h)
@attr.gpu
def test_to_cpu_gpu(self):
self.c.to_gpu()
self.h.to_gpu()
self.check_to_cpu(self.c, self.h)
def check_to_cpu_to_gpu(self, c, h):
self.link.c = c
self.link.h = h
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.assertIs(self.link.xp, cuda.cupy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.assertIs(self.link.xp, cuda.cupy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
with testing.assert_warns(DeprecationWarning):
self.link.to_cpu()
self.assertIs(self.link.xp, numpy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.assertIs(self.link.xp, cuda.cupy)
self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
@attr.gpu
def test_to_cpu_to_gpu_cpu(self):
self.check_to_cpu_to_gpu(self.c, self.h)
@attr.gpu
def test_to_cpu_to_gpu_gpu(self):
self.c.to_gpu()
self.h.to_gpu()
self.check_to_cpu_to_gpu(self.c, self.h)
testing.run_module(__name__, __file__)
| 8,129
| 34.347826
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/rnn_tests/test_link_n_step_rnn.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer import initializers
from chainer.testing import attr
from chainer.testing import condition
def relu(x):
return x * (x > 0)
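# Elementwise ReLU via a boolean mask; equivalent to numpy.maximum(x, 0).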
@testing.parameterize(*testing.product({
'hidden_none': [True, False],
'activation': ['tanh', 'relu'],
}))
class TestNStepRNN(unittest.TestCase):
lengths = [3, 1, 2]
n_layers = 2
in_size = 3
out_size = 2
dropout = 0.0
def setUp(self):
shape = (self.n_layers, len(self.lengths), self.out_size)
if self.hidden_none:
self.h = numpy.zeros(shape, 'f')
else:
self.h = numpy.random.uniform(-1, 1, shape).astype('f')
self.xs = [
numpy.random.uniform(-1, 1, (l, self.in_size)).astype('f')
for l in self.lengths]
self.gh = numpy.random.uniform(-1, 1, shape).astype('f')
self.gys = [
numpy.random.uniform(-1, 1, (l, self.out_size)).astype('f')
for l in self.lengths]
if self.activation == 'tanh':
rnn_link_class = links.NStepRNNTanh
elif self.activation == 'relu':
rnn_link_class = links.NStepRNNReLU
self.rnn = rnn_link_class(
self.n_layers, self.in_size, self.out_size, self.dropout)
for layer in self.rnn:
for p in layer.params():
p.array[...] = numpy.random.uniform(-1, 1, p.shape)
self.rnn.cleargrads()
def check_forward(self, h_data, xs_data):
if self.hidden_none:
h = None
else:
h = chainer.Variable(h_data)
xs = [chainer.Variable(x) for x in xs_data]
hy, ys = self.rnn(h, xs)
assert hy.shape == h_data.shape
assert len(xs) == len(ys)
for x, y in zip(xs, ys):
assert len(x) == len(y)
assert y.shape[1] == self.out_size
with testing.assert_warns(DeprecationWarning):
self.rnn.to_cpu()
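        # Recompute the forward pass on the CPU as a reference: each step
        # applies h' = act(x.dot(w0.T) + h.dot(w1.T) + b0 + b1), and each
        # layer's outputs become the next layer's input sequence.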
for batch, seq in enumerate(self.xs):
for layer in range(self.n_layers):
p = self.rnn[layer]
h_prev = self.h[layer, batch]
hs = []
for x in seq:
if self.activation == 'tanh':
activation_func = numpy.tanh
elif self.activation == 'relu':
activation_func = relu
h_prev = activation_func(x.dot(p.w0.array.T) +
h_prev.dot(p.w1.array.T) +
p.b0.array + p.b1.array)
hs.append(h_prev)
seq = hs
testing.assert_allclose(hy.data[layer, batch], h_prev)
for y, ey in zip(ys[batch].array, seq):
testing.assert_allclose(y, ey)
def test_forward_cpu_train(self):
with chainer.using_config('train', True):
self.check_forward(self.h, self.xs)
@attr.gpu
def test_forward_gpu_train(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'always'), \
chainer.using_config('train', True):
self.check_forward(
cuda.to_gpu(self.h),
[cuda.to_gpu(x) for x in self.xs])
def test_forward_cpu_test(self):
with chainer.using_config('train', False):
self.check_forward(self.h, self.xs)
@attr.gpu
def test_forward_gpu_test(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'always'), \
chainer.using_config('train', False):
self.check_forward(
cuda.to_gpu(self.h),
[cuda.to_gpu(x) for x in self.xs])
def check_multi_gpu_forward(self, train=True):
        # See chainer/chainer#6262
        # NStepRNNTanh and NStepRNNReLU with cuDNN and dropout should work
        # even when the link's device is not the current device
msg = None
rnn = self.rnn.copy('copy')
rnn.dropout = .5
with cuda.get_device_from_id(1):
if self.hidden_none:
h = None
else:
h = cuda.to_gpu(self.h)
xs = [cuda.to_gpu(x) for x in self.xs]
with testing.assert_warns(DeprecationWarning):
rnn = rnn.to_gpu()
with cuda.get_device_from_id(0),\
chainer.using_config('train', train),\
chainer.using_config('use_cudnn', 'always'):
try:
rnn(h, xs)
except Exception as e:
msg = e
assert msg is None
@attr.cudnn
@attr.multi_gpu(2)
def test_multi_gpu_forward_training(self):
self.check_multi_gpu_forward(True)
@attr.cudnn
@attr.multi_gpu(2)
def test_multi_gpu_forward_test(self):
self.check_multi_gpu_forward(False)
def check_backward(
self, h_data, xs_data, gh_data, gys_data):
def fun(*args):
if self.hidden_none:
h = None
xs = args
else:
h, = args[:1]
xs = args[1:]
hy, ys = self.rnn(h, xs)
return tuple([hy, ] + list(ys))
params = []
for layer in self.rnn:
for p in layer.params():
params.append(p)
if self.hidden_none:
in_data = xs_data
else:
in_data = [h_data, ] + xs_data
gradient_check.check_backward(
fun, tuple(in_data),
tuple([gh_data, ] + gys_data),
tuple(params), rtol=1e-2, atol=5e-2)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(
self.h, self.xs, self.gh, self.gys)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'auto'):
self.check_backward(
cuda.to_gpu(self.h),
[cuda.to_gpu(x) for x in self.xs],
cuda.to_gpu(self.gh),
[cuda.to_gpu(gy) for gy in self.gys])
def test_n_cells(self):
assert self.rnn.n_cells == 1
@testing.parameterize(*testing.product({
'hidden_none': [True, False],
'activation': ['tanh', 'relu'],
}))
class TestNStepBiRNN(unittest.TestCase):
lengths = [3, 1, 2]
n_layers = 2
in_size = 3
out_size = 2
dropout = 0.0
def setUp(self):
shape = (self.n_layers * 2, len(self.lengths), self.out_size)
if self.hidden_none:
self.h = numpy.zeros(shape, 'f')
else:
self.h = numpy.random.uniform(-1, 1, shape).astype('f')
self.xs = [
numpy.random.uniform(-1, 1, (l, self.in_size)).astype('f')
for l in self.lengths]
self.gh = numpy.random.uniform(-1, 1, shape).astype('f')
self.gys = [
numpy.random.uniform(-1, 1, (l, self.out_size * 2)).astype('f')
for l in self.lengths]
if self.activation == 'tanh':
rnn_link_class = links.NStepBiRNNTanh
elif self.activation == 'relu':
rnn_link_class = links.NStepBiRNNReLU
self.rnn = rnn_link_class(
self.n_layers, self.in_size, self.out_size, self.dropout)
for layer in self.rnn:
for p in layer.params():
p.array[...] = numpy.random.uniform(-1, 1, p.array.shape)
self.rnn.cleargrads()
def check_forward(self, h_data, xs_data):
if self.hidden_none:
h = None
else:
h = chainer.Variable(h_data)
xs = [chainer.Variable(x) for x in xs_data]
hy, ys = self.rnn(h, xs)
assert hy.shape == h_data.shape
assert len(xs) == len(ys)
for x, y in zip(xs, ys):
assert len(x) == len(y)
assert y.shape[1] == self.out_size * 2
with testing.assert_warns(DeprecationWarning):
self.rnn.to_cpu()
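        # Reference computation: each layer runs in both directions.
        # Direction 0 scans the sequence forward, direction 1 scans it in
        # reverse, and the per-step hidden states are concatenated along
        # the feature axis.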
for batch, seq in enumerate(self.xs):
for layer in range(self.n_layers):
# forward
di = 0
layer_idx = layer * 2 + di
p = self.rnn[layer_idx]
h_prev = self.h[layer_idx, batch]
hs_f = []
for x in seq:
if self.activation == 'tanh':
activation_func = numpy.tanh
elif self.activation == 'relu':
activation_func = relu
h_prev = activation_func(x.dot(p.w0.array.T) +
h_prev.dot(p.w1.array.T) +
p.b0.array + p.b1.array)
hs_f.append(h_prev)
testing.assert_allclose(hy.array[layer_idx, batch], h_prev)
# backward
di = 1
layer_idx = layer * 2 + di
p = self.rnn[layer_idx]
h_prev = self.h[layer_idx, batch]
hs_b = []
for x in reversed(seq):
if self.activation == 'tanh':
activation_func = numpy.tanh
elif self.activation == 'relu':
activation_func = relu
h_prev = activation_func(x.dot(p.w0.array.T) +
h_prev.dot(p.w1.array.T) +
p.b0.array + p.b1.array)
hs_b.append(h_prev)
testing.assert_allclose(hy.data[layer_idx, batch], h_prev)
hs_b.reverse()
seq = [numpy.concatenate([hfi, hbi], axis=0) for (hfi, hbi)
in zip(hs_f, hs_b)]
for y, ey in zip(ys[batch].array, seq):
testing.assert_allclose(y, ey)
def test_forward_cpu_train(self):
with chainer.using_config('train', True):
self.check_forward(self.h, self.xs)
@attr.gpu
def test_forward_gpu_train(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'always'), \
chainer.using_config('train', True):
self.check_forward(
cuda.to_gpu(self.h),
[cuda.to_gpu(x) for x in self.xs])
def test_forward_cpu_test(self):
with chainer.using_config('train', False):
self.check_forward(self.h, self.xs)
@attr.gpu
def test_forward_gpu_test(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'always'), \
chainer.using_config('train', False):
self.check_forward(
cuda.to_gpu(self.h),
[cuda.to_gpu(x) for x in self.xs])
def check_multi_gpu_forward(self, train=True):
        # See chainer/chainer#6262
        # NStepBiRNNTanh and NStepBiRNNReLU with cuDNN and dropout should
        # work even when the link's device is not the current device
msg = None
rnn = self.rnn.copy('copy')
rnn.dropout = .5
with cuda.get_device_from_id(1):
if self.hidden_none:
h = None
else:
h = cuda.to_gpu(self.h)
xs = [cuda.to_gpu(x) for x in self.xs]
with testing.assert_warns(DeprecationWarning):
rnn = rnn.to_gpu()
with cuda.get_device_from_id(0),\
chainer.using_config('train', train),\
chainer.using_config('use_cudnn', 'always'):
try:
rnn(h, xs)
except Exception as e:
msg = e
assert msg is None
@attr.cudnn
@attr.multi_gpu(2)
def test_multi_gpu_forward_training(self):
self.check_multi_gpu_forward(True)
@attr.cudnn
@attr.multi_gpu(2)
def test_multi_gpu_forward_test(self):
self.check_multi_gpu_forward(False)
def check_backward(
self, h_data, xs_data, gh_data, gys_data):
def fun(*args):
if self.hidden_none:
h = None
xs = args
else:
h, = args[:1]
xs = args[1:]
hy, ys = self.rnn(h, xs)
return tuple([hy, ] + list(ys))
params = []
for layer in self.rnn:
for p in layer.params():
params.append(p)
if self.hidden_none:
in_data = xs_data
else:
in_data = [h_data, ] + xs_data
gradient_check.check_backward(
fun, tuple(in_data),
tuple([gh_data, ] + gys_data),
tuple(params), rtol=1e-2, atol=5e-2)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(
self.h, self.xs, self.gh, self.gys)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'auto'):
self.check_backward(
cuda.to_gpu(self.h),
[cuda.to_gpu(x) for x in self.xs],
cuda.to_gpu(self.gh),
[cuda.to_gpu(gy) for gy in self.gys])
def test_n_cells(self):
assert self.rnn.n_cells == 1
@testing.parameterize(
*testing.product(
{
'dtype': [numpy.float32, numpy.float64],
'initialW': ['zero', 'random'],
'initial_bias': ['zero', 'random'],
'activation_type': ['tanh', 'relu'],
'use_bi_direction': [True, False]
}
)
)
class TestInitialization(unittest.TestCase):
def get_initializers(self):
if self.initialW == 'zero':
weight_initializer = initializers.constant.Zero()
elif self.initialW == 'random':
weight_initializer = initializers.GlorotUniform(
rng=numpy.random.RandomState(seed=0))
if self.initial_bias == 'zero':
bias_initializer = initializers.constant.Zero()
elif self.initial_bias == 'random':
bias_initializer = initializers.Uniform(
rng=numpy.random.RandomState(seed=0))
return weight_initializer, bias_initializer
def setUp(self):
weight_initializer, bias_initializer = self.get_initializers()
with chainer.using_config('dtype', self.dtype):
if self.activation_type == 'tanh':
if self.use_bi_direction:
link = links.NStepBiRNNTanh
else:
link = links.NStepRNNTanh
elif self.activation_type == 'relu':
if self.use_bi_direction:
link = links.NStepBiRNNReLU
else:
link = links.NStepRNNReLU
self.link = link(
1, 10, 10, 0.0,
initialW=weight_initializer,
initial_bias=bias_initializer)
def check_param(self):
weight_initializer, bias_initializer = self.get_initializers()
link = self.link
xp = link.xp
dtype = self.dtype
for ws_i in link.ws:
for w in ws_i:
assert w.dtype == dtype
w_expected = xp.empty(w.shape, dtype)
weight_initializer(w_expected)
testing.assert_allclose(
w.array, w_expected, atol=0, rtol=0)
for bs_i in link.bs:
for b in bs_i:
assert b.dtype == dtype
b_expected = xp.empty(b.shape, dtype)
bias_initializer(b_expected)
testing.assert_allclose(
b.array, b_expected, atol=0, rtol=0)
def test_param_cpu(self):
self.check_param()
@attr.gpu
def test_param_gpu(self):
self.link.to_device('@cupy:0')
self.check_param()
testing.run_module(__name__, __file__)
| 16,312
| 31.889113
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/rnn_tests/test_link_mgu.py
|
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import links
from chainer import testing
from chainer.testing import attr
def sigmoid(x):
return 1 / (1 + numpy.exp(-x))
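# Reference MGU (Minimal Gated Unit) step: a single forget gate f both gates
# the recurrent contribution to the candidate h_bar and interpolates between
# h_bar and the previous state h.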
def mgu(W_f, W_h, h, x):
f = sigmoid(numpy.concatenate([h, x]).dot(W_f.T))
hx = numpy.concatenate([f * h, x])
h_bar = numpy.tanh(hx.dot(W_h.T))
h_new = f * h_bar + (1 - f) * h
return h_new
class TestStatelessMGU(unittest.TestCase):
in_size = 4
out_size = 5
def setUp(self):
self.h = numpy.random.uniform(
-1, 1, (3, self.out_size)).astype(numpy.float32)
self.x = numpy.random.uniform(
-1, 1, (3, self.in_size)).astype(numpy.float32)
self.gy = numpy.random.uniform(
-1, 1, (3, self.out_size)).astype(numpy.float32)
self.mgu = links.StatelessMGU(self.in_size, self.out_size)
def check_forward(self, h_data, x_data):
h = chainer.Variable(h_data)
x = chainer.Variable(x_data)
y = self.mgu(h, x)
W_f = cuda.to_cpu(self.mgu.W_f.W.data)
W_h = cuda.to_cpu(self.mgu.W_h.W.data)
for i in six.moves.range(3):
h_new = mgu(W_f, W_h, self.h[i], self.x[i])
testing.assert_allclose(h_new, y.data[i])
def test_forward_cpu(self):
self.check_forward(self.h, self.x)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.mgu.to_gpu()
self.check_forward(cuda.to_gpu(self.h), cuda.to_gpu(self.x))
@testing.parameterize(
{'dtype': numpy.float16,
'forward_tols': {'atol': 5e-4, 'rtol': 5e-3}},
{'dtype': numpy.float32,
'forward_tols': {'atol': 1e-5, 'rtol': 1e-4}},
{'dtype': numpy.float64,
'forward_tols': {'atol': 1e-5, 'rtol': 1e-4}},
)
class TestStatefulMGU(unittest.TestCase):
in_size = 4
out_size = 5
def setUp(self):
self.x = numpy.random.uniform(
-1, 1, (3, self.in_size)).astype(self.dtype)
self.gy = numpy.random.uniform(
-1, 1, (3, self.out_size)).astype(self.dtype)
with chainer.using_config('dtype', self.dtype):
self.mgu = links.StatefulMGU(self.in_size, self.out_size)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
W_f = cuda.to_cpu(self.mgu.W_f.W.data)
W_h = cuda.to_cpu(self.mgu.W_h.W.data)
with chainer.using_config('dtype', self.dtype):
y1 = self.mgu(x)
y2 = self.mgu(x)
h = numpy.zeros(self.out_size, dtype=self.dtype)
for i in six.moves.range(3):
h1 = mgu(W_f, W_h, h, self.x[i])
testing.assert_allclose(h1, y1.data[i], **self.forward_tols)
h2 = mgu(W_f, W_h, h1, self.x[i])
testing.assert_allclose(h2, y2.data[i], **self.forward_tols)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.mgu.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
testing.run_module(__name__, __file__)
| 3,170
| 28.091743
| 72
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/rnn_tests/test_link_tree_lstm.py
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
def _sigmoid(x):
half = x.dtype.type(0.5)
xp = backend.get_array_module(x)
return xp.tanh(x * half) * half + half
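# Reference Child-Sum Tree-LSTM step: the children's hidden states are summed
# for the a/i/o gates, while each child gets its own forget gate computed
# from its individual hidden state with a shared weight W_hf.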
def _child_sum_tree_lstm(func, *inputs):
cs = inputs[:len(inputs) // 2]
hs = inputs[len(inputs) // 2:-1]
x = inputs[-1]
device = backend.get_device_from_array(x)
with chainer.using_device(device):
xp = device.xp
W_x = func.W_x.W.data.T
b_x = func.W_x.b.data
W_h_aio = func.W_h_aio.W.data.T
W_h_f = func.W_h_f.W.data.T
W_xa, W_xi, W_xo, W_xf = xp.split(W_x, 4, 1)
b_a, b_i, b_o, b_f = xp.split(b_x[None, ], 4, 1)
W_ha, W_hi, W_ho = xp.split(W_h_aio, 3, 1)
W_hf = W_h_f
if len(hs) >= 1:
sum_h = sum(hs)
a = x.dot(W_xa) + sum_h.dot(W_ha) + b_a
i = x.dot(W_xi) + sum_h.dot(W_hi) + b_i
o = x.dot(W_xo) + sum_h.dot(W_ho) + b_o
f_list = [x.dot(W_xf) + h.dot(W_hf) + b_f for h in hs]
else:
a = x.dot(W_xa) + b_a
i = x.dot(W_xi) + b_i
o = x.dot(W_xo) + b_o
a = xp.tanh(a)
i = _sigmoid(i)
o = _sigmoid(o)
if len(hs) >= 1:
f_list = [_sigmoid(f) for f in f_list]
c_next = sum([f * c for f, c in zip(f_list, cs)], a * i)
y = o * xp.tanh(c_next)
else:
c_next = a * i
y = o * xp.tanh(c_next)
return c_next, y
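# Reference N-ary Tree-LSTM step: unlike the child-sum variant, each child
# position has its own recurrent weight matrix W_h{i}, and a separate forget
# gate is computed per child position.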
def _nary_tree_lstm(func, *inputs):
cs = inputs[:len(inputs) // 2]
hs = inputs[len(inputs) // 2:-1]
x = inputs[-1]
device = backend.get_device_from_array(x)
with chainer.using_device(device):
xp = device.xp
W_x = func.W_x.W.data.T
b_x = func.W_x.b.data
W_h_list = [getattr(func, 'W_h{}'.format(i)).W.data.T
for i in range(1, func.n_ary + 1)]
W_xs = xp.split(W_x, 3 + func.n_ary, 1)
W_xa, W_xi, W_xo, W_xfs = W_xs[0], W_xs[1], W_xs[2], W_xs[3:]
b_xs = xp.split(b_x[None, ], 3 + func.n_ary, 1)
b_a, b_i, b_o, b_fs = b_xs[0], b_xs[1], b_xs[2], b_xs[3:]
W_ha_list = [xp.split(W_h, 3 + func.n_ary, 1)[0]
for W_h in W_h_list]
W_hi_list = [xp.split(W_h, 3 + func.n_ary, 1)[1]
for W_h in W_h_list]
W_ho_list = [xp.split(W_h, 3 + func.n_ary, 1)[2]
for W_h in W_h_list]
W_hfs_list = [xp.split(W_h, 3 + func.n_ary, 1)[3:]
for W_h in W_h_list]
        assert all(len(W_hfs_list) == len(W_hfs) for W_hfs in W_hfs_list)
a = x.dot(W_xa) + b_a + \
sum(h.dot(W_ha) for h, W_ha in zip(hs, W_ha_list))
i = x.dot(W_xi) + b_i + \
sum(h.dot(W_hi) for h, W_hi in zip(hs, W_hi_list))
o = x.dot(W_xo) + b_o + \
sum(h.dot(W_ho) for h, W_ho in zip(hs, W_ho_list))
f_list = [x.dot(W_xf) + b_f +
sum(h.dot(W_hf) for h, W_hf in zip(hs, W_hf_list))
for W_xf, b_f, W_hf_list
in zip(W_xfs, b_fs, zip(*W_hfs_list))]
a = xp.tanh(a)
i = _sigmoid(i)
o = _sigmoid(o)
f_list = [_sigmoid(f) for f in f_list]
c_next = a * i + sum(f * c for f, c in zip(f_list, cs))
y = o * xp.tanh(c_next)
return c_next, y
@testing.parameterize(*testing.product({
'dtype': [numpy.float32],
'n_ary': [0, 1, 2, 3],
'in_size': [6, 9],
'out_size': [9],
'model_type': ['ChildSumTreeLSTM', 'NaryTreeLSTM'],
}))
class TestTreeLSTM(unittest.TestCase):
def setUp(self):
if self.model_type == 'ChildSumTreeLSTM':
self.link = links.ChildSumTreeLSTM(
self.in_size, self.out_size)
elif self.model_type == 'NaryTreeLSTM':
if self.n_ary == 0:
# n_ary=0 test should be skipped for NaryTreeLSTM
self.n_ary = 1
self.link = links.NaryTreeLSTM(
self.in_size, self.out_size, n_ary=self.n_ary)
else:
            raise NotImplementedError()
for p in self.link.params():
p.data[:] = numpy.random.uniform(-1, 1, p.shape).astype(self.dtype)
self.c_prevs = [
numpy.random.uniform(-1, 1, (5, self.out_size)).astype(self.dtype)
for _ in range(self.n_ary)]
self.h_prevs = [
numpy.random.uniform(-1, 1, (5, self.out_size)).astype(self.dtype)
for _ in range(self.n_ary)]
self.x = numpy.random.uniform(
-1, 1, (5, self.in_size)).astype(self.dtype)
self.inputs = self.c_prevs + self.h_prevs + [self.x]
self.gc = numpy.random.uniform(
-1, 1, (5, self.out_size)).astype(self.dtype)
self.gh = numpy.random.uniform(
-1, 1, (5, self.out_size)).astype(self.dtype)
self.check_forward_options = {}
self.check_backward_options = {'dtype': numpy.float64}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 5e-4, 'rtol': 5e-3}
def check_forward(self, *inputs_data):
inputs_variable = [chainer.Variable(v) for v in inputs_data]
c, h = self.link(*inputs_variable)
self.assertEqual(c.data.dtype, self.dtype)
self.assertEqual(h.data.dtype, self.dtype)
# Compute expected out
if self.model_type == 'ChildSumTreeLSTM':
c_expect, h_expect = _child_sum_tree_lstm(self.link, *inputs_data)
elif self.model_type == 'NaryTreeLSTM':
c_expect, h_expect = _nary_tree_lstm(self.link, *inputs_data)
else:
            raise NotImplementedError()
testing.assert_allclose(
c_expect, c.data, **self.check_forward_options)
testing.assert_allclose(
h_expect, h.data, **self.check_forward_options)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(*self.inputs)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(*[cuda.to_gpu(v) for v in self.inputs])
@attr.multi_gpu(2)
def test_forward_gpu_multi(self):
with cuda.get_device_from_id(0):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
inputs = [cuda.to_gpu(v) for v in self.inputs]
with cuda.get_device_from_id(1):
self.check_forward(*inputs)
def check_forward_valid_none(self, *inputs_data):
inputs_variable = [chainer.Variable(v)
if v is not None else v for v in inputs_data]
xp = self.link.xp
inputs_data = [xp.zeros(self.h_prevs[0].shape, dtype=self.dtype)
if v is None else v for v in inputs_data[:-1]] + \
[xp.zeros(self.x.shape, dtype=self.dtype)
if inputs_data[-1] is None else inputs_data[-1]]
if self.n_ary == 0:
# in this case for link(x) without cs and hs,
# it does not include any None.
pass
else:
c, h = self.link(*inputs_variable)
self.assertEqual(c.data.dtype, self.dtype)
self.assertEqual(h.data.dtype, self.dtype)
# Compute expected out
if self.model_type == 'ChildSumTreeLSTM':
c_expect, h_expect = _child_sum_tree_lstm(
self.link, *inputs_data)
elif self.model_type == 'NaryTreeLSTM':
c_expect, h_expect = _nary_tree_lstm(self.link, *inputs_data)
else:
                raise NotImplementedError()
testing.assert_allclose(
c_expect, c.data, **self.check_forward_options)
testing.assert_allclose(
h_expect, h.data, **self.check_forward_options)
def test_forward_none_ch_cpu(self):
inputs = [None] * len(self.c_prevs) + \
[None] * len(self.h_prevs) + [self.x]
self.check_forward_valid_none(*inputs)
@attr.gpu
def test_forward_none_ch_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
inputs = [None] * len(self.c_prevs) + \
[None] * len(self.h_prevs) + \
[cuda.to_gpu(self.x)]
self.check_forward_valid_none(*inputs)
def test_forward_none_x_cpu(self):
inputs = self.c_prevs + self.h_prevs + [None]
self.check_forward_valid_none(*inputs)
@attr.gpu
def test_forward_none_x_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
inputs = [cuda.to_gpu(v) for v in self.c_prevs] + \
[cuda.to_gpu(v) for v in self.h_prevs] + [None]
self.check_forward_valid_none(*inputs)
def check_forward_invalid_none(self, *inputs_data):
inputs_variable = [chainer.Variable(v)
if v is not None else v for v in inputs_data]
self.assertRaises(ValueError, self.link, *inputs_variable)
def test_forward_none_chx_cpu(self):
inputs = [None] * len(self.inputs)
self.check_forward_invalid_none(*inputs)
@attr.gpu
def test_forward_none_chx_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
inputs = [None] * len(self.inputs)
self.check_forward_invalid_none(*inputs)
def check_backward(self, c_grad, h_grad, *inputs):
gradient_check.check_backward(
self.link,
inputs,
(c_grad, h_grad),
**self.check_backward_options)
@condition.retry(3)
def test_full_backward_cpu(self):
self.check_backward(self.gc, self.gh, *self.inputs)
@condition.retry(3)
def test_no_gc_backward_cpu(self):
self.check_backward(None, self.gh, *self.inputs)
@condition.retry(3)
def test_no_gh_backward_cpu(self):
self.check_backward(self.gc, None, *self.inputs)
@attr.gpu
@condition.retry(3)
def test_full_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.gc), cuda.to_gpu(self.gh),
*[cuda.to_gpu(v) for v in self.inputs])
@attr.gpu
@condition.retry(3)
def test_no_gc_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(None, cuda.to_gpu(self.gh),
*[cuda.to_gpu(v) for v in self.inputs])
@attr.gpu
@condition.retry(3)
def test_no_gh_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_backward(cuda.to_gpu(self.gc), None,
*[cuda.to_gpu(v) for v in self.inputs])
testing.run_module(__name__, __file__)
| 11,274
| 34.793651
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/rnn_tests/test_link_n_step_gru.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
def sigmoid(x):
return numpy.tanh(x * 0.5) * 0.5 + 0.5
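# In the reference computations below, the per-layer parameters map to GRU
# gates as follows (matching cuDNN's ordering): (w0, b0)/(w3, b3) reset gate,
# (w1, b1)/(w4, b4) update gate, (w2, b2)/(w5, b5) candidate, where w0-w2 act
# on the input and w3-w5 on the previous hidden state.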
@testing.parameterize(*testing.product({
'hidden_none': [True, False],
}))
class TestNStepGRU(unittest.TestCase):
lengths = [3, 1, 2]
n_layers = 2
in_size = 3
out_size = 2
dropout = 0.0
def setUp(self):
shape = (self.n_layers, len(self.lengths), self.out_size)
if self.hidden_none:
self.h = numpy.zeros(shape, 'f')
else:
self.h = numpy.random.uniform(-1, 1, shape).astype('f')
self.xs = [
numpy.random.uniform(-1, 1, (l, self.in_size)).astype('f')
for l in self.lengths]
self.gh = numpy.random.uniform(-1, 1, shape).astype('f')
self.gys = [
numpy.random.uniform(-1, 1, (l, self.out_size)).astype('f')
for l in self.lengths]
self.rnn = links.NStepGRU(
self.n_layers, self.in_size, self.out_size, self.dropout)
for layer in self.rnn:
for p in layer.params():
p.array[...] = numpy.random.uniform(-1, 1, p.shape)
self.rnn.cleargrads()
def check_forward(self, h_data, xs_data):
if self.hidden_none:
h = None
else:
h = chainer.Variable(h_data)
xs = [chainer.Variable(x) for x in xs_data]
hy, ys = self.rnn(h, xs)
assert hy.shape == h_data.shape
assert len(xs) == len(ys)
for x, y in zip(xs, ys):
assert len(x) == len(y)
assert y.shape[1] == self.out_size
with testing.assert_warns(DeprecationWarning):
self.rnn.to_cpu()
for batch, seq in enumerate(self.xs):
for layer in range(self.n_layers):
p = self.rnn[layer]
h_prev = self.h[layer, batch]
hs = []
for x in seq:
# GRU
z = sigmoid(
x.dot(p.w1.array.T) + h_prev.dot(p.w4.array.T) +
p.b1.array + p.b4.array)
r = sigmoid(
x.dot(p.w0.array.T) + h_prev.dot(p.w3.array.T) +
p.b0.array + p.b3.array)
h_bar = numpy.tanh(
x.dot(p.w2.array.T) +
r * ((h_prev).dot(p.w5.array.T) + p.b5.array) +
p.b2.array)
e_h = (1 - z) * h_bar + z * h_prev
h_prev = e_h
hs.append(e_h)
seq = hs
testing.assert_allclose(hy.array[layer, batch], h_prev)
for y, ey in zip(ys[batch].array, seq):
testing.assert_allclose(y, ey)
def test_forward_cpu_train(self):
with chainer.using_config('train', True):
self.check_forward(self.h, self.xs)
@attr.gpu
def test_forward_gpu_train(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'always'), \
chainer.using_config('train', True):
self.check_forward(
cuda.to_gpu(self.h),
[cuda.to_gpu(x) for x in self.xs])
def test_forward_cpu_test(self):
with chainer.using_config('train', False):
self.check_forward(self.h, self.xs)
@attr.gpu
def test_forward_gpu_test(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'always'), \
chainer.using_config('train', False):
self.check_forward(
cuda.to_gpu(self.h),
[cuda.to_gpu(x) for x in self.xs])
def check_multi_gpu_forward(self, train=True):
        # See chainer/chainer#6262
        # NStepGRU with cuDNN and dropout should work even when the link's
        # device is not the current device
msg = None
rnn = self.rnn.copy('copy')
rnn.dropout = .5
with cuda.get_device_from_id(1):
if self.hidden_none:
h = None
else:
h = cuda.to_gpu(self.h)
xs = [cuda.to_gpu(x) for x in self.xs]
with testing.assert_warns(DeprecationWarning):
rnn = rnn.to_gpu()
with cuda.get_device_from_id(0),\
chainer.using_config('train', train),\
chainer.using_config('use_cudnn', 'always'):
try:
rnn(h, xs)
except Exception as e:
msg = e
assert msg is None
@attr.cudnn
@attr.multi_gpu(2)
def test_multi_gpu_forward_training(self):
self.check_multi_gpu_forward(True)
@attr.cudnn
@attr.multi_gpu(2)
def test_multi_gpu_forward_test(self):
self.check_multi_gpu_forward(False)
def check_backward(
self, h_data, xs_data, gh_data, gys_data):
def fun(*args):
if self.hidden_none:
h = None
xs = args
else:
h, = args[:1]
xs = args[1:]
hy, ys = self.rnn(h, xs)
return tuple([hy, ] + list(ys))
params = []
for layer in self.rnn:
for p in layer.params():
params.append(p)
if self.hidden_none:
in_data = xs_data
else:
in_data = [h_data] + xs_data
gradient_check.check_backward(
fun, tuple(in_data),
tuple([gh_data] + gys_data),
tuple(params), eps=1e-2, rtol=1e-3, atol=1e-3)
def test_backward_cpu(self):
self.check_backward(self.h, self.xs, self.gh, self.gys)
@attr.gpu
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'auto'):
self.check_backward(
cuda.to_gpu(self.h),
[cuda.to_gpu(x) for x in self.xs],
cuda.to_gpu(self.gh),
[cuda.to_gpu(gy) for gy in self.gys])
def test_n_cells(self):
assert self.rnn.n_cells == 1
@testing.parameterize(*testing.product({
'hidden_none': [True, False],
}))
class TestNStepBiGRU(unittest.TestCase):
lengths = [3, 1, 2]
n_layers = 2
in_size = 3
out_size = 2
dropout = 0.0
def setUp(self):
shape = (self.n_layers * 2, len(self.lengths), self.out_size)
if self.hidden_none:
self.h = numpy.zeros(shape, 'f')
else:
self.h = numpy.random.uniform(-1, 1, shape).astype('f')
self.xs = [
numpy.random.uniform(-1, 1, (l, self.in_size)).astype('f')
for l in self.lengths]
self.gh = numpy.random.uniform(-1, 1, shape).astype('f')
self.gys = [
numpy.random.uniform(-1, 1, (l, self.out_size * 2)).astype('f')
for l in self.lengths]
self.rnn = links.NStepBiGRU(
self.n_layers, self.in_size, self.out_size, self.dropout)
for layer in self.rnn:
for p in layer.params():
p.array[...] = numpy.random.uniform(-1, 1, p.shape)
self.rnn.cleargrads()
def check_forward(self, h_data, xs_data):
if self.hidden_none:
h = None
else:
h = chainer.Variable(h_data)
xs = [chainer.Variable(x) for x in xs_data]
hy, ys = self.rnn(h, xs)
assert hy.shape == h_data.shape
assert len(xs) == len(ys)
for x, y in zip(xs, ys):
assert len(x) == len(y)
assert y.shape[1] == self.out_size * 2
with testing.assert_warns(DeprecationWarning):
self.rnn.to_cpu()
for batch, seq in enumerate(self.xs):
for layer in range(self.n_layers):
# forward
di = 0
layer_idx = layer * 2 + di
p = self.rnn[layer_idx]
h_prev = self.h[layer_idx, batch]
hs_f = []
for x in seq:
# GRU
z = sigmoid(
x.dot(p.w1.array.T) + h_prev.dot(p.w4.array.T) +
p.b1.array + p.b4.array)
r = sigmoid(
x.dot(p.w0.array.T) + h_prev.dot(p.w3.array.T) +
p.b0.array + p.b3.array)
h_bar = numpy.tanh(x.dot(p.w2.array.T) +
r * ((h_prev).dot(p.w5.array.T) +
p.b5.array) + p.b2.array)
e_h = (1 - z) * h_bar + z * h_prev
h_prev = e_h
hs_f.append(e_h)
testing.assert_allclose(hy.array[layer_idx, batch], h_prev)
# backward
di = 1
layer_idx = layer * 2 + di
p = self.rnn[layer_idx]
h_prev = self.h[layer_idx, batch]
hs_b = []
for x in reversed(seq):
# GRU
z = sigmoid(
x.dot(p.w1.array.T) + h_prev.dot(p.w4.array.T) +
p.b1.array + p.b4.array)
r = sigmoid(
x.dot(p.w0.array.T) + h_prev.dot(p.w3.array.T) +
p.b0.array + p.b3.array)
h_bar = numpy.tanh(x.dot(p.w2.array.T) +
r * ((h_prev).dot(p.w5.array.T) +
p.b5.array) + p.b2.array)
e_h = (1 - z) * h_bar + z * h_prev
h_prev = e_h
hs_b.append(e_h)
testing.assert_allclose(hy.array[layer_idx, batch], h_prev)
hs_b.reverse()
seq = [numpy.concatenate([hfi, hbi], axis=0) for (hfi, hbi)
in zip(hs_f, hs_b)]
for y, ey in zip(ys[batch].array, seq):
testing.assert_allclose(y, ey)
def test_forward_cpu_train(self):
with chainer.using_config('train', True):
self.check_forward(self.h, self.xs)
@attr.gpu
def test_forward_gpu_train(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'always'), \
chainer.using_config('train', True):
self.check_forward(
cuda.to_gpu(self.h),
[cuda.to_gpu(x) for x in self.xs])
def test_forward_cpu_test(self):
with chainer.using_config('train', False):
self.check_forward(self.h, self.xs)
@attr.gpu
def test_forward_gpu_test(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'always'), \
chainer.using_config('train', False):
self.check_forward(
cuda.to_gpu(self.h),
[cuda.to_gpu(x) for x in self.xs])
def check_multi_gpu_forward(self, train=True):
        # See chainer/chainer#6262
        # NStepBiGRU with cuDNN and dropout should work even when the
        # link's device is not the current device
msg = None
rnn = self.rnn.copy('copy')
rnn.dropout = .5
with cuda.get_device_from_id(1):
if self.hidden_none:
h = None
else:
h = cuda.to_gpu(self.h)
xs = [cuda.to_gpu(x) for x in self.xs]
with testing.assert_warns(DeprecationWarning):
rnn = rnn.to_gpu()
with cuda.get_device_from_id(0),\
chainer.using_config('train', train),\
chainer.using_config('use_cudnn', 'always'):
try:
rnn(h, xs)
except Exception as e:
msg = e
assert msg is None
@attr.cudnn
@attr.multi_gpu(2)
def test_multi_gpu_forward_training(self):
self.check_multi_gpu_forward(True)
@attr.cudnn
@attr.multi_gpu(2)
def test_multi_gpu_forward_test(self):
self.check_multi_gpu_forward(False)
def check_backward(
self, h_data, xs_data, gh_data, gys_data):
def fun(*args):
if self.hidden_none:
h = None
xs = args
else:
h, = args[:1]
xs = args[1:]
hy, ys = self.rnn(h, xs)
return tuple([hy, ] + list(ys))
params = []
for layer in self.rnn:
for p in layer.params():
params.append(p)
if self.hidden_none:
in_data = xs_data
else:
in_data = [h_data] + xs_data
gradient_check.check_backward(
fun, tuple(in_data),
tuple([gh_data] + gys_data),
tuple(params), eps=1e-2, rtol=1e-3, atol=1e-3)
def test_backward_cpu(self):
self.check_backward(
self.h, self.xs, self.gh, self.gys)
@attr.gpu
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'auto'):
self.check_backward(
cuda.to_gpu(self.h),
[cuda.to_gpu(x) for x in self.xs],
cuda.to_gpu(self.gh),
[cuda.to_gpu(gy) for gy in self.gys])
def test_n_cells(self):
assert self.rnn.n_cells == 1
testing.run_module(__name__, __file__)
| 13,712
| 32.123188
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/model_tests/test_classifier.py
|
import unittest
import mock
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import links
from chainer import testing
from chainer.testing import attr
# testing.parameterize takes a list of dictionaries.
# Currently, we cannot set a function to the value of the dictionaries.
# As a workaround, we wrap the function and invoke it in __call__ method.
# See issue #1337 for detail.
class AccuracyWithIgnoreLabel(object):
def __call__(self, y, t):
return functions.accuracy(y, t, ignore_label=1)
@testing.parameterize(*testing.product({
'accfun': [AccuracyWithIgnoreLabel(), None],
'compute_accuracy': [True, False],
}))
class TestClassifier(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (5, 10)).astype(numpy.float32)
self.t = numpy.random.randint(3, size=5).astype(numpy.int32)
self.y = numpy.random.uniform(-1, 1, (5, 7)).astype(numpy.float32)
def check_call(
self, gpu, label_key, args, kwargs, model_args, model_kwargs):
init_kwargs = {'label_key': label_key}
if self.accfun is not None:
init_kwargs['accfun'] = self.accfun
link = links.Classifier(chainer.Link(), **init_kwargs)
if gpu:
xp = cuda.cupy
with testing.assert_warns(DeprecationWarning):
link.to_gpu()
else:
xp = numpy
link.compute_accuracy = self.compute_accuracy
y = chainer.Variable(self.y)
link.predictor = mock.MagicMock(return_value=y)
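        # The predictor is replaced with a mock so the test can verify which
        # positional and keyword arguments Classifier forwards to it,
        # independently of any real model computation.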
loss = link(*args, **kwargs)
link.predictor.assert_called_with(*model_args, **model_kwargs)
self.assertTrue(hasattr(link, 'y'))
self.assertIsNotNone(link.y)
self.assertTrue(hasattr(link, 'loss'))
xp.testing.assert_allclose(link.loss.data, loss.data)
self.assertTrue(hasattr(link, 'accuracy'))
if self.compute_accuracy:
self.assertIsNotNone(link.accuracy)
else:
self.assertIsNone(link.accuracy)
def test_call_cpu(self):
self.check_call(
False, -1, (self.x, self.t), {}, (self.x,), {})
def test_call_three_args_cpu(self):
self.check_call(
False, -1, (self.x, self.x, self.t), {}, (self.x, self.x), {})
def test_call_positive_cpu(self):
self.check_call(
False, 2, (self.x, self.x, self.t), {}, (self.x, self.x), {})
def test_call_kwargs_cpu(self):
self.check_call(
False, 't', (self.x,), {'t': self.t}, (self.x,), {})
def test_call_no_arg_cpu(self):
self.check_call(
False, 0, (self.t,), {}, (), {})
@attr.gpu
def test_call_gpu(self):
self.to_gpu()
self.check_call(
True, -1, (self.x, self.t), {}, (self.x,), {})
@attr.gpu
def test_call_three_args_gpu(self):
self.to_gpu()
self.check_call(
True, -1, (self.x, self.x, self.t), {}, (self.x, self.x), {})
@attr.gpu
def test_call_positive_gpu(self):
self.to_gpu()
self.check_call(
True, 2, (self.x, self.x, self.t), {}, (self.x, self.x), {})
@attr.gpu
def test_call_kwargs_gpu(self):
self.to_gpu()
self.check_call(
True, 't', (self.x,), {'t': self.t}, (self.x,), {})
@attr.gpu
def test_call_no_arg_gpu(self):
self.to_gpu()
self.check_call(
True, 0, (self.t,), {}, (), {})
def to_gpu(self):
self.x = cuda.to_gpu(self.x)
self.t = cuda.to_gpu(self.t)
self.y = cuda.to_gpu(self.y)
class TestInvalidArgument(unittest.TestCase):
def setUp(self):
self.link = links.Classifier(links.Linear(10, 3))
self.x = numpy.random.uniform(-1, 1, (5, 10)).astype(numpy.float32)
def check_invalid_argument(self):
x = chainer.Variable(self.link.xp.asarray(self.x))
with self.assertRaises(TypeError):
# link.__call__ raises TypeError as the number of arguments
# is illegal
self.link(x)
def test_invalid_argument_cpu(self):
self.check_invalid_argument()
@attr.gpu
def test_invalid_argument_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_invalid_argument()
class TestInvalidLabelKey(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (5, 10)).astype(numpy.float32)
def test_invalid_label_key_type(self):
with six.assertRaisesRegex(
self, TypeError, 'label_key must be int or str'):
links.Classifier(links.Linear(10, 3), label_key=None)
def check_invalid_key(self, gpu, label_key):
link = links.Classifier(links.Linear(10, 3), label_key=label_key)
if gpu:
with testing.assert_warns(DeprecationWarning):
link.to_gpu()
x = chainer.Variable(link.xp.asarray(self.x))
with six.assertRaisesRegex(self, ValueError, 'Label key'):
link(x)
def test_invalid_index_cpu(self):
self.check_invalid_key(False, 1)
@attr.gpu
    def test_invalid_index_gpu(self):
self.check_invalid_key(True, 1)
def test_invalid_index_too_small_cpu(self):
self.check_invalid_key(False, -2)
@attr.gpu
def test_invalid_index_too_small_gpu(self):
self.check_invalid_key(True, -2)
def test_invalid_str_key_cpu(self):
self.check_invalid_key(False, 't')
@attr.gpu
def test_invalid_str_key_gpu(self):
self.check_invalid_key(True, 't')
testing.run_module(__name__, __file__)
| 5711
| 28.905759
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/model_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/model_tests/test_vision.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer.links.model.vision import googlenet
from chainer.links.model.vision import resnet
from chainer.links.model.vision import vgg
from chainer import testing
from chainer.testing import attr
from chainer.variable import Variable
@testing.parameterize(*testing.product({
'dtype': [numpy.float32],
'n_layers': [50, 101, 152],
'downsample_fb': [True, False],
}) + [{
'dtype': numpy.float16,
'n_layers': 50,
'downsample_fb': False,
}])
@unittest.skipUnless(resnet.available, 'Pillow is required')
@attr.slow
class TestResNetLayers(unittest.TestCase):
def setUp(self):
self._config_user = chainer.using_config('dtype', self.dtype)
self._config_user.__enter__()
if self.n_layers == 50:
self.link = resnet.ResNet50Layers(
pretrained_model=None, downsample_fb=self.downsample_fb)
elif self.n_layers == 101:
self.link = resnet.ResNet101Layers(
pretrained_model=None, downsample_fb=self.downsample_fb)
elif self.n_layers == 152:
self.link = resnet.ResNet152Layers(
pretrained_model=None, downsample_fb=self.downsample_fb)
def tearDown(self):
self._config_user.__exit__(None, None, None)
def test_available_layers(self):
result = self.link.available_layers
assert isinstance(result, list)
assert len(result) == 9
def check_call(self):
xp = self.link.xp
# Suppress warning that arises from zero division in BatchNormalization
with numpy.errstate(divide='ignore'):
x1 = Variable(xp.asarray(numpy.random.uniform(
-1, 1, (1, 3, 224, 224)).astype(self.dtype)))
y1 = cuda.to_cpu(self.link(x1)['prob'].data)
assert y1.shape == (1, 1000)
x2 = Variable(xp.asarray(numpy.random.uniform(
-1, 1, (1, 3, 128, 128)).astype(self.dtype)))
y2 = cuda.to_cpu(self.link(x2, layers=['pool5'])['pool5'].data)
assert y2.shape == (1, 2048)
def test_call_cpu(self):
self.check_call()
@attr.gpu
def test_call_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_call()
def test_prepare(self):
x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
x3 = numpy.random.uniform(0, 255, (160, 120, 3)).astype(self.dtype)
x4 = numpy.random.uniform(0, 255, (1, 160, 120)).astype(self.dtype)
x5 = numpy.random.uniform(0, 255, (3, 160, 120)).astype(numpy.uint8)
y1 = resnet.prepare(x1)
assert y1.shape == (3, 224, 224)
assert y1.dtype == self.dtype
y2 = resnet.prepare(x2)
assert y2.shape == (3, 224, 224)
assert y2.dtype == self.dtype
y3 = resnet.prepare(x3, size=None)
assert y3.shape == (3, 160, 120)
assert y3.dtype == self.dtype
y4 = resnet.prepare(x4)
assert y4.shape == (3, 224, 224)
assert y4.dtype == self.dtype
y5 = resnet.prepare(x5, size=None)
assert y5.shape == (3, 160, 120)
assert y5.dtype == self.dtype
def check_extract(self):
x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
with numpy.errstate(divide='ignore'):
result = self.link.extract([x1, x2], layers=['res3', 'pool5'])
assert len(result) == 2
y1 = cuda.to_cpu(result['res3'].data)
assert y1.shape == (2, 512, 28, 28)
assert y1.dtype == self.dtype
y2 = cuda.to_cpu(result['pool5'].data)
assert y2.shape == (2, 2048)
assert y2.dtype == self.dtype
x3 = numpy.random.uniform(0, 255, (80, 60)).astype(numpy.uint8)
result = self.link.extract([x3], layers=['res2'], size=None)
assert len(result) == 1
y3 = cuda.to_cpu(result['res2'].data)
assert y3.shape == (1, 256, 20, 15)
assert y3.dtype == self.dtype
def test_extract_cpu(self):
err = 'ignore' if self.dtype is numpy.float16 else None
with numpy.errstate(over=err): # ignore FP16 overflow
self.check_extract()
@attr.gpu
def test_extract_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_extract()
def check_predict(self):
x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
with numpy.errstate(divide='ignore'):
result = self.link.predict([x1, x2], oversample=False)
y = cuda.to_cpu(result.data)
assert y.shape == (2, 1000)
assert y.dtype == self.dtype
result = self.link.predict([x1, x2], oversample=True)
y = cuda.to_cpu(result.data)
assert y.shape == (2, 1000)
assert y.dtype == self.dtype
def test_predict_cpu(self):
err = 'ignore' if self.dtype is numpy.float16 else None
with numpy.errstate(over=err): # ignore FP16 overflow
self.check_predict()
@attr.gpu
def test_predict_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_predict()
def check_copy(self):
copied = self.link.copy()
assert copied.conv1 is copied.functions['conv1'][0]
assert (
copied.res2.a is
getattr(copied.res2, copied.res2._forward[0]))
def test_copy_cpu(self):
self.check_copy()
@attr.gpu
def test_copy_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_copy()
@testing.parameterize(*testing.product({
'n_layers': [16, 19],
'dtype': [numpy.float16, numpy.float32],
}))
@unittest.skipUnless(resnet.available, 'Pillow is required')
@attr.slow
class TestVGGs(unittest.TestCase):
def setUp(self):
self._config_user = chainer.using_config('dtype', self.dtype)
self._config_user.__enter__()
if self.n_layers == 16:
self.link = vgg.VGG16Layers(pretrained_model=None)
elif self.n_layers == 19:
self.link = vgg.VGG19Layers(pretrained_model=None)
def tearDown(self):
self._config_user.__exit__(None, None, None)
def test_available_layers(self):
result = self.link.available_layers
assert isinstance(result, list)
if self.n_layers == 16:
assert len(result) == 22
elif self.n_layers == 19:
assert len(result) == 25
def check_call(self):
xp = self.link.xp
x1 = Variable(xp.asarray(numpy.random.uniform(
-1, 1, (1, 3, 224, 224)).astype(self.dtype)))
y1 = cuda.to_cpu(self.link(x1)['prob'].data)
assert y1.shape == (1, 1000)
def test_call_cpu(self):
self.check_call()
@attr.gpu
def test_call_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_call()
def test_prepare(self):
x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
x3 = numpy.random.uniform(0, 255, (160, 120, 3)).astype(self.dtype)
x4 = numpy.random.uniform(0, 255, (1, 160, 120)).astype(self.dtype)
x5 = numpy.random.uniform(0, 255, (3, 160, 120)).astype(numpy.uint8)
y1 = vgg.prepare(x1)
assert y1.shape == (3, 224, 224)
assert y1.dtype == self.dtype
y2 = vgg.prepare(x2)
assert y2.shape == (3, 224, 224)
assert y2.dtype == self.dtype
y3 = vgg.prepare(x3, size=None)
assert y3.shape == (3, 160, 120)
assert y3.dtype == self.dtype
y4 = vgg.prepare(x4)
assert y4.shape == (3, 224, 224)
assert y4.dtype == self.dtype
y5 = vgg.prepare(x5, size=None)
assert y5.shape == (3, 160, 120)
assert y5.dtype == self.dtype
def check_extract(self):
x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
result = self.link.extract([x1, x2], layers=['pool3', 'fc7'])
assert len(result) == 2
y1 = cuda.to_cpu(result['pool3'].data)
assert y1.shape == (2, 256, 28, 28)
assert y1.dtype == self.dtype
y2 = cuda.to_cpu(result['fc7'].data)
assert y2.shape == (2, 4096)
assert y2.dtype == self.dtype
x3 = numpy.random.uniform(0, 255, (80, 60)).astype(numpy.uint8)
result = self.link.extract([x3], layers=['pool1'], size=None)
assert len(result) == 1
y3 = cuda.to_cpu(result['pool1'].data)
assert y3.shape == (1, 64, 40, 30)
assert y3.dtype == self.dtype
def test_extract_cpu(self):
self.check_extract()
@attr.gpu
def test_extract_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_extract()
def check_predict(self):
x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
result = self.link.predict([x1, x2], oversample=False)
y = cuda.to_cpu(result.data)
assert y.shape == (2, 1000)
assert y.dtype == self.dtype
result = self.link.predict([x1, x2], oversample=True)
y = cuda.to_cpu(result.data)
assert y.shape == (2, 1000)
assert y.dtype == self.dtype
def test_predict_cpu(self):
self.check_predict()
@attr.gpu
def test_predict_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_predict()
def check_copy(self):
copied = self.link.copy()
assert copied.conv1_1 is copied.functions['conv1_1'][0]
def test_copy_cpu(self):
self.check_copy()
@attr.gpu
def test_copy_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_copy()
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32],
}))
@unittest.skipUnless(googlenet.available, 'Pillow is required')
@attr.slow
class TestGoogLeNet(unittest.TestCase):
def setUp(self):
self._config_user = chainer.using_config('dtype', self.dtype)
self._config_user.__enter__()
self.link = googlenet.GoogLeNet(pretrained_model=None)
def tearDown(self):
self._config_user.__exit__(None, None, None)
def test_available_layers(self):
result = self.link.available_layers
assert isinstance(result, list)
assert len(result) == 21
def check_call_prob(self):
xp = self.link.xp
x = Variable(xp.asarray(numpy.random.uniform(
-1, 1, (1, 3, 224, 224)).astype(self.dtype)))
y = cuda.to_cpu(self.link(x)['prob'].data)
assert y.shape == (1, 1000)
def check_call_loss1_fc2(self):
xp = self.link.xp
x = Variable(xp.asarray(numpy.random.uniform(
-1, 1, (1, 3, 224, 224)).astype(self.dtype)))
y = cuda.to_cpu(self.link(x, ['loss1_fc2'])['loss1_fc2'].data)
assert y.shape == (1, 1000)
def check_call_loss2_fc2(self):
xp = self.link.xp
x = Variable(xp.asarray(numpy.random.uniform(
-1, 1, (1, 3, 224, 224)).astype(self.dtype)))
y = cuda.to_cpu(self.link(x, ['loss2_fc2'])['loss2_fc2'].data)
assert y.shape == (1, 1000)
def test_call_cpu(self):
self.check_call_prob()
self.check_call_loss1_fc2()
self.check_call_loss2_fc2()
@attr.gpu
def test_call_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_call_prob()
self.check_call_loss1_fc2()
self.check_call_loss2_fc2()
def test_prepare(self):
x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
x3 = numpy.random.uniform(0, 255, (160, 120, 3)).astype(self.dtype)
x4 = numpy.random.uniform(0, 255, (1, 160, 120)).astype(self.dtype)
x5 = numpy.random.uniform(0, 255, (3, 160, 120)).astype(numpy.uint8)
y1 = googlenet.prepare(x1)
assert y1.shape == (3, 224, 224)
        assert y1.dtype == self.dtype
        y2 = googlenet.prepare(x2)
        assert y2.shape == (3, 224, 224)
        assert y2.dtype == self.dtype
        y3 = googlenet.prepare(x3, size=None)
        assert y3.shape == (3, 160, 120)
        assert y3.dtype == self.dtype
        y4 = googlenet.prepare(x4)
        assert y4.shape == (3, 224, 224)
        assert y4.dtype == self.dtype
        y5 = googlenet.prepare(x5, size=None)
        assert y5.shape == (3, 160, 120)
        assert y5.dtype == self.dtype
def check_extract(self):
x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
result = self.link.extract([x1, x2], layers=['pool5', 'loss3_fc'])
assert len(result) == 2
y1 = cuda.to_cpu(result['pool5'].data)
assert y1.shape == (2, 1024, 1, 1)
assert y1.dtype == self.dtype
y2 = cuda.to_cpu(result['loss3_fc'].data)
assert y2.shape == (2, 1000)
assert y2.dtype == self.dtype
x3 = numpy.random.uniform(0, 255, (80, 60)).astype(numpy.uint8)
result = self.link.extract([x3], layers=['pool1'], size=None)
assert len(result) == 1
y3 = cuda.to_cpu(result['pool1'].data)
assert y3.shape == (1, 64, 20, 15)
assert y3.dtype == self.dtype
def test_extract_cpu(self):
err = 'ignore' if self.dtype is numpy.float16 else None
with numpy.errstate(over=err): # ignore FP16 overflow
self.check_extract()
@attr.gpu
def test_extract_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_extract()
def check_predict(self):
x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
result = self.link.predict([x1, x2], oversample=False)
y = cuda.to_cpu(result.data)
assert y.shape == (2, 1000)
assert y.dtype == self.dtype
result = self.link.predict([x1, x2], oversample=True)
y = cuda.to_cpu(result.data)
assert y.shape == (2, 1000)
assert y.dtype == self.dtype
def test_predict_cpu(self):
err = 'ignore' if self.dtype is numpy.float16 else None
with numpy.errstate(over=err): # ignore FP16 overflow
self.check_predict()
@attr.gpu
def test_predict_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_predict()
def check_copy(self):
copied = self.link.copy()
assert copied.conv1 is copied.functions['conv1'][0]
def test_copy_cpu(self):
self.check_copy()
@attr.gpu
def test_copy_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_copy()
testing.run_module(__name__, __file__)
| 15765
| 33.726872
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/normalization_tests/test_batch_renormalization.py
|
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import links
from chainer import testing
from chainer.testing import attr
def _batch_renormalization(expander, gamma, beta, x, mean, std, test,
r, d):
mean = mean[expander]
std = std[expander]
if test:
r, d = 1, 0
y_expect = gamma * ((x - mean) / std * r + d) + beta
return y_expect
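# The r and d passed to the helper above are the Batch Renormalization
# (Ioffe, 2017) correction terms; BatchRenormalizationTest.check_forward
# below computes them as
#     r = clip(sigma_batch / running_sigma, 1 / rmax, rmax)
#     d = clip((mean - running_mean) / running_sigma, -dmax, dmax)
# with sigma_batch = sqrt(var + eps) and running_sigma = sqrt(running_var +
# eps). At test time r = 1 and d = 0, so the expression reduces to plain
# batch normalization with the stored statistics.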
@testing.parameterize(*(testing.product({
'test': [True, False],
'ndim': [0, 1, 2, 3],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'eps': [2e-5, 0.5],
})))
class BatchRenormalizationTest(unittest.TestCase):
def setUp(self):
self.expander = (None, Ellipsis) + (None,) * self.ndim
self.aggr_axes = (0,) + tuple(six.moves.range(2, self.ndim + 2))
self.rmax = self.dtype(3)
self.dmax = self.dtype(5)
self.link = links.BatchRenormalization(
3, rmax=self.rmax, dmax=self.dmax,
dtype=self.dtype, eps=self.eps)
gamma = self.link.gamma.array
gamma[...] = numpy.random.uniform(.5, 1, gamma.shape)
beta = self.link.beta.array
beta[...] = numpy.random.uniform(-1, 1, beta.shape)
self.link.cleargrads()
self.gamma = gamma.copy()[self.expander] # fixed on CPU
self.beta = beta.copy()[self.expander] # fixed on CPU
shape = (5, 3) + (2,) * self.ndim
self.x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
if self.test:
self.mean = numpy.random.uniform(-1, 1, (3,)).astype(self.dtype)
self.var = numpy.random.uniform(0.5, 1, (3,)).astype(self.dtype)
self.running_mean = self.mean
self.running_var = self.var
else:
self.mean = self.x.mean(axis=self.aggr_axes)
self.var = self.x.var(axis=self.aggr_axes)
# Need to add some noise to running_mean and running_var,
# otherwise we will always get r=1, d=0
# Note that numpy.exp(3) > rmax ** 2 and 7 > dmax
self.running_var = self.var * numpy.exp(
numpy.random.uniform(-3, 3, self.var.shape)).astype(self.dtype)
self.running_mean = self.mean + (
(numpy.sqrt(self.running_var) + 0.1)
* numpy.random.uniform(-7, 7, self.mean.shape)
).astype(self.dtype)
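            # Concretely (ignoring eps): at the extremes of the exponent the
            # ratio sigma_batch / running_sigma is exp(-1.5) ~= 0.22 or
            # exp(1.5) ~= 4.48, i.e. beyond the clip bounds 1 / rmax ~= 0.33
            # and rmax = 3, so both clips are actually exercised.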
self.link.avg_mean[...] = self.running_mean
self.link.avg_var[...] = self.running_var
        self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
        self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
        if self.dtype == numpy.float16:
            self.check_forward_options = {'atol': 1e-2, 'rtol': 5e-3}
            self.check_backward_options = {'atol': 5e-1, 'rtol': 1e-1}
def check_forward(self, x_data):
with chainer.using_config('train', not self.test):
x = chainer.Variable(x_data)
y = self.link(x)
self.assertEqual(y.dtype, self.dtype)
sigma_batch = numpy.sqrt(self.var + self.eps)
running_sigma = numpy.sqrt(self.running_var + self.eps)
r = numpy.clip(sigma_batch / running_sigma, 1.0 / self.rmax, self.rmax)
d = numpy.clip((self.mean - self.running_mean) / running_sigma,
-self.dmax, self.dmax)
y_expect = _batch_renormalization(
self.expander, self.gamma, self.beta, self.x, self.mean,
sigma_batch, self.test,
r[self.expander], d[self.expander])
testing.assert_allclose(
            y.array, y_expect, **self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
@attr.multi_gpu(2)
def test_forward_multi_gpu(self):
with cuda.get_device_from_id(1):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
with cuda.get_device_from_id(0):
self.check_forward(x)
@testing.parameterize(
{'nx': 10, 'ny': 10, 'eps': 2e-5},
{'nx': 10, 'ny': 10, 'eps': 1e-1},
# TODO(Kenta Oono)
# Pass the case below (this test does not pass when nx != ny).
# {'nx': 10, 'ny': 15}
)
class TestPopulationStatistics(unittest.TestCase):
def setUp(self):
self.decay = 0.9
self.size = 3
self.link = links.BatchRenormalization(
self.size, decay=self.decay, eps=self.eps)
self.x = numpy.random.uniform(
-1, 1, (self.nx, self.size)).astype(numpy.float32)
self.y = numpy.random.uniform(
-1, 1, (self.ny, self.size)).astype(numpy.float32)
def check_statistics(self, x, y):
x = chainer.Variable(x)
self.link(x, finetune=True)
mean = self.x.mean(axis=0)
testing.assert_allclose(self.link.avg_mean, mean)
unbiased_var = self.x.var(axis=0) * self.nx / (self.nx - 1)
testing.assert_allclose(self.link.avg_var, unbiased_var)
y = chainer.Variable(y)
with chainer.using_config('train', False):
self.link(y, finetune=True)
testing.assert_allclose(self.link.avg_mean, mean)
testing.assert_allclose(self.link.avg_var, unbiased_var)
def test_statistics_cpu(self):
self.check_statistics(self.x, self.y)
@attr.gpu
def test_statistics_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_statistics(cuda.to_gpu(self.x), cuda.to_gpu(self.y))
def check_statistics2(self, x, y):
x = chainer.Variable(x)
y = chainer.Variable(y)
self.link(x, finetune=True)
self.link(y, finetune=True)
mean = (self.x.sum(axis=0) + self.y.sum(axis=0)) / (self.nx + self.ny)
var = (self.x.var(axis=0) * self.nx +
self.y.var(axis=0) * self.ny) / (self.nx + self.ny)
# TODO(Kenta Oono)
# Fix the estimate of the unbiased variance.
# Unbiased variance should be (nx + ny) / (nx + ny - 1) times of
# the variance.
        # But the multiplier is ny / (ny - 1) in the current implementation;
        # these two values differ when nx is not equal to ny.
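        # For example, with nx = 10 and ny = 15 the correct factor would be
        # 25 / 24 whereas the implementation applies 15 / 14, which is why
        # the nx != ny case is commented out in the parameterization above.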
unbiased_var = var * self.ny / (self.ny - 1)
testing.assert_allclose(self.link.avg_mean, mean)
testing.assert_allclose(self.link.avg_var, unbiased_var)
def test_statistics2_cpu(self):
self.check_statistics2(self.x, self.y)
@attr.gpu
def test_statistics2_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_statistics2(
cuda.to_gpu(self.x),
cuda.to_gpu(self.y))
testing.run_module(__name__, __file__)
| 7033
| 35.827225
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/normalization_tests/test_batch_normalization.py
|
import unittest
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import gradient_check
from chainer import initializers
from chainer import links
from chainer import memory_layouts
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import type_check
import chainerx
_parameterize = testing.parameterize(*(testing.product_dict(
testing.product({
'test': [True, False],
'size': ['skip', 'explicit'],
'dtype': [numpy.float16, numpy.float32, numpy.float64,
chainer.mixed16],
}),
testing.product({
'ndim': [0, 1, 2, 3],
}) + [
{'input_shape': (5, 4, 3, 2), 'axis': (0, 2, 3)},
{'input_shape': (5, 4), 'axis': 0},
{'input_shape': (5, 4, 3), 'axis': (0, 1)},
]
)))
_inject_backend_tests = testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
def _batch_normalization(expander, gamma, beta, x, mean, var, eps, test):
mean = mean[expander]
if test:
std = numpy.sqrt(var[expander])
else:
std = numpy.sqrt(var[expander] + eps)
y_expect = gamma * (x - mean) / std + beta
return y_expect
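# Note the asymmetry in the helper above: in training mode the batch
# variance is stabilized with eps inside the square root, while in test mode
# the stored running variance is used as-is.
# BatchNormalizationTestBase.forward_expected mirrors the same convention
# when building its reference output.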
class BatchNormalizationTestBase(object):
param_names = ('gamma', 'beta')
def setUp(self):
if self.dtype == chainer.mixed16:
self.highprec_dtype = numpy.float32
else:
self.highprec_dtype = self.dtype
if hasattr(self, 'axis') and hasattr(self, 'input_shape'):
aggr_axes = self.axis
if isinstance(aggr_axes, int):
aggr_axes = aggr_axes,
shape = self.input_shape
param_shape = tuple(
s for i, s in enumerate(shape) if i not in aggr_axes)
expander = tuple(
None if i in aggr_axes else slice(None)
for i in range(len(shape)))
elif hasattr(self, 'ndim'):
aggr_axes = (0,) + tuple(six.moves.range(2, self.ndim + 2))
shape = (5, 3) + (2,) * self.ndim
param_shape = shape[1]
expander = (None, Ellipsis) + (None,) * self.ndim
else:
assert False
self.aggr_axes = aggr_axes
self.shape = shape
self.param_shape = param_shape
self.expander = expander
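        # ``expander`` is a broadcasting index: indexing a per-channel array
        # with it inserts length-1 axes at every aggregated position, e.g.
        # mean[(None, Ellipsis, None, None)] turns shape (3,) into
        # (1, 3, 1, 1) so that it broadcasts against an (N, 3, H, W) input.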
self.finetune = False
self.eps = 2e-5
if self.test:
self.mean = numpy.random.uniform(
-1, 1, param_shape).astype(self.highprec_dtype)
self.var = numpy.random.uniform(
0.5, 1, param_shape).astype(self.highprec_dtype)
else:
self.mean = None
self.var = None
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
if self.dtype in (numpy.float16, chainer.mixed16):
self.check_forward_options = {'atol': 1e-2, 'rtol': 1e-1}
self.check_backward_options = {'atol': 5e-1, 'rtol': 1e-1}
def before_test(self, test_name):
if (self.dtype == chainer.mixed16
and self.backend_config.xp is chainerx):
raise unittest.SkipTest(
'ChainerX does not yet support mixed-FP16 mode.')
def generate_params(self):
initial_gamma = numpy.random.uniform(
-1, 1, self.param_shape).astype(self.highprec_dtype)
initial_beta = numpy.random.uniform(
-1, 1, self.param_shape).astype(self.highprec_dtype)
return initial_gamma, initial_beta
def create_link(self, initializers):
initial_gamma, initial_beta = initializers
size = self.param_shape if self.size == 'explicit' else None
initial_avg_mean = None if self.mean is None else self.mean.copy()
initial_avg_var = None if self.var is None else self.var.copy()
link = links.BatchNormalization(
size=size,
axis=self.aggr_axes,
eps=self.eps,
dtype=self.dtype,
initial_gamma=initial_gamma,
initial_beta=initial_beta,
initial_avg_mean=initial_avg_mean,
initial_avg_var=initial_avg_var)
return link
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, link, inputs, device):
x, = inputs
# The inputs might be of different dtype than what the link was
# initialized with. In that case, persistent values must be manually
# cast. This is needed when forward is called in order to compute
# numerical gradients.
if ((self.dtype == chainer.mixed16 and x.dtype != numpy.float16)
or (self.dtype != chainer.mixed16 and link.avg_mean is not None
and x.dtype != link.avg_mean.dtype)):
link.avg_mean = link.avg_mean.astype(x.dtype)
link.avg_var = link.avg_var.astype(x.dtype)
with chainer.using_config('train', not self.test):
y = link(x, finetune=self.finetune)
return y,
def forward_expected(self, link, inputs):
gamma = link.gamma.array
beta = link.beta.array
x, = inputs
if self.test:
mean = self.mean[self.expander]
var = self.var[self.expander]
std = numpy.sqrt(var)
else:
mean = x.mean(
axis=self.aggr_axes, dtype=self.highprec_dtype, keepdims=True)
var = x.var(
axis=self.aggr_axes, dtype=self.highprec_dtype, keepdims=True)
std = numpy.sqrt(var + self.eps)
y = gamma[self.expander] * (x - mean) / std + beta[self.expander]
return y.astype(self.dtype),
def check_forward_outputs(self, outputs, expected_outputs):
super(BatchNormalizationTestBase, self).check_forward_outputs(
outputs, expected_outputs)
y, = outputs
assert y.dtype == chainer.get_dtype(self.dtype)
@_inject_backend_tests
@_parameterize
class BatchNormalizationTest(BatchNormalizationTestBase, testing.LinkTestCase):
pass
# TODO(hvy): Safely remove this test class when BackendConfig no longer
# modifies the current device since those cases should be covered by the tests
# above.
@testing.inject_backend_tests(
None,
testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0],
}))
@_parameterize
@attr.multi_gpu(2)
class BatchNormalizationMultiGpuTest(
BatchNormalizationTestBase, testing.LinkTestCase):
skip_backward_test = True
    # TODO(hvy): Remove this relaxation. It is currently needed because
    # inter-device copies of non-contiguous arrays are broken in CuPy.
contiguous = 'C'
def forward(self, link, inputs, device):
x, = inputs
device_1 = backend.GpuDevice.from_device_id(1)
link.to_device(device_1)
x.to_device(device_1)
device_0 = backend.GpuDevice.from_device_id(0)
with chainer.using_device(device_0):
with chainer.using_config('train', not self.test):
y = link(x, finetune=self.finetune)
return y,
@testing.parameterize(*(testing.product_dict(
testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))))
@_inject_backend_tests
class BatchNormalizationInitializersTest(
BatchNormalizationTestBase, testing.LinkInitializersTestCase):
test = False
size = 'skip'
ndim = 1
input_shape = (5, 4)
axis = 0
def get_initializers(self):
initial_gamma = [
initializers.Constant(2), 2, testing.InitializerArgument(None, 1)]
initial_beta = [
initializers.Constant(2), 2, testing.InitializerArgument(None, 0)]
return initial_gamma, initial_beta
@testing.parameterize(
{'nx': 10, 'ny': 10, 'eps': 2e-5},
{'nx': 10, 'ny': 10, 'eps': 1e-1},
# TODO(Kenta Oono)
# Pass the case below (this test does not pass when nx != ny).
# {'nx': 10, 'ny': 15}
)
class TestPopulationStatistics(unittest.TestCase):
def setUp(self):
self.decay = 0.9
self.size = 3
self.link = links.BatchNormalization(self.size, self.decay, self.eps)
self.x = numpy.random.uniform(
-1, 1, (self.nx, self.size)).astype(numpy.float32)
self.y = numpy.random.uniform(
-1, 1, (self.ny, self.size)).astype(numpy.float32)
def check_statistics(self, x, y):
x = chainer.Variable(x)
self.link(x, finetune=True)
mean = self.x.mean(axis=0)
testing.assert_allclose(mean, self.link.avg_mean)
unbiased_var = self.x.var(axis=0) * self.nx / (self.nx - 1)
testing.assert_allclose(unbiased_var, self.link.avg_var)
y = chainer.Variable(y)
with chainer.using_config('train', False):
self.link(y, finetune=True)
testing.assert_allclose(mean, self.link.avg_mean)
testing.assert_allclose(unbiased_var, self.link.avg_var)
def test_statistics_cpu(self):
self.check_statistics(self.x, self.y)
@attr.gpu
def test_statistics_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_statistics(cuda.to_gpu(self.x), cuda.to_gpu(self.y))
@attr.cudnn
def test_statistics_gpu_without_cudnn(self):
with chainer.using_config('use_cudnn', 'never'):
self.test_statistics_gpu()
def check_statistics2(self, x, y):
x = chainer.Variable(x)
y = chainer.Variable(y)
self.link(x, finetune=True)
self.link(y, finetune=True)
mean = (self.x.sum(axis=0) + self.y.sum(axis=0)) / (self.nx + self.ny)
var = (self.x.var(axis=0) * self.nx +
self.y.var(axis=0) * self.ny) / (self.nx + self.ny)
# TODO(Kenta Oono)
# Fix the estimate of the unbiased variance.
# Unbiased variance should be (nx + ny) / (nx + ny - 1) times of
# the variance.
        # But the multiplier is ny / (ny - 1) in the current implementation;
        # these two values differ when nx is not equal to ny.
unbiased_var = var * self.ny / (self.ny - 1)
testing.assert_allclose(mean, self.link.avg_mean)
testing.assert_allclose(unbiased_var, self.link.avg_var)
def test_statistics2_cpu(self):
self.check_statistics2(self.x, self.y)
@attr.gpu
def test_statistics2_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_statistics2(
cuda.to_gpu(self.x),
cuda.to_gpu(self.y))
@attr.cudnn
def test_statistics2_gpu_without_cudnn(self):
with chainer.using_config('use_cudnn', 'never'):
self.test_statistics2_gpu()
# TODO(hvy): Rewrite this test using testing.LinkTestCase.
@testing.parameterize(*testing.product({
'test': [True, False],
'ndim': [0, 1, 2, 3],
}))
class BatchNormalizationTestWithoutGammaAndBeta(unittest.TestCase):
def setUp(self):
self.link = links.BatchNormalization(
3, use_gamma=False, use_beta=False)
if self.test:
mean = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
self.link.avg_mean[...] = mean
var = numpy.random.uniform(0.5, 1, (3,)).astype(numpy.float32)
self.link.avg_var[...] = var
self.link.cleargrads()
shape = (7, 3) + (2,) * self.ndim
self.x = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
expander = (None, Ellipsis) + (None,) * self.ndim
gamma = numpy.ones((3,), dtype=numpy.float32)[expander]
beta = numpy.zeros((3,), dtype=numpy.float32)[expander]
if self.test:
mean = self.link.avg_mean
var = self.link.avg_var
else:
aggr_axes = (0,) + tuple(six.moves.range(2, self.ndim + 2))
mean = self.x.mean(axis=aggr_axes)
var = self.x.var(axis=aggr_axes)
self.y_expected = _batch_normalization(
expander, gamma, beta, self.x, mean, var, self.link.eps, self.test)
def test_no_gamma_and_beta(self):
assert self.link.gamma is None
assert self.link.beta is None
def check_forward(self, x_data):
x = chainer.Variable(x_data)
with chainer.using_config('train', not self.test):
y = self.link(x)
testing.assert_allclose(self.y_expected, y.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
self.check_forward(x)
@attr.multi_gpu(2)
def test_forward_gpu_multi(self):
with cuda.get_device_from_id(0):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
with cuda.get_device_from_id(1):
self.check_forward(x)
@attr.cudnn
def test_forward_gpu_without_cudnn(self):
with chainer.using_config('use_cudnn', 'never'):
self.test_forward_gpu()
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(self.link, x_data, y_grad,
eps=1e-2, rtol=1e-3, atol=1e-4)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
gy = cuda.to_gpu(self.gy)
self.check_backward(x, gy)
@attr.cudnn
def test_backward_gpu_without_cudnn(self):
with chainer.using_config('use_cudnn', 'never'):
self.test_backward_gpu()
def _generate_uniform(low, high, shape, dtype=numpy.float32):
return numpy.random.uniform(low, high, shape).astype(dtype)
@testing.parameterize(*testing.product({
'size': [3, (2, 3)],
}))
class TestInitialize(unittest.TestCase):
def setUp(self):
self.decay = 0.9
self.initial_gamma = _generate_uniform(-1, 1, self.size)
self.initial_beta = _generate_uniform(-1, 1, self.size)
self.initial_avg_mean = _generate_uniform(-1, 1, self.size)
self.initial_avg_var = _generate_uniform(-1, 1, self.size)
self.link = links.BatchNormalization(
self.size, self.decay,
initial_gamma=self.initial_gamma,
initial_beta=self.initial_beta,
initial_avg_mean=self.initial_avg_mean,
initial_avg_var=self.initial_avg_var,
)
@condition.retry(3)
def test_initialize_cpu(self):
testing.assert_allclose(self.initial_gamma, self.link.gamma.data)
testing.assert_allclose(self.initial_beta, self.link.beta.data)
testing.assert_allclose(self.initial_avg_mean, self.link.avg_mean)
testing.assert_allclose(self.initial_avg_var, self.link.avg_var)
@attr.gpu
@condition.retry(3)
def test_initialize_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
testing.assert_allclose(self.initial_gamma, self.link.gamma.data)
testing.assert_allclose(self.initial_beta, self.link.beta.data)
testing.assert_allclose(self.initial_avg_mean, self.link.avg_mean)
testing.assert_allclose(self.initial_avg_var, self.link.avg_var)
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float16, chainer.mixed16],
}))
class TestDefaultInitializer(unittest.TestCase):
def setUp(self):
self.decay = 0.9
self.size = 3
with chainer.using_config('dtype', self.dtype):
self.link = links.BatchNormalization(self.size, self.decay)
dtype = numpy.float32 if self.dtype == chainer.mixed16 else self.dtype
assert self.link.beta.dtype == dtype
assert self.link.gamma.dtype == dtype
assert self.link.avg_mean.dtype == dtype
assert self.link.avg_var.dtype == dtype
self.x = numpy.arange(6, dtype=self.dtype).reshape(2, 3)
def check_initialize(self):
testing.assert_allclose(numpy.ones(self.size), self.link.gamma.array)
testing.assert_allclose(numpy.zeros(self.size), self.link.beta.array)
testing.assert_allclose(0, self.link.avg_mean)
testing.assert_allclose(1, self.link.avg_var)
y = self.link(self.x)
assert y.dtype == self.x.dtype
def test_initialize_cpu(self):
self.check_initialize()
@attr.gpu
def test_initialize_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.x = cuda.to_gpu(self.x)
self.check_initialize()
@testing.parameterize(*testing.product({
'shape': [(2, 4), (2, 5, 3, 4)],
}))
class TestInvalidInput(unittest.TestCase):
def setUp(self):
self.link = links.BatchNormalization(3)
def test_invalid_shape_cpu(self):
with self.assertRaises(type_check.InvalidType):
self.link(chainer.Variable(numpy.zeros(self.shape, dtype='f')))
@attr.gpu
def test_invalid_shape_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
with self.assertRaises(type_check.InvalidType):
self.link(chainer.Variable(cuda.cupy.zeros(self.shape, dtype='f')))
class TestInvalidInitialize(unittest.TestCase):
def test_invalid_type(self):
with self.assertRaises(TypeError):
self.link = links.BatchNormalization({})
class TestInvalidArgument(unittest.TestCase):
def setUp(self):
self.link = links.BatchNormalization(1)
self.x = numpy.random.uniform(-1, 1, (3,)).astype('f')
def test_test_argument(self):
with self.assertRaises(ValueError):
self.link(self.x, test=True)
def test_positional_argument(self):
        # positional arguments have been prohibited since v2
with self.assertRaises(TypeError):
self.link(self.x, True)
def test_redundant_argument(self):
with self.assertRaises(TypeError):
self.link(self.x, unknown_argument=1)
@testing.parameterize(
{'shape': (5, 4, 3, 2), 'axis': (0, 2, 3)},
{'shape': (5, 4), 'axis': 0},
{'shape': (5, 4, 3), 'axis': (0, 1)},
)
class TestChannelSizeInference(unittest.TestCase):
def setUp(self):
self.x = numpy.random.randn(*self.shape).astype('f')
axis = self.axis
if isinstance(axis, int):
axis = (axis,)
self.expected_size = tuple(
n
for i, n in enumerate(self.shape)
if i not in axis
)
def test_no_inference(self):
bn = links.BatchNormalization(self.expected_size)
assert bn.avg_mean is not None
assert bn.avg_var is not None
def test_inference_cpu(self):
bn = links.BatchNormalization(axis=self.axis)
bn(self.x)
assert bn.beta.shape == self.expected_size
assert bn.gamma.shape == self.expected_size
assert bn.avg_mean.shape == self.expected_size
assert bn.avg_var.shape == self.expected_size
@attr.gpu
def test_inference_gpu(self):
bn = links.BatchNormalization(axis=self.axis)
with testing.assert_warns(DeprecationWarning):
bn.to_gpu()
bn(cuda.to_gpu(self.x))
assert isinstance(bn.beta.data, cuda.cupy.ndarray)
assert isinstance(bn.gamma.data, cuda.cupy.ndarray)
assert isinstance(bn.avg_mean, cuda.cupy.ndarray)
assert isinstance(bn.avg_var, cuda.cupy.ndarray)
assert bn.beta.shape == self.expected_size
assert bn.gamma.shape == self.expected_size
assert bn.avg_mean.shape == self.expected_size
assert bn.avg_var.shape == self.expected_size
def test_no_gamma(self):
bn = links.BatchNormalization(axis=self.axis, use_gamma=False)
assert bn.gamma is None
bn(self.x)
assert bn.gamma is None
def test_no_beta(self):
bn = links.BatchNormalization(axis=self.axis, use_beta=False)
assert bn.beta is None
bn(self.x)
assert bn.beta is None
class TestFailChannelSizeInference(unittest.TestCase):
def test_fail_inference(self):
with self.assertRaises(RuntimeError):
links.BatchNormalization()
@attr.multi_gpu(2)
class TestLazyInitializationWithNonZeroCurrentCudaDevice(unittest.TestCase):
def test_lazy_initialization_with_non_zero_current_cuda_device(self):
# Create a lazily initialized BatchNormalization link.
bn = links.BatchNormalization(axis=(0, 2, 3))
assert bn.xp is numpy
device = backend.GpuDevice.from_device_id(1)
bn.to_device(device)
assert bn.xp is cuda.cupy
assert bn.device == device
assert bn.beta.device == device
assert bn.gamma.device == device
assert bn.avg_mean is None
assert bn.avg_var is None
x = numpy.random.randn(5, 4, 3, 2).astype(numpy.float32)
x = device.send(x)
# All parameters and persistent values should correctly be initialized
# on device 1, and not device 0, meaning forward pass should not raise
# any errors.
bn(x)
assert bn.xp is cuda.cupy
assert bn.device == device
assert bn.beta.device == device
assert bn.gamma.device == device
assert bn.avg_mean is not None
assert bn.avg_var is not None
assert backend.GpuDevice.from_array(bn.avg_mean) == device
assert backend.GpuDevice.from_array(bn.avg_var) == device
@testing.parameterize(*testing.product({
'x_shape,bn_kwargs': [
((4, 3), {'axis': (0,)}),
((4, 3), {'size': (3,)}),
],
}))
class TestSerialize(unittest.TestCase):
def create_link(self):
return links.BatchNormalization(**self.bn_kwargs)
def train_link(self, bn):
x = numpy.random.rand(*self.x_shape).astype(numpy.float32)
bn(x)
x = numpy.random.rand(*self.x_shape).astype(numpy.float32)
bn(x, finetune=True)
# has non-trivial values to be stored
assert bn.avg_mean is not None
assert bn.N == 1
def create_serializer_pair(self):
target = {}
return (
chainer.serializers.DictionarySerializer(target),
chainer.serializers.NpzDeserializer(target),
)
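    # DictionarySerializer writes every registered value into the plain dict
    # ``target``, and NpzDeserializer reads the same mapping back, so the
    # round trip below exercises serialization entirely in memory; the
    # assertions then compare the restored statistics, the parameters and
    # the finetune counter N.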
def test_serialize(self):
ser, de = self.create_serializer_pair()
link1 = self.create_link()
self.train_link(link1)
link1.serialize(ser)
link2 = self.create_link()
link2.serialize(de)
testing.assert_allclose(link2.avg_mean, link1.avg_mean)
testing.assert_allclose(link2.avg_var, link1.avg_var)
testing.assert_allclose(link2.beta.array, link1.beta.array)
testing.assert_allclose(link2.gamma.array, link1.gamma.array)
assert link2.N == link1.N
@testing.inject_backend_tests(
[
'test_param_layout_to_device',
'test_forward',
],
# CPU tests
[{}]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
}))
class TestBatchNormalizationMemoryLayouts(unittest.TestCase):
batch = 2
channels = 3
height = 13
width = 11
axis = None
dtype = numpy.float32
def create_link(self):
channel_dims = (self.channels,)
link = links.BatchNormalization(
channel_dims,
axis=self.axis)
return link
def create_input_array(self, xp):
x_shape = (self.batch, self.height, self.width, self.channels)
x = xp.ones(x_shape, self.dtype)
return x
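    # The raw input array is created NHWC, but wrapping it in a Variable
    # with layout=CUDNN_CHANNEL_LAST_X below means the logical (semantic)
    # shape is NCHW even though the data stays channel-last; this is why
    # test_forward asserts an output shape of
    # (batch, channels, height, width).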
def test_param_layout(self):
with chainer.using_config('compute_mode', 'cudnn_fast'):
link = self.create_link()
assert link.gamma.layout is None
assert link.beta.layout is None
def test_param_layout_to_device(self, backend_config):
with chainer.using_config('compute_mode', 'cudnn_fast'):
link = self.create_link()
assert link.gamma.device == chainer.get_device('@numpy')
assert link.beta.device == chainer.get_device('@numpy')
link.to_device(backend_config.device)
assert link.gamma.device == backend_config.device
assert link.beta.device == backend_config.device
assert link.gamma.layout is None
assert link.beta.layout is None
def test_forward(self, backend_config):
with chainer.using_config('compute_mode', 'cudnn_fast'):
link = self.create_link()
link.to_device(backend_config.device)
x = self.create_input_array(backend_config.xp)
x = chainer.Variable(x, layout=memory_layouts.CUDNN_CHANNEL_LAST_X)
x.to_device(backend_config.device)
with backend_config:
y = link(x)
assert link.gamma.device == backend_config.device
assert link.beta.device == backend_config.device
assert y.layout == memory_layouts.CUDNN_CHANNEL_LAST_X
assert y.shape == (
self.batch,
self.channels,
self.height,
self.width)
testing.run_module(__name__, __file__)
| 26024
| 32.580645
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/normalization_tests/test_group_normalization.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import links
from chainer import testing
from chainer.testing import attr
@testing.inject_backend_tests(
None,
# CPU tests
[{}]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
)
@testing.parameterize(*(testing.product({
'shape': [(1, 4, 5, 3), (5, 4, 7), (3, 20)],
'groups': [1, 2, 4],
'dtype': [numpy.float16, numpy.float32, numpy.float64,
chainer.mixed16],
})))
class GroupNormalizationTest(testing.LinkTestCase):
param_names = ('gamma', 'beta')
def setUp(self):
self.x, = self.generate_inputs()
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
if self.dtype in (numpy.float16, chainer.mixed16):
self.check_forward_options = {'atol': 1e-2, 'rtol': 1e-1}
self.check_backward_options = {'atol': 5e-1, 'rtol': 1e-1}
else:
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
def create_link(self, initializers):
initial_gamma, initial_beta = initializers
with chainer.using_config('dtype', self.dtype):
link = links.GroupNormalization(
self.groups,
initial_gamma=initial_gamma,
initial_beta=initial_beta,
)
return link
def generate_params(self):
highprec_dtype = chainer.get_dtype(
self.dtype, map_mixed16=numpy.float32)
initial_gamma = numpy.random.uniform(
-1, 1, (self.shape[1],)).astype(highprec_dtype)
initial_beta = numpy.random.uniform(
-1, 1, (self.shape[1],)).astype(highprec_dtype)
return initial_gamma, initial_beta
def generate_inputs(self):
shape = self.shape
# sample x such that x.std >= min_std
min_std = 0.02
retry = 0
while True:
x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
x_groups = x.reshape(shape[0], self.groups, -1)
if x_groups.std(axis=2).min() >= min_std:
break
retry += 1
assert retry <= 20, 'Too many retries to generate inputs'
return x,
def forward_expected(self, link, inputs):
gamma = link.gamma.array
beta = link.beta.array
x, = inputs
shape = self.shape
param_reshape = tuple([
s if i == 1 else 1 for i, s in enumerate(shape)])
x = x.astype(chainer.get_dtype(
self.dtype, map_mixed16=numpy.float32))
x = x.reshape(shape[0] * self.groups, -1)
x -= x.mean(axis=1, keepdims=True)
x /= numpy.sqrt(link.eps + numpy.square(x).mean(axis=1, keepdims=True))
x = x.reshape(shape)
x = gamma.reshape(param_reshape) * x + beta.reshape(param_reshape)
if self.dtype == chainer.mixed16:
x = x.astype(numpy.float16)
return x,
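# The reference computation in forward_expected is the standard group
# normalization: the (N, C, ...) input is viewed as (N * groups, -1), each
# group row is centered and divided by sqrt(mean(x ** 2) + eps), and the
# per-channel gamma/beta are broadcast back in via ``param_reshape``.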
@testing.parameterize(*testing.product({
'size': [3, 30],
'groups': [1, 3],
'dtype': [numpy.float16, numpy.float32, numpy.float64,
chainer.mixed16],
}))
class TestInitialize(unittest.TestCase):
def setUp(self):
self.lowprec_dtype = chainer.get_dtype(self.dtype)
highprec_dtype = chainer.get_dtype(
self.dtype, map_mixed16=numpy.float32)
self.initial_gamma = numpy.random.uniform(-1, 1, self.size)
self.initial_gamma = self.initial_gamma.astype(highprec_dtype)
self.initial_beta = numpy.random.uniform(-1, 1, self.size)
self.initial_beta = self.initial_beta.astype(highprec_dtype)
self.link = links.GroupNormalization(self.groups,
initial_gamma=self.initial_gamma,
initial_beta=self.initial_beta)
self.shape = (1, self.size, 1)
def test_initialize_cpu(self):
with chainer.using_config('dtype', self.dtype):
self.link(numpy.zeros(self.shape, dtype=self.lowprec_dtype))
testing.assert_allclose(self.initial_gamma, self.link.gamma.data)
testing.assert_allclose(self.initial_beta, self.link.beta.data)
@attr.gpu
def test_initialize_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
with chainer.using_config('dtype', self.dtype):
self.link(cuda.cupy.zeros(self.shape, dtype=self.lowprec_dtype))
testing.assert_allclose(self.initial_gamma, self.link.gamma.data)
testing.assert_allclose(self.initial_beta, self.link.beta.data)
@testing.parameterize(*testing.product({
'size': [3, 30],
'groups': [1, 3],
'dtype': [numpy.float16, numpy.float32, numpy.float64,
chainer.mixed16],
}))
class TestDefaultInitializer(unittest.TestCase):
def setUp(self):
self.lowprec_dtype = chainer.get_dtype(self.dtype)
self.highprec_dtype = chainer.get_dtype(
self.dtype, map_mixed16=numpy.float32)
self.size = 3
with chainer.using_config('dtype', self.dtype):
self.link = links.GroupNormalization(self.groups)
self.shape = (1, self.size, 1)
def test_initialize_cpu(self):
self.link(numpy.zeros(self.shape, dtype=self.lowprec_dtype))
testing.assert_allclose(numpy.ones(self.size), self.link.gamma.data)
self.assertEqual(self.link.gamma.dtype, self.highprec_dtype)
testing.assert_allclose(
numpy.zeros(self.size), self.link.beta.data)
self.assertEqual(self.link.beta.dtype, self.highprec_dtype)
@attr.gpu
def test_initialize_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.link(cuda.cupy.zeros(self.shape, dtype=self.lowprec_dtype))
testing.assert_allclose(numpy.ones(self.size), self.link.gamma.data)
self.assertEqual(self.link.gamma.dtype, self.highprec_dtype)
testing.assert_allclose(
numpy.zeros(self.size), self.link.beta.data)
self.assertEqual(self.link.beta.dtype, self.highprec_dtype)
@testing.parameterize(*testing.product({
'shape': [(2,), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestInvalidInput(unittest.TestCase):
def setUp(self):
self.link = links.GroupNormalization(groups=3)
def test_invalid_shape_cpu(self):
with self.assertRaises(ValueError):
self.link(chainer.Variable(
numpy.zeros(self.shape, dtype=self.dtype)))
@attr.gpu
def test_invalid_shape_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
with self.assertRaises(ValueError):
self.link(
chainer.Variable(
cuda.cupy.zeros(self.shape, dtype=self.dtype)))
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestInvalidInitialize(unittest.TestCase):
def setUp(self):
shape = (2, 5, 2)
self.x = chainer.Variable(numpy.zeros(shape, dtype=self.dtype))
def test_invalid_groups(self):
self.link = links.GroupNormalization(groups=3)
with self.assertRaises(ValueError):
self.link(self.x)
def test_invalid_type_groups(self):
self.link = links.GroupNormalization(groups=3.5)
with self.assertRaises(TypeError):
self.link(self.x)
testing.run_module(__name__, __file__)
| 7679
| 33.439462
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/normalization_tests/test_decorrelated_batch_normalization.py
|
import contextlib
import os
import tempfile
import unittest
import numpy
import chainer
from chainer import links
from chainer import testing
def _decorrelated_batch_normalization(x, mean, projection, groups):
xs = numpy.split(x, groups, axis=1)
assert mean.shape[0] == groups
assert projection.shape[0] == groups
ys = [
_decorrelated_batch_normalization_1group(xi, m, p)
for (xi, m, p) in zip(xs, mean, projection)]
return numpy.concatenate(ys, axis=1)
def _decorrelated_batch_normalization_1group(x, mean, projection):
spatial_ndim = len(x.shape[2:])
spatial_axis = tuple(range(2, 2 + spatial_ndim))
b, C = x.shape[:2]
x_hat = x.transpose((1, 0) + spatial_axis).reshape(C, -1)
y_hat = projection.dot(x_hat - mean[:, None])
y = y_hat.reshape((C, b) + x.shape[2:]).transpose(
(1, 0) + spatial_axis)
return y
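# Shape bookkeeping in the helper above: an input of shape (b, C, *spatial)
# is transposed and flattened to x_hat of shape (C, b * prod(spatial)), so
# each row collects every observation of one channel; the (C, C) projection
# then mixes channels to whiten them, and the result is reshaped back to
# the original layout.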
def _calc_projection(x, mean, eps, groups):
xs = numpy.split(x, groups, axis=1)
assert mean.shape[0] == groups
projections = [
_calc_projection_1group(xi, m, eps)
for (xi, m) in zip(xs, mean)]
return numpy.concatenate([p[None] for p in projections])
def _calc_projection_1group(x, mean, eps):
spatial_ndim = len(x.shape[2:])
spatial_axis = tuple(range(2, 2 + spatial_ndim))
b, C = x.shape[:2]
m = b
for i in spatial_axis:
m *= x.shape[i]
x_hat = x.transpose((1, 0) + spatial_axis).reshape(C, -1)
mean = x_hat.mean(axis=1)
x_hat = x_hat - mean[:, None]
cov = x_hat.dot(x_hat.T) / m + eps * numpy.eye(C, dtype=x.dtype)
eigvals, eigvectors = numpy.linalg.eigh(cov)
projection = eigvectors.dot(numpy.diag(eigvals ** -0.5)).dot(eigvectors.T)
return projection
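# The matrix returned above is the ZCA whitening projection: given the
# eigendecomposition cov = E diag(eigvals) E.T, the projection is
# E diag(eigvals ** -0.5) E.T, so projection.dot(cov).dot(projection) is
# approximately the identity and the projected channels come out
# decorrelated with unit variance. eps is added to the covariance diagonal
# to keep the smallest eigenvalues away from zero.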
def _calc_mean(x, groups):
axis = (0,) + tuple(range(2, x.ndim))
return x.mean(axis=axis).reshape(groups, -1)
@testing.parameterize(*(testing.product({
'n_channels': [8],
'groups': [1, 2],
'eps': [2e-5, 5e-1],
'test': [True, False],
'ndim': [0, 2],
# NOTE(crcrpar): np.linalg.eigh does not support float16
'dtype': [numpy.float32, numpy.float64],
})))
@testing.inject_backend_tests(
None,
# CPU tests
[{}]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class DecorrelatedBatchNormalizationTest(testing.LinkTestCase):
param_names = ()
def setUp(self):
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-3}
self.check_backward_options = {'atol': 5e-3, 'rtol': 1e-3}
if self.dtype == numpy.float32:
self.check_backward_options = {'atol': 5e-2, 'rtol': 5e-2}
def generate_params(self):
C = self.n_channels // self.groups
# TODO(ecastill) mean and projection are not
# parameters inside the link, just plain arrays
mean = numpy.random.uniform(
-1, 1, (self.groups, C)).astype(self.dtype)
projection = numpy.random.uniform(
0.5, 1, (self.groups, C, C)).astype(
self.dtype)
return mean, projection
def create_link(self, initializers):
mean, projection = initializers
link = links.DecorrelatedBatchNormalization(
self.n_channels, groups=self.groups, eps=self.eps,
dtype=self.dtype)
link.cleargrads()
if self.test:
link.avg_mean[...] = mean
link.avg_projection[...] = projection
return link
def generate_inputs(self):
dtype = self.dtype
ndim = self.ndim
shape = (5, self.n_channels) + (2,) * ndim
m = 5 * 2 ** ndim
# NOTE(kataoka): The current implementation uses linalg.eigh. Small
# eigenvalues of the correlation matrix, which can be as small as
# eps=2e-5, cannot be computed with good *relative* accuracy, but
# the eigenvalues are used later as `eigvals ** -0.5`. Require the
# following is sufficiently large:
# min(eigvals[:k]) == min(singular_vals ** 2 / m + eps)
min_singular_value = 0.1
# NOTE(kataoka): Decorrelated batch normalization should be free from
# "stochastic axis swapping". Requiring a gap between singular values
# just hides mistakes in implementations.
min_singular_value_gap = 0.001
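        # Concretely, with min_singular_value = 0.1 and m = 5 * 2 ** ndim,
        # min(eigvals[:k]) >= 0.01 / m + eps, i.e. roughly 2e-3 for ndim = 0
        # and 5e-4 for ndim = 2 (plus eps), which keeps eigvals ** -0.5 well
        # away from the problematic near-zero regime for both eps settings.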
g = self.groups
zca_shape = g, self.n_channels // g, m
x = numpy.random.uniform(-1, 1, zca_shape)
mean = x.mean(axis=2, keepdims=True)
a = x - mean
u, s, vh = numpy.linalg.svd(a, full_matrices=False)
        # Decrement the latter dim because centering imposes the constraint
        # `sum(_) == 0`
k = min(zca_shape[1], zca_shape[2] - 1)
s[:, :k] += (
min_singular_value
+ min_singular_value_gap * numpy.arange(k)
)[::-1]
a = numpy.einsum('bij,bj,bjk->bik', u, s, vh)
x = a + mean
x = x.reshape((self.n_channels, shape[0]) + shape[2:]).swapaxes(0, 1)
x = x.astype(dtype)
return x,
def forward_expected(self, link, inputs):
x, = inputs
if self.test:
mean = link.avg_mean
projection = link.avg_projection
else:
mean = _calc_mean(x, self.groups)
projection = _calc_projection(x, mean,
link.eps, self.groups)
y_expect = _decorrelated_batch_normalization(
x, mean, projection, self.groups)
return y_expect,
def forward(self, link, inputs, backend_config):
x, = inputs
with chainer.using_config('train', not self.test):
y = link(x)
return y,
# TODO(kataoka) Use `contextlib.nullcontext` if Python 3.7 or higher is assumed
@contextlib.contextmanager
def nullcontext():
yield
@testing.parameterize(*(testing.product({
'n_channels': [8],
'groups': [1, 2],
'dtype': [numpy.float32, numpy.float64],
})))
@testing.inject_backend_tests([
'test_model_compatibility_npz', 'test_model_compatibility_hdf5',
], [
{},
{'use_cuda': True},
{'use_cuda': True, 'cuda_device': 1},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestDecorrelatedBatchNormalizationCompat(unittest.TestCase):
def setUp(self):
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
def tearDown(self):
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def check_model_compatibility(self, backend_config, save, load):
C = self.n_channels // self.groups
old_model = {
'avg_mean': numpy.random.uniform(
-1, 1, (C,)).astype(self.dtype),
'avg_projection': numpy.random.uniform(
0.5, 1, (C, C)).astype(self.dtype),
'N': numpy.array(0)
}
save(self.temp_file_path, old_model)
model = links.DecorrelatedBatchNormalization(
self.n_channels, groups=self.groups, dtype=self.dtype)
model.to_device(backend_config.device)
with (
testing.assert_warns(UserWarning) if self.groups != 1
else nullcontext()):
load(self.temp_file_path, model)
x = numpy.random.rand(5, self.n_channels, 2).astype(self.dtype)
x = backend_config.get_array(x)
with chainer.using_config('train', False):
model(x)
model(x)
def test_model_compatibility_npz(self, backend_config):
self.check_model_compatibility(
backend_config,
chainer.serializers.save_npz,
chainer.serializers.load_npz,
)
@testing.with_requires('h5py')
def test_model_compatibility_hdf5(self, backend_config):
self.check_model_compatibility(
backend_config,
chainer.serializers.save_hdf5,
chainer.serializers.load_hdf5,
)
testing.run_module(__name__, __file__)
| 8275
| 32.104
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/links_tests/normalization_tests/test_layer_normalization.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import type_check
def _create_ln(*args, **kwargs):
flag = chainer.disable_experimental_feature_warning
chainer.disable_experimental_feature_warning = True
try:
return links.LayerNormalization(*args, **kwargs)
finally:
chainer.disable_experimental_feature_warning = flag
@testing.parameterize(*(testing.product({
'batchsize': [1, 5],
'size': [10, 20],
'dtype': [numpy.float32],
})))
class LayerNormalizationTest(unittest.TestCase):
def setUp(self):
self.link = _create_ln()
self.link.cleargrads()
self.shape = (self.batchsize, self.size)
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
        self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
        if self.dtype == numpy.float16:
            self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
            self.check_backward_options = {'atol': 5e-1, 'rtol': 1e-1}
def check_forward(self, x_data):
y = self.link(x_data)
self.assertEqual(y.data.dtype, self.dtype)
unbatched_concat_y = chainer.functions.concat(
[self.link(one_x[None, ]) for one_x in x_data], axis=0)
testing.assert_allclose(
            y.data, unbatched_concat_y.data, **self.check_forward_options)
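    # Layer normalization computes its statistics per sample, so feeding the
    # rows one at a time and concatenating the results must reproduce the
    # batched output; check_forward above relies on that invariance instead
    # of computing an explicit reference.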
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
@attr.cudnn
def test_forward_gpu_without_cudnn(self):
self.link.use_cudnn = False
self.test_forward_gpu()
@attr.multi_gpu(2)
@condition.retry(3)
def test_forward_multi_gpu(self):
with cuda.get_device_from_id(1):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
with cuda.get_device_from_id(0):
self.check_forward(x)
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
self.link, x_data, y_grad,
(self.link.gamma, self.link.beta),
            eps=1e-2, **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.link(numpy.zeros(self.shape, dtype='f'))
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.link(cuda.cupy.zeros(self.shape, dtype='f'))
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.cudnn
def test_backward_gpu_without_cudnn(self):
self.link.use_cudnn = False
self.link(numpy.zeros(self.shape, dtype='f'))
self.test_backward_gpu()
@testing.parameterize(*testing.product({
'size': [3, 50],
}))
class TestInitialize(unittest.TestCase):
def setUp(self):
self.initial_gamma = numpy.random.uniform(-1, 1, self.size)
self.initial_gamma = self.initial_gamma.astype(numpy.float32)
self.initial_beta = numpy.random.uniform(-1, 1, self.size)
self.initial_beta = self.initial_beta.astype(numpy.float32)
self.link = _create_ln(
initial_gamma=self.initial_gamma,
initial_beta=self.initial_beta)
self.shape = (1, self.size)
@condition.retry(3)
def test_initialize_cpu(self):
self.link(numpy.zeros(self.shape, dtype='f'))
testing.assert_allclose(self.initial_gamma, self.link.gamma.data)
testing.assert_allclose(self.initial_beta, self.link.beta.data)
@attr.gpu
@condition.retry(3)
def test_initialize_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.link(cuda.cupy.zeros(self.shape, dtype='f'))
testing.assert_allclose(self.initial_gamma, self.link.gamma.data)
testing.assert_allclose(self.initial_beta, self.link.beta.data)
class TestDefaultInitializer(unittest.TestCase):
def setUp(self):
self.size = 3
self.link = _create_ln()
self.shape = (1, self.size)
def test_initialize_cpu(self):
self.link(numpy.zeros(self.shape, dtype='f'))
testing.assert_allclose(numpy.ones(self.size), self.link.gamma.data)
testing.assert_allclose(
numpy.zeros(self.size), self.link.beta.data)
@attr.gpu
def test_initialize_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.link(cuda.cupy.zeros(self.shape, dtype='f'))
testing.assert_allclose(numpy.ones(self.size), self.link.gamma.data)
testing.assert_allclose(
numpy.zeros(self.size), self.link.beta.data)
class TestEmptyBatchInitialize(unittest.TestCase):
def setUp(self):
self.link = _create_ln()
self.shape = (0, 3)
self.x = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
def test_empty_batch_dim(self):
y = self.link(chainer.Variable(self.x))
assert y.shape == self.shape
@testing.parameterize(*testing.product({
'shape': [(2, 4, 3), (2, 5, 3, 4)],
}))
class TestInvalidInput(unittest.TestCase):
def setUp(self):
self.link = _create_ln()
def test_invalid_shape_cpu(self):
with self.assertRaises(type_check.InvalidType):
self.link(chainer.Variable(numpy.zeros(self.shape, dtype='f')))
@attr.gpu
def test_invalid_shape_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
with self.assertRaises(type_check.InvalidType):
self.link(chainer.Variable(cuda.cupy.zeros(self.shape, dtype='f')))
class TestInvalidInitialize(unittest.TestCase):
def test_invalid_type(self):
with self.assertRaises(TypeError):
self.link = _create_ln(None, 1e-6, {})
self.link(chainer.Variable(numpy.zeros((1, 5), dtype='f')))
testing.run_module(__name__, __file__)

# chainer-master/tests/chainer_tests/links_tests/normalization_tests/__init__.py (empty file)

# chainer-master/tests/chainer_tests/links_tests/theano_tests/test_theano_function.py

import unittest
import warnings
import numpy
import pytest
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.with_requires('theano') # TODO(niboshi): Remove me
# chainer/chainer#5997
@testing.without_requires('Theano<=1.0.3', 'numpy>=1.16.0')
@pytest.mark.theano()
class TheanoFunctionTestBase(object):
forward_test_options = {}
backward_test_options = {'atol': 1e-4}
def setUp(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
# Theano 1.0.2 causes DeprecationWarning
import theano # NOQA
self.input_data = [
numpy.random.uniform(
-1, 1, d['shape']).astype(getattr(numpy, d['type']))
for d in self.inputs]
self.grad_data = [
numpy.random.uniform(
-1, 1, d['shape']).astype(getattr(numpy, d['type']))
for d in self.outputs]
def make_func(self):
raise NotImplementedError
def expect_forward(self):
raise NotImplementedError
def check_forward(self, input_data):
func = self.make_func()
inputs = [chainer.Variable(data) for data in input_data]
outputs = func(*inputs)
if isinstance(outputs, chainer.Variable):
outputs = (outputs,)
expect = self.expect_forward()
self.assertEqual(len(outputs), len(expect))
for o, e in zip(outputs, expect):
testing.assert_allclose(
o.data, e, **self.forward_test_options)
def test_forward_cpu(self):
self.check_forward(self.input_data)
@attr.gpu
def test_forward_gpu(self):
inputs = [cuda.to_gpu(x) for x in self.input_data]
self.check_forward(inputs)
def check_backward(self, input_data, grad_data):
func = self.make_func()
gradient_check.check_backward(
func, input_data, grad_data, **self.backward_test_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.input_data, self.grad_data)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
inputs = [cuda.to_gpu(x) for x in self.input_data]
grads = [cuda.to_gpu(x) for x in self.grad_data]
self.check_backward(inputs, grads)
@testing.parameterize(
{'inputs': [{'shape': (3, 2), 'type': 'float32'},
{'shape': (3, 2), 'type': 'float32'}],
'outputs': [{'shape': (3, 2), 'type': 'float32'}]},
{'inputs': [{'shape': (3, 2), 'type': 'float32'},
{'shape': (2,), 'type': 'float32'}],
'outputs': [{'shape': (3, 2), 'type': 'float32'}]},
{'inputs': [{'shape': (3, 2), 'type': 'float32'},
{'shape': (), 'type': 'float32'}],
'outputs': [{'shape': (3, 2), 'type': 'float32'}]},
{'inputs': [{'shape': (3, 2), 'type': 'float32'},
{'shape': (3, 2), 'type': 'float64'}],
'outputs': [{'shape': (3, 2), 'type': 'float64'}]},
{'inputs': [{'shape': (3, 2), 'type': 'float16'},
{'shape': (3, 2), 'type': 'float32'}],
'outputs': [{'shape': (3, 2), 'type': 'float32'}],
'forward_test_options': {'atol': 1e-3, 'rtol': 1e-3},
'backward_test_options': {'eps': 1, 'atol': 1e-3, 'rtol': 1e-3}},
)
class TestTheanoFunction(TheanoFunctionTestBase, unittest.TestCase):
def make_func(self):
import theano.tensor as T
x = T.TensorType(self.inputs[0]['type'],
(False,) * len(self.inputs[0]['shape']))('x')
y = T.TensorType(self.inputs[1]['type'],
(False,) * len(self.inputs[1]['shape']))('y')
z = x + y
return links.TheanoFunction([x, y], [z])
def expect_forward(self):
x, y = self.input_data
return x + y,
@testing.parameterize(
{'inputs': [{'shape': (3, 2), 'type': 'float32'},
{'shape': (3, 2), 'type': 'float32'}],
'outputs': [{'shape': (3, 2), 'type': 'float32'},
{'shape': (3, 2), 'type': 'float32'}]},
{'inputs': [{'shape': (3, 2), 'type': 'float32'},
{'shape': (2,), 'type': 'float32'}],
'outputs': [{'shape': (3, 2), 'type': 'float32'},
{'shape': (3, 2), 'type': 'float32'}]},
{'inputs': [{'shape': (3, 2), 'type': 'float32'},
{'shape': (), 'type': 'float32'}],
'outputs': [{'shape': (3, 2), 'type': 'float32'},
{'shape': (3, 2), 'type': 'float32'}]},
)
class TestTheanoFunctionTwoOutputs(TheanoFunctionTestBase, unittest.TestCase):
def make_func(self):
import theano.tensor as T
x = T.TensorType(self.inputs[0]['type'],
(False,) * len(self.inputs[0]['shape']))('x')
y = T.TensorType(self.inputs[1]['type'],
(False,) * len(self.inputs[1]['shape']))('y')
z = x + y
w = x - y
return links.TheanoFunction([x, y], [z, w])
def expect_forward(self):
x, y = self.input_data
return x + y, x - y
@testing.parameterize(
{'inputs': [{'shape': (3, 2), 'type': 'float32'},
{'shape': (2,), 'type': 'int32'}],
'outputs': [{'shape': (2, 2), 'type': 'float32'}]},
{'inputs': [{'shape': (3, 2), 'type': 'float32'},
{'shape': (), 'type': 'int32'}],
'outputs': [{'shape': (2,), 'type': 'float32'}]},
)
class TestTheanoFunctionNonDifferential(
TheanoFunctionTestBase, unittest.TestCase):
def make_func(self):
import theano.tensor as T
x = T.TensorType(self.inputs[0]['type'],
(False,) * len(self.inputs[0]['shape']))('x')
i = T.TensorType(self.inputs[1]['type'],
(False,) * len(self.inputs[1]['shape']))('y')
z = x[i]
return links.TheanoFunction([x, i], z)
def expect_forward(self):
x, i = self.input_data
return x[i],
testing.run_module(__name__, __file__)

# chainer-master/tests/chainer_tests/links_tests/theano_tests/__init__.py (empty file)

# chainer-master/tests/chainerx_tests/conftest.py

import pytest
import chainerx.testing
from chainerx_tests import cuda_utils
def pytest_configure(config):
_register_cuda_marker(config)
def pytest_runtest_setup(item):
_setup_cuda_marker(item)
def pytest_runtest_teardown(item, nextitem):
current_device = cuda_utils.get_current_device()
assert current_device is None or current_device == 0
def pytest_generate_tests(metafunc):
device_fixtures = {'parametrize_device': 'device',
'parametrize_device_name': 'device_name'}
marker = [
m for m in metafunc.definition.iter_markers()
if m.name in ('parametrize_device', 'parametrize_device_name')]
if marker:
marker, = marker # asserts len == 1
device_names, = marker.args
# fixture name is either device or device_name
fixture_name = device_fixtures[marker.name]
metafunc.parametrize(fixture_name, device_names, indirect=True)
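# Usage sketch (illustrative; mirrors tests elsewhere in this test suite):
# a test parametrized with the marker registered above receives each
# chainerx.Device in turn through the `device` fixture.
#
#     @pytest.mark.parametrize_device(['native:0', 'cuda:0'])
#     def test_something(device):
#         a = chainerx.zeros((2, 3), 'float32', device=device)
#         ...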
def pytest_collection_modifyitems(session, config, items):
# Make the order of tests deterministic
items[:] = sorted(items, key=lambda item: item.location)
def _register_cuda_marker(config):
config.addinivalue_line(
'markers',
'cuda(num=1): mark tests needing the specified number of NVIDIA GPUs.')
def _setup_cuda_marker(item):
"""Pytest marker to indicate number of NVIDIA GPUs required to run the test.
Tests can be annotated with this decorator (e.g., ``@pytest.mark.cuda``) to
declare that one NVIDIA GPU is required to run.
    Tests can also be annotated as ``@pytest.mark.cuda(2)`` to declare the
    number of NVIDIA GPUs required to run. When running tests, if the
    ``CHAINERX_TEST_CUDA_DEVICE_LIMIT`` environment variable is set to a value
    greater than or equal to 0, test cases that require more GPUs than the
    limit will be skipped.
"""
cuda_marker = [m for m in item.iter_markers() if m.name == 'cuda']
if cuda_marker:
cuda_marker, = cuda_marker # asserts len == 1
required_num = cuda_marker.args[0] if cuda_marker.args else 1
if cuda_utils.get_cuda_limit() < required_num:
pytest.skip('{} NVIDIA GPUs required'.format(required_num))
def _get_required_cuda_devices_from_device_name(device_name):
# Returns the number of required CUDA devices to run a test, given a
# device name. If the device is non-CUDA device, 0 is returned.
s = device_name.split(':')
assert len(s) == 2
if s[0] != 'cuda':
return 0
return int(s[1]) + 1
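# Worked examples (illustrative):
#     _get_required_cuda_devices_from_device_name('cuda:1')    # -> 2
#     _get_required_cuda_devices_from_device_name('native:0')  # -> 0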
def _skip_if_no_cuda_device_available(device_name):
# Skip if the device is CUDA device and there's no sufficient CUDA devices.
cuda_device_count = _get_required_cuda_devices_from_device_name(
device_name)
if cuda_device_count > cuda_utils.get_cuda_limit():
pytest.skip()
@pytest.fixture
def device(request):
# A fixture to wrap a test with a device scope, given a device name.
# Device instance is passed to the test.
device_name = request.param
_skip_if_no_cuda_device_available(device_name)
device = chainerx.get_device(device_name)
device_scope = chainerx.using_device(device)
def finalize():
device_scope.__exit__()
request.addfinalizer(finalize)
device_scope.__enter__()
return device
@pytest.fixture
def device_name(request):
# A fixture to check if a device is available using its name
device_name = request.param
_skip_if_no_cuda_device_available(device_name)
return device_name
@pytest.fixture(params=chainerx.testing.all_dtypes)
def dtype(request):
return request.param
@pytest.fixture(params=chainerx.testing.float_dtypes)
def float_dtype(request):
return request.param
@pytest.fixture(params=chainerx.testing.signed_dtypes)
def signed_dtype(request):
return request.param
@pytest.fixture(params=chainerx.testing.numeric_dtypes)
def numeric_dtype(request):
return request.param
@pytest.fixture(params=[True, False])
def is_module(request):
return request.param
_shapes = [
(),
(0,),
(1,),
(2, 3),
(1, 1, 1),
(2, 0, 3),
]
_shapes_as_sequence_or_int = (
_shapes
+ [[], [0]] # shape as a list instead of tuple
+ [0, 1, 5])
@pytest.fixture(params=_shapes)
def shape(request):
return request.param
@pytest.fixture(params=_shapes_as_sequence_or_int)
def shape_as_sequence_or_int(request):
return request.param

# chainer-master/tests/chainerx_tests/test_array_utils.py

import numpy
import pytest
import chainerx
from chainerx_tests import array_utils
@pytest.mark.parametrize('expected,shape', [
(1, ()),
(1, (1,)),
(0, (0,)),
(0, (2, 0)),
(2, (2,)),
(6, (2, 3)),
])
def test_total_size(expected, shape):
assert expected == array_utils.total_size(shape)
@pytest.mark.parametrize('xp', [numpy, chainerx])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('shape', [
(),
(0,),
(1,),
(2, 3),
(1, 1, 1),
(2, 0, 3),
])
@pytest.mark.parametrize('dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('pattern', [1, 2])
@pytest.mark.parametrize('padding', [True, False])
def test_dummy_ndarray(xp, device, shape, dtype, pattern, padding):
a = array_utils.create_dummy_ndarray(
xp, shape, dtype, device=device, pattern=pattern, padding=padding)
assert isinstance(a, xp.ndarray)
assert a.dtype == xp.dtype(dtype)
assert a.shape == shape
# Check values
if xp is chainerx:
a_np = chainerx.to_numpy(a)
else:
a_np = a
if pattern == 1:
if a.dtype.name == 'bool':
expected_data = [i % 2 == 1 for i in range(a.size)]
elif a.dtype.name in chainerx.testing.unsigned_dtypes:
expected_data = list(range(a.size))
else:
expected_data = list(range(-1, a.size - 1))
else:
if a.dtype.name == 'bool':
expected_data = [i % 3 == 0 for i in range(a.size)]
elif a.dtype.name in chainerx.testing.unsigned_dtypes:
expected_data = list(range(1, a.size + 1))
else:
expected_data = list(range(-2, a.size - 2))
numpy.testing.assert_equal(a_np.ravel(), expected_data)
# Check strides
if xp is chainerx:
assert a.device is device
if not padding:
if xp is chainerx:
assert a.is_contiguous
else:
assert a.flags.c_contiguous
@pytest.mark.parametrize('device_spec', [None, 'native', 'native:0'])
def test_dummy_ndarray_device_spec(device_spec):
a = array_utils.create_dummy_ndarray(
chainerx, (2, 3), 'float32', device=device_spec)
assert a.device is chainerx.get_device(device_spec)
@pytest.mark.parametrize('xp', [numpy, chainerx])
@pytest.mark.parametrize('shape,dtype,padding,expected_strides', [
# padding=None means unspecified.
((), 'bool_', (), ()),
((), 'int32', (), ()),
((), 'int32', 1, ()),
((2,), 'bool_', (0,), (1,)),
((2,), 'bool_', (1,), (2,)),
((2,), 'bool_', (2,), (3,)),
((2,), 'bool_', 0, (1,)),
((2,), 'bool_', 1, (2,)),
((2,), 'int32', (0,), (4,)),
((2,), 'int32', (1,), (8,)),
((2,), 'int32', (2,), (12,)),
((2,), 'int32', 0, (4,)),
((2,), 'int32', 1, (8,)),
((0,), 'int32', (0,), (4,)),
((0,), 'int32', (1,), (8,)),
((0,), 'int32', (2,), (12,)),
((2, 3), 'bool_', (0, 0), (3, 1)),
((2, 3), 'bool_', (0, 1), (6, 2)),
((2, 3), 'bool_', (1, 1), (7, 2)),
((2, 3), 'bool_', (2, 3), (14, 4)),
((2, 3), 'bool_', 0, (3, 1)),
((2, 3), 'bool_', 1, (7, 2)),
((2, 3), 'int32', (0, 0,), (12, 4)),
((2, 3), 'int32', (0, 1,), (24, 8)),
((2, 3), 'int32', (1, 1,), (28, 8)),
((2, 3), 'int32', (2, 3,), (56, 16)),
((2, 3), 'int32', 0, (12, 4)),
((2, 3), 'int32', 1, (28, 8)),
((2, 3), 'int32', False, (12, 4)),
((2, 3), 'int32', True, (28, 8)),
((2, 3), 'int32', None, (28, 8)),
((2, 3), 'int16', (2, 3), (28, 8)),
((2, 3, 4), 'int32', (7, 3, 5), (352, 108, 24)),
])
def test_dummy_ndarray_padding(xp, shape, dtype, padding, expected_strides):
if padding is None:
a = array_utils.create_dummy_ndarray(xp, shape, dtype)
else:
a = array_utils.create_dummy_ndarray(xp, shape, dtype, padding=padding)
assert isinstance(a, xp.ndarray)
assert a.shape == shape
assert a.dtype == xp.dtype(dtype)
assert a.strides == expected_strides
@pytest.mark.parametrize('shape', [
(),
(0,),
(2, 3),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_check_device(shape, device):
dtype = 'float32'
a = chainerx.empty(shape, dtype, device=device)
array_utils.check_device(a, device.name)
array_utils.check_device(a, device)
@pytest.mark.parametrize('device_spec', [None, 'native', 'native:0'])
def test_check_device_device_spec(shape, device_spec):
dtype = 'float32'
a = chainerx.empty(shape, dtype, device=device_spec)
device = chainerx.get_device(device_spec)
array_utils.check_device(a, device_spec)
array_utils.check_device(a, device)
@pytest.mark.parametrize_device(['native:0'])
@pytest.mark.parametrize('compare_device_spec', [None, 'native:1'])
@pytest.mark.parametrize('shape', [
(),
(0,),
(2, 3),
])
def test_check_device_fail(shape, device, compare_device_spec):
dtype = 'float32'
a = chainerx.empty(shape, dtype, device=device)
with chainerx.using_device('native:1'):
with pytest.raises(AssertionError):
array_utils.check_device(a, compare_device_spec)

# chainer-master/tests/chainerx_tests/cuda_utils.py

import os
try:
import cupy
except Exception:
cupy = None
import chainerx
_cuda_limit = None
def get_cuda_limit():
global _cuda_limit
if _cuda_limit is not None:
return _cuda_limit
if os.getenv('CHAINERX_TEST_CUDA_DEVICE_LIMIT') is None:
try:
backend = chainerx.get_global_default_context().get_backend('cuda')
_cuda_limit = backend.get_device_count()
except chainerx.BackendError:
_cuda_limit = 0
else:
_cuda_limit = int(os.getenv('CHAINERX_TEST_CUDA_DEVICE_LIMIT'))
if _cuda_limit < 0:
raise chainerx.ChainerxError(
            'CHAINERX_TEST_CUDA_DEVICE_LIMIT must be a non-negative '
'integer: {}'.format(_cuda_limit))
return _cuda_limit
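# Behavior sketch (illustrative):
#     CHAINERX_TEST_CUDA_DEVICE_LIMIT unset -> the 'cuda' backend is queried
#         for its device count (0 if the backend is unavailable).
#     CHAINERX_TEST_CUDA_DEVICE_LIMIT=2     -> tests requiring more than two
#         CUDA devices are skipped; a negative value raises
#         chainerx.ChainerxError.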
def get_current_device():
# Returns the current CUDA device.
# Returns None if cupy is not installed.
    # TODO(niboshi): Better to remove the dependency on cupy
if cupy is None:
return None
return cupy.cuda.runtime.getDevice()

# chainer-master/tests/chainerx_tests/op_utils.py

import inspect
import sys
import unittest
import numpy
import pytest
import chainer
import chainer.testing
import chainerx
class OpTest(chainer.testing.function_link.FunctionTestBase):
"""Base class for op test.
It must be used in conjunction with `op_test` decorator.
Examples:
@op_utils.op_test(['native:0', 'cuda:0'])
    class TestRelu(op_utils.OpTest):
# ReLU function has a non-differentiable point around zero, so
# dodge_nondifferentiable should be set to True.
dodge_nondifferentiable = True
def setup(self, float_dtype):
self.dtype = float_dtype
def generate_inputs(self):
dtype = self.dtype
x = numpy.random.uniform(-1, 1, (1, 3)).astype(dtype)
            return x,
        def forward_chainerx(self, inputs):
            x, = inputs
            y = chainerx.relu(x)
            return y,
        def forward_expected(self, inputs):
            x, = inputs
expected = x.copy()
expected[expected < 0] = 0
return expected,
In this example, `float_dtype` is a Pytest fixture for parameterizing
floating-point dtypes (i.e. float16, float32, float64). As seen from
this, arguments in the `setup` method are treated as Pytest fixtures.
Test implementations must at least override the following methods:
* `generate_inputs`: Generates inputs to the test target.
* `forward_chainerx`: Forward implementation using ChainerX.
* `forward_expected`: Forward reference implementation.
It can have the same attributes as `chainer.testing.FunctionTestCase`.
"""
def setup(self):
# This method can be overridden by a concrete class with arbitrary
# arguments.
pass
def teardown(self):
pass
def forward(self, inputs, device):
# device is chainer.Device and it's ignored.
# chainerx's default device is used instead.
test_self = self
class MyFunc(chainer.FunctionNode):
def forward_chainerx(self, inputs):
return test_self.forward_chainerx(inputs)
return MyFunc().apply(inputs)
def forward_chainerx(self, inputs):
raise NotImplementedError(
'Op test implementation must override `forward_chainerx`.')
def run_test_forward(self, backend_config):
# Skipping Forward -> Test Skipped
if self.skip_forward_test:
raise unittest.SkipTest('skip_forward_test is set')
super(OpTest, self).run_test_forward(backend_config)
def run_test_backward(self, backend_config):
# Skipping Backward -> Test PASS
if self.skip_backward_test:
return
super(OpTest, self).run_test_backward(backend_config)
def run_test_double_backward(self, backend_config):
# Skipping Double Backward -> Test PASS
if self.skip_double_backward_test:
return
super(OpTest, self).run_test_double_backward(backend_config)
class ChainerOpTest(OpTest):
"""Base class for op test that compares the output with Chainer
implementation.
It must be used in conjunction with `op_test` decorator.
Examples:
@op_utils.op_test(['native:0', 'cuda:0'])
    class TestConv(op_utils.ChainerOpTest):
def setup(self, float_dtype):
self.dtype = float_dtype
def generate_inputs(self):
dtype = self.dtype
x = numpy.random.uniform(-1, 1, (1, 3)).astype(dtype)
w = numpy.random.uniform(-1, 1, (5, 3)).astype(dtype)
b = numpy.random.uniform(-1, 1, (5,)).astype(dtype)
return x, w, b
def forward_chainerx(self, inputs):
x, w, b = inputs
y = chainerx.conv(x, w, b, self.stride, self.pad, self.cover_all)
return y,
def forward_chainer(self, inputs):
x, w, b = inputs
y = chainer.functions.convolution_nd(
x, w, b, self.stride, self.pad, self.cover_all)
return y,
In this example, `float_dtype` is a Pytest fixture for parameterizing
floating-point dtypes (i.e. float16, float32, float64). As seen from
this, arguments in the `setup` method are treated as Pytest fixtures.
Test implementations must at least override the following methods:
* `generate_inputs`: Generates inputs to the test target.
* `forward_chainerx`: Forward implementation using ChainerX.
* `forward_chainer`: Forward reference implementation using Chainer.
It can have the same attributes as `chainer.testing.FunctionTestCase`.
"""
def forward_expected(self, inputs):
output_vars = self.forward_chainer(inputs)
return tuple([y.array for y in output_vars])
def forward_chainerx(self, inputs):
raise NotImplementedError(
'Op test implementation must override `forward_chainerx`.')
def forward_chainer(self, inputs):
raise NotImplementedError(
'Op test implementation must override `forward_chainer`.')
class NumpyOpTest(OpTest):
"""Base class for op test that compares the output with NumPy
implementation.
It must be used in conjunction with `op_test` decorator.
Examples:
@op_utils.op_test(['native:0', 'cuda:0'])
    class TestTanh(op_utils.NumpyOpTest):
def setup(self, float_dtype):
            self.dtype = float_dtype
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
return x,
def forward_xp(self, inputs, xp):
x, = inputs
return xp.tanh(x),
In this example, `float_dtype` is a Pytest fixture for parameterizing
floating-point dtypes (i.e. float16, float32, float64). As seen from
this, arguments in the `setup` method are treated as Pytest fixtures.
Test implementations must at least override the following methods:
* `generate_inputs`: Generates inputs to the test target.
* `forward_xp`: Forward implementation using both ChainerX and NumPy.
It can have the same attributes as `chainer.testing.FunctionTestCase`.
This test also compares strides of forward output arrays with NumPy
outputs. Set ``check_numpy_strides_compliance`` attribute to ``False``
to skip this check.
Acceptable errors in forward computations can be configured with
``forward_accept_errors``. If both ChainerX/NumPy forward implementations
raise one of those errors, the test will succeed and
backward/double-backward tests will be skipped.
"""
check_numpy_strides_compliance = True
# Acceptable errors in forward computation.
forward_accept_errors = ()
# Detected acceptable error in forward_chainerx / forward_expected.
# None : Not computed yet
# 'ok' : Computed without error.
# Exception: Error was detected.
__forward_error_chainerx = None
__forward_error_expected = None
@property
def is_forward_successful_with_accept_errors(self):
# Returns True if chainerx/expected forward computations are finished
# and both raised acceptable errors.
# This is used from `_create_test_entry_function` to skip
# backward/double-backward tests.
return (self.__forward_error_chainerx not in (None, 'ok')
and self.__forward_error_expected not in (None, 'ok'))
def __get_accept_errors(self):
# Returns the acceptable errors. In backward/double-backward tests,
# no error is acceptable.
if self.test_name == 'test_forward':
return self.forward_accept_errors
return ()
def forward_chainerx(self, inputs):
# Computes the forward pass in ChainerX.
#
# In case of an acceptable error, the error is stored and a dummy
# output array is returned.
#
# The detected errors are checked in `check_forward_outputs`, and
# also in `_create_test_entry_function` to skip
# backward/double-backward tests.
accept_errors = self.__get_accept_errors()
try:
outputs = self.forward_xp(inputs, chainerx)
self.__forward_error_chainerx = 'ok'
except accept_errors as e:
# Keep detected error
self.__forward_error_chainerx = e
# A dummy output array is returned
y = chainerx.zeros((0,), 'float32')
outputs = y,
return outputs
def forward_expected(self, inputs):
# Computes the forward pass in NumPy.
# Also see comments in `forward_chainerx`.
accept_errors = self.__get_accept_errors()
try:
outputs = self.forward_xp(inputs, numpy)
self.__forward_error_expected = 'ok'
except accept_errors as e:
# Keep detected error
self.__forward_error_expected = e
# A dummy output array is returned
y = numpy.zeros((0,), 'float32')
outputs = y,
return tuple([numpy.asarray(y) for y in outputs])
def forward_xp(self, inputs, xp):
raise NotImplementedError(
'Op test implementation must override `forward_xp`.')
def check_forward_outputs(self, outputs, expected_outputs):
# Check for detected acceptable errors.
error_chainerx = self.__forward_error_chainerx
error_expected = self.__forward_error_expected
assert error_chainerx is not None
assert error_expected is not None
if not (error_chainerx == 'ok' and error_expected == 'ok'):
# If only one of chainerx/expected caused an error, make the
# forward test fail.
if error_chainerx == 'ok':
chainer.testing.FunctionTestError.fail(
'Error raised in NumPy while not in ChainerX.',
error_expected)
if error_expected == 'ok':
chainer.testing.FunctionTestError.fail(
'Error raised in ChainerX while not in NumPy.',
error_chainerx)
# Both caused acceptable errors
return
# Both successful
assert error_chainerx == 'ok'
assert error_expected == 'ok'
# Default check, including numeric comparison
super(NumpyOpTest, self).check_forward_outputs(
outputs, expected_outputs)
# Check strides
if self.check_numpy_strides_compliance:
if not all(
a.strides == e.strides
for a, e in zip(outputs, expected_outputs)):
msg = (
'Strides do not match with NumPy outputs.\n'
'Expected shapes and dtypes: {}\n'
'Actual shapes and dtypes: {}\n'
'Expected strides: {}\n'
'Actual strides: {}\n'.format(
chainer.utils._format_array_props(expected_outputs),
chainer.utils._format_array_props(outputs),
', '.join(str(e.strides) for e in expected_outputs),
', '.join(str(a.strides) for a in outputs)))
chainer.testing.FunctionTestError.fail(msg)
def _make_backend_config(device_name):
backend_config = chainer.testing.BackendConfig({
'use_chainerx': True,
'chainerx_device': device_name,
})
return backend_config
def _create_test_entry_function(cls, module, devices):
# Creates a test entry function from the template class, and places it in
# the same module as the class.
    # We enforce the 'Test' prefix in OpTest implementations so that they look
    # like unittest.TestCase implementations. On the other hand, the generated
    # entry function must have the 'test_' prefix in order to be found by
    # pytest test collection.
if not cls.__name__.startswith('Test'):
raise TypeError(
'OpTest class name must start with \'Test\'. Actual: {!r}'.format(
cls.__name__))
func_name = 'test_{}'.format(cls.__name__[len('Test'):])
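    # e.g. (illustrative): a class named 'TestRelu' yields an entry function
    # named 'test_Relu', which pytest then collects.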
@pytest.mark.parametrize_device(devices)
def entry_func(device, *args, **kwargs):
backend_config = _make_backend_config(device.name)
# Forward test
obj = cls()
try:
obj.setup(*args, **kwargs)
obj.run_test_forward(backend_config)
finally:
obj.teardown()
# If this is a NumpyOpTest instance, skip backward/double-backward
# tests if the forward test succeeds with acceptable errors.
if isinstance(obj, NumpyOpTest):
if obj.is_forward_successful_with_accept_errors:
return # success with expected errors
# Backward test
obj = cls()
try:
obj.setup(*args, **kwargs)
obj.run_test_backward(backend_config)
finally:
obj.teardown()
# Double-backward test
obj = cls()
try:
obj.setup(*args, **kwargs)
obj.run_test_double_backward(backend_config)
finally:
obj.teardown()
entry_func.__name__ = func_name
# Set the signature of the entry function
sig = inspect.signature(cls.setup)
params = list(sig.parameters.values())
params = params[1:] # Remove `self` argument
device_param = inspect.Parameter(
'device', inspect.Parameter.POSITIONAL_OR_KEYWORD)
params = [device_param] + params # Prepend `device` argument
entry_func.__signature__ = inspect.Signature(params)
# Set the pytest mark
try:
pytestmark = cls.pytestmark
entry_func.pytestmark += pytestmark
except AttributeError:
pass
# Place the entry function in the module of the class
setattr(module, func_name, entry_func)
def op_test(devices):
"""Decorator to set up an op test.
This decorator can be used in conjunction with either ``NumpyOpTest`` or
``ChainerOpTest`` to define an op test.
See the documentation of the respective classes for detailed explanation
and examples.
Args:
devices: List of devices to test.
"""
def wrap(cls):
# TODO(niboshi): Avoid using private entries in chainer.testing.
if isinstance(
cls, chainer.testing._bundle._ParameterizedTestCaseBundle):
classes = []
for c, m, name in cls.cases:
classes.append((c, m))
if m is not None:
# The input is a parameterized test case.
# Remove it from its module.
delattr(sys.modules[m], name)
else:
classes = [(cls, cls.__module__)]
for cls, mod in classes:
if not issubclass(cls, OpTest):
raise TypeError(
'@op_test decorator can only be applied to OpTest class '
'definition.')
_create_test_entry_function(cls, sys.modules[mod], devices)
# return None: no other decorator can be applied after this decorator.
return None
return wrap
def fix_random():
"""Decorator that fixes random numbers in an op test.
.. seealso:: :func:`~chainer.testing.fix_random`
"""
return chainer.testing.random.fix_random(
setup_method='setup',
teardown_method='teardown')

# chainer-master/tests/chainerx_tests/__init__.py (empty file)

# chainer-master/tests/chainerx_tests/array_utils.py

import functools
import operator
import numpy
import chainerx
def total_size(shape):
return functools.reduce(operator.mul, shape, 1)
def uniform(shape, dtype, low=None, high=None, *, random_state=numpy.random):
kind = numpy.dtype(dtype).kind
if kind == 'f':
return (random_state.uniform(
-1 if low is None else low, 1 if high is None else high, shape)
.astype(dtype, copy=False))
if kind == 'u':
return random_state.randint(
0 if low is None else low, 4 if high is None else high,
size=shape, dtype=dtype)
if kind == 'i':
return random_state.randint(
-2 if low is None else low, 3 if high is None else high,
size=shape, dtype=dtype)
if kind == 'b':
return random_state.randint(
0 if low is None else low, 2 if high is None else high,
size=shape, dtype=dtype)
assert False, dtype
def shaped_arange(shape, dtype):
size = total_size(shape)
a = numpy.arange(1, size + 1).reshape(shape)
dtype = numpy.dtype(dtype)
if dtype == numpy.bool_:
return a % 2 == 0
return a.astype(dtype, copy=False)
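# Worked example (illustrative):
#     shaped_arange((2, 2), 'float32')
#     # -> array([[1., 2.], [3., 4.]], dtype=float32)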
# TODO(beam2d): Think of a better way to make multiple different arrays
def create_dummy_ndarray(
xp, shape, dtype, device=None, pattern=1, padding=True, start=None):
dtype = chainerx.dtype(dtype).name
size = total_size(shape)
if dtype in ('bool', 'bool_'):
if pattern == 1:
data = [i % 2 == 1 for i in range(size)]
else:
data = [i % 3 == 0 for i in range(size)]
else:
if start is None:
if dtype in chainerx.testing.unsigned_dtypes:
start = 0 if pattern == 1 else 1
else:
start = -1 if pattern == 1 else -2
data = list(range(start, size + start))
if padding is True:
padding = 1
elif padding is False:
padding = 0
# Unpadded array
a_unpad = numpy.array(data, dtype=dtype).reshape(shape)
if padding == 0:
a_np = a_unpad
else:
# Create possibly padded (non-contiguous) array.
# Elements in each axis will be spaced with corresponding padding.
# The padding for axis `i` is computed as `itemsize * padding[i]`.
if numpy.isscalar(padding):
padding = (padding,) * len(shape)
assert len(padding) == len(shape)
# Allocate 1-dim raw buffer
buf_nitems = 1
for dim, pad in zip((1,) + shape[::-1], padding[::-1] + (0,)):
buf_nitems = buf_nitems * dim + pad
# intentionally using uninitialized padding values
buf_a = numpy.empty((buf_nitems,), dtype=dtype)
# Compute strides
strides = []
st = 1
itemsize = buf_a.itemsize
for dim, pad in zip(shape[::-1], padding[::-1]):
st += pad
strides.append(st * itemsize)
st *= dim
strides = tuple(strides[::-1])
# Create strided array and copy data
a_np = numpy.asarray(
numpy.lib.stride_tricks.as_strided(buf_a, shape, strides))
a_np[...] = a_unpad
numpy.testing.assert_array_equal(a_np, a_unpad)
# Convert to NumPy or chainerx array
if xp is chainerx:
a = chainerx.testing._fromnumpy(a_np, keepstrides=True, device=device)
assert a.strides == a_np.strides
else:
a = a_np
# Checks
if padding == 0 or all(pad == 0 for pad in padding):
if xp is chainerx:
assert a.is_contiguous
else:
assert a.flags.c_contiguous
assert a.shape == shape
assert a.dtype.name == dtype
return a
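# Worked example of the padding logic above (illustrative): with shape=(2, 3),
# dtype='int32' and the default padding=True (one item of padding per axis),
# every element along each axis is spaced by one extra item, giving strides of
# (28, 8) instead of the contiguous (12, 4) -- matching the expectations in
# tests/chainerx_tests/test_array_utils.py.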
def check_device(a, device=None):
if device is None:
device = chainerx.get_default_device()
elif isinstance(device, str):
device = chainerx.get_device(device)
assert a.device is device

# chainer-master/tests/chainerx_tests/dtype_utils.py

import itertools
import numpy
import chainerx
def _permutate_dtype_mapping(dtype_mapping_list):
    # Permutes the input dtypes of each dtype mapping.
d = {}
for in_dtypes, out_dtype in dtype_mapping_list:
for in_dtypes_ in itertools.permutations(in_dtypes):
d[in_dtypes_] = out_dtype
return sorted(d.items())
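# Worked example (illustrative):
#     _permutate_dtype_mapping([(('uint8', 'int8'), 'int16')])
#     # -> [(('int8', 'uint8'), 'int16'), (('uint8', 'int8'), 'int16')]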
# Used for e.g. testing power.
result_numeric_dtypes_two_arrays = [
# Floats.
(('float16', 'float16'), 'float16'),
(('float32', 'float32'), 'float32'),
(('float64', 'float64'), 'float64'),
(('float16', 'float32'), 'float32'),
(('float32', 'float64'), 'float64'),
(('float64', 'float16'), 'float64'),
# Signed ints.
(('int8', 'int8'), 'int8'),
(('int16', 'int16'), 'int16'),
(('int32', 'int32'), 'int32'),
(('int64', 'int64'), 'int64'),
(('int8', 'int16'), 'int16'),
(('int8', 'int64'), 'int64'),
(('int16', 'int32'), 'int32'),
(('int32', 'int8'), 'int32'),
(('int32', 'int64'), 'int64'),
(('int64', 'int16'), 'int64'),
# Unsigned ints.
(('uint8', 'uint8'), 'uint8'),
# Signed int and unsigned int.
(('uint8', 'int8'), 'int16'),
(('uint8', 'int16'), 'int16'),
(('int32', 'uint8'), 'int32'),
# Signed int and float.
(('int8', 'float16'), 'float16'),
(('int16', 'float64'), 'float64'),
(('int64', 'float32'), 'float32'),
(('float16', 'int64'), 'float16'),
(('float32', 'int32'), 'float32'),
# Unsigned int and float.
(('uint8', 'float16'), 'float16'),
(('float16', 'uint8'), 'float16'),
]
result_comparable_dtypes_two_arrays = [
# Bools.
(('bool_', 'bool_'), 'bool_'),
] + result_numeric_dtypes_two_arrays
result_dtypes_two_arrays = _permutate_dtype_mapping([
# Bools.
(('bool_', 'bool_'), 'bool_'),
# Bool and other.
(('bool_', 'uint8'), 'uint8'),
(('bool_', 'int8'), 'int8'),
(('bool_', 'int16'), 'int16'),
(('bool_', 'float16'), 'float16'),
(('bool_', 'float64'), 'float64'),
]) + result_numeric_dtypes_two_arrays
result_dtypes_three_arrays = _permutate_dtype_mapping([
# Signed ints.
(('int32', 'int32', 'int32'), 'int32'),
(('int8', 'int8', 'int32'), 'int32'),
(('int8', 'int16', 'int32'), 'int32'),
(('int8', 'int32', 'int32'), 'int32'),
(('int8', 'int64', 'int32'), 'int64'),
# Unsigned ints.
(('uint8', 'uint8', 'uint8'), 'uint8'),
(('uint8', 'uint8', 'int8'), 'int16'),
(('uint8', 'int8', 'int8'), 'int16'),
(('uint8', 'int8', 'int16'), 'int16'),
(('uint8', 'uint8', 'int16'), 'int16'),
# Float and signed int.
(('float16', 'int8', 'int8'), 'float16'),
(('float16', 'int32', 'int64'), 'float16'),
(('float16', 'float32', 'int64'), 'float32'),
# Float and unsigned int.
(('float16', 'int8', 'uint8'), 'float16'),
(('float16', 'int32', 'uint8'), 'float16'),
(('float16', 'float32', 'uint8'), 'float32'),
# Bool and other.
(('bool_', 'uint8', 'uint8'), 'uint8'),
(('bool_', 'bool_', 'uint8'), 'uint8'),
(('bool_', 'int8', 'uint8'), 'int16'),
(('bool_', 'bool_', 'int32'), 'int32'),
(('bool_', 'float16', 'float32'), 'float32'),
(('bool_', 'bool_', 'float64'), 'float64'),
])
result_float_dtypes_array_scalar = [
(('float16',), float, 'float16'),
(('float32',), float, 'float32'),
(('float64',), float, 'float64'),
(('float16',), numpy.float64, 'float16'),
(('float64',), numpy.float16, 'float64'),
]
result_numeric_dtypes_array_scalar = [
# Float scalar.
(('int8',), float, 'float32'),
(('int16',), float, 'float32'),
(('int32',), float, 'float32'),
(('int64',), float, 'float32'),
(('uint8',), float, 'float32'),
(('int8',), numpy.float32, 'float32'),
(('int64',), numpy.float16, 'float32'),
(('uint8',), numpy.float64, 'float32'),
# Int scalar.
(('int8',), int, 'int8'),
(('int16',), int, 'int16'),
(('int32',), int, 'int32'),
(('int64',), int, 'int64'),
(('uint8',), int, 'uint8'),
(('float16',), int, 'float16'),
(('float32',), int, 'float32'),
(('float64',), int, 'float64'),
(('int16',), numpy.int16, 'int16'),
(('uint8',), numpy.int8, 'uint8'),
(('float64',), numpy.int8, 'float64'),
(('float16',), numpy.int64, 'float16'),
] + result_float_dtypes_array_scalar
result_comparable_dtypes_array_scalar = [
(('bool_',), bool, 'bool_'),
(('bool_',), numpy.bool_, 'bool_'),
] + result_numeric_dtypes_array_scalar
result_dtypes_array_scalar = [
# Bool scalar.
(('bool_',), bool, 'bool_'),
(('int8',), bool, 'int8'),
(('int16',), bool, 'int16'),
(('int32',), bool, 'int32'),
(('int64',), bool, 'int64'),
(('uint8',), bool, 'uint8'),
(('float16',), bool, 'float16'),
(('float32',), bool, 'float32'),
(('float64',), bool, 'float64'),
(('bool_',), numpy.bool_, 'bool_'),
(('int16',), numpy.bool_, 'int16'),
(('uint8',), numpy.bool_, 'uint8'),
(('float32',), numpy.bool_, 'float32'),
] + result_numeric_dtypes_array_scalar
def cast_if_numpy_array(xp, array, chx_expected_dtype):
"""Casts NumPy result array to match the dtype of ChainerX's corresponding
result.
This function receives result arrays for both NumPy and ChainerX and only
converts dtype of the NumPy array.
"""
if xp is chainerx:
assert isinstance(array, chainerx.ndarray)
return array
if xp is numpy:
assert isinstance(array, (numpy.ndarray, numpy.generic))
# Dtype conversion to allow comparing the correctnesses of the values.
return array.astype(chx_expected_dtype, copy=False)
assert False
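# Usage sketch (illustrative): for an op whose ChainerX output dtype is
# float32 but whose NumPy counterpart yields int8,
#     cast_if_numpy_array(chainerx, y_chx, 'float32')  # returned unchanged
#     cast_if_numpy_array(numpy, y_np, 'float32')      # cast to float32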
def make_same_in_out_dtypes(number_of_in_params, dtypes):
return [((dtype,) * number_of_in_params, dtype) for dtype in dtypes]
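# Worked example (illustrative):
#     make_same_in_out_dtypes(2, ['float16', 'float32'])
#     # -> [(('float16', 'float16'), 'float16'),
#     #     (('float32', 'float32'), 'float32')]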

# chainer-master/tests/chainerx_tests/math_utils.py

import unittest
import numpy
import chainerx
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
class IgnoreNumpyFloatingPointError(object):
def __enter__(self):
self.old_settings = numpy.seterr(all='ignore')
def __exit__(self, *args):
numpy.seterr(**self.old_settings)
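# Usage sketch (illustrative): suppresses NumPy floating point warnings and
# errors within the block, e.g.
#     with IgnoreNumpyFloatingPointError():
#         y = numpy.log(numpy.zeros(1))  # no 'divide by zero' warning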
class UnaryMathTestBase(object):
input = None
def setup(self):
in_dtype, = self.in_dtypes
in_kind = numpy.dtype(in_dtype).kind
if numpy.dtype(in_dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
if in_dtype == 'float16':
self.check_forward_options.update({'rtol': 1e-3, 'atol': 1e-3})
self.check_backward_options.update({'rtol': 3e-3, 'atol': 3e-3})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 1e-2})
else:
self.check_backward_options.update({'rtol': 1e-3, 'atol': 1e-4})
self.check_double_backward_options.update(
{'rtol': 1e-3, 'atol': 1e-4})
input = self.input
if (in_kind == 'u'
and isinstance(input, (int, float))
and input < 0):
raise unittest.SkipTest(
'Combination of uint dtype and negative input cannot be '
'tested')
def generate_inputs(self):
in_dtype, = self.in_dtypes
if isinstance(self.input, numpy.ndarray):
return self.input.astype(in_dtype),
if self.input == 'random':
return array_utils.uniform(self.shape, in_dtype),
if isinstance(self.input, (bool, int, float)):
return numpy.full(self.shape, self.input, dtype=in_dtype),
assert False
def forward_xp(self, inputs, xp):
a, = inputs
# This cast was introduced in order to avoid decreasing precision.
        # e.g., numpy.sqrt(x) becomes a float16 array when x is an int8 array.
a = dtype_utils.cast_if_numpy_array(xp, a, self.out_dtype)
with IgnoreNumpyFloatingPointError():
y = self.func(xp, a)
y = dtype_utils.cast_if_numpy_array(xp, y, self.out_dtype)
return y,
class BinaryMathTestBase(object):
def setup(self):
in_dtype1, in_dtype2 = self.in_dtypes
kind1 = numpy.dtype(in_dtype1).kind
kind2 = numpy.dtype(in_dtype2).kind
if kind1 != 'f' or kind2 != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
if in_dtype1 == 'float16' or in_dtype2 == 'float16':
self.check_forward_options.update({'rtol': 1e-3, 'atol': 1e-3})
self.check_backward_options.update({'rtol': 1e-2, 'atol': 3e-3})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 3e-3})
def generate_inputs(self):
in_dtype1, in_dtype2 = self.in_dtypes
in_shape1, in_shape2 = self.in_shapes
if self.input_lhs == 'random':
a = array_utils.uniform(in_shape1, in_dtype1)
elif isinstance(self.input_lhs, (bool, int, float)):
a = numpy.full(in_shape1, self.input_lhs, dtype=in_dtype1)
else:
assert False
if self.input_rhs == 'random':
b = array_utils.uniform(in_shape2, in_dtype2)
elif isinstance(self.input_rhs, (bool, int, float)):
b = numpy.full(in_shape2, self.input_rhs, dtype=in_dtype2)
else:
assert False
return a, b
def forward_xp(self, inputs, xp):
a, b = inputs
# This cast was introduced in order to avoid decreasing precision.
        # e.g., x / y becomes a float16 array when x and y are int8 arrays.
a = dtype_utils.cast_if_numpy_array(xp, a, self.out_dtype)
b = dtype_utils.cast_if_numpy_array(xp, b, self.out_dtype)
with IgnoreNumpyFloatingPointError():
y = self.func(xp, a, b)
y = dtype_utils.cast_if_numpy_array(xp, y, self.out_dtype)
return y,
class InplaceUnaryMathTestBase(UnaryMathTestBase):
skip_backward_test = True
skip_double_backward_test = True
def forward_xp(self, inputs, xp):
a, = inputs
if xp is chainerx:
a_ = a.as_grad_stopped().copy()
else:
a_ = a.copy()
with IgnoreNumpyFloatingPointError():
ret = self.func(xp, a_)
assert ret is None # func should not return anything
return a_,
class InplaceBinaryMathTestBase(BinaryMathTestBase):
skip_backward_test = True
skip_double_backward_test = True
def forward_xp(self, inputs, xp):
a, b = inputs
b = dtype_utils.cast_if_numpy_array(xp, b, a.dtype)
if xp is chainerx:
a_ = a.as_grad_stopped().copy()
b_ = b.as_grad_stopped()
else:
a_ = a.copy()
b_ = b
with IgnoreNumpyFloatingPointError():
ret = self.func(xp, a_, b_)
assert ret is None # func should not return anything
return a_,
def _convert_numpy_scalar(scalar, dtype):
# Implicit casting in NumPy's multiply depends on the 'casting' argument,
# which is not yet supported (ChainerX always casts).
# Therefore, we explicitly cast the scalar to the dtype of the ndarray
# before the multiplication for NumPy.
return numpy.dtype(dtype).type(scalar)
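# Worked example (illustrative):
#     _convert_numpy_scalar(2, 'uint8')  # -> numpy.uint8(2)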
class MathScalarTestBase(UnaryMathTestBase):
def func(self, xp, a):
scalar = self.scalar_type(self.scalar_value)
return self.func_scalar(xp, a, scalar)
class InplaceMathScalarTestBase(InplaceUnaryMathTestBase):
def func(self, xp, a):
scalar = self.scalar_type(self.scalar_value)
if xp is numpy:
# This cast is to avoid TypeError in the following case
# a: uint8 0-dim numpy.ndarray
# scalar: int
in_dtype, = self.in_dtypes
scalar = _convert_numpy_scalar(scalar, in_dtype)
return self.func_scalar(xp, a, scalar)
def _permutate_shapes(shapes_list):
    # Permutes the input shapes.
permutated_shapes_list = []
for in_shape1, in_shape2 in shapes_list:
permutated_shapes_list.append((in_shape1, in_shape2))
permutated_shapes_list.append((in_shape2, in_shape1))
return list(set(permutated_shapes_list))
shapes_combination_inplace_binary = [
# Same shapes
((1,), (1,)),
((3, 4), (3, 4)),
# Broadcast
((10,), (1,)),
((3, 4), (3, 1)),
((3, 4), (1, 4)),
((3, 4), (4,)),
((3, 4), (1, 1)),
((3, 4), (1,)),
((2, 3, 4), (1, 1, 1)),
# 0-dim shape
((), ()),
((1,), ()),
((3,), ()),
((2, 3), ()),
# 0-size shape
((0,), (0,)),
((0,), (1,)),
((0,), ()),
((2, 0, 3), (2, 0, 3)),
# TODO(imanishi): Fix strides
# ((2, 0, 3), (0, 1)),
]
shapes_combination_binary = _permutate_shapes([
# Broadcast
((3, 1), (1, 4)),
((2, 1, 4), (3, 1)),
# 0-size shape
# TODO(imanishi): Fix strides
# ((0, 1), (0, 1, 0)),
]) + _permutate_shapes(shapes_combination_inplace_binary)
# An association list that maps input dtypes to the dtype that ChainerX's
# real-valued math functions should return.
in_out_float_dtypes_math_functions = [
# Float.
(('float16',), 'float16'),
(('float32',), 'float32'),
(('float64',), 'float64'),
]
in_out_dtypes_math_functions = in_out_float_dtypes_math_functions + [
# Signed int.
(('int8',), 'float32'),
(('int16',), 'float32'),
(('int32',), 'float32'),
(('int64',), 'float32'),
# Unsigned int.
(('uint8',), 'float32'),
# Bool.
(('bool_',), 'float32'),
]
in_out_dtypes_math_binary_functions = [
# integer mixed
(('int8', 'int16'), 'float32'),
(('int8', 'int64'), 'float32'),
(('int8', 'uint8'), 'float32'),
(('int16', 'int32'), 'float32'),
(('int16', 'int64'), 'float32'),
(('int32', 'uint8'), 'float32'),
(('int32', 'int8'), 'float32'),
(('int64', 'int32'), 'float32'),
(('int64', 'uint8'), 'float32'),
(('uint8', 'int16'), 'float32'),
# integer float mixed
(('int8', 'float16'), 'float16'),
(('int8', 'float64'), 'float64'),
(('int16', 'float16'), 'float16'),
(('int16', 'float32'), 'float32'),
(('int32', 'float32'), 'float32'),
(('int32', 'float64'), 'float64'),
(('int64', 'float16'), 'float16'),
(('int64', 'float64'), 'float64'),
(('uint8', 'float16'), 'float16'),
(('uint8', 'float32'), 'float32'),
(('float32', 'int8'), 'float32'),
(('float64', 'int16'), 'float64'),
(('float16', 'int32'), 'float16'),
(('float32', 'int64'), 'float32'),
(('float64', 'uint8'), 'float64'),
# float mixed
(('float16', 'float16'), 'float16'),
(('float16', 'float32'), 'float32'),
(('float16', 'float64'), 'float64'),
(('float32', 'float16'), 'float32'),
(('float32', 'float32'), 'float32'),
(('float32', 'float64'), 'float64'),
(('float64', 'float16'), 'float64'),
(('float64', 'float32'), 'float64'),
(('float64', 'float64'), 'float64'),
]

# chainer-master/tests/chainerx_tests/unit_tests/test_cupy_interop.py

# These tests are integration tests with CuPy.
import numpy
import pytest
import sys
import chainerx
import chainerx.testing
try:
import cupy
except Exception:
cupy = None
_fromrawpointer = chainerx._core._fromrawpointer
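# Signature of _fromrawpointer, as inferred from the calls below
# (illustrative):
#     _fromrawpointer(ptr, shape, dtype, strides, device_name, offset, owner)
# It creates a chainerx.ndarray that views `owner`'s memory without copying.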
@pytest.mark.cuda()
def test_cupy_to_chainerx_contiguous():
dtype = numpy.float32
a_cupy = cupy.arange(6, dtype=dtype).reshape((2, 3))
a_cupy_refcount_before = sys.getrefcount(a_cupy)
a_chx = _fromrawpointer(
a_cupy.data.mem.ptr,
a_cupy.shape,
a_cupy.dtype,
a_cupy.strides,
'cuda:0',
0,
a_cupy)
assert sys.getrefcount(a_cupy) == a_cupy_refcount_before + 1
assert a_chx.device.name == 'cuda:0'
chainerx.testing.assert_array_equal_ex(a_chx, a_cupy.get())
# Write to a_cupy
a_cupy[0, 1] = 8
chainerx.testing.assert_array_equal_ex(
a_chx, numpy.array([[0, 8, 2], [3, 4, 5]], dtype))
# Write to a_chx
a_chx += 1
chainerx.testing.assert_array_equal_ex(
a_cupy.get(), numpy.array([[1, 9, 3], [4, 5, 6]], dtype))
@pytest.mark.cuda()
def test_cupy_to_chainerx_delete_cupy_first():
dtype = numpy.float32
a_cupy = cupy.arange(6, dtype=dtype).reshape((2, 3))
a_chx = _fromrawpointer(
a_cupy.data.mem.ptr,
a_cupy.shape,
a_cupy.dtype,
a_cupy.strides,
'cuda:0',
0,
a_cupy)
del a_cupy
a_chx += 1
chainerx.testing.assert_array_equal_ex(
a_chx, numpy.array([[1, 2, 3], [4, 5, 6]], dtype))
@pytest.mark.cuda()
def test_cupy_to_chainerx_delete_chainerx_first():
dtype = numpy.float32
a_cupy = cupy.arange(6, dtype=dtype).reshape((2, 3))
a_chx = _fromrawpointer(
a_cupy.data.mem.ptr,
a_cupy.shape,
a_cupy.dtype,
a_cupy.strides,
'cuda:0',
0,
a_cupy)
del a_chx
a_cupy += 1
chainerx.testing.assert_array_equal_ex(
a_cupy.get(), numpy.array([[1, 2, 3], [4, 5, 6]], dtype))
@pytest.mark.cuda()
def test_cupy_to_chainerx_from_invalid_pointer():
dtype = numpy.float32
a_numpy = numpy.arange(6, dtype=dtype).reshape((2, 3))
with pytest.raises(chainerx.ChainerxError):
_fromrawpointer(
a_numpy.ctypes.data,
a_numpy.shape,
a_numpy.dtype,
a_numpy.strides,
'cuda:0',
0,
a_numpy)
@pytest.mark.cuda()
def test_cupy_to_chainerx_noncontiguous_with_offset():
dtype = numpy.float32
a_cupy = cupy.arange(12, dtype=dtype).reshape((2, 6))[::-1, ::2]
offset = a_cupy.data.ptr - a_cupy.data.mem.ptr
# test preconditions
assert offset > 0
assert not a_cupy.flags.c_contiguous
a_chx = _fromrawpointer(
a_cupy.data.mem.ptr,
a_cupy.shape,
a_cupy.dtype,
a_cupy.strides,
'cuda:0',
offset,
a_cupy)
assert a_chx.strides == a_cupy.strides
chainerx.testing.assert_array_equal_ex(
a_chx, a_cupy.get(), strides_check=False)
a_cupy[1, 1] = 53
assert a_chx.strides == a_cupy.strides
chainerx.testing.assert_array_equal_ex(
a_chx, a_cupy.get(), strides_check=False)
@pytest.mark.cuda()
def test_cupy_to_chainerx_noncontiguous_without_offset():
    # This test includes access to addresses before the given pointer (because
    # of a negative stride).
dtype = numpy.float32
a_cupy = cupy.arange(12, dtype=dtype).reshape((2, 6))[::-1, ::2]
    # test preconditions
assert a_cupy.data.mem.ptr < a_cupy.data.ptr
assert not a_cupy.flags.c_contiguous
a_chx = _fromrawpointer(
a_cupy.data.ptr,
a_cupy.shape,
a_cupy.dtype,
a_cupy.strides,
'cuda:0',
0,
a_cupy)
assert a_chx.strides == a_cupy.strides
chainerx.testing.assert_array_equal_ex(
a_chx, a_cupy.get(), strides_check=False)
a_cupy[1, 1] = 53
assert a_chx.strides == a_cupy.strides
chainerx.testing.assert_array_equal_ex(
a_chx, a_cupy.get(), strides_check=False)
@pytest.mark.cuda(2)
def test_cupy_to_chainerx_nondefault_device():
dtype = numpy.float32
with cupy.cuda.Device(1):
a_cupy = cupy.arange(6, dtype=dtype).reshape((2, 3))
a_chx = _fromrawpointer(
a_cupy.data.mem.ptr,
a_cupy.shape,
a_cupy.dtype,
a_cupy.strides,
'cuda:1',
0,
a_cupy)
assert a_chx.device.name == 'cuda:1'
chainerx.testing.assert_array_equal_ex(a_chx, a_cupy.get())
@pytest.mark.cuda(2)
def test_cupy_to_chainerx_invalid_device():
dtype = numpy.float32
with cupy.cuda.Device(1):
a_cupy = cupy.arange(6, dtype=dtype).reshape((2, 3))
with pytest.raises(chainerx.ChainerxError):
_fromrawpointer(
a_cupy.data.mem.ptr,
a_cupy.shape,
a_cupy.dtype,
a_cupy.strides,
'cuda:0',
0,
a_cupy)
@pytest.mark.cuda()
def test_chainerx_to_cupy_contiguous():
dtype = 'float32'
a_chx = chainerx.arange(6, dtype=dtype, device='cuda:0').reshape((2, 3))
a_cupy = cupy.ndarray(
a_chx.shape,
cupy.dtype(a_chx.dtype.name),
cupy.cuda.MemoryPointer(cupy.cuda.UnownedMemory(
a_chx.data_ptr + a_chx.offset,
a_chx.data_size,
a_chx,
0), 0),
strides=a_chx.strides,
)
assert a_cupy.device.id == 0
chainerx.testing.assert_array_equal_ex(a_chx, a_cupy.get())
# Write to a_cupy
a_cupy[0, 1] = 8
chainerx.testing.assert_array_equal_ex(
a_chx, numpy.array([[0, 8, 2], [3, 4, 5]], dtype))
# Write to a_chx
a_chx += 1
chainerx.testing.assert_array_equal_ex(
a_cupy.get(), numpy.array([[1, 9, 3], [4, 5, 6]], dtype))
@pytest.mark.cuda()
def test_chainerx_to_cupy_delete_cupy_first():
dtype = 'float32'
a_chx = chainerx.arange(6, dtype=dtype, device='cuda:0').reshape((2, 3))
a_cupy = cupy.ndarray(
a_chx.shape,
cupy.dtype(a_chx.dtype.name),
cupy.cuda.MemoryPointer(cupy.cuda.UnownedMemory(
a_chx.data_ptr + a_chx.offset,
a_chx.data_size,
a_chx,
0), 0),
strides=a_chx.strides,
)
del a_cupy
a_chx += 1
chainerx.testing.assert_array_equal_ex(
a_chx, numpy.array([[1, 2, 3], [4, 5, 6]], dtype))
@pytest.mark.cuda()
def test_chainerx_to_cupy_delete_chainerx_first():
dtype = 'float32'
a_chx = chainerx.arange(6, dtype=dtype, device='cuda:0').reshape((2, 3))
a_cupy = cupy.ndarray(
a_chx.shape,
cupy.dtype(a_chx.dtype.name),
cupy.cuda.MemoryPointer(cupy.cuda.UnownedMemory(
a_chx.data_ptr + a_chx.offset,
a_chx.data_size,
a_chx,
0), 0),
strides=a_chx.strides,
)
del a_chx
a_cupy += 1
chainerx.testing.assert_array_equal_ex(
a_cupy.get(), numpy.array([[1, 2, 3], [4, 5, 6]], dtype))
@pytest.mark.cuda()
def test_chainerx_to_cupy_noncontiguous():
dtype = 'float32'
a_chx = chainerx.arange(
12, dtype=dtype, device='cuda:0').reshape((2, 6))[::-1, ::2]
offset = a_chx.offset
# test preconditions
assert offset > 0
assert not a_chx.is_contiguous
a_cupy = cupy.ndarray(
a_chx.shape,
cupy.dtype(a_chx.dtype.name),
cupy.cuda.MemoryPointer(cupy.cuda.UnownedMemory(
a_chx.data_ptr,
a_chx.data_size,
a_chx,
0), offset),
strides=a_chx.strides,
)
assert a_chx.strides == a_cupy.strides
chainerx.testing.assert_array_equal_ex(
a_chx, a_cupy.get(), strides_check=False)
a_cupy[1, 1] = 53
assert a_chx.strides == a_cupy.strides
chainerx.testing.assert_array_equal_ex(
a_chx, a_cupy.get(), strides_check=False)
@pytest.mark.cuda(2)
def test_chainerx_to_cupy_nondefault_device():
dtype = 'float32'
a_chx = chainerx.arange(6, dtype=dtype, device='cuda:1').reshape((2, 3))
a_cupy = cupy.ndarray(
a_chx.shape,
cupy.dtype(a_chx.dtype.name),
cupy.cuda.MemoryPointer(cupy.cuda.UnownedMemory(
a_chx.data_ptr + a_chx.offset,
a_chx.data_size,
a_chx,
-1), 0),
strides=a_chx.strides,
)
assert a_cupy.device.id == 1
chainerx.testing.assert_array_equal_ex(a_chx, a_cupy.get())

# chainer-master/tests/chainerx_tests/unit_tests/test_constants.py

import numpy
import chainerx
def test_constants():
assert chainerx.Inf is numpy.Inf
assert chainerx.Infinity is numpy.Infinity
assert chainerx.NAN is numpy.NAN
assert chainerx.NINF is numpy.NINF
assert chainerx.NZERO is numpy.NZERO
assert chainerx.NaN is numpy.NaN
assert chainerx.PINF is numpy.PINF
assert chainerx.PZERO is numpy.PZERO
assert chainerx.e is numpy.e
assert chainerx.euler_gamma is numpy.euler_gamma
assert chainerx.inf is numpy.inf
assert chainerx.infty is numpy.infty
assert chainerx.nan is numpy.nan
assert chainerx.newaxis is numpy.newaxis
assert chainerx.pi is numpy.pi

# chainer-master/tests/chainerx_tests/unit_tests/test_device.py

import copy
import pickle
import pytest
import chainerx
_devices_data = [
{'index': 0},
{'index': 1},
]
@pytest.fixture(params=_devices_data)
def device_data1(request):
return request.param
@pytest.fixture(params=_devices_data)
def device_data2(request):
return request.param
@pytest.fixture
def device_instance1(request, device_data1):
return chainerx.get_global_default_context().get_device(
'native', device_data1['index'])
@pytest.fixture
def device_instance2(request, device_data2):
return chainerx.get_global_default_context().get_device(
'native', device_data2['index'])
@pytest.fixture
def cache_restore_device(request):
device = chainerx.get_default_device()
def restore_device():
chainerx.set_default_device(device)
request.addfinalizer(restore_device)
def test_creation():
ctx = chainerx.get_global_default_context()
backend = ctx.get_backend('native')
device = backend.get_device(0)
assert device.name == 'native:0'
assert device.backend is backend
assert device.context is ctx
assert device.index == 0
device = backend.get_device(1)
assert device.name == 'native:1'
assert device.backend is backend
assert device.context is ctx
assert device.index == 1
def test_synchronize():
ctx = chainerx.get_global_default_context()
device = ctx.get_device('native', 0)
device.synchronize()
@pytest.mark.usefixtures('cache_restore_device')
def test_default_device(device_instance1):
device = device_instance1
chainerx.set_default_device(device)
assert chainerx.get_default_device() is device
@pytest.mark.usefixtures('cache_restore_device')
def test_default_device_with_name(device_instance1):
device = device_instance1
chainerx.set_default_device(device.name)
assert chainerx.get_default_device() is device
@pytest.mark.usefixtures('cache_restore_device')
def test_eq(device_instance1, device_instance2):
if device_instance1 == device_instance2:
return
device1 = device_instance1
device2 = device_instance2
device1_1 = device1.backend.get_device(device1.index)
device1_2 = device1.backend.get_device(device1.index)
device2_1 = device2.backend.get_device(device2.index)
assert device1_1 == device1_2
assert device1_1 != device2_1
assert not (device1_1 != device1_2)
assert not (device1_1 == device2_1)
@pytest.mark.usefixtures('cache_restore_device')
def test_using_device(device_instance1, device_instance2):
if device_instance1 == device_instance2:
return
device1 = device_instance1
device2 = device_instance2
chainerx.set_default_device(device1)
with chainerx.using_device(device2) as scope:
assert chainerx.get_default_device() is device2
assert scope.device is device2
scope = chainerx.using_device(device2)
assert chainerx.get_default_device() == device1
assert scope.device is device2
with scope:
assert chainerx.get_default_device() == device2
assert scope.device is device2
assert chainerx.get_default_device() == device1
assert scope.device is device2
@pytest.mark.usefixtures('cache_restore_device')
def test_using_device_with_name(device_instance1, device_instance2):
if device_instance1 == device_instance2:
return
device1 = device_instance1
device2 = device_instance2
chainerx.set_default_device(device1)
with chainerx.using_device(device2.name) as scope:
assert chainerx.get_default_device() == device2
assert scope.device is device2
with chainerx.using_device(device2.backend.name, device2.index) as scope:
assert chainerx.get_default_device() == device2
assert scope.device is device2
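# A minimal usage sketch (added; not part of the original test file): array
# creation routines allocate on the current default device, so entering a
# using_device scope changes where new arrays live. This assumes a context
# with a second native device, as the tests above do.
def example_using_device_allocation():
    with chainerx.using_device('native:1'):
        a = chainerx.ones((2, 3), chainerx.float32)
    assert a.device.name == 'native:1'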
# TODO(niboshi): Add pickle test involving context destruction and re-creation
@pytest.mark.parametrize_device(['native:0', 'native:1', 'cuda:0'])
def test_device_pickle(device):
s = pickle.dumps(device)
device2 = pickle.loads(s)
assert device is device2
# TODO(niboshi): Add deepcopy test with arbitrary context
@pytest.mark.parametrize_device(['native:0', 'native:1', 'cuda:0'])
def test_device_deepcopy(device):
device2 = copy.deepcopy(device)
assert device is device2
| 4,278 | 26.785714 | 78 | py |
| chainer | chainer-master/tests/chainerx_tests/unit_tests/test_array_index.py |
import pytest
import chainerx
def test_newaxis():
assert chainerx.newaxis is None
@pytest.mark.parametrize('xp', [chainerx])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('shape, transpose', [
((1,), None),
((2,), None),
((2, 3), None),
((2, 3, 4), None),
((2, 3, 4, 5), None),
((2, 3, 4, 5, 6), None),
((2, 3), (0, 1)),
((2, 3, 4), (0, 2)),
((2, 3, 4, 5), (0, 2)),
((2, 3, 4, 5, 6), (1, 3)),
])
def test_array_indexing(xp, device, shape, transpose):
a = xp.zeros(shape=shape, dtype=chainerx.int8, device=device)
if transpose:
a = a.swapaxes(*transpose)
assert not a.is_contiguous
a += 1
assert a.sum() == a.size
@pytest.mark.slow
@pytest.mark.parametrize('xp', [chainerx])
@pytest.mark.parametrize_device(['cuda:0'])
@pytest.mark.parametrize('shape', [
(64, 32, 6*1024*4), # Less than 2^32 elems
(64, 32, 6*1024*512), # More than 2^32 elems
])
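# Note (added): the two shapes presumably sit just below and above the 2^32
# element-count boundary, exercising 32-bit versus 64-bit indexing paths in
# the CUDA kernels.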
def test_large_array_contiguous_indexing(xp, device, shape):
try:
a = xp.zeros(shape=shape, dtype=chainerx.int8, device=device)
except chainerx.ChainerxError as ex:
assert 'Out of memory' in ex.args
pytest.skip('Not enough memory to test large indexing')
a += 1
assert a.is_contiguous
assert a.sum() == a.size
@pytest.mark.slow
@pytest.mark.parametrize('xp', [chainerx])
@pytest.mark.parametrize_device(['cuda:0'])
@pytest.mark.parametrize('shape', [
(64, 32, 6*1024*4), # Less than 2^32 elems
(64, 32, 6*1024*512) # More than 2^32 elems
])
def test_large_array_noncontiguous_indexing(xp, device, shape):
try:
a = xp.zeros(shape=shape, dtype=chainerx.int8, device=device)
except chainerx.ChainerxError as ex:
assert 'Out of memory' in ex.args
pytest.skip('Not enough memory to test large indexing')
a = a.swapaxes(2, 0)
a += 1
assert not a.is_contiguous
assert a.sum() == a.size
| 1,955 | 28.19403 | 69 | py |
| chainer | chainer-master/tests/chainerx_tests/unit_tests/test_backend.py |
import pytest
import chainerx
def test_name_native():
backend = chainerx.get_global_default_context().get_backend('native')
assert 'native' == backend.name
def test_get_device_native():
backend = chainerx.get_global_default_context().get_backend('native')
device = backend.get_device(0)
assert 0 == device.index
assert 'native:0' == device.name
assert device is backend.get_device(0)
def test_get_device_count_native():
backend = chainerx.get_global_default_context().get_backend('native')
assert backend.get_device_count() > 0
@pytest.mark.cuda
def test_name_cuda():
backend = chainerx.get_global_default_context().get_backend('cuda')
assert 'cuda' == backend.name
@pytest.mark.cuda
def test_get_device_cuda():
backend = chainerx.get_global_default_context().get_backend('cuda')
device = backend.get_device(0)
assert 0 == device.index
assert 'cuda:0' == device.name
assert device is backend.get_device(0)
@pytest.mark.cuda
def test_get_device_count_cuda():
backend = chainerx.get_global_default_context().get_backend('cuda')
assert backend.get_device_count() > 0
| 1,149 | 25.744186 | 73 | py |
| chainer | chainer-master/tests/chainerx_tests/unit_tests/test_dtype.py |
import numpy
import chainerx
def test_py_types():
assert chainerx.bool is bool
assert chainerx.int is int
assert chainerx.float is float
def test_dtypes():
assert chainerx.dtype is numpy.dtype
assert chainerx.bool_ is numpy.bool_
assert chainerx.int8 is numpy.int8
assert chainerx.int16 is numpy.int16
assert chainerx.int32 is numpy.int32
assert chainerx.int64 is numpy.int64
assert chainerx.uint8 is numpy.uint8
assert chainerx.float16 is numpy.float16
assert chainerx.float32 is numpy.float32
assert chainerx.float64 is numpy.float64
| 593 | 24.826087 | 44 | py |
| chainer | chainer-master/tests/chainerx_tests/unit_tests/test_core_module.py |
import chainerx
def test_core():
assert chainerx.__name__ == 'chainerx'
def test_is_available():
assert chainerx.is_available()
| 140 | 13.1 | 42 | py |
| chainer | chainer-master/tests/chainerx_tests/unit_tests/test_context.py |
import pytest
import chainerx
@pytest.fixture
def cache_restore_context(request):
device = chainerx.get_default_device()
context = chainerx.get_default_context()
global_context = chainerx.get_global_default_context()
def restore_context():
chainerx.set_global_default_context(global_context)
chainerx.set_default_context(context)
chainerx.set_default_device(device)
request.addfinalizer(restore_context)
def test_creation():
chainerx.Context()
def test_get_backend():
context = chainerx.Context()
backend = context.get_backend('native')
assert backend.name == 'native'
assert context.get_backend('native') is backend
with pytest.raises(chainerx.BackendError):
context.get_backend('something_that_does_not_exist')
def test_get_device():
context = chainerx.Context()
device = context.get_device('native')
assert device.name == 'native:0'
assert device.index == 0
assert context.get_device('native:0') is device
assert context.get_device('native', 0) is device
with pytest.raises(chainerx.BackendError):
context.get_device('something_that_does_not_exist:0')
@pytest.mark.usefixtures('cache_restore_context')
def test_chainerx_get_backend():
context = chainerx.Context()
with chainerx.context_scope(context):
backend = chainerx.get_backend('native')
assert backend.context is context
assert backend.name == 'native'
@pytest.mark.usefixtures('cache_restore_context')
def test_chainerx_get_device():
context = chainerx.Context()
with chainerx.context_scope(context):
device = chainerx.get_device('native:0')
assert device.context is context
assert device.name == 'native:0'
assert device is chainerx.get_device('native', 0)
assert device is chainerx.get_device(device)
assert chainerx.get_default_device() is chainerx.get_device()
@pytest.mark.usefixtures('cache_restore_context')
def test_default_context():
context = chainerx.Context()
global_context = chainerx.Context()
chainerx.set_global_default_context(None)
chainerx.set_default_context(None)
with pytest.raises(chainerx.ContextError):
chainerx.get_default_context()
chainerx.set_global_default_context(None)
chainerx.set_default_context(context)
assert chainerx.get_default_context() is context
chainerx.set_global_default_context(global_context)
chainerx.set_default_context(None)
assert chainerx.get_default_context() is global_context
chainerx.set_global_default_context(global_context)
chainerx.set_default_context(context)
assert chainerx.get_default_context() is context
@pytest.mark.usefixtures('cache_restore_context')
def test_global_default_context():
context = chainerx.Context()
chainerx.set_global_default_context(None)
with pytest.raises(chainerx.ContextError):
chainerx.get_global_default_context()
chainerx.set_global_default_context(context)
assert chainerx.get_global_default_context() is context
@pytest.mark.usefixtures('cache_restore_context')
def test_context_scope():
context1 = chainerx.Context()
context2 = chainerx.Context()
chainerx.set_default_context(context1)
with chainerx.context_scope(context2):
assert chainerx.get_default_context() is context2
scope = chainerx.context_scope(context2)
assert chainerx.get_default_context() is context1
with scope:
assert chainerx.get_default_context() is context2
assert chainerx.get_default_context() is context1
| 3,609 | 29.336134 | 69 | py |
| chainer | chainer-master/tests/chainerx_tests/unit_tests/test_check_backward.py |
import pytest
import chainerx
def _check_backward_unary(fprop):
x = chainerx.array([1, 2, 1], chainerx.float32)
x.require_grad()
chainerx.check_backward(
fprop,
(x,),
(chainerx.array([0, -2, 1], chainerx.float32),),
(chainerx.full((3,), 1e-3, chainerx.float32),),
)
def test_correct_backward_unary():
_check_backward_unary(lambda xs: (xs[0] * xs[0],))
def test_incorrect_backward_unary():
# as_grad_stopped() makes the backward pass not correspond to the
# mathematical differentiation of the forward computation, which should be
# detected by check_backward.
def fprop(xs):
x, = xs
return (x * x).as_grad_stopped() + x,
with pytest.raises(chainerx.GradientCheckError):
_check_backward_unary(fprop)
def _check_backward_binary(fprop):
chainerx.check_backward(
fprop,
(chainerx.array([1, -2, 1], chainerx.float32).require_grad(),
chainerx.array([0, 1, 2], chainerx.float32).require_grad()),
(chainerx.array([1, -2, 3], chainerx.float32),),
(chainerx.full((3,), 1e-3, chainerx.float32),
chainerx.full((3,), 1e-3, chainerx.float32)),
)
def test_correct_backward_binary():
_check_backward_binary(lambda xs: (xs[0] * xs[1],))
def test_incorrect_backward_binary():
# See the comment of test_incorrect_backward_unary().
def fprop(xs):
x, y = xs
return (x * y).as_grad_stopped() + x + y,
with pytest.raises(chainerx.GradientCheckError):
_check_backward_binary(fprop)
def test_correct_double_backward_unary():
chainerx.check_double_backward(
lambda xs: (xs[0] * xs[0],),
(chainerx.array([1, 2, 3], chainerx.float32).require_grad(),),
(chainerx.ones((3,), chainerx.float32).require_grad(),),
(chainerx.ones((3,), chainerx.float32),),
(chainerx.full((3,), 1e-3, chainerx.float32),
chainerx.full((3,), 1e-3, chainerx.float32)),
1e-4,
1e-3,
)
def test_correct_double_backward_binary():
chainerx.check_double_backward(
lambda xs: (xs[0] * xs[1],),
(chainerx.array([1, 2, 3], chainerx.float32).require_grad(),
chainerx.ones((3,), chainerx.float32).require_grad()),
(chainerx.ones((3,), chainerx.float32).require_grad(),),
(chainerx.ones((3,), chainerx.float32),
chainerx.ones((3,), chainerx.float32)),
(chainerx.full((3,), 1e-3, chainerx.float32),
chainerx.full((3,), 1e-3, chainerx.float32),
chainerx.full((3,), 1e-3, chainerx.float32)),
1e-4,
1e-3,
)
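# A conceptual sketch (added; the helper below is hypothetical, not part of
# chainerx): check_backward compares analytic gradients against numerical
# ones. For a scalar function f this amounts to a central finite difference.
def numerical_grad_1d(f, x, eps=1e-3):
    # Central difference approximation of df/dx at x.
    return (f(x + eps) - f(x - eps)) / (2 * eps)

# For f(x) = x * x the analytic gradient at x = 3 is 2 * x = 6.
assert abs(numerical_grad_1d(lambda v: v * v, 3.0) - 6.0) < 1e-6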
| 2,616 | 30.154762 | 77 | py |
| chainer | chainer-master/tests/chainerx_tests/unit_tests/test_array.py |
import copy
import math
import pickle
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
def _check_array(
array, expected_dtype, expected_shape, expected_data_list=None,
device=None):
expected_dtype = chainerx.dtype(expected_dtype)
assert isinstance(array.dtype, chainerx.dtype)
assert isinstance(array.shape, tuple)
assert array.dtype == expected_dtype
assert array.shape == expected_shape
assert array.itemsize == expected_dtype.itemsize
assert array.size == array_utils.total_size(expected_shape)
assert array.nbytes == expected_dtype.itemsize * \
array_utils.total_size(expected_shape)
if expected_data_list is not None:
assert array._debug_flat_data == expected_data_list
assert array.is_contiguous
array_utils.check_device(array, device)
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_init(shape, dtype_spec):
array = chainerx.ndarray(shape, dtype_spec)
_check_array(array, dtype_spec, shape)
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_init_with_device(shape, dtype_spec, device):
array = chainerx.ndarray(shape, dtype_spec, device=device)
_check_array(array, dtype_spec, shape, device=device)
@pytest.mark.parametrize('value', [
0, 1, -1, 0.1, 0.9, -0.1, -0.9, 1.1, -1.1, 1.9, -1.9,
True, False, float('inf'), -float('inf'), float('nan'), -0.0
])
@pytest.mark.parametrize('shape', [
(), (1,), (1, 1, 1)
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_cast_scalar(device, value, shape, dtype):
np_dtype = numpy.dtype(dtype)
try:
np_value = np_dtype.type(value)
except (ValueError, OverflowError):
return
a_np = numpy.asarray([np_value], dtype).reshape(shape)
a_chx = chainerx.array(a_np)
def should_cast_succeed(typ):
try:
typ(np_value)
return True
except (ValueError, OverflowError):
return False
# Cast to float
if should_cast_succeed(float):
assert type(float(a_chx)) is float
if math.isnan(float(a_np)):
assert math.isnan(float(a_chx))
else:
assert float(a_np) == float(a_chx)
# Cast to int
if should_cast_succeed(int):
assert type(int(a_chx)) is int
assert int(a_np) == int(a_chx)
# Cast to bool
if should_cast_succeed(bool):
assert type(bool(a_chx)) is bool
assert bool(a_np) == bool(a_chx)
# item()
item_actual = a_chx.item()
np_dtype = numpy.dtype(dtype)
item_expected = np_dtype.type(value).item()
assert isinstance(item_actual, type(item_expected))
assert (
(numpy.isnan(item_actual) and numpy.isnan(item_expected))
or item_actual == item_expected)
@pytest.mark.parametrize('shape', [
(0,), (1, 0), (2,), (1, 2), (2, 3),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_cast_scalar_invalid(device, shape):
dtype = chainerx.float32
a = chainerx.ones(shape, dtype)
with pytest.raises(chainerx.DimensionError):
float(a)
a = chainerx.ones(shape, dtype)
with pytest.raises(chainerx.DimensionError):
int(a)
a = chainerx.ones(shape, dtype)
with pytest.raises(chainerx.DimensionError):
bool(a)
a = chainerx.ones(shape, dtype)
with pytest.raises(chainerx.DimensionError):
a.item()
def test_to_device():
a = chainerx.ones((2,), chainerx.float32, device='native:0')
dst_device = chainerx.get_device('native:1')
b0 = a.to_device(dst_device) # by device instance
assert b0.device is dst_device
chainerx.testing.assert_array_equal_ex(a, b0)
b1 = a.to_device('native:1') # by device name
assert b1.device is dst_device
chainerx.testing.assert_array_equal_ex(a, b1)
b2 = a.to_device('native', 1) # by backend name and index
assert b2.device is dst_device
chainerx.testing.assert_array_equal_ex(a, b2)
def _check_to_numpy(a_np, a_chx, device, copy):
chainerx.testing.assert_array_equal_ex(a_chx, a_np, strides_check=False)
if a_np.size > 0:
# Test whether the buffer is shared.
a_np.fill(1)
expected = not copy and device.backend.name == 'native'
actual = numpy.array_equal(a_np, chainerx.to_numpy(a_chx))
assert expected == actual
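# Illustrative sketch (added; not in the original file): on a native device
# with copy=False, chainerx.to_numpy is expected to share the underlying
# buffer, so in-place writes through NumPy become visible from the ChainerX
# array.
def example_to_numpy_buffer_sharing():
    a_chx = chainerx.zeros((3,), chainerx.float32, device='native:0')
    a_np = chainerx.to_numpy(a_chx, False)  # copy=False
    a_np.fill(1)
    assert a_chx._debug_flat_data == [1.0, 1.0, 1.0]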
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('copy', [True, False])
def test_to_numpy(shape, dtype, device, copy):
a_chx = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
a_np = chainerx.to_numpy(a_chx, copy)
_check_to_numpy(a_np, a_chx, device, copy)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('copy', [True, False])
def test_to_numpy_non_contiguous(shape, dtype, device, copy):
a_chx = array_utils.create_dummy_ndarray(chainerx, shape, dtype).T
a_np = chainerx.to_numpy(a_chx, copy)
_check_to_numpy(a_np, a_chx, device, copy)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('copy', [True, False])
def test_to_numpy_positive_offset(device, copy):
a_chx = chainerx.arange(6).reshape(2, 3)[:, 1:]
a_np = chainerx.to_numpy(a_chx, copy)
_check_to_numpy(a_np, a_chx, device, copy)
def test_view(shape, dtype):
array = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
view = array.view()
chainerx.testing.assert_array_equal_ex(view, array)
assert view.device is chainerx.get_default_device()
# inplace modification
if array.size > 0:
array *= array
assert array._debug_flat_data == view._debug_flat_data
def test_view_must_not_share_properties():
array = chainerx.array([3.0], chainerx.float32)
view = array.view()
# Test preconditions
assert not array.is_grad_required()
assert not view.is_grad_required()
assert not array.is_backprop_required()
assert not view.is_backprop_required()
array.require_grad()
assert array.is_grad_required()
assert array.is_backprop_required()
assert not view.is_grad_required(
), 'A view must not share is_grad_required with the original array.'
assert not view.is_backprop_required(
), 'A view must not share is_backprop_required with the original array.'
@chainerx.testing.numpy_chainerx_array_equal(strides_check=False)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('copy', [False, True])
# TODO(beam2d): use fixtures.
@pytest.mark.parametrize(
'src_dtype',
['bool_', 'uint8', 'int8', 'int16', 'int32', 'int64', 'float16',
'float32', 'float64'])
@pytest.mark.parametrize(
'dst_dtype',
['bool_', 'uint8', 'int8', 'int16', 'int32', 'int64', 'float16',
'float32', 'float64'])
def test_astype(xp, shape, device, copy, src_dtype, dst_dtype):
a = array_utils.create_dummy_ndarray(xp, shape, src_dtype)
# Casting a negative value to an unsigned int behaves differently in CUDA.
if device.name == 'cuda:0' and \
src_dtype in chainerx.testing.signed_dtypes and \
dst_dtype in chainerx.testing.unsigned_dtypes:
a = xp.maximum(a, 0)
b = a.astype(dst_dtype, copy=copy)
assert a is b if src_dtype == dst_dtype and not copy else a is not b
return b
def test_as_grad_stopped_copy(shape, float_dtype):
dtype = float_dtype
def check(array_a, array_b):
chainerx.testing.assert_array_equal_ex(
array_a, array_b, strides_check=False)
assert array_b.is_contiguous
# Check memory addresses only if >0 bytes are allocated
if array_a.size > 0:
assert (array_a._debug_data_memory_address
!= array_b._debug_data_memory_address)
# Stop gradients on all graphs
with chainerx.backprop_scope('bp1') as bp1, \
chainerx.backprop_scope('bp2') as bp2, \
chainerx.backprop_scope('bp3') as bp3:
a = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
a.require_grad(bp1)
a.require_grad(bp2)
assert a.is_grad_required(bp1)
assert a.is_grad_required(bp2)
assert a.is_backprop_required(bp1)
assert a.is_backprop_required(bp2)
b = a.as_grad_stopped(copy=True)
check(a, b)
assert not b.is_grad_required(bp1)
assert not b.is_grad_required(bp2)
assert not b.is_backprop_required(bp1)
assert not b.is_backprop_required(bp2)
assert a.is_grad_required(bp1)
assert a.is_grad_required(bp2)
assert a.is_backprop_required(bp1)
assert a.is_backprop_required(bp2)
# Stop gradients on some graphs
a = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
a.require_grad(bp1)
a.require_grad(bp2)
a.require_grad(bp3)
assert a.is_grad_required(bp1)
assert a.is_grad_required(bp2)
assert a.is_grad_required(bp3)
assert a.is_backprop_required(bp1)
assert a.is_backprop_required(bp2)
assert a.is_backprop_required(bp3)
b = a.as_grad_stopped([bp1, bp2], copy=True)
check(a, b)
assert not b.is_grad_required(bp1)
assert not b.is_grad_required(bp2)
assert not b.is_grad_required(bp3)
assert not b.is_backprop_required(bp1)
assert not b.is_backprop_required(bp2)
assert b.is_backprop_required(bp3)
assert a.is_grad_required(bp1)
assert a.is_grad_required(bp2)
assert a.is_grad_required(bp3)
assert a.is_backprop_required(bp1)
assert a.is_backprop_required(bp2)
assert a.is_backprop_required(bp3)
def test_as_grad_stopped_view(shape, float_dtype):
dtype = float_dtype
# Stop gradients on all graphs
with chainerx.backprop_scope('bp1') as bp1, \
chainerx.backprop_scope('bp2') as bp2, \
chainerx.backprop_scope('bp3') as bp3:
a = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
a.require_grad(bp1)
a.require_grad(bp2)
assert a.is_grad_required(bp1)
assert a.is_grad_required(bp2)
assert a.is_backprop_required(bp1)
assert a.is_backprop_required(bp2)
b = a.as_grad_stopped(copy=False)
chainerx.testing.assert_array_equal_ex(a, b)
assert b.device is a.device
assert not b.is_grad_required(bp1)
assert not b.is_grad_required(bp2)
assert not b.is_backprop_required(bp1)
assert not b.is_backprop_required(bp2)
assert a.is_backprop_required(bp1)
assert a.is_backprop_required(bp2)
# Stop gradients on some graphs
a = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
a.require_grad(bp1)
a.require_grad(bp2)
a.require_grad(bp3)
assert a.is_grad_required(bp1)
assert a.is_grad_required(bp2)
assert a.is_grad_required(bp3)
assert a.is_backprop_required(bp1)
assert a.is_backprop_required(bp2)
assert a.is_backprop_required(bp3)
b = a.as_grad_stopped([bp1, bp2], copy=False)
chainerx.testing.assert_array_equal_ex(a, b)
assert b.device is a.device
assert not b.is_grad_required(bp1)
assert not b.is_grad_required(bp2)
assert not b.is_grad_required(bp3)
assert not b.is_backprop_required(bp1)
assert not b.is_backprop_required(bp2)
assert b.is_backprop_required(bp3)
assert a.is_grad_required(bp1)
assert a.is_grad_required(bp2)
assert a.is_grad_required(bp3)
assert a.is_backprop_required(bp1)
assert a.is_backprop_required(bp2)
assert a.is_backprop_required(bp3)
def test_array_repr():
array = chainerx.array([], chainerx.bool_)
assert ('array([], shape=(0,), dtype=bool, '
'device=\'native:0\')' == str(array))
array = chainerx.array([False], chainerx.bool_)
assert ('array([False], shape=(1,), dtype=bool, '
'device=\'native:0\')' == str(array))
array = chainerx.array([[0, 1, 2], [3, 4, 5]], chainerx.int8)
assert ('array([[0, 1, 2],\n'
' [3, 4, 5]], shape=(2, 3), dtype=int8, '
'device=\'native:0\')') == str(array)
array = chainerx.array([[0, 1, 2], [3.25, 4, 5]], chainerx.float32)
assert ('array([[0. , 1. , 2. ],\n'
' [3.25, 4. , 5. ]], shape=(2, 3), dtype=float32, '
'device=\'native:0\')') == str(array)
def test_array_repr_default_backprop_id():
array = chainerx.array([3.0], chainerx.float32)
array.require_grad()
assert ('array([3.], shape=(1,), dtype=float32, device=\'native:0\', '
'backprop_ids=[\'<default>\'])' == str(array))
def test_array_repr_expired_backprop_id():
with chainerx.backprop_scope('bp1') as bp1:
array = chainerx.array([3.0], chainerx.float32)
array.require_grad(bp1)
assert ('array([3.], shape=(1,), dtype=float32, device=\'native:0\', '
'backprop_ids=[\'<expired>\'])' == str(array))
@pytest.mark.parametrize('backprop_args', [(None,), ()])
def test_array_require_grad_without_backprop_id(backprop_args):
array = chainerx.array([1, 1, 1], chainerx.float32)
assert not array.is_grad_required(*backprop_args)
assert not array.is_backprop_required(*backprop_args)
assert not array.is_backprop_required(chainerx.anygraph)
array.require_grad(*backprop_args)
assert array.is_grad_required(*backprop_args)
assert array.is_backprop_required(*backprop_args)
assert array.is_backprop_required(chainerx.anygraph)
# Repeated calls should not fail, but do nothing
array.require_grad(*backprop_args)
assert array.is_grad_required(*backprop_args)
assert array.is_backprop_required(*backprop_args)
assert array.is_backprop_required(chainerx.anygraph)
def test_array_require_grad_with_backprop_id():
array = chainerx.array([1, 1, 1], chainerx.float32)
with chainerx.backprop_scope('bp1') as bp1:
assert not array.is_backprop_required(bp1)
array.require_grad(bp1)
assert array.is_grad_required(bp1)
assert array.is_backprop_required(bp1)
# Repeated calls should not fail, but do nothing
array.require_grad(bp1)
assert array.is_grad_required(bp1)
assert array.is_backprop_required(bp1)
# keyword arguments
with chainerx.backprop_scope('bp2') as bp2:
assert not array.is_backprop_required(backprop_id=bp2)
array.require_grad(backprop_id=bp2)
assert array.is_grad_required(bp2)
assert array.is_grad_required(backprop_id=bp2)
assert array.is_backprop_required(bp2)
assert array.is_backprop_required(backprop_id=bp2)
# Repeated calls should not fail, but do nothing
array.require_grad(backprop_id=bp2)
assert array.is_grad_required(backprop_id=bp2)
assert array.is_backprop_required(backprop_id=bp2)
@pytest.mark.parametrize('backprop_args', [(None,), ()])
def test_array_grad_without_backprop_id(backprop_args):
array = chainerx.array([1., 1., 1.], chainerx.float32)
grad = chainerx.array([0.5, 0.5, 0.5], chainerx.float32)
with pytest.raises(chainerx.ChainerxError):
array.get_grad(*backprop_args)
with pytest.raises(chainerx.ChainerxError):
array.set_grad(grad, *backprop_args)
with pytest.raises(chainerx.ChainerxError):
array.cleargrad(*backprop_args)
# Gradient methods
array.require_grad().set_grad(grad, *backprop_args)
assert array.get_grad(*backprop_args) is not None
assert array.get_grad(
*backprop_args)._debug_flat_data == grad._debug_flat_data
array.cleargrad(*backprop_args) # clear
assert array.get_grad(*backprop_args) is None
array.set_grad(grad, *backprop_args)
assert array.get_grad(*backprop_args) is not None
assert array.get_grad(
*backprop_args)._debug_flat_data == grad._debug_flat_data
array.set_grad(None, *backprop_args) # clear
assert array.get_grad(*backprop_args) is None
# Gradient attributes
array.grad = grad
assert array.get_grad(*backprop_args) is not None
assert array.get_grad(*backprop_args) is array.grad
array.grad = None # clear
assert array.get_grad(*backprop_args) is None
def test_array_grad_with_backprop_id():
array = chainerx.array([1., 1., 1.], chainerx.float32)
grad = chainerx.array([0.5, 0.5, 0.5], chainerx.float32)
with chainerx.backprop_scope('bp1') as bp1:
with pytest.raises(chainerx.ChainerxError):
array.get_grad(bp1)
with pytest.raises(chainerx.ChainerxError):
array.set_grad(grad, bp1)
with pytest.raises(chainerx.ChainerxError):
array.cleargrad(bp1)
array.require_grad(bp1).set_grad(grad, bp1)
assert array.get_grad(bp1) is not None
assert array.get_grad(bp1)._debug_flat_data == grad._debug_flat_data
array.cleargrad(bp1) # clear
assert array.get_grad(bp1) is None
# keyword arguments
with chainerx.backprop_scope('bp2') as bp2:
with pytest.raises(chainerx.ChainerxError):
array.get_grad(backprop_id=bp2)
with pytest.raises(chainerx.ChainerxError):
array.set_grad(grad, backprop_id=bp2)
with pytest.raises(chainerx.ChainerxError):
array.cleargrad(backprop_id=bp2)
array.require_grad(backprop_id=bp2).set_grad(grad, backprop_id=bp2)
assert array.get_grad(bp2) is not None
assert array.get_grad(backprop_id=bp2) is not None
assert array.get_grad(bp2)._debug_flat_data == grad._debug_flat_data
assert array.get_grad(
backprop_id=bp2)._debug_flat_data == grad._debug_flat_data
array.cleargrad(backprop_id=bp2) # clear
assert array.get_grad(bp2) is None
assert array.get_grad(backprop_id=bp2) is None
def test_array_grad_no_deepcopy():
dtype = chainerx.float32
array = chainerx.array([2, 5, 1], dtype)
grad = chainerx.array([5, 7, 8], dtype)
# Set grad
array.require_grad().set_grad(grad)
# Retrieve grad twice and assert they share the same underlying data
grad1 = array.get_grad()
grad2 = array.get_grad()
grad1 *= chainerx.array([2, 2, 2], dtype)
assert grad2._debug_flat_data == [
10, 14, 16], 'grad getter must not incur a copy'
def test_array_cleargrad():
dtype = chainerx.float32
array = chainerx.array([2, 5, 1], dtype)
grad = chainerx.array([5, 7, 8], dtype)
# Set grad, get it and save it
array.require_grad().set_grad(grad)
del grad
saved_grad = array.get_grad()
# Clear grad
array.cleargrad()
assert array.get_grad() is None
assert saved_grad._debug_flat_data == [
5, 7, 8], 'Clearing grad must not affect previously retrieved grad'
def test_array_grad_identity():
array = chainerx.array([1., 1., 1.], chainerx.float32)
grad = chainerx.array([0.5, 0.5, 0.5], chainerx.float32)
array.require_grad().set_grad(grad)
assert array.get_grad() is grad, (
'grad must preserve physical identity')
assert array.get_grad() is grad, (
'grad must preserve physical identity in repeated retrieval')
# array.grad and grad share the same data
grad += chainerx.array([2, 2, 2], chainerx.float32)
assert array.get_grad()._debug_flat_data == [
2.5, 2.5, 2.5], 'A modification to grad must affect array.grad'
array_grad = array.get_grad()
array_grad += chainerx.array([1, 1, 1], chainerx.float32)
assert grad._debug_flat_data == [
3.5, 3.5, 3.5], 'A modification to array.grad must affect grad'
def test_array_require_grad_multiple_graphs_forward():
x1 = chainerx.array([1, 1, 1], chainerx.float32)
x2 = chainerx.array([1, 1, 1], chainerx.float32)
with chainerx.backprop_scope('bp1') as bp1, \
chainerx.backprop_scope('bp2') as bp2, \
chainerx.backprop_scope('bp3') as bp3:
x1.require_grad(bp1)
x2.require_grad(bp2)
assert x1.is_grad_required(bp1)
assert x2.is_grad_required(bp2)
assert x1.is_backprop_required(bp1)
assert x2.is_backprop_required(bp2)
assert not x1.is_grad_required(bp2)
assert not x2.is_grad_required(bp1)
assert not x1.is_backprop_required(bp2)
assert not x2.is_backprop_required(bp1)
y = x1 * x2
assert not y.is_grad_required(bp1)
assert not y.is_grad_required(bp2)
assert y.is_backprop_required(bp1)
assert y.is_backprop_required(bp2)
# No unspecified graphs are generated
assert not y.is_backprop_required(None)
assert not y.is_backprop_required(bp3)
@pytest.mark.parametrize(
'invalid_shape,invalid_dtype,invalid_device',
[
(None, chainerx.float32, None),
((2, 1), None, None),
(None, None, 'native:1'),
])
def test_array_grad_invalid_grad(invalid_shape, invalid_dtype, invalid_device):
shape = (3, 1)
dtype = chainerx.float64
device = 'native:0'
array = chainerx.ones(shape, dtype, device=device)
array.require_grad()
grad_shape = shape if invalid_shape is None else invalid_shape
grad_dtype = dtype if invalid_dtype is None else invalid_dtype
grad_device = device if invalid_device is None else invalid_device
invalid_grad = chainerx.ones(
grad_shape, grad_dtype, device=grad_device)
with pytest.raises(chainerx.GradientError):
array.set_grad(invalid_grad)
with pytest.raises(chainerx.GradientError):
array.grad = invalid_grad
def test_array_backward():
with chainerx.backprop_scope('bp1') as bp1:
x1 = chainerx.array(
[1, 1, 1], chainerx.float32).require_grad(backprop_id=bp1)
x2 = chainerx.array(
[1, 1, 1], chainerx.float32).require_grad(backprop_id=bp1)
y = x1 * x2
y.backward(backprop_id=bp1, enable_double_backprop=True)
gx1 = x1.get_grad(backprop_id=bp1)
x1.set_grad(None, backprop_id=bp1)
gx1.backward(backprop_id=bp1)
with pytest.raises(chainerx.ChainerxError):
gx1.get_grad(backprop_id=bp1)
@chainerx.testing.numpy_chainerx_array_equal(strides_check=False)
@pytest.mark.parametrize(
'value', [-1, 0, 1, 2, 2.3, float('inf'), float('nan')])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_fill(xp, shape, dtype, value, device):
a = xp.empty(shape, dtype)
a.fill(value)
return a
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize(
'slice1', [(0, 30, 1), (30, 0, -1), (10, 40, 7), (40, 10, -7)])
@pytest.mark.parametrize(
'slice2', [(0, 50, 1), (50, 0, -1), (10, 40, 7), (40, 10, -7)])
def test_array_to_numpy_identity(device, slice1, slice2):
start1, end1, step1 = slice1
start2, end2, step2 = slice2
x = numpy.arange(1500).reshape((30, 50))[
start1:end1:step1, start2:end2:step2]
y = chainerx.array(x)
z = chainerx.to_numpy(y)
chainerx.testing.assert_array_equal_ex(x, y, strides_check=False)
chainerx.testing.assert_array_equal_ex(x, z, strides_check=False)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize(
'slice1', [(0, 30, 1), (30, 0, -1), (10, 40, 7), (40, 10, -7)])
@pytest.mark.parametrize(
'slice2', [(0, 50, 1), (50, 0, -1), (10, 40, 7), (40, 10, -7)])
def test_asarray_to_numpy_identity(device, slice1, slice2):
start1, end1, step1 = slice1
start2, end2, step2 = slice2
x = numpy.arange(1500).reshape((30, 50))[
start1:end1:step1, start2:end2:step2]
y = chainerx.asarray(x)
z = chainerx.to_numpy(y)
chainerx.testing.assert_array_equal_ex(x, y)
chainerx.testing.assert_array_equal_ex(x, z, strides_check=False)
# TODO(niboshi): Add pickle test involving context destruction and re-creation
@pytest.mark.parametrize_device(['native:0', 'native:1', 'cuda:0'])
def test_array_pickle(device):
arr = chainerx.array([1, 2], chainerx.float32, device=device)
s = pickle.dumps(arr)
del arr
arr2 = pickle.loads(s)
assert isinstance(arr2, chainerx.ndarray)
assert arr2.device is device
assert arr2.dtype == chainerx.float32
chainerx.testing.assert_array_equal(
arr2,
chainerx.array([1, 2], chainerx.float32))
# TODO(niboshi): Add pickle test involving context destruction and re-creation
@pytest.mark.parametrize_device_name(['native:0', 'native:1', 'cuda:0'])
def test_array_pickle_device_name(device_name):
arr = chainerx.array([1, 2], chainerx.float32, device=device_name)
s = pickle.dumps(arr)
del arr
arr2 = pickle.loads(s)
assert isinstance(arr2, chainerx.ndarray)
assert arr2.device.name == device_name
assert arr2.dtype == chainerx.float32
chainerx.testing.assert_array_equal(
arr2,
chainerx.array([1, 2], chainerx.float32))
# TODO(niboshi): Add deepcopy test with arbitrary context
@pytest.mark.parametrize_device(['native:0', 'native:1', 'cuda:0'])
def test_array_deepcopy(device):
arr = chainerx.array([1, 2], chainerx.float32, device=device)
arr2 = copy.deepcopy(arr)
assert isinstance(arr2, chainerx.ndarray)
assert arr2.device is device
assert arr2.dtype == chainerx.float32
chainerx.testing.assert_array_equal(
arr2,
chainerx.array([1, 2], chainerx.float32))
def test_is_chained():
arr = chainerx.array([1, 2], chainerx.float32)
with pytest.raises(chainerx.ChainerxError):
arr._is_chained()
arr.require_grad()
assert not arr._is_chained()
arr2 = 2 * arr
assert arr2._is_chained()
| 25,885 | 33.560748 | 79 | py |
| chainer | chainer-master/tests/chainerx_tests/unit_tests/test_backprop_mode.py |
import pytest
import chainerx
def test_no_backprop_mode():
with chainerx.backprop_scope('bp1') as bp1, \
chainerx.backprop_scope('bp2') as bp2:
assert chainerx.is_backprop_required()
assert chainerx.is_backprop_required(bp1)
assert chainerx.is_backprop_required(bp2)
with chainerx.no_backprop_mode():
assert not chainerx.is_backprop_required()
assert not chainerx.is_backprop_required(bp1)
assert not chainerx.is_backprop_required(bp2)
assert chainerx.is_backprop_required()
assert chainerx.is_backprop_required(bp1)
assert chainerx.is_backprop_required(bp2)
with chainerx.no_backprop_mode(chainerx.get_default_context()):
assert not chainerx.is_backprop_required()
assert not chainerx.is_backprop_required(bp1)
assert not chainerx.is_backprop_required(bp2)
assert chainerx.is_backprop_required()
assert chainerx.is_backprop_required(bp1)
assert chainerx.is_backprop_required(bp2)
with chainerx.no_backprop_mode(bp1):
assert chainerx.is_backprop_required()
assert not chainerx.is_backprop_required(bp1)
assert chainerx.is_backprop_required(bp2)
assert chainerx.is_backprop_required()
assert chainerx.is_backprop_required(bp1)
assert chainerx.is_backprop_required(bp2)
with chainerx.no_backprop_mode((bp1, bp2)):
assert chainerx.is_backprop_required()
assert not chainerx.is_backprop_required(bp1)
assert not chainerx.is_backprop_required(bp2)
assert chainerx.is_backprop_required()
assert chainerx.is_backprop_required(bp1)
assert chainerx.is_backprop_required(bp2)
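# A minimal effect sketch (added; assumes semantics analogous to Chainer's):
# inside no_backprop_mode no graph is recorded, so the result of an operation
# on a grad-required input does not require backprop.
def example_no_backprop_mode_effect():
    x = chainerx.array([1.0], chainerx.float32).require_grad()
    with chainerx.no_backprop_mode():
        y = 2 * x
    assert not y.is_backprop_required()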
def test_force_backprop_mode():
with chainerx.backprop_scope('bp1') as bp1, \
chainerx.backprop_scope('bp2') as bp2:
with chainerx.no_backprop_mode():
assert not chainerx.is_backprop_required()
assert not chainerx.is_backprop_required(bp1)
assert not chainerx.is_backprop_required(bp2)
with chainerx.force_backprop_mode():
assert chainerx.is_backprop_required()
assert chainerx.is_backprop_required(bp1)
assert chainerx.is_backprop_required(bp2)
assert not chainerx.is_backprop_required()
assert not chainerx.is_backprop_required(bp1)
assert not chainerx.is_backprop_required(bp2)
with chainerx.force_backprop_mode(chainerx.get_default_context()):
assert chainerx.is_backprop_required()
assert chainerx.is_backprop_required(bp1)
assert chainerx.is_backprop_required(bp2)
assert not chainerx.is_backprop_required()
assert not chainerx.is_backprop_required(bp1)
assert not chainerx.is_backprop_required(bp2)
with chainerx.force_backprop_mode(bp1):
assert not chainerx.is_backprop_required()
assert chainerx.is_backprop_required(bp1)
assert not chainerx.is_backprop_required(bp2)
assert not chainerx.is_backprop_required()
assert not chainerx.is_backprop_required(bp1)
assert not chainerx.is_backprop_required(bp2)
with chainerx.force_backprop_mode((bp1, bp2)):
assert not chainerx.is_backprop_required()
assert chainerx.is_backprop_required(bp1)
assert chainerx.is_backprop_required(bp2)
assert not chainerx.is_backprop_required()
assert not chainerx.is_backprop_required(bp1)
assert not chainerx.is_backprop_required(bp2)
with chainerx.force_backprop_mode():
assert chainerx.is_backprop_required()
assert chainerx.is_backprop_required(bp1)
assert chainerx.is_backprop_required(bp2)
def test_is_backprop_required():
current_context = chainerx.get_default_context()
another_context = chainerx.Context()
with chainerx.backprop_scope('bp1') as bp1, \
chainerx.backprop_scope('bp2') as bp2:
with chainerx.no_backprop_mode():
with chainerx.force_backprop_mode(bp1):
assert not chainerx.is_backprop_required()
assert chainerx.is_backprop_required(bp1)
assert not chainerx.is_backprop_required(bp2)
assert not chainerx.is_backprop_required(
context=current_context)
assert chainerx.is_backprop_required(context=another_context)
with pytest.raises(TypeError):
chainerx.is_backprop_required(context='foo')
| 4,743 | 42.522936 | 78 | py |
| chainer | chainer-master/tests/chainerx_tests/unit_tests/__init__.py | | 0 | 0 | 0 | py |
| chainer | chainer-master/tests/chainerx_tests/unit_tests/test_graph.py |
import chainerx
def test_anygraph():
assert hasattr(chainerx, 'anygraph')
| 80 | 12.5 | 40 | py |
| chainer | chainer-master/tests/chainerx_tests/unit_tests/test_backward.py |
import chainerx
import pytest
def _assert_arrays_equal(array1, array2):
if array1 is None:
assert array1 == array2
else:
assert array1.dtype == array2.dtype
assert array1.shape == array2.shape
assert array1._debug_flat_data == array2._debug_flat_data
def _check_backward(fprop, xs, expected_gxs, gys=None, backprop_id=None):
# Checks for test validity.
assert callable(fprop)
assert isinstance(xs, tuple)
assert isinstance(expected_gxs, tuple)
assert len(xs) == len(expected_gxs)
assert all([isinstance(a, chainerx.ndarray) for a in xs])
assert all([isinstance(a, chainerx.ndarray) or a is None
for a in expected_gxs])
# Forward.
ys = fprop(*xs)
# Set output gradients.
if gys is not None:
assert len(gys) == len(ys)
for y, gy in zip(ys, gys):
assert not y.is_grad_required()
y.set_grad(gy, backprop_id)
# Backward.
chainerx.backward(ys, backprop_id)
# Check gradients of input arrays.
for x, expected_gx in zip(xs, expected_gxs):
if expected_gx is None:
with pytest.raises(chainerx.ChainerxError):
x.get_grad(backprop_id)
else:
gx = x.get_grad(backprop_id)
_assert_arrays_equal(gx, expected_gx)
# Check gradients of output arrays.
if gys is None:
gys = (None,) * len(xs)
for y, gy in zip(ys, gys):
if gy is None:
assert not y.is_grad_required(backprop_id)
with pytest.raises(chainerx.ChainerxError):
y.get_grad(backprop_id)
else:
assert y.is_grad_required(backprop_id)
_assert_arrays_equal(gy, y.get_grad(backprop_id))
def _check_grad(
fprop, xs, expected_gxs, gys=None, backprop_id=None, xs_indices=None,
ys_indices=None, grad_outputs=[],
set_grad=False, retain_grad=False):
# Checks for test validity.
assert callable(fprop)
assert isinstance(xs, tuple)
assert isinstance(expected_gxs, tuple)
assert all([isinstance(a, chainerx.ndarray) for a in xs])
assert all([isinstance(a, chainerx.ndarray) or a is None
for a in expected_gxs])
# Forward.
ys = fprop(*xs)
# Set output gradients.
if gys is not None:
assert len(gys) == len(ys)
for y, gy in zip(ys, gys):
assert not y.is_grad_required()
y.set_grad(gy, backprop_id)
# Backward using grad.
initial_gxs = [
x.get_grad(backprop_id) if x.is_grad_required(backprop_id)
else chainerx.ChainerxError for x in xs]
if xs_indices is not None:
actual_xs = tuple([xs[i] for i in xs_indices])
assert len(actual_xs) == len(expected_gxs)
else:
actual_xs = xs
if ys_indices is not None:
actual_ys = tuple([ys[i] for i in ys_indices])
else:
actual_ys = ys
gxs = chainerx.grad(actual_ys, actual_xs, backprop_id,
grad_outputs=grad_outputs,
set_grad=set_grad, retain_grad=retain_grad)
# Check gradients.
for gx, expected_gx in zip(gxs, expected_gxs):
_assert_arrays_equal(gx, expected_gx)
# Check gradients of output arrays.
if gys is None:
gys = (None,) * len(xs)
for y, gy in zip(ys, gys):
if gy is None:
assert not y.is_grad_required(backprop_id)
with pytest.raises(chainerx.ChainerxError):
y.get_grad(backprop_id)
else:
assert y.is_grad_required(backprop_id)
_assert_arrays_equal(gy, y.get_grad(backprop_id))
if set_grad:
for x, gx in zip(xs, gxs):
_assert_arrays_equal(gx, x.get_grad(backprop_id))
else:
# Check initial gradients of inputs and that they are not modified.
for x, initial_gx in zip(xs, initial_gxs):
if initial_gx is chainerx.ChainerxError:
assert not x.is_grad_required(backprop_id)
with pytest.raises(chainerx.ChainerxError):
x.get_grad(backprop_id)
else:
assert x.is_grad_required(backprop_id)
_assert_arrays_equal(initial_gx, x.get_grad(backprop_id))
# Delegates work to either _check_backward or _check_grad.
def _check_backprop(
method, fprop, xs, expected_gxs, gys=None, backprop_id=None):
if method == 'backward':
check_func = _check_backward
elif method == 'grad':
check_func = _check_grad
else:
assert False
check_func(fprop, xs, expected_gxs, gys=gys, backprop_id=backprop_id)
def parametrize_backprop(argname='method'):
return pytest.mark.parametrize(argname, ['backward', 'grad'])
@parametrize_backprop()
def test_backprop_identity(method):
shape = (1,)
dtype = chainerx.float32
xs = (chainerx.full(shape, 5, dtype).require_grad(),)
expected_gxs = (chainerx.full(shape, 1, dtype),)
def fprop(x):
return x.copy(),
_check_backprop(method, fprop, xs, expected_gxs)
@parametrize_backprop()
def test_backprop_add(method):
shape = (1,)
dtype = chainerx.float32
xs = (
chainerx.full(shape, 3, dtype).require_grad(),
chainerx.full(shape, 5, dtype).require_grad(),)
expected_gxs = (
chainerx.full(shape, 1, dtype),
chainerx.full(shape, 1, dtype),)
def fprop(x0, x1):
return x0 + x1,
_check_backprop(method, fprop, xs, expected_gxs)
@parametrize_backprop()
def test_backprop_mul(method):
shape = (1,)
dtype = chainerx.float32
xs = (
chainerx.full(shape, 3, dtype).require_grad(),
chainerx.full(shape, 5, dtype).require_grad(),)
expected_gxs = (
chainerx.full(shape, 5, dtype),
chainerx.full(shape, 3, dtype),)
def fprop(x0, x1):
return x0 * x1,
_check_backprop(method, fprop, xs, expected_gxs)
@parametrize_backprop()
def test_backprop_add_mul(method):
shape = (1,)
dtype = chainerx.float32
xs = (
chainerx.full(shape, 2, dtype).require_grad(),
chainerx.full(shape, 9, dtype).require_grad(),
chainerx.full(shape, 5, dtype).require_grad(),)
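    # With y = x0 * (x1 + x2) and (x0, x1, x2) = (2, 9, 5):
    # dy/dx0 = x1 + x2 = 14 and dy/dx1 = dy/dx2 = x0 = 2.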
expected_gxs = (
chainerx.full(shape, 14, dtype),
chainerx.full(shape, 2, dtype),
chainerx.full(shape, 2, dtype))
def fprop(x0, x1, x2):
return x0 * (x1 + x2),
_check_backprop(method, fprop, xs, expected_gxs)
@parametrize_backprop()
def test_backprop_add_mul_extra_inputs(method):
shape = (1,)
dtype = chainerx.float32
xs = (
chainerx.full(shape, 2, dtype).require_grad(),
chainerx.full(shape, 3, dtype).require_grad(),
chainerx.full(shape, 4, dtype))
expected_gxs = (
chainerx.full(shape, 7, dtype),
chainerx.full(shape, 2, dtype),
None)
def fprop(x0, x1, x2):
return x0 * (x1 + x2),
_check_backprop(method, fprop, xs, expected_gxs)
@parametrize_backprop()
def test_backprop_sole_array_node(method):
shape = (1,)
dtype = chainerx.float32
x = chainerx.full(shape, 2, dtype).require_grad()
expected_gx = chainerx.full(shape, 1, dtype)
if method == 'backward':
chainerx.backward(x)
gx = x.get_grad()
elif method == 'grad':
gx, = chainerx.grad([x], [x])
else:
assert False
_assert_arrays_equal(gx, expected_gx)
@parametrize_backprop()
def test_backprop_double_backprop(method):
shape = (1,)
dtype = chainerx.float32
xs = (
chainerx.full(shape, 2, dtype).require_grad(),
chainerx.full(shape, 3, dtype),)
expected_gxs = (
chainerx.full(shape, 2, dtype),
None,)
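    # fprop returns gx0 = d(x0 * (x0 + x1))/dx0 = 2 * x0 + x1, so the outer
    # backprop differentiates once more: d(gx0)/dx0 = 2.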
def fprop(x0, x1):
assert x0.is_grad_required()
h = x0 * (x0 + x1)
chainerx.backward(h, enable_double_backprop=True)
gx0 = x0.get_grad()
x0.cleargrad()
return gx0,
_check_backprop(method, fprop, xs, expected_gxs)
@parametrize_backprop('method0')
@parametrize_backprop('method1')
def test_backprop_multiple_graphs_double_backprop(method0, method1):
shape = (1,)
dtype = chainerx.float32
with chainerx.backprop_scope('bp_x1') as bp_x1, \
chainerx.backprop_scope('bp_x0') as bp_x0:
xs = (
chainerx.full(shape, 2, dtype).require_grad(bp_x0),
chainerx.full(shape, 3, dtype).require_grad(bp_x1),)
expected_gxs = (
None,
chainerx.full(shape, 2, dtype),)
def fprop(x0, x1):
assert x0.is_grad_required(bp_x0)
h = x0 * (x0 + x1)
if method0 == 'backward':
chainerx.backward(h, backprop_id=bp_x0)
gx0 = x0.get_grad(bp_x0)
elif method0 == 'grad':
gx0, = chainerx.grad([h], [x0], backprop_id=bp_x0)
else:
assert False
assert not gx0.is_backprop_required(bp_x0)
assert gx0.is_backprop_required(bp_x1)
return x0 * gx0,
_check_backprop(method1, fprop, xs, expected_gxs, backprop_id=bp_x1)
@parametrize_backprop()
def test_backprop_identical_input_to_multiple_ops(method):
shape = (1,)
dtype = chainerx.float32
xs = (
chainerx.full(shape, 2, dtype).require_grad(),
chainerx.full(shape, 3, dtype),)
expected_gxs = (
chainerx.full(shape, 7, dtype),
None,)
def fprop(x0, x1):
return x0 * (x0 + x1),
_check_backprop(method, fprop, xs, expected_gxs)
@parametrize_backprop()
def test_backprop_identical_inputs(method):
shape = (1,)
dtype = chainerx.float32
xs = (chainerx.full(shape, 2, dtype).require_grad(),)
expected_gxs = (chainerx.full(shape, 2, dtype),)
def fprop(x):
return x + x,
_check_backprop(method, fprop, xs, expected_gxs)
@parametrize_backprop()
def test_backprop_identical_intermediate_nodes(method):
shape = (1,)
dtype = chainerx.float32
xs = (chainerx.full(shape, 2, dtype).require_grad(),)
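    # y = (x + x) + (x + x) = 4 * x, hence dy/dx = 4.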
expected_gxs = (chainerx.full(shape, 4, dtype),)
def fprop(x):
h = x + x
return h + h,
_check_backprop(method, fprop, xs, expected_gxs)
@parametrize_backprop()
def test_backprop_given_input_grad(method):
shape = (1,)
dtype = chainerx.float32
xs = (chainerx.full(shape, 1, dtype).require_grad(),)
expected_gx_value = 2 if method == 'backward' else 1
expected_gxs = (chainerx.full(shape, expected_gx_value, dtype),)
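    # backward() apparently accumulates onto the gradient pre-set inside
    # fprop (1 + 1 = 2), while grad() returns only the newly computed
    # gradient (1).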
def fprop(x):
x.set_grad(chainerx.full(shape, 1, dtype))
return x.copy(),
_check_backprop(method, fprop, xs, expected_gxs)
@parametrize_backprop()
def test_backprop_given_output_grad(method):
shape = (1,)
dtype = chainerx.float32
xs = (
chainerx.full(shape, 2, dtype).require_grad(),
chainerx.full(shape, 3, dtype),)
expected_gxs = (
chainerx.full(shape, 6, dtype),
None,)
gys = (
chainerx.full(shape, 2, dtype),)
def fprop(x0, x1):
return x0 * x1,
_check_backprop(method, fprop, xs, expected_gxs, gys=gys)
@parametrize_backprop()
def test_backprop_multiple_graphs_basic(method):
shape = (1,)
dtype = chainerx.float32
with chainerx.backprop_scope('bp1') as backprop_id1, \
chainerx.backprop_scope('bp2') as backprop_id2:
xs = (
chainerx.full(shape, 2, dtype).require_grad(backprop_id1),
chainerx.full(shape, 5, dtype).require_grad(backprop_id2),)
expected_gxs = (
chainerx.full(shape, 5, dtype),
None,)
def fprop(x0, x1):
return x0 * x1,
_check_backprop(
method, fprop, xs, expected_gxs, backprop_id=backprop_id1)
@parametrize_backprop()
def test_backprop_multiple_graphs_non_existing(method):
shape = (1,)
dtype = chainerx.float32
with chainerx.backprop_scope('bp1') as backprop_id1, \
chainerx.backprop_scope('bp2') as backprop_id2:
xs = (
chainerx.full(shape, 2, dtype).require_grad(backprop_id1),
chainerx.full(shape, 5, dtype).require_grad(backprop_id1),)
y = xs[0] * xs[1]
if method == 'backward':
chainerx.backward(y, backprop_id2)
assert xs[0].get_grad(backprop_id1) is None
assert xs[1].get_grad(backprop_id1) is None
elif method == 'grad':
grads = chainerx.grad([y], xs, backprop_id2)
assert len(grads) == 2
assert grads[0] is None
assert grads[1] is None
else:
assert False
with pytest.raises(chainerx.ChainerxError):
xs[0].get_grad(backprop_id2)
with pytest.raises(chainerx.ChainerxError):
xs[1].get_grad(backprop_id2)
@parametrize_backprop('method0')
@parametrize_backprop('method1')
@parametrize_backprop('method2')
def test_backprop_multiple_graphs_reuse(method0, method1, method2):
shape = (1,)
dtype = chainerx.float32
def fprop(x0, x1):
return x0 * x1,
with chainerx.backprop_scope('bp2') as backprop_id2, \
chainerx.backprop_scope('bp1') as backprop_id1:
xs = (
chainerx.full(shape, 2, dtype).require_grad(backprop_id1),
chainerx.full(shape, 5, dtype).require_grad(backprop_id2),)
expected_gxs = (
chainerx.full(shape, 5, dtype),
None,)
_check_backprop(
method0, fprop, xs, expected_gxs, backprop_id=backprop_id1)
x1, x2 = xs
x1.cleargrad(backprop_id1)
x2.cleargrad(backprop_id2)
assert x1.get_grad(backprop_id1) is None
assert x2.get_grad(backprop_id2) is None
expected_gxs = (
None,
chainerx.full(shape, 2, dtype),)
_check_backprop(
method1, fprop, xs, expected_gxs, backprop_id=backprop_id2)
x1.cleargrad(backprop_id1)
x2.cleargrad(backprop_id2)
x1.require_grad(backprop_id2)
x2.require_grad(backprop_id1)
expected_gxs = (
chainerx.full(shape, 5, dtype),
chainerx.full(shape, 2, dtype),)
_check_backprop(
method2, fprop, xs, expected_gxs, backprop_id=backprop_id2)
assert x1.get_grad(backprop_id1) is None
assert x2.get_grad(backprop_id1) is None
@parametrize_backprop()
def test_backprop_multiple_outputs(method):
shape = (1,)
dtype = chainerx.float32
xs = (
chainerx.full(shape, 3, dtype).require_grad(),
chainerx.full(shape, 5, dtype).require_grad(),)
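    # Gradients accumulate over both outputs y0 = x0 + x1 and y1 = x0 * x1:
    # gx0 = 1 + x1 = 6 and gx1 = 1 + x0 = 4.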
expected_gxs = (
chainerx.full(shape, 6, dtype),
chainerx.full(shape, 4, dtype),)
def fprop(x0, x1):
return x0 + x1, x0 * x1
_check_backprop(method, fprop, xs, expected_gxs)
def test_create_and_release_backprop_id():
context = chainerx.Context()
backprop_id = context.make_backprop_id('bp1')
assert 'bp1' == backprop_id.name
assert context == backprop_id.context
context._check_valid_backprop_id(backprop_id)
context.release_backprop_id(backprop_id)
with pytest.raises(chainerx.ChainerxError):
context._check_valid_backprop_id(backprop_id)
@pytest.mark.parametrize('xs_indices', [[], [0], [1], [0, 1], [1, 0]])
@pytest.mark.parametrize('ys_indices', [[], [0], [1], [0, 1], [1, 0]])
def test_grad_not_all_inputs_outputs_in_graph(xs_indices, ys_indices):
shape = (1,)
dtype = chainerx.float32
xs = (
chainerx.full(shape, 3, dtype).require_grad(),
chainerx.full(shape, 5, dtype).require_grad(),)
gxs = (
(chainerx.full(shape, 1, dtype), # gy1gx1
chainerx.full(shape, 1, dtype)), # gy1gx2
(chainerx.full(shape, 5, dtype), # gy2gx1
chainerx.full(shape, 3, dtype)),) # gy2gx2
expected_gxs = [None] * len(xs_indices)
for ys_index in ys_indices:
for i, xs_index in enumerate(xs_indices):
if expected_gxs[i] is None:
expected_gxs[i] = chainerx.full(shape, 0, dtype)
expected_gxs[i] += gxs[ys_index][xs_index]
def fprop(x0, x1):
return x0 + x1, x0 * x1
_check_grad(
fprop, xs, tuple(expected_gxs), xs_indices=xs_indices,
ys_indices=ys_indices)
def test_grad_with_grad_outputs():
shape = (1,)
dtype = chainerx.float32
xs = (
chainerx.full(shape, 3, dtype).require_grad(),
chainerx.full(shape, 5, dtype).require_grad(),)
gysi = (
chainerx.full(shape, 10, dtype).require_grad(),
chainerx.full(shape, 20, dtype).require_grad(),)
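    # With output gradients (10, 20) for y0 = x0 + x1 and y1 = x0 * x1:
    # gx0 = 10 * 1 + 20 * x1 = 110 and gx1 = 10 * 1 + 20 * x0 = 70.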
expected_gxs = (
chainerx.full(shape, 110, dtype),
chainerx.full(shape, 70, dtype),)
def fprop(x0, x1):
return x0 + x1, x0 * x1
_check_grad(fprop, xs, expected_gxs, grad_outputs=gysi)
def test_grad_with_invalid_grad_outputs():
shape = (1,)
dtype = chainerx.float32
xs = (
chainerx.full(shape, 3, dtype).require_grad(),
chainerx.full(shape, 5, dtype).require_grad(),)
gysi = (
chainerx.full(shape, 10, dtype).require_grad(),)
expected_gxs = (
chainerx.full(shape, 110, dtype),
chainerx.full(shape, 70, dtype),)
def fprop(x0, x1):
return x0 + x1, x0 * x1
with pytest.raises(chainerx.GradientError):
_check_grad(fprop, xs, expected_gxs, grad_outputs=gysi)
def test_grad_with_set_grad():
shape = (1,)
dtype = chainerx.float32
xs = (
chainerx.full(shape, 3, dtype).require_grad(),
chainerx.full(shape, 5, dtype).require_grad(),)
expected_gxs = (
chainerx.full(shape, 6, dtype),
chainerx.full(shape, 4, dtype),)
def fprop(x0, x1):
return x0 + x1, x0 * x1
_check_grad(fprop, xs, expected_gxs, set_grad=True)
def test_grad_with_retain_grad():
shape = (1,)
backprop_id = None
dtype = chainerx.float32
xs = (
chainerx.full(shape, 3, dtype).require_grad(),
chainerx.full(shape, 5, dtype).require_grad(),)
expected_gxs = (
chainerx.full(shape, 4, dtype),)
# This test can't use _check_grad because a forward function does not
# expose the intermediate arrays of the graph, which are needed to verify
# the retained gradients.
a = xs[0] * 2
b = a + xs[1]
c = a + b
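    # With c = a + b and b = a + x1: dc/db = 1, while a receives gradient
    # both directly and through b, so dc/da = 1 + 1 = 2. retain_grad should
    # leave these on the intermediates b and a.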
expected_retain = (
chainerx.full(shape, 1, dtype),
chainerx.full(shape, 2, dtype),)
gxs = chainerx.grad([c], xs, backprop_id, retain_grad=True)
# Check gradients.
for gx, expected_gx in zip(gxs, expected_gxs):
_assert_arrays_equal(gx, expected_gx)
_assert_arrays_equal(expected_retain[0], b.get_grad(backprop_id))
_assert_arrays_equal(expected_retain[1], a.get_grad(backprop_id))
| 18,821 | 27.561457 | 77 | py |
| chainer | chainer-master/tests/chainerx_tests/unit_tests/testing_tests/test_device_buffer.py |
import pytest
import chainerx
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
# TODO(niboshi): This test causes segv with CUDA device in certain situations.
# Fix it and remove skip.
@pytest.mark.skip()
def test_device_buffer(device):
buf = chainerx.testing._DeviceBuffer(
[1, 2, 3, 4, 5, 6], (2, 3), chainerx.float32, device)
mv = memoryview(buf)
assert mv.format == 'f'
assert mv.itemsize == 4
assert mv.contiguous
assert not mv.f_contiguous
assert not mv.readonly
assert mv.ndim == 2
assert mv.shape == (2, 3)
assert mv.strides == (12, 4)
assert mv.tolist() == [[1, 2, 3], [4, 5, 6]]
| 650 | 27.304348 | 78 | py |
| chainer | chainer-master/tests/chainerx_tests/unit_tests/testing_tests/test_helper.py |
import numpy
import pytest
import chainerx
import chainerx.testing
class FooError(Exception):
pass
class BarError(Exception):
pass
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('np_result, chx_result', [
(1.0, 1.0),
(numpy.full((1,), 1.0, numpy.float32),
chainerx.full((1,), 1.0, chainerx.float32)),
])
def test_numpy_chainerx_array_equal_both_return_nonarray(
xp, np_result, chx_result):
if xp is numpy:
return np_result
else:
return chx_result
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('np_result, chx_result', [
(None, None), # Both return None
(None, chainerx.full((1,), 1.0, chainerx.float32)), # NumPy returns None
(numpy.full((1,), 1.0, numpy.float32), None), # ChainerX returns None
(numpy.full((1,), 1.0, numpy.float32), chainerx.full(
(1,), 2.0, chainerx.float32)), # Value mismatch
# NumPy returns non-array
(1.0, chainerx.full((1,), 1.0, chainerx.float64)),
(numpy.full((1,), 1.0, numpy.float64), 1.0), # ChainerX returns non-array
(1.0, 1), # Scalar type mismatch
(numpy.int64(1), numpy.int64(1)), # ChainerX returns NumPy scalar
(numpy.full((1,), 1.0, numpy.float64), numpy.full(
(1,), 1.0, numpy.float64)), # Both return NumPy array
(chainerx.full((1,), 1.0, chainerx.float64), chainerx.full(
(1,), 1.0, chainerx.float64)), # Both return ChainerX array
(chainerx.full((1,), 1.0, chainerx.float64), numpy.full(
(1,), 1.0, numpy.float64)), # Return arrays wrong way around
(numpy.full((1,), 1.0, numpy.float64), chainerx.full(
(1,), 1.0, chainerx.float32)), # Dtype mismatch
(numpy.full((1,), 1.0, numpy.float64), chainerx.full(
(), 1.0, chainerx.float32)), # Shape mismatch
# Strides mismatch
(numpy.array(
[[0, 1, 2, 0], [0, 3, 4, 0], [0, 0, 0, 0]], numpy.float32)[0:2, 1:3],
chainerx.array(
numpy.array(
[[0, 0, 0], [1, 2, 0], [3, 4, 0]], numpy.float32))[1:3, 0:2]),
])
def test_numpy_chainerx_array_equal_fail_invalid_return(
xp, np_result, chx_result):
if xp is numpy:
return np_result
else:
return chx_result
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_array_equal()
def test_numpy_chainerx_array_equal_fail_both_raise(xp):
if xp is numpy:
raise TypeError('NumPy error')
else:
raise TypeError('ChainerX error')
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_array_equal()
def test_numpy_chainerx_array_equal_fail_numpy_raise(xp):
if xp is numpy:
raise TypeError('NumPy error')
else:
return chainerx.full((1,), 1.0, chainerx.float32)
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_array_equal()
def test_numpy_chainerx_array_equal_fail_chainerx_raise(xp):
if xp is numpy:
return numpy.full((1,), 1.0, numpy.float32)
else:
raise TypeError('ChainerX error')
@chainerx.testing.numpy_chainerx_array_equal()
def test_numpy_chainerx_array_equal_parametrize_dtype(xp, dtype):
assert isinstance(dtype, str)
assert dtype in chainerx.testing.all_dtypes
if xp is numpy:
return numpy.full((1,), 1.0, dtype)
else:
return chainerx.full((1,), 1.0, dtype)
@chainerx.testing.numpy_chainerx_array_equal(dtype_check=False)
def test_numpy_chainerx_array_equal_dtype_check_disabled(xp):
if xp is numpy:
return numpy.full((1,), 1.0, numpy.float64)
else:
return chainerx.full((1,), 1.0, chainerx.float32)
@chainerx.testing.numpy_chainerx_array_equal(name='foo')
def test_numpy_chainerx_array_equal_name(foo):
if foo is numpy:
return numpy.full((1,), 1.0, numpy.float32)
else:
return chainerx.full((1,), 1.0, chainerx.float32)
@chainerx.testing.numpy_chainerx_array_equal(accept_error=FooError)
def test_numpy_chainerx_array_equal_accept_error(xp):
raise FooError()
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_array_equal(accept_error=FooError)
def test_numpy_chainerx_array_equal_fail_accept_error_differ(xp):
raise BarError()
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_array_equal(accept_error=FooError)
def test_numpy_chainerx_array_equal_fail_accept_error_only_numpy(xp):
if xp is numpy:
raise FooError()
else:
return chainerx.full((1,), 1.0, chainerx.float32)
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_array_equal(accept_error=FooError)
def test_numpy_chainerx_array_equal_fail_accept_error_only_chainerx(xp):
if xp is numpy:
return numpy.full((1,), 1.0, numpy.float32)
else:
raise FooError()
@chainerx.testing.numpy_chainerx_array_equal(accept_error=(FooError, BarError))
def test_numpy_chainerx_array_equal_accept_error_multiple(xp):
if xp is numpy:
raise FooError()
else:
raise BarError()
@chainerx.testing.numpy_chainerx_array_equal()
def test_numpy_chainerx_array_equal_nan(xp):
a = numpy.zeros((5, 3), numpy.float32)
if xp is numpy:
a[2, 1] = float('nan')
else:
a[2, 1] = float('nan')
return xp.array(a)
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_array_equal()
def test_numpy_chainerx_array_equal_fail_nan_inf(xp):
a = numpy.zeros((5, 3), numpy.float32)
if xp is numpy:
a[2, 1] = float('nan')
else:
a[2, 1] = float('inf')
return xp.array(a)
@chainerx.testing.numpy_chainerx_array_equal()
def test_numpy_chainerx_array_equal_accept_ignore(xp):
return chainerx.testing.ignore()
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_array_equal()
def test_numpy_chainerx_array_equal_fail_numpy_ignore(xp):
if xp is numpy:
return chainerx.testing.ignore()
else:
return xp.arange(10)
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_array_equal()
def test_numpy_chainerx_array_equal_fail_chainerx_ignore(xp):
if xp is numpy:
return xp.arange(10)
else:
return chainerx.testing.ignore()
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_array_equal()
def test_numpy_chainerx_array_equal_fail_ignore_none(xp):
if xp is numpy:
return chainerx.testing.ignore()
else:
return None
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_array_equal()
def test_numpy_chainerx_array_equal_fail_ignore_error(xp):
if xp is numpy:
return chainerx.testing.ignore()
else:
raise FooError()
@chainerx.testing.numpy_chainerx_allclose()
def test_numpy_chainerx_allclose_exact(xp):
assert xp is numpy or xp is chainerx
a = numpy.zeros((5, 3), numpy.float32)
if xp is numpy:
a[2, 1] = 1.0
else:
a[2, 1] = 1.0
return xp.array(a)
@chainerx.testing.numpy_chainerx_allclose()
def test_numpy_chainerx_allclose_close(xp):
a = numpy.zeros((5, 3), numpy.float32)
if xp is numpy:
a[2, 1] = 1.0
else:
a[2, 1] = 1.0 + 5e-8
return xp.array(a)
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_allclose()
def test_numpy_chainerx_allclose_fail_not_close(xp):
a = numpy.zeros((5, 3), numpy.float32)
if xp is numpy:
a[2, 1] = 1.0
else:
a[2, 1] = 1.0 + 2e-7
return xp.array(a)
@chainerx.testing.numpy_chainerx_allclose()
def test_numpy_chainerx_allclose_close2(xp):
a = numpy.zeros((5, 3), numpy.float32)
if xp is numpy:
a[2, 1] = 1e8
else:
a[2, 1] = 1e8 + 5
return xp.array(a)
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_allclose()
def test_numpy_chainerx_allclose_fail_not_close2(xp):
a = numpy.zeros((5, 3), numpy.float32)
if xp is numpy:
a[2, 1] = 1e8
else:
a[2, 1] = 1e8 + 20
return xp.array(a)
@chainerx.testing.numpy_chainerx_allclose(rtol=1e-2, atol=0)
def test_numpy_chainerx_allclose_rtol(xp):
a = numpy.zeros((5, 3), numpy.float32)
if xp is numpy:
a[2, 1] = 1e8
else:
a[2, 1] = 1e8 + 5e5
return xp.array(a)
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_allclose(rtol=1e-2, atol=0)
def test_numpy_chainerx_allclose_fail_rtol(xp):
a = numpy.zeros((5, 3), numpy.float32)
if xp is numpy:
a[2, 1] = 1e8
else:
a[2, 1] = 1e8 + 2e6
return xp.array(a)
@chainerx.testing.numpy_chainerx_allclose(rtol=0, atol=1e2)
def test_numpy_chainerx_allclose_atol(xp):
a = numpy.zeros((5, 3), numpy.float32)
if xp is numpy:
a[2, 1] = 1e8
else:
a[2, 1] = 1e8 + 5e1
return xp.array(a)
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_allclose(rtol=0, atol=1e2)
def test_numpy_chainerx_allclose_fail_atol(xp):
a = numpy.zeros((5, 3), numpy.float32)
if xp is numpy:
a[2, 1] = 1e8
else:
a[2, 1] = 1e8 + 2e2
return xp.array(a)
@chainerx.testing.numpy_chainerx_allclose()
def test_numpy_chainerx_allclose_nan(xp):
a = numpy.zeros((5, 3), numpy.float32)
if xp is numpy:
a[2, 1] = float('nan')
else:
a[2, 1] = float('nan')
return xp.array(a)
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_allclose(equal_nan=False)
def test_numpy_chainerx_allclose_fail_nan_disabled(xp):
a = numpy.zeros((5, 3), numpy.float32)
if xp is numpy:
a[2, 1] = float('nan')
else:
a[2, 1] = float('nan')
return xp.array(a)
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_allclose()
def test_numpy_chainerx_allclose_fail_nan_inf(xp):
a = numpy.zeros((5, 3), numpy.float32)
if xp is numpy:
a[2, 1] = float('nan')
else:
a[2, 1] = float('inf')
return xp.array(a)
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_allclose()
def test_numpy_chainerx_allclose_fail_both_numpy(xp):
if xp is numpy:
return numpy.full((1,), 1.0, numpy.float64)
else:
return numpy.full((1,), 1.0, numpy.float64)
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_allclose()
def test_numpy_chainerx_allclose_fail_both_chainerx(xp):
if xp is numpy:
return chainerx.full((1,), 1.0, chainerx.float64)
else:
return chainerx.full((1,), 1.0, chainerx.float64)
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_allclose()
def test_numpy_chainerx_allclose_fail_wrong_way_around(xp):
if xp is numpy:
return chainerx.full((1,), 1.0, chainerx.float64)
else:
return numpy.full((1,), 1.0, numpy.float64)
@chainerx.testing.numpy_chainerx_allclose(name='foo')
def test_numpy_chainerx_allclose_name(foo):
if foo is numpy:
return numpy.full((1,), 1.0, numpy.float64)
else:
return chainerx.full((1,), 1.0, chainerx.float64)
@chainerx.testing.numpy_chainerx_allclose(accept_error=FooError)
def test_numpy_chainerx_allclose_accept_error(xp):
raise FooError()
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_allclose(accept_error=FooError)
def test_numpy_chainerx_allclose_fail_accept_error_differ(xp):
raise BarError()
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_allclose(accept_error=FooError)
def test_numpy_chainerx_allclose_fail_accept_error_only_numpy(xp):
if xp is numpy:
raise FooError()
else:
return chainerx.full((1,), 1.0, chainerx.float32)
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_allclose(accept_error=FooError)
def test_numpy_chainerx_allclose_fail_accept_error_only_chainerx(xp):
if xp is numpy:
return numpy.full((1,), 1.0, numpy.float32)
else:
raise FooError()
@chainerx.testing.numpy_chainerx_allclose(accept_error=(FooError, BarError))
def test_numpy_chainerx_allclose_accept_error_multiple(xp):
if xp is numpy:
raise FooError()
else:
raise BarError()
@chainerx.testing.numpy_chainerx_allclose()
def test_numpy_chainerx_allclose_accept_ignore(xp):
return chainerx.testing.ignore()
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_allclose()
def test_numpy_chainerx_allclose_fail_numpy_ignore(xp):
if xp is numpy:
return chainerx.testing.ignore()
else:
return xp.arange(10)
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_allclose()
def test_numpy_chainerx_allclose_fail_chainerx_ignore(xp):
if xp is numpy:
return xp.arange(10)
else:
return chainerx.testing.ignore()
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_allclose()
def test_numpy_chainerx_allclose_fail_ignore_none(xp):
if xp is numpy:
return chainerx.testing.ignore()
else:
return None
@pytest.mark.xfail(strict=True)
@chainerx.testing.numpy_chainerx_allclose()
def test_numpy_chainerx_allclose_fail_ignore_error(xp):
if xp is numpy:
return chainerx.testing.ignore()
else:
raise FooError()
| 13,084
| 27.569869
| 79
|
py
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/testing_tests/test_array.py
|
import numpy
import pytest
import chainerx
import chainerx.testing
def _make_onehot_arrays(shape, dtype, value1, value2):
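    # Build two zero arrays of `shape` that differ (or agree) only at the
    # central flat index, holding value1 and value2 respectively.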
a = numpy.zeros(shape, dtype)
b = numpy.zeros(shape, dtype)
indices = list(numpy.ndindex(*shape))
a[indices[len(indices) // 2]] = value1
b[indices[len(indices) // 2]] = value2
return a, b
@pytest.mark.parametrize(
'dtype1,dtype2',
list(zip(chainerx.testing.all_dtypes, chainerx.testing.all_dtypes)) + [
(numpy.float32, numpy.int64), # arrays with different dtypes
])
@pytest.mark.parametrize('shape,transpose', [
((), False),
((0,), False),
((1,), False),
((2, 3), False),
((2, 3), True), # arrays with different strides
])
def test_assert_array_equal(shape, transpose, dtype1, dtype2):
np_a = numpy.arange(2, 2 + numpy.prod(shape)).astype(dtype1).reshape(shape)
if transpose:
np_b = numpy.empty(np_a.T.shape, dtype=dtype2).T
np_b[:] = np_a
else:
        np_b = numpy.arange(
            2, 2 + numpy.prod(shape)).astype(dtype2).reshape(shape)
chx_a = chainerx.array(np_a)
chx_b = chainerx.array(np_b)
# Test precondition checks
assert np_a.shape == np_b.shape
if transpose:
assert np_a.strides != np_b.strides, 'transpose=True is meaningless'
    # The assertions under test
chainerx.testing.assert_array_equal(np_a, np_a) # np-np (same obj)
chainerx.testing.assert_array_equal(chx_a, chx_a) # chx-chx (same obj)
chainerx.testing.assert_array_equal(np_a, np_b) # np-np (diff. obj)
chainerx.testing.assert_array_equal(chx_a, chx_b) # chx-chx (diff. obj)
chainerx.testing.assert_array_equal(np_a, chx_b) # np-chx
chainerx.testing.assert_array_equal(chx_a, np_b) # chx-np
@pytest.mark.parametrize('shape', [(), (1,), (2, 3)])
def test_assert_array_equal_fail(shape, dtype):
a, b = _make_onehot_arrays(shape, dtype, 0, 2)
with pytest.raises(AssertionError):
chainerx.testing.assert_array_equal(a, b)
@pytest.mark.parametrize('value1,value2', [
(True, 1),
(True, 1.0),
(False, 0),
(False, 0.0),
(2.0, 2),
(numpy.int32(2), 2.0),
(float('nan'), numpy.float32('nan')),
])
def test_assert_array_equal_scalar(value1, value2):
chainerx.testing.assert_array_equal(value1, value2)
chainerx.testing.assert_array_equal(value2, value1)
@pytest.mark.parametrize('value1,value2', [
(2, 3),
(2.0, 3),
(True, 0),
(True, -1),
(False, 1),
(float('nan'), float('inf')),
])
def test_assert_array_equal_fail_scalar(value1, value2):
with pytest.raises(AssertionError):
chainerx.testing.assert_array_equal(value1, value2)
with pytest.raises(AssertionError):
chainerx.testing.assert_array_equal(value2, value1)
@pytest.mark.parametrize(
'dtype1,dtype2',
list(zip(chainerx.testing.all_dtypes, chainerx.testing.all_dtypes)) + [
(numpy.float32, numpy.int64), # arrays with different dtypes
])
@pytest.mark.parametrize('shape,transpose', [
((), False),
((0,), False),
((1,), False),
((2, 3), False),
((2, 3), True), # arrays with different strides
])
def test_assert_allclose(shape, transpose, dtype1, dtype2):
atol = 1e-3 if numpy.dtype('float16') in [dtype1, dtype2] else 1e-5
np_a = numpy.arange(2, 2 + numpy.prod(shape)).astype(dtype1).reshape(shape)
if transpose:
np_b = numpy.empty(np_a.T.shape, dtype=dtype2).T
np_b[:] = np_a
else:
        np_b = numpy.arange(
            2, 2 + numpy.prod(shape)).astype(dtype2).reshape(shape)
# Give some perturbation only if dtype is float
if np_a.dtype.kind in ('f', 'c'):
np_a += atol * 1e-1
if np_b.dtype.kind in ('f', 'c'):
np_b -= atol * 1e-1
chx_a = chainerx.array(np_a)
chx_b = chainerx.array(np_b)
# Test precondition checks
assert np_a.shape == np_b.shape
if transpose:
assert np_a.strides != np_b.strides, 'transpose=True is meaningless'
    # The assertions under test
chainerx.testing.assert_allclose(np_a, np_a, atol=atol) # np-np (same obj)
chainerx.testing.assert_allclose(
chx_a, chx_a, atol=atol) # chx-chx (same obj)
chainerx.testing.assert_allclose(
np_a, np_b, atol=atol) # np-np (diff. obj)
chainerx.testing.assert_allclose(
chx_a, chx_b, atol=atol) # chx-chx (diff. obj)
chainerx.testing.assert_allclose(np_a, chx_b, atol=atol) # np-chx
chainerx.testing.assert_allclose(chx_a, np_b, atol=atol) # chx-np
@pytest.mark.parametrize('shape', [(), (1,), (2, 3)])
def test_assert_allclose_fail(shape, dtype):
a, b = _make_onehot_arrays(shape, dtype, 0, 2)
with pytest.raises(AssertionError):
chainerx.testing.assert_allclose(a, b)
@pytest.mark.parametrize('value1,value2', [
(True, 1),
(True, 1.0),
(False, 0),
(False, 0.0),
(2.0, 2),
(numpy.int32(2), 2.0),
(float('nan'), numpy.float32('nan')),
])
def test_assert_allclose_scalar(value1, value2):
chainerx.testing.assert_allclose(value1, value2)
chainerx.testing.assert_allclose(value2, value1)
@pytest.mark.parametrize('value1,value2', [
(2, 3),
(2.0, 3),
(True, 0),
(True, -1),
(False, 1),
(float('nan'), float('inf')),
])
def test_assert_allclose_fail_scalar(value1, value2):
with pytest.raises(AssertionError):
chainerx.testing.assert_allclose(value1, value2)
with pytest.raises(AssertionError):
chainerx.testing.assert_allclose(value2, value1)
def test_assert_allclose_fail_equal_nan():
chainerx.testing.assert_allclose(float('nan'), float('nan'))
with pytest.raises(AssertionError):
chainerx.testing.assert_allclose(
float('nan'), float('nan'), equal_nan=False)
shape = (2, 3)
dtype = numpy.float32
a, b = _make_onehot_arrays(shape, dtype, float('nan'), float('nan'))
chainerx.testing.assert_allclose(a, b)
with pytest.raises(AssertionError):
chainerx.testing.assert_allclose(a, b, equal_nan=False)
@pytest.mark.parametrize('shape', [(), (1,), (2, 3)])
def test_assert_allclose_exact(shape, dtype):
a, b = _make_onehot_arrays(shape, dtype, 1.0, 1.0)
chainerx.testing.assert_allclose(a, b)
def test_assert_allclose_close_default_tol():
dtype = numpy.float64
shape = (2, 3)
# small absolute error
a, b = _make_onehot_arrays(shape, dtype, 1.0, 1.0 + 5e-8)
chainerx.testing.assert_allclose(a, b)
# large absolute error
a, b = _make_onehot_arrays(shape, dtype, 1e8, 1e8 + 5)
chainerx.testing.assert_allclose(a, b)
# expected failure: small absolute error
a, b = _make_onehot_arrays(shape, dtype, 1.0, 1.0 + 2e-7)
with pytest.raises(AssertionError):
chainerx.testing.assert_allclose(a, b)
# expected failure: large absolute error
a, b = _make_onehot_arrays(shape, dtype, 1e8, 1e8 + 20)
with pytest.raises(AssertionError):
chainerx.testing.assert_allclose(a, b)
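# A hedged sanity check of the numbers above, assuming default tolerances of
# roughly rtol=1e-7 and atol=0 (inferred from the passing/failing cases, not
# from the documented API):
#   |1.0 - (1.0 + 5e-8)| = 5e-8 <= 1e-7 * 1.0       -> passes
#   |1.0 - (1.0 + 2e-7)| = 2e-7 >  1e-7 * 1.0       -> fails
#   |1e8 - (1e8 + 5)|    = 5    <= 1e-7 * 1e8 = 10  -> passes
#   |1e8 - (1e8 + 20)|   = 20   >  10               -> fails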
def test_assert_allclose_rtol(float_dtype):
dtype = float_dtype
shape = (2, 3)
# relative error < rtol
a, b = _make_onehot_arrays(shape, dtype, 1e4, 1e4 + 50)
chainerx.testing.assert_allclose(a, b, rtol=1e-2, atol=0)
# relative error > rtol
a, b = _make_onehot_arrays(shape, dtype, 1e4, 1e4 + 200)
with pytest.raises(AssertionError):
chainerx.testing.assert_allclose(a, b, rtol=1e-2, atol=0)
def test_assert_allclose_atol(float_dtype):
dtype = float_dtype
shape = (2, 3)
# absolute error < atol
a, b = _make_onehot_arrays(shape, dtype, 1e-3, 1e-3 + 1e-2)
chainerx.testing.assert_allclose(a, b, rtol=0, atol=2e-2)
# absolute error > atol
a, b = _make_onehot_arrays(shape, dtype, 1e-3, 1e-3 + 1e-2)
with pytest.raises(AssertionError):
chainerx.testing.assert_allclose(a, b, rtol=0, atol=5e-3)
def test_assert_array_equal_ex_fail_dtype():
shape = (3, 2)
dtype1 = numpy.float32
dtype2 = numpy.int64
a = numpy.arange(2, 2 + numpy.prod(shape)).astype(dtype1).reshape(shape)
b = a.astype(dtype2)
with pytest.raises(AssertionError):
chainerx.testing.assert_array_equal_ex(a, b)
with pytest.raises(AssertionError):
# strides_check does not affect dtype_check
chainerx.testing.assert_array_equal_ex(a, b, strides_check=False)
chainerx.testing.assert_array_equal_ex(a, b, dtype_check=False)
def test_assert_array_equal_ex_fail_strides():
shape = (3, 2)
dtype = numpy.float32
a = numpy.arange(2, 2 + numpy.prod(shape)).astype(dtype).reshape(shape)
b = numpy.empty(a.T.shape, dtype).T
b[:] = a
with pytest.raises(AssertionError):
chainerx.testing.assert_array_equal_ex(a, b)
chainerx.testing.assert_array_equal_ex(a, b, strides_check=False)
# dtype_check=False implies strides_check=False
chainerx.testing.assert_array_equal_ex(a, b, dtype_check=False)
| 8,908
| 31.753676
| 79
|
py
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/testing_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/testing_tests/test_dtypes.py
|
import numpy
import chainerx
import chainerx.testing
def test_dtype_lists():
# Each dtype list must be a subset of all_dtypes.
dtype_lists = [
chainerx.testing.all_dtypes,
chainerx.testing.float_dtypes,
chainerx.testing.signed_dtypes,
chainerx.testing.unsigned_dtypes,
chainerx.testing.integral_dtypes,
chainerx.testing.nonfloat_dtypes,
]
for dtype_list in dtype_lists:
assert isinstance(dtype_list, tuple)
assert all([
dtype in chainerx.testing.all_dtypes
for dtype in dtype_list])
# Check dtype kind
for dtype in chainerx.testing.all_dtypes:
is_float = dtype in chainerx.testing.float_dtypes
assert is_float == (numpy.dtype(dtype).kind in ('f', 'c'))
is_signed = dtype in chainerx.testing.signed_dtypes
assert is_signed == (numpy.dtype(dtype).kind in ('i', 'f', 'c'))
is_unsigned = dtype in chainerx.testing.unsigned_dtypes
assert is_unsigned == (numpy.dtype(dtype).kind == 'u')
is_integral = dtype in chainerx.testing.integral_dtypes
assert is_integral == (numpy.dtype(dtype).kind in ('i', 'u'))
is_nonfloat = dtype in chainerx.testing.nonfloat_dtypes
assert is_nonfloat == (numpy.dtype(dtype).kind != 'f')
@chainerx.testing.parametrize_dtype_specifier('spec')
def test_parametrize_dtype_specifier(spec):
assert numpy.dtype(spec).type.__name__ in chainerx.testing.all_dtypes
@chainerx.testing.parametrize_dtype_specifier(
'spec', dtypes=['int32', 'float64'])
def test_parametrize_dtype_specifier_with_dtypes(spec):
assert chainerx.dtype(spec).name in ('int32', 'float64')
@chainerx.testing.parametrize_dtype_specifier(
'spec', dtypes=[], additional_args=('foo',))
def test_parametrize_dtype_specifier_with_additional_args(spec):
assert spec == 'foo'
| 1,879
| 32.571429
| 73
|
py
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_activation.py
|
import random
import chainer
import numpy
import chainerx
from chainer import utils
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import op_utils
n_step_lstm_dtypes_valid = dtype_utils._permutate_dtype_mapping([
# Floats.
(('float16', ), ()),
(('float32', ), ()),
(('float64', ), ()),
])
# A special parameter object used to represent an unspecified argument.
class Unspecified(object):
pass
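# Hedged illustration of the sentinel pattern above: `Unspecified` lets a
# test distinguish "argument not passed at all" from legitimate values such
# as None or 0.0, e.g. (mirroring TestElu below):
#
#     if alpha is Unspecified:
#         y = xp.elu(a)         # exercise the function's own default
#     else:
#         y = xp.elu(a, alpha)  # pass the parametrized value through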
class IgnoreNumpyFloatingPointError(object):
def __enter__(self):
self.old_settings = numpy.seterr(all='ignore')
def __exit__(self, *args):
numpy.seterr(**self.old_settings)
class UnaryMathTestBase(object):
def setup(self):
in_dtype, = self.in_dtypes
if numpy.dtype(in_dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
if in_dtype == 'float16':
self.check_forward_options.update({'rtol': 1e-3, 'atol': 1e-3})
self.check_backward_options.update({'rtol': 1e-3, 'atol': 1e-3})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 1e-2})
def generate_inputs(self):
in_dtype, = self.in_dtypes
if isinstance(self.input, numpy.ndarray):
return self.input.astype(in_dtype),
if self.input == 'random':
return array_utils.uniform(self.shape, in_dtype),
if isinstance(self.input, (bool, int, float)):
return numpy.full(self.shape, self.input, dtype=in_dtype),
assert False
def forward_xp(self, inputs, xp):
a, = inputs
        # This cast was introduced in order to avoid decreasing precision,
        # e.g. numpy.sqrt(x) becomes a float16 array where x is an int8 array.
a = dtype_utils.cast_if_numpy_array(xp, a, self.out_dtype)
with IgnoreNumpyFloatingPointError():
y = self.func(xp, a)
y = dtype_utils.cast_if_numpy_array(xp, y, self.out_dtype)
return y,
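# Hedged illustration of the precision note above (plain NumPy behavior,
# nothing ChainerX-specific): a float-valued ufunc applied to a small
# integer dtype promotes only as far as float16,
#
#     >>> numpy.sqrt(numpy.ones(3, numpy.int8)).dtype
#     dtype('float16')
#
# so the NumPy reference input is cast to out_dtype first to keep the
# reference computation at the intended precision.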
_in_out_float_dtypes_math_functions = [
# Float.
(('float16',), 'float16'),
(('float32',), 'float32'),
(('float64',), 'float64'),
]
_in_out_dtypes_math_functions = _in_out_float_dtypes_math_functions + [
# Signed int.
(('int8',), 'float32'),
(('int16',), 'float32'),
(('int32',), 'float32'),
(('int64',), 'float32'),
# Unsigned int.
(('uint8',), 'float32'),
# Bool.
(('bool_',), 'float32'),
]
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'batch_size,n_units', [
                (2, 3),
                (2, 2),
                (3, 8),
                (4, 12),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes, out_dtype', n_step_lstm_dtypes_valid)
])
))
class TestSLstm(op_utils.ChainerOpTest):
def setup(self):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 5e-3, 'atol': 5e-2})
def generate_inputs(self):
batch_size = self.batch_size
n_units = self.n_units
c1 = array_utils.uniform((batch_size, n_units), self.in_dtypes[0])
c2 = array_utils.uniform((batch_size, n_units), self.in_dtypes[0])
x1 = array_utils.uniform((batch_size, 4 * n_units), self.in_dtypes[0])
x2 = array_utils.uniform((batch_size, 4 * n_units), self.in_dtypes[0])
return tuple([c1, c2, x1, x2])
def forward_chainerx(self, inputs):
c1, c2, x1, x2 = inputs
out = chainerx.slstm(c1, c2, x1, x2)
return out
def forward_chainer(self, inputs):
c1, c2, x1, x2 = inputs
out = chainer.functions.slstm(c1, c2, x1, x2)
return out
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': _in_out_dtypes_math_functions,
'input': [-2, 2],
'contiguous': [None, 'C'],
'alpha_range': [(-2.0, 0.0), 0.0, (0.0, 2.0), Unspecified],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': _in_out_float_dtypes_math_functions,
'input': [0, float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
'alpha_range': [(-2.0, 0.0), 0.0, (0.0, 2.0), Unspecified],
})
))
class TestClippedRelu(UnaryMathTestBase, op_utils.NumpyOpTest):
z = 0.75
def func(self, xp, a):
dtype = self.out_dtype
if xp is numpy:
y = utils.force_array(a.clip(0, self.z))
return numpy.asarray(y.astype(dtype))
return xp.clipped_relu(a, self.z)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape,axis': [
((5, 4), 0),
((5, 4), 1),
((5, 4), -1),
((5, 4), -2),
((5, 4, 3, 2), 0),
((5, 4, 3, 2), 1),
((5, 4, 3, 2), 2),
((5, 4, 3, 2), 3),
((5, 4, 3, 2), -1),
((5, 4, 3, 2), -2),
((5, 4, 3, 2), -3),
((5, 4, 3, 2), -4),
],
'in_dtypes,out_dtype': _in_out_dtypes_math_functions,
})
))
class TestCRelu(UnaryMathTestBase, op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
dodge_nondifferentiable = True
def generate_inputs(self):
in_dtype, = self.in_dtypes
a = array_utils.uniform(self.shape, in_dtype)
return a,
def func(self, xp, a):
if xp is numpy:
expected_former = numpy.maximum(a, 0)
expected_latter = numpy.maximum(-a, 0)
expected = numpy.concatenate(
(expected_former, expected_latter), axis=self.axis)
return expected
return xp.crelu(a, self.axis)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': _in_out_dtypes_math_functions,
'input': [-2, 2],
'contiguous': [None, 'C'],
'alpha_range': [(-2.0, 0.0), 0.0, (0.0, 2.0), Unspecified],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': _in_out_float_dtypes_math_functions,
'input': [0, float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
'alpha_range': [(-2.0, 0.0), 0.0, (0.0, 2.0), Unspecified],
})
))
class TestElu(UnaryMathTestBase, op_utils.NumpyOpTest):
def setup(self):
in_dtype, = self.in_dtypes
if isinstance(self.alpha_range, tuple):
l, u = self.alpha_range
self.alpha = random.uniform(l, u)
elif self.alpha_range is Unspecified:
self.alpha = 1.0
else:
self.alpha = self.alpha_range
if numpy.dtype(in_dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
if in_dtype == 'float16':
self.check_forward_options.update({'rtol': 1e-3, 'atol': 1e-3})
self.check_backward_options.update({'rtol': 2e-3, 'atol': 2e-3})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 1e-2})
def func(self, xp, a):
if xp is numpy:
y = a.copy()
negzero_indices = y <= 0
y[negzero_indices] = self.alpha * numpy.expm1(y[negzero_indices])
return y
elif self.alpha_range is Unspecified:
return xp.elu(a)
else:
return xp.elu(a, self.alpha)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': _in_out_dtypes_math_functions,
'input': [-2, 2],
'contiguous': [None, 'C'],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': _in_out_float_dtypes_math_functions,
'input': [0, float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestRelu(UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
if xp is numpy:
return numpy.maximum(a, 0)
return xp.relu(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': _in_out_dtypes_math_functions,
'input': [0, -1, 1, -2, 2, 10],
'contiguous': [None, 'C'],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': _in_out_float_dtypes_math_functions,
'input': [0, float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSigmoid(UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
if xp is numpy:
return numpy.asarray(
numpy.reciprocal(1 + numpy.exp(-a))).astype(a.dtype)
return xp.sigmoid(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': _in_out_dtypes_math_functions,
'input': [-2, 2],
'contiguous': [None, 'C'],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': _in_out_float_dtypes_math_functions,
'input': [0, float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestLeakyRelu(UnaryMathTestBase, op_utils.NumpyOpTest):
slope = 0.2
check_numpy_strides_compliance = False
def func(self, xp, a):
if xp is numpy:
expected = numpy.where(a >= 0, a, a * self.slope)
return expected
return xp.leaky_relu(a, self.slope)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': _in_out_dtypes_math_functions,
'input': [-2, 2],
'contiguous': [None, 'C'],
'beta_range': [(-2.0, -1.0), (1.0, 2.0), Unspecified],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': _in_out_float_dtypes_math_functions,
'input': [0, float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
'beta_range': [(-2.0, -1.0), (1.0, 2.0), Unspecified],
})
))
class TestSoftplus(UnaryMathTestBase, op_utils.NumpyOpTest):
def setup(self):
in_dtype, = self.in_dtypes
if isinstance(self.beta_range, tuple):
l, u = self.beta_range
self.beta = random.uniform(l, u)
elif self.beta_range is Unspecified:
self.beta = 1.0
else:
self.beta = self.beta_range
if numpy.dtype(in_dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
if in_dtype == 'float16':
self.check_forward_options.update({'rtol': 2e-3, 'atol': 2e-3})
self.check_backward_options.update({'rtol': 2e-3, 'atol': 2e-3})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 1e-2})
def func(self, xp, a):
in_dtype, = self.in_dtypes
if xp is numpy:
ba = self.beta * a
beta_inv = 1.0 / self.beta
y = (numpy.fmax(ba, 0) +
numpy.log1p(numpy.exp(-numpy.fabs(ba)))) * beta_inv
return y
elif self.beta_range is Unspecified:
return xp.softplus(a)
else:
return xp.softplus(a, self.beta)
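# Hedged note on the NumPy reference above: it evaluates softplus via the
# numerically stable identity
#     softplus(x) = log(1 + exp(beta * x)) / beta
#                 = (max(beta * x, 0) + log1p(exp(-|beta * x|))) / beta
# which avoids overflow in exp() for large positive inputs.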
| 12,767
| 31
| 78
|
py
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_logic.py
|
import chainer
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import op_utils
_expected_numeric_dtypes_comparison = [
(t1, t2) for (t1, t2), _ in dtype_utils.result_numeric_dtypes_two_arrays
]
_expected_float_dtypes_comparison = [
(t1, t2)
for (t1, t2), _ in dtype_utils.result_dtypes_two_arrays
if all([numpy.dtype(t).kind == 'f' for t in (t1, t2)])
]
_expected_all_dtypes_comparison = [
(t1, t2) for (t1, t2), _ in dtype_utils.result_comparable_dtypes_two_arrays
]
def _make_in_dtypes(number_of_in_params, dtypes):
return [((dtype,) * number_of_in_params) for dtype in dtypes]
def dropout(a, prob=0.5):
a = a * numpy.random.binomial(1, prob, a.shape)
    # For a 0-d input (shape ()), the multiplication above yields a NumPy
    # scalar rather than an ndarray; converting back avoids crashes downstream.
return numpy.array(a)
_cmp_funcs = {
'equal': lambda a, b: a == b,
'not_equal': lambda a, b: a != b,
'greater': lambda a, b: a > b,
'greater_equal': lambda a, b: a >= b,
'less': lambda a, b: a < b,
'less_equal': lambda a, b: a <= b,
}
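# Hedged usage note: each key names a module-level comparison function and
# maps to the equivalent operator, so a test can check that, for example,
# chainerx.less(a, b) and `a < b` agree elementwise in both operand orders.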
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# All dtypes
chainer.testing.product({
'dtypes': _expected_all_dtypes_comparison,
'inputs': [
([], []),
([True], [True]),
([True], [False]),
]
})
# Numeric dtypes
+ chainer.testing.product({
'dtypes': _expected_numeric_dtypes_comparison,
'inputs': [
([0], [0]),
([0], [-0]),
([0], [1]),
([0, 1, 2], [0, 1, 2]),
([1, 1, 2], [0, 1, 2]),
([0, 1, 2], [1, 2, 3]),
([[0, 1], [2, 3]], [[0, 1], [2, 3]]),
([[0, 1], [2, 3]], [[0, 1], [2, -2]]),
([[0, 1], [2, 3]], [[1, 2], [3, 4]]),
(0, [0]),
(1, [0]),
([], [0]),
([0], [[0, 1, 2], [3, 4, 5]]),
([[0], [1]], [0, 1, 2]),
([0.2], [0.2]),
([0.2], [-0.3]),
],
})
# Float dtypes
+ chainer.testing.product({
'dtypes': _expected_float_dtypes_comparison,
'inputs': [
([0., numpy.nan], [0., 1.]),
([0., numpy.nan], [0., numpy.nan]),
([0., numpy.inf], [0., 1.]),
([0., -numpy.inf], [0., 1.]),
([numpy.inf, 1.], [numpy.inf, 1.]),
([-numpy.inf, 1.], [-numpy.inf, 1.]),
([numpy.inf, 1.], [-numpy.inf, 1.]),
([numpy.inf, 1.], [-numpy.inf, numpy.nan]),
]
})
))
@chainer.testing.parameterize_pytest('cmp_func', sorted(_cmp_funcs.keys()))
# Ignore warnings from numpy for NaN comparisons.
@pytest.mark.filterwarnings('ignore:invalid value encountered in ')
class TestCmp(op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
a_object, b_object = self.inputs
a_dtype, b_dtype = self.dtypes
a = numpy.array(a_object, a_dtype)
b = numpy.array(b_object, b_dtype)
return a, b
def forward_xp(self, inputs, xp):
a, b = inputs
cmp_op = _cmp_funcs[self.cmp_func]
module_func = getattr(xp, self.cmp_func)
y1 = cmp_op(a, b)
y2 = cmp_op(b, a)
y3 = module_func(a, b)
y4 = module_func(b, a)
return y1, y2, y3, y4
@pytest.mark.parametrize('a_shape,b_shape', [
((2,), (3,)),
((2,), (2, 3)),
((1, 2, 3), (1, 2, 3, 4)),
])
@pytest.mark.parametrize('cmp_func', sorted(_cmp_funcs.keys()))
def test_cmp_invalid_shapes(cmp_func, a_shape, b_shape):
cmp_op = _cmp_funcs[cmp_func]
chx_cmp = getattr(chainerx, cmp_func)
def check(x, y):
with pytest.raises(chainerx.DimensionError):
cmp_op(x, y)
with pytest.raises(chainerx.DimensionError):
chx_cmp(x, y)
a = array_utils.create_dummy_ndarray(chainerx, a_shape, 'float32')
b = array_utils.create_dummy_ndarray(chainerx, b_shape, 'float32')
check(a, b)
check(b, a)
@pytest.mark.parametrize('cmp_func', sorted(_cmp_funcs.keys()))
def test_cmp_invalid_dtypes(cmp_func, numeric_dtype):
cmp_op = _cmp_funcs[cmp_func]
chx_cmp = getattr(chainerx, cmp_func)
def check(x, y):
with pytest.raises(chainerx.DtypeError):
cmp_op(x, y)
with pytest.raises(chainerx.DtypeError):
chx_cmp(x, y)
a = array_utils.create_dummy_ndarray(chainerx, (2, 3), 'bool_')
b = array_utils.create_dummy_ndarray(chainerx, (2, 3), numeric_dtype)
check(a, b)
check(b, a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# All dtypes
chainer.testing.product({
'dtype': chainerx.testing.all_dtypes,
'input': [
[],
[True],
[False],
]
})
# Numeric dtypes
+ chainer.testing.product({
'dtype': chainerx.testing.numeric_dtypes,
'input': [
[0],
[1],
[0, 1, 2],
[[0, 1], [2, 0]],
],
})
# Float dtypes
+ chainer.testing.product({
'dtype': chainerx.testing.float_dtypes,
'input': [
[0.2],
[-0.3],
[0., numpy.nan],
[numpy.nan, numpy.inf],
[-numpy.inf, numpy.nan],
]
})
))
class TestLogicalNot(op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
return numpy.array(self.input, self.dtype),
def forward_xp(self, inputs, xp):
a, = inputs
b = xp.logical_not(a)
return b,
_binary_logical_params = \
chainer.testing.product({
'dtypes': _expected_all_dtypes_comparison,
'func': [
'logical_and', 'logical_or', 'logical_xor'
],
'inputs': [
([], []),
([True], [True]),
([True], [False]),
]
}) + chainer.testing.product({
'dtypes': _expected_numeric_dtypes_comparison,
'func': [
'logical_and', 'logical_or', 'logical_xor'
],
'inputs': [
([0], [0]),
([0], [-0]),
([0], [1]),
([0, 1, 2], [0, 1, 2]),
([1, 1, 2], [0, 1, 2]),
([0, 1, 2], [1, 2, 3]),
([[0, 1], [2, 3]], [[0, 1], [2, 3]]),
([[0, 1], [2, 3]], [[0, 1], [2, -2]]),
([[0, 1], [2, 3]], [[1, 2], [3, 4]]),
(0, [0]),
(1, [0]),
([], [0]),
([0], [[0, 1, 2], [3, 4, 5]]),
([[0], [1]], [0, 1, 2]),
([0.2], [0.2]),
([0.2], [-0.3]),
],
}) + chainer.testing.product({
'dtypes': _expected_float_dtypes_comparison,
'func': [
'logical_and', 'logical_or'
],
'inputs': [
([0., numpy.nan], [0., 1.]),
([0., numpy.nan], [0., numpy.nan]),
([0., numpy.inf], [0., 1.]),
([0., -numpy.inf], [0., 1.]),
([numpy.inf, 1.], [numpy.inf, 1.]),
([-numpy.inf, 1.], [-numpy.inf, 1.]),
([numpy.inf, 1.], [-numpy.inf, 1.]),
([numpy.inf, 1.], [-numpy.inf, numpy.nan]),
]
})
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
_binary_logical_params
))
# Ignore warnings from numpy for NaN comparisons.
@pytest.mark.filterwarnings('ignore:invalid value encountered in ')
class TestLogicalBinary(op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
a_object, b_object = self.inputs
a_dtype, b_dtype = self.dtypes
a = numpy.array(a_object, a_dtype)
b = numpy.array(b_object, b_dtype)
return a, b
def forward_xp(self, inputs, xp):
a, b = inputs
fn = getattr(xp, self.func)
y1 = fn(a, b)
y2 = fn(b, a)
return y1, y2
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape,axis': [
((), None),
((), ()),
((2,), None),
((2,), ()),
((2,), 0),
((2,), (0,)),
((2,), (-1,)),
((2, 3), None),
((2, 3), ()),
((2, 3), 0),
((2, 3), (0,)),
((2, 3), (1,)),
((2, 3), (-1,)),
((2, 3), (-2,)),
((2, 3), (0, 1)),
((2, 3), (-2, -1)),
            ((1, 3), None),  # Reduce over a length-1 axis
            ((0, 3), None),  # Reduce over a length-0 axis
# Reduce over axes that are in the middle or apart
((2, 3, 4), (1,)),
((2, 3, 4), (0, 2)),
# Reduce over axes that are apart and/or unsorted
((2, 3), (1, 0)),
((2, 3, 4), (2, 0)),
((2, 3, 4), (2, 0, 1)),
((2, 3, 4), (-2, 2, 0)),
],
'keepdims': [True, False],
'in_dtype':
_make_in_dtypes(1, chainerx.testing.all_dtypes),
'func': ['all', 'any'],
        # Probabilities that yield all-zero, partially zero, and
        # all-non-zero arrays, respectively.
'probs': [0.0, 0.6, 1.0],
'is_module': [True, False],
})
))
class TestLogicalReductions(op_utils.NumpyOpTest):
def setup(self):
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
in_dtype, = self.in_dtype
a = numpy.random.normal(0, 1, self.shape)
a = dropout(a, self.probs).astype(in_dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if self.is_module:
fn = getattr(xp, self.func)
y = fn(a, axis=self.axis, keepdims=self.keepdims)
else:
fn = getattr(a, self.func)
y = fn(axis=self.axis, keepdims=self.keepdims)
return y,
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('keepdims', [False, True])
@pytest.mark.parametrize('shape,axis', [
((), 1),
((), (1,)),
((2,), 2),
((2,), (2,)),
((2,), (-2,)),
((2, 3,), (-3,)),
((2, 3,), (-3, -4)),
((2, 3,), (0, 0)),
((2, 3,), (-1, -1)),
((2, 3,), (0, 1, 1)),
((2, 3,), (0, -2)),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('func', ['all', 'any'])
@pytest.mark.parametrize('is_module', [False, True])
def test_logical_reductions_invalid(func, is_module, xp, shape,
axis, keepdims, dtype, device):
a = array_utils.create_dummy_ndarray(xp, shape, dtype, device)
if is_module:
fn = getattr(xp, func)
fn(a, axis=axis, keepdims=keepdims)
else:
fn = getattr(a, func)
fn(axis=axis, keepdims=keepdims)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
    numpy.asarray(0), numpy.asarray(-1), numpy.asarray(10),
    numpy.asarray(float('inf')), numpy.asarray(-float('inf')),
    numpy.asarray(float('nan')),
    numpy.full((), 2), numpy.full((0,), 2), numpy.full((2, 3), 2),
])
def test_isnan(xp, device, input, dtype):
a = xp.array(input.astype(dtype))
return xp.isnan(a)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
    numpy.asarray(0), numpy.asarray(-1), numpy.asarray(10),
    numpy.asarray(float('inf')), numpy.asarray(-float('inf')),
    numpy.asarray(float('nan')),
    numpy.full((), 2), numpy.full((0,), 2), numpy.full((2, 3), 2),
])
def test_isinf(xp, device, input, dtype):
a = xp.array(input.astype(dtype))
return xp.isinf(a)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
    numpy.asarray(0), numpy.asarray(-1), numpy.asarray(10),
    numpy.asarray(float('inf')), numpy.asarray(-float('inf')),
    numpy.asarray(float('nan')),
    numpy.full((), 2), numpy.full((0,), 2), numpy.full((2, 3), 2),
])
def test_isfinite(xp, device, input, dtype):
a = xp.array(input.astype(dtype))
return xp.isfinite(a)
| 12,458
| 28.315294
| 79
|
py
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_normalization.py
|
import unittest
import chainer
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import op_utils
def _create_batch_norm_ndarray_args(
xp, device, x_shape, gamma_shape, beta_shape, mean_shape, var_shape,
float_dtype):
x = array_utils.create_dummy_ndarray(xp, x_shape, float_dtype)
# Non-contiguous gamma and beta is not supported by CUDA.
# TODO(hvy): Support non-contiguous gamma and beta with CUDA. Create a
# contiguous copy in the cuDNN wrapper.
pad_gamma_beta = device.backend.name != 'cuda'
gamma = array_utils.create_dummy_ndarray(
xp, gamma_shape, float_dtype, padding=pad_gamma_beta)
beta = array_utils.create_dummy_ndarray(
xp, beta_shape, float_dtype, padding=pad_gamma_beta)
# Non-contiguous running values which are updated in-place are not
# supported by CUDA, so we only pad for other devices.
pad_running = device.backend.name != 'cuda'
mean = array_utils.create_dummy_ndarray(
xp, mean_shape, float_dtype, padding=pad_running)
var = array_utils.create_dummy_ndarray(
xp, var_shape, float_dtype, padding=pad_running, start=0)
    # TODO(imanishi): Remove these after random testing is supported.
x /= x.size
gamma /= gamma.size
beta /= beta.size
mean /= mean.size
var /= var.size
return x, gamma, beta, mean, var
# Note that CUDA (cuDNN) only supports batch normalization with 4- or
# 5-dimensional data. Arrays with fewer dimensions are still handled by the
# CUDA backend, while arrays with more than five dimensions are not.
# x_shape,reduced_shape,axis
_batch_norm_params = [
((3, 2), (2,), None),
((5, 4, 3, 2), (4, 3, 2), None),
((5, 4, 3, 2), (4, 3, 2), (0,)),
((5, 4, 3, 2), (4,), (0, 2, 3)),
((5, 4, 3, 2, 2), (4, 3, 2, 2), None),
((5, 4, 3, 2, 2), (4, 3, 2, 2), (0,)),
((5, 4, 3, 2, 2), (4,), (0, 2, 3, 4))
]
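# Hedged sketch of how a lower-dimensional input can be made cuDNN-friendly
# (an illustration only, not necessarily how the backend implements it):
# pad trailing singleton axes up to 4 dimensions before normalizing.
#
#     x2d = numpy.ones((3, 2), numpy.float32)
#     x4d = x2d.reshape(x2d.shape + (1, 1))  # (3, 2, 1, 1)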
# x_shape,gamma_shape,beta_shape,mean_shape,var_shape,axis
_batch_norm_invalid_dimensions_params = [
# Bad reduction, axis defaults to (0,) but should be (0, 2, 3).
((2, 3, 4, 5), (3,), (3,), (3,), (3,), None),
# Bad reduction, axis is () but should be (0, 2, 3).
((2, 3, 4, 5), (3,), (3,), (3,), (3,), ()),
# Bad reduction, axis is (2, 3) but should be (0, 2, 3).
((2, 3, 4, 5), (3,), (3,), (3,), (3,), (2, 3)),
((2, 3, 4, 5), (3, 4), (3,), (3,), (3,), (0, 2, 3)), # Bad gamma shape.
((2, 3, 4, 5), (3,), (3, 4), (3,), (3,), (0, 2, 3)), # Bad beta shape.
((2, 3, 4, 5), (3,), (3,), (3, 4), (3,), (0, 2, 3)), # Bad mean shape.
((2, 3, 4, 5), (3,), (3,), (3,), (3, 4), (0, 2, 3)), # Bad var shape.
]
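# Hedged rule of thumb behind the invalid cases above: for input shape
# (2, 3, 4, 5) reduced over axis=(0, 2, 3), the parameter arrays (gamma,
# beta, mean, var) must keep exactly the non-reduced axes, i.e. shape (3,);
# any other shape raises chainerx.DimensionError.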
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest(
'x_shape,reduced_shape,axis', _batch_norm_params)
@chainer.testing.parameterize_pytest(
'x_dtype', chainerx.testing.float_dtypes)
@chainer.testing.parameterize_pytest(
'param_dtype', chainerx.testing.float_dtypes)
@chainer.testing.parameterize_pytest('eps', [2e-5, 5e-1])
@chainer.testing.parameterize_pytest('decay', [None, 0.5])
@chainer.testing.parameterize_pytest('contiguous', [None, 'C'])
class TestBatchNorm(op_utils.ChainerOpTest):
def setup(self):
reduced_shape = self.reduced_shape
x_dtype = self.x_dtype
param_dtype = self.param_dtype
eps = self.eps
decay = self.decay
axis = self.axis
contiguous = self.contiguous
# - Non-contiguous running values which are updated in-place are not
# supported by CUDA.
# - Non-contiguous gamma and beta is not supported by CUDA.
# TODO(hvy): Support non-contiguous gamma and beta with CUDA. Create a
# contiguous copy in the cuDNN wrapper.
if (chainerx.get_default_device().backend.name == 'cuda'
and contiguous is None):
raise unittest.SkipTest(
'batch_norm with CUDA currently has limited support for '
'non-contiguous inputs.')
        # BatchNorm is unstable in fp16 on both the native and CUDA backends.
# TODO(hvy): Fix backward and double backward for fp16.
if x_dtype == 'float16' and param_dtype == 'float16':
self.skip_backward_test = True
self.skip_double_backward_test = True
self.running_mean = numpy.random.uniform(
-1, 1, reduced_shape).astype(param_dtype)
self.running_var = numpy.random.uniform(
0.1, 1, reduced_shape).astype(param_dtype)
optional_args = {}
if eps is not None:
optional_args['eps'] = eps
if decay is not None:
optional_args['decay'] = decay
if axis is not None:
optional_args['axis'] = axis
self.optional_args = optional_args
# TODO(hvy): Fix forward, backward and double backward for fp16.
if x_dtype == 'float16' or param_dtype == 'float16':
self.check_forward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_double_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
else:
self.check_forward_options.update({
'rtol': 1e-6, 'atol': 1e-5})
self.check_backward_options.update({
'rtol': 5e-3, 'atol': 5e-4})
self.check_double_backward_options.update({
'rtol': 5e-2, 'atol': 5e-3})
# Running values that are recorded in forward for similarity checks.
self.running_mean_chx = None
self.running_var_chx = None
self.running_mean_ch = None
self.running_var_ch = None
def generate_inputs(self):
x_shape = self.x_shape
reduced_shape = self.reduced_shape
x_dtype = self.x_dtype
param_dtype = self.param_dtype
x = numpy.random.uniform(-1, 1, x_shape).astype(x_dtype)
gamma = numpy.random.uniform(0.5, 1, reduced_shape).astype(param_dtype)
beta = numpy.random.uniform(-1, 1, reduced_shape).astype(param_dtype)
return x, gamma, beta,
def forward_chainerx(self, inputs):
x, gamma, beta = inputs
running_mean = chainerx.array(self.running_mean, copy=True)
running_var = chainerx.array(self.running_var, copy=True)
y = chainerx.batch_norm(
x, gamma, beta, running_mean=running_mean, running_var=running_var,
**self.optional_args)
# Record running values for later checks.
self.running_mean_chx = running_mean
self.running_var_chx = running_var
return y,
def forward_chainer(self, inputs):
x, gamma, beta = inputs
running_mean = self.running_mean.copy()
running_var = self.running_var.copy()
y = chainer.functions.batch_normalization(
x, gamma, beta, running_mean=running_mean, running_var=running_var,
**self.optional_args)
# Record running values for later checks.
self.running_mean_ch = running_mean
self.running_var_ch = running_var
return y,
def check_forward_outputs(self, outputs, expected_outputs):
super().check_forward_outputs(outputs, expected_outputs)
# Check that running values are updated.
if (self.x_dtype == 'float16'
or self.param_dtype == 'float16'):
check_running_options = {'rtol': 1e-1, 'atol': 1e-1}
else:
check_running_options = {'rtol': 1e-6, 'atol': 1e-5}
chainerx.testing.assert_allclose(
self.running_mean_chx, self.running_mean_ch,
**check_running_options)
chainerx.testing.assert_allclose(
self.running_var_chx, self.running_var_ch, **check_running_options)
@pytest.mark.parametrize(
'x_shape,gamma_shape,beta_shape,running_mean_shape,running_var_shape,axis',
_batch_norm_invalid_dimensions_params)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_batch_norm_invalid_dimensions(
device, x_shape, gamma_shape, beta_shape, running_mean_shape,
running_var_shape, axis, float_dtype):
x, gamma, beta, running_mean, running_var = (
_create_batch_norm_ndarray_args(
chainerx, device, x_shape, gamma_shape, beta_shape,
running_mean_shape, running_var_shape, float_dtype))
with pytest.raises(chainerx.DimensionError):
chainerx.batch_norm(
x, gamma, beta, running_mean=running_mean, running_var=running_var,
eps=1e-2, decay=0.9, axis=axis)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest(
'x_shape,reduced_shape,axis', _batch_norm_params)
@chainer.testing.parameterize_pytest(
'x_dtype', chainerx.testing.float_dtypes)
@chainer.testing.parameterize_pytest(
'param_dtype', chainerx.testing.float_dtypes)
@chainer.testing.parameterize_pytest('eps', [None, 3e-5, 1.2])
@chainer.testing.parameterize_pytest('contiguous', [None, 'C'])
class TestFixedBatchNorm(op_utils.ChainerOpTest):
    # Backward and double backward for fixed_batch_norm are not supported yet.
skip_backward_test = True
skip_double_backward_test = True
def setup(self, float_dtype):
x_dtype = self.x_dtype
param_dtype = self.param_dtype
eps = self.eps
axis = self.axis
optional_args = {}
if eps is not None:
optional_args['eps'] = eps
if axis is not None:
optional_args['axis'] = axis
self.optional_args = optional_args
if x_dtype == 'float16' or param_dtype == 'float16':
self.check_forward_options.update({'rtol': 1e-1, 'atol': 1e-1})
else:
self.check_forward_options.update({'rtol': 1e-6, 'atol': 1e-5})
def generate_inputs(self):
x_shape = self.x_shape
reduced_shape = self.reduced_shape
x_dtype = self.x_dtype
param_dtype = self.param_dtype
x = numpy.random.uniform(-1, 1, x_shape).astype(x_dtype)
gamma = numpy.random.uniform(-1, 1, reduced_shape).astype(param_dtype)
beta = numpy.random.uniform(-1, 1, reduced_shape).astype(param_dtype)
mean = numpy.random.uniform(-1, 1, reduced_shape).astype(param_dtype)
var = numpy.random.uniform(0.1, 1, reduced_shape).astype(param_dtype)
return x, gamma, beta, mean, var
def forward_chainerx(self, inputs):
x, gamma, beta, mean, var = inputs
y = chainerx.fixed_batch_norm(
x, gamma, beta, mean=mean, var=var, **self.optional_args)
return y,
def forward_chainer(self, inputs):
x, gamma, beta, mean, var = inputs
y = chainer.functions.fixed_batch_normalization(
x, gamma, beta, mean=mean, var=var, **self.optional_args)
return y,
@pytest.mark.parametrize(
'x_shape,gamma_shape,beta_shape,mean_shape,var_shape,axis',
_batch_norm_invalid_dimensions_params)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_fixed_batch_norm_invalid_dimensions(
device, x_shape, gamma_shape, beta_shape, mean_shape, var_shape, axis,
float_dtype):
x, gamma, beta, mean, var = _create_batch_norm_ndarray_args(
chainerx, device, x_shape, gamma_shape, beta_shape, mean_shape,
var_shape, float_dtype)
with pytest.raises(chainerx.DimensionError):
chainerx.fixed_batch_norm(
x, gamma, beta, mean=mean, var=var, eps=1e-2, axis=axis)
| 11,574
| 36.95082
| 79
|
py
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_statistics.py
|
import chainer
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import math_utils
from chainerx_tests import op_utils
def test_max_amax():
assert chainerx.amax is chainerx.max
_minmax_params = [
# --- single axis
# input, axis
(numpy.asarray(0), None),
(numpy.asarray(-1), None),
(numpy.asarray(float('inf')), None),
(numpy.asarray(float('nan')), None),
(numpy.asarray(-float('inf')), None),
(numpy.asarray([4, 1, 4, 1]), None),
(numpy.asarray([4, 1, 4, 1]), 0),
(numpy.asarray([[4, 4, 1, 1], [4, 1, 4, 1]]), 0),
(numpy.asarray([[4, 4, 1, 1], [4, 1, 4, 1]]).T, 1),
(numpy.asarray([-0.0, +0.0, +0.0, -0.0]), None),
(numpy.asarray([[True, True, False, False],
[True, False, True, False]]), 0),
(numpy.ones((2, 3)), 1),
(numpy.ones((2, 3)), -2),
# --- multiple axes
# input, axis
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (0, 1)),
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (-2, -1)),
]
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape,axis': [
((), None),
((4,), None),
((4,), 0),
((4, 2), None),
((4, 2), 0),
((4, 2), 1),
((4, 2), -2),
((4, 3), (0, 1)),
((4, 3), (-2, -1)),
],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.all_dtypes)),
'is_module': [True, False],
}) +
chainer.testing.product({
'array,axis': _minmax_params,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.all_dtypes)),
'is_module': [True, False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestMax(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def generate_inputs(self):
in_dtype, = self.in_dtypes
if hasattr(self, 'array'):
return self.array.astype(in_dtype),
return array_utils.uniform(self.shape, in_dtype),
def func(self, xp, a):
if self.is_module:
return xp.max(a, self.axis)
else:
return a.max(self.axis)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('array,axis', [
(numpy.ones((2, 3)), 2),
(numpy.ones((2, 3)), -3),
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (1, 1)),
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (-3, 1)),
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (1, 2)),
])
@pytest.mark.parametrize('dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('is_module', [True, False])
def test_max_invalid_shapes_and_axis(device, array, axis, dtype, is_module):
a = chainerx.array(array).astype(dtype)
with pytest.raises(chainerx.DimensionError):
if is_module:
chainerx.max(a, axis)
else:
a.max(axis)
def test_min_amin():
assert chainerx.amin is chainerx.min
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape,axis': [
((), None),
((4,), None),
((4,), 0),
((4, 2), None),
((4, 2), 0),
((4, 2), 1),
((4, 2), -2),
((4, 3), (0, 1)),
((4, 3), (-2, -1)),
],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.all_dtypes)),
'is_module': [True, False],
}) +
chainer.testing.product({
'array,axis': _minmax_params,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.all_dtypes)),
'is_module': [True, False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestMin(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def generate_inputs(self):
in_dtype, = self.in_dtypes
if hasattr(self, 'array'):
return self.array.astype(in_dtype),
return array_utils.uniform(self.shape, in_dtype),
def func(self, xp, a):
if self.is_module:
return xp.min(a, self.axis)
else:
return a.min(self.axis)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('array,axis', [
(numpy.ones((2, 3)), 2),
(numpy.ones((2, 3)), -3),
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (1, 1)),
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (-3, 1)),
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (1, 2)),
])
@pytest.mark.parametrize('dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('is_module', [True, False])
def test_min_invalid_shapes_and_axis(device, array, axis, dtype, is_module):
a = chainerx.array(array).astype(dtype)
with pytest.raises(chainerx.DimensionError):
if is_module:
chainerx.min(a, axis)
else:
a.min(axis)
_mean_var_params = \
chainer.testing.product({
'shape,axis': [
((), None),
(1, 0),
((2, 1, 3), (1, 2)),
((1, 1, 1), (0, 1, 2)),
((2, 3), None),
((1, 2, 3), (0, 2)),
((2, 2, 2, 2), (2, 1, 0)),
((1, 1, 1), (-1))],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': ['random'],
'contiguous': [None, 'C'],
}) + chainer.testing.product({
'shape,axis': [((2, 3), None)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [1.57, 2, 3.14, float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
_mean_var_params
))
class TestMean(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.mean(a, self.axis)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
_mean_var_params
))
class TestVar(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.var(a, self.axis)
def apply_func(is_module, func, xp, device, input, axis, dtypes):
(in_dtype,), out_dtype = dtypes
try:
a_np = input.astype(in_dtype)
except (ValueError, OverflowError):
return xp.zeros(()) # invalid combination of data and dtype
a = xp.array(a_np)
a = func(is_module, xp, a, axis)
if xp is numpy:
a = dtype_utils.cast_if_numpy_array(xp, a, out_dtype)
return a
def compute_mean(is_module, xp, a, axis):
return xp.mean(a, axis) if is_module else a.mean(axis)
def compute_var(is_module, xp, a, axis):
return xp.var(a, axis) if is_module else a.var(axis)
@chainerx.testing.numpy_chainerx_array_equal(strides_check=False)
@pytest.mark.parametrize('input,axis', [
# --- single axis
# input, axis
# valid params
(numpy.asarray(0), None),
(numpy.asarray(-1), None),
(numpy.asarray(float('inf')), None),
(numpy.asarray(float('nan')), None),
(numpy.asarray(-float('inf')), None),
(numpy.asarray([4, 1, 4, 1]), None),
(numpy.asarray([4, 1, 4, 1]), 0),
(numpy.asarray([[4, 4, 1, 1], [4, 1, 4, 1]]), 0),
(numpy.asarray([[4, 4, 1, 1], [4, 1, 4, 1]]).T, 1),
(numpy.asarray([-0.0, +0.0, +0.0, -0.0]), None),
(numpy.asarray([[True, True, False, False],
[True, False, True, False]]), 0),
(numpy.ones((2, 0, 3)), 2),
(numpy.ones((2, 3)), 1),
(numpy.ones((2, 3)), -2),
# --- multiple axes
# input, axis
# valid params
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (0, 1)),
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (-2, -1)),
])
@pytest.mark.parametrize('dtypes', math_utils.in_out_dtypes_math_functions)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('func', [
compute_mean,
compute_var,
])
# TODO(kshitij12345): Remove strides_check=False
def test_valid_stats(is_module, func, xp, device, input, axis, dtypes):
return apply_func(is_module, func, xp, device, input, axis, dtypes)
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(IndexError, ValueError, chainerx.DimensionError),
strides_check=False)
@pytest.mark.parametrize('input,axis', [
# --- single axis
# input, axis
# invalid params
(numpy.ones((0,)), None),
(numpy.ones((2, 0, 3)), 1),
(numpy.ones((2, 0, 3)), None),
(numpy.ones((2, 3)), 2),
(numpy.ones((2, 3)), -3),
# --- multiple axes
# input, axis
# invalid params
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (1, 1)),
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (-3, 1)),
(numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (1, 2)),
])
@pytest.mark.parametrize('dtypes', math_utils.in_out_dtypes_math_functions)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('func', [
compute_mean,
compute_var,
])
# TODO(kshitij12345): Remove strides_check=False
def test_invalid_stats(is_module, func, xp, device, input, axis, dtypes):
return apply_func(is_module, func, xp, device, input, axis, dtypes)
| 9,759 | 30.895425 | 77 | py |
chainer | chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_evaluation.py |
import chainer
from chainer import functions as F
import numpy
import pytest
import chainerx
from chainerx_tests import array_utils
from chainerx_tests import op_utils
_in_out_eval_dtypes = [
(('float16', 'int16')),
(('float32', 'int32')),
(('float64', 'int64')),
(('float32', 'int16')),
(('float64', 'int16')),
(('float64', 'int32')),
]
_accuracy_params = [
((10, 1), (10,)),
((5, 1), (5,)),
((10, 3), (10,)),
((10, 3, 1), (10,)),
((10, 3, 1, 1), (10,)),
((10, 3, 5), (10, 5)),
((10, 3, 5, 4), (10, 5, 4)),
((10, 3, 5, 4, 1), (10, 5, 4)),
((10, 3, 5, 4, 1, 1), (10, 5, 4)),
]
_invalid_accuracy_dtypes = [
(('int16', 'float16')),
(('int32', 'int32')),
(('float32', 'float32')),
(('float64', 'float64')),
(('int64', 'float64')),
]
_invalid_accuracy_shapes = [
((10, 1), (5,)),
((5, 3), (10, 3)),
]
class EvalBase(op_utils.ChainerOpTest):
def generate_inputs(self):
y_dtype, t_dtype = self.in_dtypes
y = numpy.random.uniform(-1, 1, self.y_shape).astype(y_dtype)
targ = numpy.random.randint(
3, size=self.t_shape).astype(t_dtype)
return y, targ
def forward_chainerx(self, inputs):
return self.forward_xp(inputs, chainerx)
def forward_chainer(self, inputs):
return self.forward_xp(inputs, F)
def forward_xp(self, inputs, xp):
raise NotImplementedError(
'Op test implementation must override `forward_xp`.')
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'y_shape,t_shape', _accuracy_params),
chainer.testing.from_pytest_parameterize(
'in_dtypes', _in_out_eval_dtypes),
chainer.testing.from_pytest_parameterize(
'ignore_label', [None, 0])
])
))
class TestAccuracy(EvalBase):
skip_backward_test = True
skip_double_backward_test = True
def setup(self):
super().setup()
dtype1, dtype2 = self.in_dtypes
if dtype1 == 'float16' or dtype2 == 'float16':
self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
def generate_inputs(self):
y, t = super().generate_inputs()
# TODO(aksub99): Improve tests for the case
# where all labels are ignored.
if y.shape == (10, 1) or y.shape == (5, 1):
self.ignore_label = 0
t.fill(self.ignore_label)
return y, t
def forward_xp(self, inputs, xp):
y, t = inputs
out = xp.accuracy(y, t, self.ignore_label)
return out,
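# A minimal standalone illustration (ad-hoc values, not part of the
# parametrized suite): accuracy takes the argmax of y over axis 1 and
# compares it with the integer labels.
def _example_accuracy():
    y = chainerx.array(numpy.asarray(
        [[0.1, 0.9], [0.8, 0.2]], numpy.float32))
    t = chainerx.array(numpy.asarray([1, 0], numpy.int32))
    acc = chainerx.accuracy(y, t, ignore_label=None)
    assert float(chainerx.to_numpy(acc)) == 1.0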
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('y_shape,t_shape', _accuracy_params)
@pytest.mark.parametrize('in_dtypes', _invalid_accuracy_dtypes)
@pytest.mark.parametrize('ignore_label', [None, 0])
def test_accuracy_invalid_dtype(device, y_shape,
t_shape, ignore_label, in_dtypes):
dtype1, dtype2 = in_dtypes
y = array_utils.create_dummy_ndarray(chainerx, y_shape, dtype1)
t = array_utils.create_dummy_ndarray(chainerx, t_shape, dtype2)
with pytest.raises(chainerx.DtypeError):
chainerx.accuracy(y, t, ignore_label=ignore_label)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('y_shape,t_shape', _invalid_accuracy_shapes)
@pytest.mark.parametrize('in_dtypes', _in_out_eval_dtypes)
@pytest.mark.parametrize('ignore_label', [None, 0])
def test_accuracy_invalid_shape(device, y_shape,
t_shape, ignore_label, in_dtypes):
dtype1, dtype2 = in_dtypes
y = array_utils.create_dummy_ndarray(chainerx, y_shape, dtype1)
t = array_utils.create_dummy_ndarray(chainerx, t_shape, dtype2)
with pytest.raises(chainerx.DimensionError):
chainerx.accuracy(y, t, ignore_label=ignore_label)
| 3,915 | 28.89313 | 75 | py |
chainer | chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_loss.py |
import numpy
import chainer
from chainer import functions as F
import chainerx
from chainerx_tests import dtype_utils
from chainerx_tests import op_utils
_loss_shapes = [
(2, 2),
(3, 3, 3),
(5, 5, 5),
(4, 1, 2, 4),
]
_in_out_loss_dtypes = dtype_utils._permutate_dtype_mapping([
(('float16', 'float16'), 'float16'),
(('float32', 'float32'), 'float32'),
(('float64', 'float64'), 'float64'),
(('float32', 'float16'), 'float32'),
(('float64', 'float16'), 'float64'),
(('float64', 'float32'), 'float64'),
])
class LossBase(op_utils.ChainerOpTest):
def setup(self):
super().setup()
in_dtype1, in_dtype2 = self.in_dtypes
if in_dtype1 == 'float16' or in_dtype2 == 'float16':
self.check_forward_options.update({'rtol': 5e-3, 'atol': 5e-3})
self.check_backward_options.update({'rtol': 1e-2, 'atol': 5e-3})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 3e-1})
def generate_inputs(self):
in_dtype1, in_dtype2 = self.in_dtypes
y = numpy.random.normal(loc=0, scale=1.0, size=self.shape)
targ = numpy.random.normal(loc=0, scale=1.0, size=self.shape) + \
numpy.random.normal(loc=0, scale=0.5, size=self.shape)
return y.astype(in_dtype1), targ.astype(in_dtype2)
def forward_chainerx(self, inputs):
out, = self.forward_xp(inputs, chainerx)
return out,
def forward_chainer(self, inputs):
dtype = numpy.result_type(*inputs)
inputs = [x.astype(dtype) for x in inputs]
output, = self.forward_xp(inputs, F)
output.array = output.array.astype(self.out_dtype)
return output,
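    # Note: Chainer's loss functions require both inputs to share a dtype,
    # while ChainerX promotes mixed float dtypes internally.  The reference
    # path above therefore promotes the inputs with numpy.result_type and
    # casts the result back to out_dtype so the two paths stay comparable.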
def forward_xp(self, inputs, xp):
raise NotImplementedError(
'Op test implementation must override `forward_xp`.')
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': _loss_shapes,
'in_dtypes,out_dtype': _in_out_loss_dtypes,
})
))
class TestSquaredError(LossBase):
def forward_xp(self, inputs, xp):
x1, x2 = inputs
return xp.squared_error(x1, x2),
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': _loss_shapes,
'in_dtypes,out_dtype': _in_out_loss_dtypes,
})
))
class TestAbsoluteError(LossBase):
    # The absolute error is non-differentiable at zero.
dodge_nondifferentiable = True
def forward_xp(self, inputs, xp):
x1, x2 = inputs
return xp.absolute_error(x1, x2),
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': _loss_shapes,
'in_dtypes,out_dtype': _in_out_loss_dtypes,
})
))
class TestGaussianKLDivergence(LossBase):
def forward_xp(self, inputs, xp):
mean, ln_var = inputs
if xp is chainerx:
out = xp.gaussian_kl_divergence(mean, ln_var)
else:
out = xp.gaussian_kl_divergence(mean, ln_var, reduce='no')
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': _loss_shapes,
'in_dtypes,out_dtype': _in_out_loss_dtypes,
'delta': [0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5],
})
))
class TestHuberLoss(LossBase):
def generate_inputs(self):
x, t = super().generate_inputs()
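        # Keep |x - t| away from delta: the Huber loss switches from
        # quadratic to linear there, and gradient checks near the kink are
        # numerically unreliable.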
mask = numpy.abs(numpy.abs(x - t) - self.delta) > 1e-3
return x * mask, t * mask
def forward_xp(self, inputs, xp):
x, t = inputs
if xp is chainerx:
out = xp.huber_loss(x, t, self.delta)
else:
out = xp.huber_loss(x, t, self.delta, reduce='no')
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': _loss_shapes,
'x_dtype': chainerx.testing.float_dtypes,
't_dtype': ['int8', 'int16', 'int32', 'int64'],
})
))
class TestSigmoidCrossEntropy(op_utils.ChainerOpTest):
def setup(self):
if self.x_dtype == 'float16':
self.check_forward_options.update({'rtol': 5e-3, 'atol': 5e-3})
self.check_backward_options.update({'rtol': 1e-2, 'atol': 5e-3})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 3e-1})
def generate_inputs(self):
x = numpy.random.normal(loc=0, scale=1.0, size=self.shape)
targ = numpy.random.normal(loc=0, scale=1.0, size=self.shape) + \
numpy.random.normal(loc=0, scale=0.5, size=self.shape)
self.t = targ.astype(self.t_dtype)
return x.astype(self.x_dtype),
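    # t is stored on self rather than returned from generate_inputs so that
    # the gradient checks do not treat the integer targets as a
    # differentiable input.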
def forward_chainerx(self, inputs):
x, = inputs
# TODO(aksub99): Improve implementation to avoid non-differentiability
# wrt targets
t = self.backend_config.get_array(self.t)
out = chainerx.sigmoid_cross_entropy(x, t)
return out,
def forward_chainer(self, inputs):
x, = inputs
t = self.t
out = F.sigmoid_cross_entropy(x, t, normalize=False, reduce='no')
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'x_dtype': chainerx.testing.float_dtypes,
't_dtype': chainerx.testing.signed_integral_dtypes,
})
))
class TestSoftmaxCrossEntropy(op_utils.ChainerOpTest):
def setup(self):
self.shape = (2, 2)
        t_shape = (self.shape[0],)
t = numpy.random.randint(0, self.shape[1], t_shape)
self.t = t.astype(self.t_dtype)
if self.x_dtype == 'float16':
self.check_forward_options.update({'rtol': 5e-3, 'atol': 5e-3})
self.check_backward_options.update({'rtol': 1e-2, 'atol': 5e-3})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 3e-1})
def generate_inputs(self):
x = numpy.random.normal(loc=0, scale=1.0, size=self.shape)
return x.astype(self.x_dtype),
def forward_chainerx(self, inputs):
x, = inputs
t = self.backend_config.get_array(self.t)
out = chainerx.softmax_cross_entropy(x, t)
return out,
def forward_chainer(self, inputs):
x, = inputs
out = F.softmax_cross_entropy(x, self.t, reduce='no')
return out,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(2, 2), (3, 5), (7, 1)],
'x_dtype': chainerx.testing.float_dtypes,
't_dtype': ['int8', 'int16', 'int32', 'int64'],
'norm_float,norm_str': [(1.0, 'L1'), (2.0, 'L2')],
})
))
class TestHinge(op_utils.ChainerOpTest):
dodge_nondifferentiable = True
def setup(self):
if self.x_dtype == 'float16':
self.check_forward_options.update({'rtol': 5e-3, 'atol': 5e-3})
self.check_backward_options.update({'rtol': 1e-2, 'atol': 5e-3})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 3e-1})
def generate_inputs(self):
n, k = self.shape
x = numpy.random.normal(loc=0, scale=1.0, size=self.shape)
self.t = numpy.random.randint(k, size=n).astype(self.t_dtype)
return x.astype(self.x_dtype),
def forward_chainerx(self, inputs):
x, = inputs
t = self.backend_config.get_array(self.t)
norm = self.norm_float
out = chainerx.hinge(x, t, norm=norm)
return out,
def forward_chainer(self, inputs):
x, = inputs
t = self.t
norm = self.norm_str
out = F.hinge(x, t, norm=norm, reduce='no')
return out,
| 7,767 | 29.825397 | 78 | py |
chainer | chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_rounding.py |
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import dtype_utils
from chainerx_tests import math_utils
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
numpy.asarray(0.5),
numpy.asarray(-1.2),
numpy.asarray(10.9),
numpy.asarray(float('inf')),
numpy.asarray(-float('inf')),
numpy.asarray(float('nan')),
numpy.full((), 2.1),
numpy.full((0,), 2),
numpy.full((2, 3), 2.6),
numpy.full((1, 1), 1.01),
numpy.full((1, 1), 1.99),
])
@pytest.mark.parametrize('dtypes', math_utils.in_out_dtypes_math_functions)
@pytest.mark.parametrize('func', [
lambda xp, a: xp.ceil(a),
lambda xp, a: xp.floor(a)
])
def test_rounding_routines(func, xp, device, input, dtypes):
(in_dtype, ), out_dtype = dtypes
a = xp.array(input.astype(in_dtype))
a = func(xp, a)
a = dtype_utils.cast_if_numpy_array(xp, a, out_dtype)
return a
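# Quick standalone illustration (ad-hoc values, not collected by pytest):
# ceil and floor act elementwise and follow numpy semantics.
def _example_rounding():
    a = chainerx.array(numpy.asarray([0.5, -1.2, 10.9], numpy.float32))
    numpy.testing.assert_array_equal(
        chainerx.to_numpy(chainerx.ceil(a)), [1., -1., 11.])
    numpy.testing.assert_array_equal(
        chainerx.to_numpy(chainerx.floor(a)), [0., -2., 10.])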
| 1,007 | 26.243243 | 75 | py |
chainer | chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_reduction.py |
import chainer
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import math_utils
from chainerx_tests import op_utils
_logsumexp_params = [
((2,), 0),
((2,), -1),
((2, 3), None),
((2, 3), 0),
((2, 3), 1),
((2, 3), -2),
((2, 3), (0, 1)),
((2, 3), (-2, 1)),
((1, 2, 3), None),
((1, 2, 3), (1)),
((1, 2, 3), (1, 0)),
((1, 2, 3), (0, 1, 2)),
]
_invalid_logsumexp_params = [
# Axis out of bounds
((2,), 1),
((2,), -2),
((2,), (0, 1)),
((2, 3), (0, 1, 2)),
# Duplicate axes
((2,), (0, 0)),
((2, 3), (0, 0)),
]
_cumsum_params = [
((1,), 0),
((2, 3, 4), 0),
((2, 3, 4), 1),
((2, 3, 4), 2),
((2, 3, 4), -3),
((2, 3, 4), -2),
((2, 3, 4), -1),
((2, 3, 4), None),
((100000, 2), None),
((100000, 2), 0),
((100000, 2), 1),
]
_sum_params = [
((), None),
((), ()),
((2,), None),
((2,), ()),
((2,), 0),
((2,), (0,)),
((2,), (-1,)),
((2, 3), None),
((2, 3), ()),
((2, 3), 0),
((2, 3), (0,)),
((2, 3), (1,)),
((2, 3), (-1,)),
((2, 3), (-2,)),
((2, 3), (0, 1)),
((2, 3), (-2, -1)),
((1, 3), None), # sum over 1-dim axis
((0, 3), None), # sum over 0-dim axis
# Sum over axes that are in the middle or apart
((2, 3, 4), (1,)),
((2, 3, 4), (0, 2)),
# Sum over axes that are apart and/or unsorted
((2, 3), (1, 0)),
((2, 3, 4), (2, 0)),
((2, 3, 4), (2, 0, 1)),
((2, 3, 4), (-2, 2, 0)),
]
_in_out_dtypes_sum = [
(('bool_',), 'int64'),
(('int8',), 'int64'),
(('int16',), 'int64'),
(('int32',), 'int64'),
(('int64',), 'int64'),
(('float16',), 'float16'),
(('float32',), 'float32'),
(('float64',), 'float64'),
# TODO(niboshi): Unsigned integer dtypes should result in uint64.
# Currently chainerx returns int64.
(('uint8',), 'int64'),
]
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest(
'in_dtypes,out_dtype', _in_out_dtypes_sum)
@chainer.testing.parameterize_pytest('shape,axis', _sum_params)
@chainer.testing.parameterize_pytest('keepdims', [True, False])
@chainer.testing.parameterize_pytest('is_module', [True, False])
class TestSum(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
input = 'random'
def setup(self):
super().setup()
in_dtype, = self.in_dtypes
if in_dtype == 'float16':
self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 1e-2})
def func(self, xp, a):
if self.is_module:
return xp.sum(a, axis=self.axis, keepdims=self.keepdims)
else:
return a.sum(axis=self.axis, keepdims=self.keepdims)
@op_utils.op_test(['native:0'])
class TestSumStability(op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
return numpy.full(2 ** 20, 0.1, dtype=numpy.float32),
def forward_xp(self, inputs, xp):
x, = inputs
if xp is chainerx:
return x.sum(),
else:
return (x[0] * x.size).astype(x.dtype),
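# The comparison above is against the exact value x[0] * x.size: a naive
# left-to-right float32 accumulation of 2**20 values of 0.1 drifts well away
# from it, so passing this test requires a numerically stable (e.g. pairwise)
# reduction in chainerx's sum.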
@op_utils.op_test(['native:0'])
@chainer.testing.parameterize_pytest('size', list(range(1024)))
class TestSumEachSize(op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
return numpy.arange(self.size, dtype=numpy.int32) + 1,
def forward_xp(self, inputs, xp):
x, = inputs
return x.sum(),
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('keepdims', [False, True])
@pytest.mark.parametrize('shape,axis', [
# ((), 0), # TODO(sonots): Fix compatibility
((), 1),
((), (1,)),
((2,), 2),
((2,), (2,)),
((2,), (-2,)),
((2, 3,), (-3,)),
((2, 3,), (-3, -4)),
((2, 3,), (0, 0)),
((2, 3,), (-1, -1)),
((2, 3,), (0, 1, 1)),
((2, 3,), (0, -2)),
])
def test_sum_invalid(is_module, xp, shape, axis, keepdims, dtype):
a = array_utils.create_dummy_ndarray(xp, shape, dtype)
if is_module:
xp.sum(a, axis=axis, keepdims=keepdims)
else:
a.sum(axis=axis, keepdims=keepdims)
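# No explicit pytest.raises here: the accept_error argument of
# numpy_chainerx_array_equal asserts that numpy and chainerx fail
# consistently with one of the listed exceptions for each parameter set.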
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,axis', _logsumexp_params)
@chainer.testing.parameterize_pytest(
'in_dtypes,out_dtype', math_utils.in_out_dtypes_math_functions)
class TestSoftmax(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
input = 'random'
def setup(self):
super().setup()
self.check_forward_options.update({'rtol': 3e-3, 'atol': 3e-3})
self.check_backward_options.update({'rtol': 3e-3, 'atol': 3e-3})
def forward_xp(self, inputs, xp):
x, = inputs
axis = self.axis
if xp is chainerx:
return chainerx.softmax(x, axis=axis),
x = x.astype(self.out_dtype)
axis = axis if axis is not None else 1
return numpy.exp(x) / (numpy.exp(x).sum(axis=axis, keepdims=True)),
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest(
'in_dtypes,out_dtype', math_utils.in_out_dtypes_math_functions)
@chainer.testing.parameterize_pytest('shape,axis', _logsumexp_params)
@chainer.testing.parameterize_pytest('keepdims', [True, False])
class TestLogSumExp(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
input = 'random'
def setup(self):
super().setup()
in_dtype, = self.in_dtypes
if in_dtype == 'float16':
# TODO(imanishi): Support device implementation and remove this.
self.check_forward_options.update({'rtol': 3e-3, 'atol': 3e-3})
def forward_xp(self, inputs, xp):
x, = inputs
axis = self.axis
keepdims = self.keepdims
if xp is chainerx:
return chainerx.logsumexp(x, axis=axis, keepdims=keepdims),
x = x.astype(self.out_dtype)
return numpy.log(numpy.exp(x).sum(axis=axis, keepdims=keepdims)),
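# Note: the numpy reference above computes log(sum(exp(x))) directly, which
# can overflow for large inputs; chainerx.logsumexp is expected to apply the
# usual max-subtraction trick (see the TODO on the invalid-params test below).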
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('a_shape,axis', _invalid_logsumexp_params)
@pytest.mark.parametrize('keepdims', [True, False])
# TODO(hvy): Should not overflow for large numbers, add tests
def test_logsumexp_invalid(device, a_shape, axis, keepdims, dtype):
a = array_utils.create_dummy_ndarray(chainerx, a_shape, dtype)
with pytest.raises(chainerx.DimensionError):
chainerx.logsumexp(a, axis=axis, keepdims=keepdims)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,axis', _logsumexp_params)
@chainer.testing.parameterize_pytest(
'in_dtypes,out_dtype', math_utils.in_out_dtypes_math_functions)
class TestLogSoftmax(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
input = 'random'
def setup(self):
super().setup()
self.check_forward_options.update({'rtol': 3e-3, 'atol': 3e-3})
self.check_backward_options.update({'rtol': 3e-3, 'atol': 3e-3})
def forward_xp(self, inputs, xp):
x, = inputs
axis = self.axis
if xp is chainerx:
return chainerx.log_softmax(x, axis=axis),
x = x.astype(self.out_dtype)
axis = axis if axis is not None else 1
return x - numpy.log(numpy.exp(x).sum(axis=axis, keepdims=True)),
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('a_shape,axis', _invalid_logsumexp_params)
def test_log_softmax_invalid(device, a_shape, axis, dtype):
a = array_utils.create_dummy_ndarray(chainerx, a_shape, dtype)
with pytest.raises(chainerx.DimensionError):
return chainerx.log_softmax(a, axis=axis)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest(
'in_dtypes,out_dtype', _in_out_dtypes_sum)
@chainer.testing.parameterize_pytest('shape,axis', _cumsum_params)
class TestCumsum(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
input = 'random'
def setup(self):
super().setup()
in_dtype, = self.in_dtypes
if in_dtype == 'float16':
self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 1e-2})
        if (in_dtype in ('float16', 'float32')
                and numpy.prod(self.shape) > 1000):
pytest.skip('Skip large tests for float16/float32 dtypes')
def func(self, xp, a):
return xp.cumsum(a, axis=self.axis)
@op_utils.op_test(['native:0'])
@chainer.testing.parameterize_pytest(
'in_dtypes,out_dtype', math_utils.in_out_float_dtypes_math_functions)
@chainer.testing.parameterize_pytest('shape,axis', _sum_params)
@chainer.testing.parameterize_pytest('keepdims', [True, False])
class TestNansum(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
input = 'random'
def setup(self):
super().setup()
in_dtype, = self.in_dtypes
if in_dtype == 'float16':
self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 1e-2})
def generate_inputs(self):
shape = self.shape
a, = super().generate_inputs()
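        # Scatter NaNs at a random number of random positions; nansum must
        # match numpy.nansum, which treats them as zeros.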
indices = numpy.asarray([i for i in numpy.ndindex(shape)])
numpy.random.shuffle(indices)
if len(indices) == 0:
n_nans = 0
else:
n_nans = numpy.random.randint(len(indices))
if n_nans == 0:
return a,
nan_indices = indices[:n_nans]
for i in nan_indices:
a[tuple(i)] = numpy.nan
return a,
def func(self, xp, a):
return xp.nansum(a, axis=self.axis, keepdims=self.keepdims)
| 10,196 | 29.348214 | 76 | py |
chainer | chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_arithmetic.py |
import chainer
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import math_utils
from chainerx_tests import op_utils
def test_mod_remainder():
assert chainerx.remainder is chainerx.mod
_in_out_dtypes_arithmetic_invalid = [
(('bool_', 'bool_'), 'bool_'),
(('bool_', 'int8'), 'int8'),
(('bool_', 'int16'), 'int16'),
(('bool_', 'int32'), 'int32'),
(('bool_', 'int64'), 'int64'),
(('bool_', 'uint8'), 'uint8'),
(('bool_', 'float16'), 'float16'),
(('bool_', 'float32'), 'float32'),
(('bool_', 'float64'), 'float64'),
(('int8', 'bool_'), 'int8'),
(('int16', 'bool_'), 'int16'),
(('int32', 'bool_'), 'int32'),
(('int64', 'bool_'), 'int64'),
(('uint8', 'bool_'), 'uint8'),
(('float16', 'bool_'), 'float16'),
(('float32', 'bool_'), 'float32'),
(('float64', 'bool_'), 'float64'),
]
_in_out_dtypes_arithmetic = [
dtypes for dtypes in dtype_utils.result_dtypes_two_arrays
if dtypes not in _in_out_dtypes_arithmetic_invalid
]
_in_out_dtypes_inplace_arithmetic_invalid = [
((t1, t2), t3) for (t1, t2), t3 in _in_out_dtypes_arithmetic
if (numpy.dtype(t1).kind != 'f' and numpy.dtype(t2).kind == 'f')
] + _in_out_dtypes_arithmetic_invalid
_in_out_dtypes_inplace_arithmetic = [
dtypes for dtypes in dtype_utils.result_dtypes_two_arrays
if dtypes not in _in_out_dtypes_inplace_arithmetic_invalid
]
_in_out_dtypes_array_int_scalar = [
# Int scalar.
(('int8',), int, 'int8'),
(('int16',), int, 'int16'),
(('int32',), int, 'int32'),
(('int64',), int, 'int64'),
(('uint8',), int, 'uint8'),
(('float16',), int, 'float16'),
(('float32',), int, 'float32'),
(('float64',), int, 'float64'),
(('int16',), numpy.int16, 'int16'),
(('uint8',), numpy.int8, 'uint8'),
(('float64',), numpy.int8, 'float64'),
(('float16',), numpy.int64, 'float16'),
]
_in_out_dtypes_int_array_float_scalar = [
# Int arrays and float scalars.
(('int8',), float, 'float32'),
(('int16',), float, 'float32'),
(('int32',), float, 'float32'),
(('int64',), float, 'float32'),
(('uint8',), float, 'float32'),
(('int8',), numpy.float32, 'float32'),
(('int64',), numpy.float16, 'float32'),
(('uint8',), numpy.float64, 'float32'),
]
_in_out_dtypes_float_array_float_scalar = [
    # Float arrays and float scalars.
    (('float16',), float, 'float16'),
    (('float32',), float, 'float32'),
    (('float64',), float, 'float64'),
    (('float16',), numpy.float64, 'float16'),
    (('float64',), numpy.float16, 'float64'),
]
_in_out_dtypes_arithmetic_scalar = (
_in_out_dtypes_array_int_scalar
+ _in_out_dtypes_int_array_float_scalar
+ _in_out_dtypes_float_array_float_scalar)
_in_out_dtypes_inplace_arithmetic_scalar = (
_in_out_dtypes_array_int_scalar
+ _in_out_dtypes_float_array_float_scalar)
_in_out_dtypes_float_arithmetic_scalar = (
_in_out_dtypes_int_array_float_scalar
+ _in_out_dtypes_float_array_float_scalar)
_in_out_dtypes_inplace_float_arithmetic_scalar = (
_in_out_dtypes_float_array_float_scalar)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.numeric_dtypes)),
'input': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.numeric_dtypes)),
'input': ['random'],
'is_module': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.float_dtypes)),
'input': [float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestNegative(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
if self.is_module:
return xp.negative(a)
else:
return -a
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(chainerx.DtypeError, TypeError))
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_negative_invalid_bool(xp, device, is_module):
x = xp.array([True, False], dtype='bool_')
if is_module:
xp.negative(x)
else:
-x
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestAdd(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
if self.is_module:
return xp.add(a, b)
else:
return a + b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_add_invalid_dtypes(device, dtypes, is_module):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
        if is_module:
            chainerx.add(a, b)
        else:
            a + b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_inplace_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_inplace_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestIAdd(math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
a += b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_inplace_arithmetic_invalid)
def test_iadd_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
a += b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [False],
})
# Type combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestAddScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
return a + scalar
else:
return scalar + a
else:
if self.is_scalar_rhs:
return xp.add(a, scalar)
else:
return xp.add(scalar, a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Dtype combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
})
))
class TestIAddScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a += scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSub(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
if self.is_module:
return xp.subtract(a, b)
else:
return a - b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_sub_invalid_dtypes(device, dtypes, is_module):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
        if is_module:
            chainerx.subtract(a, b)
        else:
            a - b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_inplace_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_inplace_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestISub(math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
a -= b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_inplace_arithmetic_invalid)
def test_isub_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
a -= b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [False],
})
# Type combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSubScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
return a - scalar
else:
return scalar - a
else:
if self.is_scalar_rhs:
return xp.subtract(a, scalar)
else:
return xp.subtract(scalar, a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Dtype combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
})
))
class TestISubScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a -= scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.all_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': dtype_utils.result_dtypes_two_arrays,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.all_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestMul(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
if self.is_module:
return xp.multiply(a, b)
else:
return a * b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_inplace_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.all_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_inplace_arithmetic + [
((t, 'bool_'), t) for t in chainerx.testing.all_dtypes
],
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestIMul(math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
a *= b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [False],
})
# Type combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar + [
((t,), bool, t) for t in chainerx.testing.all_dtypes
],
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestMulScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
return a * scalar
else:
return scalar * a
else:
if self.is_scalar_rhs:
return xp.multiply(a, scalar)
else:
return xp.multiply(scalar, a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Dtype combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': (
_in_out_dtypes_inplace_arithmetic_scalar + [
((t,), bool, t) for t in chainerx.testing.all_dtypes
]),
'input': ['random'],
'scalar_value': [1],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
})
))
class TestIMulScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a *= scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*chainer.testing.product({
'lhs,rhs': [
([], []),
([0, 1, 2, 3, 100, 101, 102, 103], [3] * 8),
([-0, -1, -2, -3, -4, -100, -101, -102, -103], [3] * 9),
([0, 1, 2, 3, 100, 101, 102, 103], [-3] * 8),
([-0, -1, -2, -3, -4, -100, -101, -102, -103], [-3] * 9),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], [1.2] * 8),
([-0., -0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4],
[1.2] * 9),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], [-1.2] * 8),
([-0., -0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4],
[-1.2] * 9),
],
'in_dtypes,out_dtype': _in_out_dtypes_arithmetic,
'is_module': [True, False],
}))
class TestFloorDivide(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
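    # floor_divide is piecewise constant in its inputs, so its gradient is
    # zero almost everywhere and gradient checks are skipped.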
def generate_inputs(self):
in_dtype1, in_dtype2 = self.in_dtypes
a = numpy.array(self.lhs).astype(in_dtype1)
b = numpy.array(self.rhs).astype(in_dtype2)
return a, b
def func(self, xp, a, b):
if self.is_module:
return xp.floor_divide(a, b)
else:
return a // b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(chainer.testing.product_dict(
chainer.testing.product({
'array': [
([]),
([0, 1, 2, 3, 100, 101, 102, 103]),
([-0, -1, -2, -3, -4, -100, -101, -102, -103]),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4]),
([-0., -0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4]),
([-0.61, -0.6, -0.59, 0.59, 0.6, 0.61]),
],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
}),
chainer.testing.product({
'scalar_value': [-3, 3, -1.2, 1.2, 0],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
})
# Special values
+ chainer.testing.product({
'scalar_value': [float('inf'), -float('inf'), float('nan')],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_float_arithmetic_scalar,
})
)))
class TestFloorDivideScalar(
math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def setup(self):
super().setup()
in_dtype, = self.in_dtypes
# TODO(imanishi): Remove this.
if in_dtype == 'uint8' and self.scalar_value < 0:
self.skip_forward_test = True
def generate_inputs(self):
in_dtype, = self.in_dtypes
a = numpy.array(self.array).astype(in_dtype)
return a,
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
return xp.floor_divide(a, scalar)
else:
return xp.floor_divide(scalar, a)
else:
if self.is_scalar_rhs:
return a // scalar
else:
return scalar // a
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_floordiv_invalid_dtypes(device, dtypes, is_module):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
        if is_module:
            chainerx.floor_divide(a, b)
        else:
            a // b
# TODO(imanishi): Support and test zero division and mixed dtypes.
# TODO(imanishi): Support and test chainerx.Scalar // chainerx.ndarray.
# TODO(imanishi): Support and test bool dtype.
@chainerx.testing.numpy_chainerx_array_equal(float16_rtol=1e-3)
@pytest.mark.parametrize('lhs,rhs', [
([], []),
([0, 1, 2, 3, 100, 101, 102, 103], [3] * 8),
([-1, -2, -3, -4, -100, -101, -102, -103], [3] * 8),
([0, 1, 2, 3, 100, 101, 102, 103], [-3] * 8),
([-1, -2, -3, -4, -100, -101, -102, -103], [-3] * 8),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], [1.2] * 8),
([-0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4], [1.2] * 8),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], [-1.2] * 8),
([-0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4], [-1.2] * 8),
([0, 1, 2, 3, 100, 101, 102, 103], 3),
([-1, -2, -3, -4, -100, -101, -102, -103], 3),
([0, 1, 2, 3, 100, 101, 102, 103], -3),
([-1, -2, -3, -4, -100, -101, -102, -103], -3),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], 1.2),
([-0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4], 1.2),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], -1.2),
([-0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4], -1.2),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_ifloordiv(xp, lhs, rhs, device, numeric_dtype):
if numpy.array(lhs).dtype.kind != numpy.dtype(numeric_dtype).kind:
return chainerx.testing.ignore()
lhs = xp.array(lhs).astype(numeric_dtype)
if isinstance(rhs, (list, tuple)):
rhs = xp.array(rhs).astype(numeric_dtype)
lhs //= rhs
return lhs
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_inplace_arithmetic_invalid)
def test_ifloordiv_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
a //= b
_in_out_dtypes_inplace_truediv = [
(('float32', 'int16'), 'float32'),
(('float64', 'uint8'), 'float64'),
(('float16', 'float16'), 'float16'),
(('float32', 'float32'), 'float32'),
(('float64', 'float64'), 'float64'),
(('float32', 'float16'), 'float32'),
(('float16', 'float64'), 'float64'),
]
_in_out_dtypes_reciprocal = [
(('int16',), 'float32'),
(('uint8',), 'float32'),
(('float16',), 'float16'),
(('float32',), 'float32'),
(('float64',), 'float64'),
]
_in_out_dtypes_truediv = _in_out_dtypes_inplace_truediv + [
(('int8', 'int8'), 'float32'),
(('int16', 'int16'), 'float32'),
(('int32', 'int32'), 'float32'),
(('int64', 'int64'), 'float32'),
(('uint8', 'uint8'), 'float32'),
(('int8', 'int32'), 'float32'),
(('uint8', 'int64'), 'float32'),
(('int8', 'uint8'), 'float32'),
(('int32', 'float16'), 'float16'),
(('uint8', 'float32'), 'float32'),
]
_in_out_dtypes_inplace_truediv_scalar = [
(('int8',), int, 'float32'),
(('int16',), int, 'float32'),
(('int32',), int, 'float32'),
(('int64',), int, 'float32'),
(('uint8',), int, 'float32'),
(('float16',), int, 'float16'),
(('float32',), int, 'float32'),
(('float64',), int, 'float64'),
(('float16',), float, 'float16'),
(('float32',), float, 'float32'),
(('float64',), float, 'float64'),
]
_in_out_dtypes_truediv_scalar = _in_out_dtypes_inplace_truediv_scalar + [
(('int8',), float, 'float32'),
(('int16',), float, 'float32'),
(('int32',), float, 'float32'),
(('int64',), float, 'float32'),
(('uint8',), float, 'float32'),
]
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': _in_out_dtypes_truediv,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_truediv,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_truediv,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestTrueDivide(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
def setup(self):
super().setup()
dtype1, dtype2 = self.in_dtypes
if dtype1 == 'float16' or dtype2 == 'float16':
self.check_forward_options.update({'rtol': 5e-3, 'atol': 5e-3})
self.check_backward_options.update({'rtol': 1e-2, 'atol': 5e-3})
# Double backward is heavily influenced by some fp16
# precision issues due to the way intermediate results
# are treated in ChainerX
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 3e-1})
def generate_inputs(self):
a, b = super().generate_inputs()
if self.input_lhs == 'random':
            # Avoid denominators in the (-0.3, 0.3) interval so outputs
            # and gradients stay bounded.
with math_utils.IgnoreNumpyFloatingPointError():
b[numpy.logical_and(-0.3 < b, b < 0.3)] = 1
return a, b
def func(self, xp, a, b):
if self.is_module:
return xp.divide(a, b)
else:
return a / b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_truediv_invalid_dtypes(device, dtypes, is_module):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
        if is_module:
            chainerx.true_divide(a, b)
        else:
            a / b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_inplace_binary,
'in_dtypes,out_dtype': _in_out_dtypes_inplace_truediv,
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_inplace_truediv,
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestITrueDivide(
math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
a, b = super().generate_inputs()
if self.input_lhs == 'random':
with math_utils.IgnoreNumpyFloatingPointError():
b[numpy.logical_and(-0.3 < b, b < 0.3)] = 1
return a, b
def func(self, xp, a, b):
a /= b
# TODO(hvy): Support and test zero division and mixed dtypes (dtype kinds).
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_truediv_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# Dtype combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_truediv_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.float_dtypes)),
'scalar_type': [float],
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [-1, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'is_scalar_rhs': [True, False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestTrueDivideScalar(
math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
def generate_inputs(self):
        # Do not divide by small numbers, to avoid ridiculously large outputs.
if not self.is_scalar_rhs and self.input == 'random':
in_dtype, = self.in_dtypes
low = -5 if numpy.dtype(in_dtype).kind != 'u' else 2
high = 5
x = array_utils.uniform(self.shape, in_dtype, low=low, high=high)
x[(-1 < x) & (x < 0)] = -2
x[(0 <= x) & (x < 1)] = 2
return x,
return super().generate_inputs()
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
return xp.divide(a, scalar)
else:
return xp.divide(scalar, a)
else:
if self.is_scalar_rhs:
return a / scalar
else:
return scalar / a
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.float_dtypes)),
'scalar_type': [float],
'input': ['random'],
'scalar_value': [1],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.float_dtypes)),
'scalar_type': [float],
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [-1, 1, 2, float('inf'), -float('inf'), float('nan')],
})
))
class TestITrueDivideScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a /= scalar
def _create_dummy_array_for_dot(xp, shape, dtype):
x = numpy.arange(numpy.prod(shape)).reshape(shape)
if dtype == 'bool_':
x = numpy.asarray(x % 2 == 0)
else:
x = x.astype(dtype)
return xp.array(x)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs,input_rhs': [(2, 2)],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': dtype_utils.result_numeric_dtypes_two_arrays,
'input_lhs,input_rhs': [(2, 2)],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs,input_rhs': [(2, 2)],
'is_module': [True, False],
})
# Special values (integers forward)
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.signed_integral_dtypes)),
'input_lhs': [-2, -1, 0, 1, 2, 5],
'input_rhs': [0, 1, 2, 5],
'is_module': [False],
})
# Special values (floats forward)
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': [-1, 0, 1, 2, float('inf'), -float('inf'), float('nan')],
'input_rhs': [-1, 0, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
# Special values (floats backward)
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': [-3.0, -1.2, 1.2, 3],
'input_rhs': [-3.0, -1.2, 0.0, 1.2, 3.0],
'is_module': [False],
})
))
class TestPower(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def setup(self):
super().setup()
in_dtype1, in_dtype2 = self.in_dtypes
if in_dtype1 == 'float16' or in_dtype2 == 'float16':
self.check_backward_options.update({'rtol': 5e-2, 'atol': 5e-2})
self.check_double_backward_options.update(
{'rtol': 5e-2, 'atol': 5e-2})
def func(self, xp, a, b):
if self.is_module:
y = xp.power(a, b)
else:
y = a ** b
return y
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [2],
'scalar_value': [1.2, 2],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# Type combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [2],
'scalar_value': [1.2, 2],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [2],
'scalar_value': [1.2, 2],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_float_arithmetic_scalar,
'input': [-1, 0, 1, 2, float('inf'), -float('inf'), float('nan')],
'scalar_value': [
-1, 0, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestPowerScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
def setup(self):
super().setup()
        in_dtype, = self.in_dtypes
        if in_dtype == 'float16':
self.check_backward_options.update({'rtol': 5e-3, 'atol': 5e-3})
self.check_double_backward_options.update(
{'rtol': 5e-3, 'atol': 5e-3})
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
y = xp.power(a, scalar)
else:
y = xp.power(scalar, a)
else:
if self.is_scalar_rhs:
y = a ** scalar
else:
y = scalar ** a
return y
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('is_bool_rhs', [True, False])
@pytest.mark.parametrize('is_bool_primitive', [True, False])
@pytest.mark.parametrize('is_module', [True, False])
def test_power_invalid_bool_dtype(
device, dtype, is_bool_rhs, is_bool_primitive, is_module):
shape = (3, 2)
a = chainerx.array(array_utils.uniform(shape, dtype))
if is_bool_primitive:
b = True
else:
b = chainerx.array(array_utils.uniform(shape, 'bool'))
with pytest.raises(chainerx.DtypeError):
if is_module:
if is_bool_rhs:
chainerx.power(a, b)
else:
chainerx.power(b, a)
else:
if is_bool_rhs:
a ** b
else:
b ** a
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': _in_out_dtypes_reciprocal,
'input': [1, 3],
})
# Dtype combinations
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': _in_out_dtypes_reciprocal,
'input': [1, 3],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': ['random', float('inf'), -float('inf'), float('nan'), 0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestReciprocal(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.reciprocal(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# Special values.
# TODO(nakasuka): Add tests for inf and NaN.
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_arithmetic,
'input_lhs': [-1, 0, 1, 2],
'input_rhs': [-1, 1, 2],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestRemainder(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def setup(self):
super().setup()
dtype1, dtype2 = self.in_dtypes
if dtype1 == 'float16' or dtype2 == 'float16':
self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 1e-2})
def generate_inputs(self):
dtype1, dtype2 = self.in_dtypes
shape1, shape2 = self.in_shapes
low1 = -5 if numpy.dtype(dtype1).kind != 'u' else 2
low2 = -5 if numpy.dtype(dtype2).kind != 'u' else 2
high = 5
a = array_utils.uniform(shape1, dtype1, low=low1, high=high)
b = array_utils.uniform(shape2, dtype2, low=low2, high=high)
a[numpy.logical_and(-0.5 < a, a < 0.5)] = 1
b[numpy.logical_and(-0.5 < b, b < 0.5)] = 1
return a, b
def func(self, xp, a, b):
if self.is_module:
return xp.remainder(a, b)
else:
return a % b
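# Informal note on generate_inputs above: a % b is non-differentiable
# wherever a crosses a multiple of b, and a near-zero divisor makes the
# result numerically unstable, so values in (-0.5, 0.5) are clamped to 1
# before the numeric gradient checks run.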
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_remainder_invalid_dtypes(device, dtypes, is_module):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
        if is_module:
            chainerx.remainder(a, b)
        else:
            a % b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_inplace_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_inplace_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Special values.
# TODO(nakasuka): Add tests for inf and NaN.
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_inplace_arithmetic,
'input_lhs': [-1, 0, 1, 2],
'input_rhs': [-1, 1, 2],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestIRemainder(
math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def setup(self):
super().setup()
dtype1, dtype2 = self.in_dtypes
if dtype1 == 'float16' or dtype2 == 'float16':
self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 1e-2})
def generate_inputs(self):
dtype1, dtype2 = self.in_dtypes
shape1, shape2 = self.in_shapes
low1 = -5 if numpy.dtype(dtype1).kind != 'u' else 2
low2 = -5 if numpy.dtype(dtype2).kind != 'u' else 2
high = 5
a = array_utils.uniform(shape1, dtype1, low=low1, high=high)
b = array_utils.uniform(shape2, dtype2, low=low2, high=high)
a[numpy.logical_and(-0.5 < a, a < 0.5)] = 1
b[numpy.logical_and(-0.5 < b, b < 0.5)] = 1
return a, b
def func(self, xp, a, b):
a %= b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_inplace_arithmetic_invalid)
def test_iremainder_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
a %= b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [2],
'is_module': [False],
'is_scalar_rhs': [False],
})
# Type combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [2],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [2],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
})
# Special values.
# TODO(nakasuka): Add tests for inf and NaN.
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [-1, 1, 2],
'scalar_value': [1, 2],
'is_module': [False],
'is_scalar_rhs': [False, True],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestRemainderScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def generate_inputs(self):
# Avoid (-0.5, 0.5) interval
if not self.is_scalar_rhs and self.input == 'random':
in_dtype, = self.in_dtypes
low = -5 if numpy.dtype(in_dtype).kind != 'u' else 2
high = 5
x = array_utils.uniform(self.shape, in_dtype, low=low, high=high)
x[numpy.logical_and(-0.5 < x, x < 0.5)] = 1
return x,
return super().generate_inputs()
    def func_scalar(self, xp, a, scalar):
        if self.is_module:
            if self.is_scalar_rhs:
                return xp.remainder(a, scalar)
            else:
                return xp.remainder(scalar, a)
        else:
            if self.is_scalar_rhs:
                return a % scalar
            else:
                return scalar % a
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [2, 3],
})
# Dtype combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [2, 3],
})
# Special values.
# TODO(nakasuka): Add tests for inf and NaN.
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': [-1, 0, 1, 2],
'scalar_value': [-1, 1, 2],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestIRemainderScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def func_scalar(self, xp, a, scalar):
a %= scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
        # Check only deterministic values to avoid non-differentiable points.
'input_lhs': [4, -4],
'input_rhs': [7, -7],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
    # Special values (numeric dtypes, forward only)
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random', 2, -2, 5, -5, 11, -11],
'input_rhs': ['random', 7, -7, 13, -13],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': [
'random', 12, -12, float('inf'), -float('inf'), float('nan')],
'input_rhs': [
'random', 3, -3, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestFmod(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def generate_inputs(self):
shape1, shape2 = self.in_shapes
dtype1, dtype2 = self.in_dtypes
a, b = super().generate_inputs()
        # Division by a very small divisor is numerically unstable.
if not numpy.isnan(b).any():
b[numpy.abs(b) < 0.3] += 1
# Avoid non-differentiable points
if not (self.skip_backward_test and
self.skip_double_backward_test):
fmod = numpy.abs(numpy.fmod(a, b))
mask = (fmod < 0.1) | (numpy.abs(b) - fmod < 0.1)
if mask.any():
                # This branch is never reached in broadcast tests.
assert a.shape == b.shape
a += b * mask * 0.5
return a, b
def func(self, xp, a, b):
return xp.fmod(a, b)
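# Informal reminder of the semantics under test (mirrors NumPy):
# remainder() takes the sign of the divisor, while fmod() takes the sign
# of the dividend. A minimal sketch:
#
#   numpy.remainder(-5, 3)  -> 1    (sign of the divisor)
#   numpy.fmod(-5, 3)       -> -2   (sign of the dividend)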
| 58,814
| 32.398637
| 79
|
py
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_manipulation.py
|
import itertools
import unittest
import warnings
import chainer
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import op_utils
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable) # allows duplicate elements
return list(itertools.chain(*[itertools.combinations(s, r)
for r in range(len(s)+1)]))
# Value for parameterization to represent an unspecified (default) argument.
class _UnspecifiedType(object):
def __repr__(self):
return '<Unspecified>'
_unspecified = _UnspecifiedType()
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('mode', ['module', 'transpose', 'T'])
class TestTranspose(op_utils.NumpyOpTest):
def setup(self, shape, dtype):
# Skip backward/double-backward tests for int dtypes
if numpy.dtype(dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
self.shape = shape
self.dtype = dtype
def generate_inputs(self):
shape = self.shape
dtype = self.dtype
a = array_utils.create_dummy_ndarray(numpy, shape, dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
mode = self.mode
if mode == 'module':
b = xp.transpose(a)
elif mode == 'transpose':
b = a.transpose()
elif mode == 'T':
b = a.T
else:
assert False
return b,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,axes', [
((1,), 0),
((1,), (0,)),
((2,), (0,)),
((2, 3), (1, 0)),
((2, 3), (-2, -1)),
((2, 3, 1), (2, 0, 1)),
((2, 3, 1), (2, -3, 1)),
])
@chainer.testing.parameterize_pytest('is_module', [True, False])
class TestTransposeAxes(op_utils.NumpyOpTest):
def setup(self, dtype):
# Skip backward/double-backward tests for int dtypes
if numpy.dtype(dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
self.dtype = dtype
def generate_inputs(self):
shape = self.shape
dtype = self.dtype
a = array_utils.create_dummy_ndarray(numpy, shape, dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
axes = self.axes
if self.is_module:
b = xp.transpose(a, axes)
else:
b = a.transpose(axes)
return b,
@pytest.mark.parametrize('shape,axes', [
((), (0,)),
((1,), (1,)),
((2, 3), (1,)),
((2, 3), (1, 0, 2)),
])
def test_transpose_invalid_axes(shape, axes):
a = array_utils.create_dummy_ndarray(chainerx, shape, 'float32')
with pytest.raises(chainerx.DimensionError):
chainerx.transpose(a, axes)
with pytest.raises(chainerx.DimensionError):
a.transpose(axes)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('in_shape,axis,start', [
# various axis
((2, 3, 4), 0, _unspecified),
((2, 3, 4), 1, _unspecified),
((2, 3, 4), 2, _unspecified),
((2, 3, 4), -1, _unspecified),
((2, 3, 4), -3, _unspecified),
# with start
((2, 3, 4), 1, 0),
((2, 3, 4), 1, 1),
((2, 3, 4), 1, 2),
((2, 3, 4), 1, 3),
((2, 3, 4), 1, -1),
((2, 3, 4), 1, -2),
((2, 3, 4), 1, -3),
((2, 3, 4), 2, 3),
((2, 3, 4), 2, 0),
((2, 3, 4), 0, 3),
((2, 3, 4), 0, 0),
# single dim
((1,), 0, _unspecified),
((1,), -1, _unspecified),
# zero-length dims
((0,), 0, _unspecified),
((0,), 0, 0),
((0,), 0, 1),
((0,), -1, _unspecified),
((2, 0, 3), 1, _unspecified),
((2, 0, 3), -2, _unspecified),
])
class TestRollaxis(op_utils.NumpyOpTest):
def setup(self, dtype):
# Skip backward/double-backward tests for int dtypes
if numpy.dtype(dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
self.dtype = dtype
def generate_inputs(self):
in_shape = self.in_shape
dtype = self.dtype
a = array_utils.create_dummy_ndarray(numpy, in_shape, dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
axis = self.axis
        start = self.start
if start is _unspecified:
b = xp.rollaxis(a, axis)
else:
b = xp.rollaxis(a, axis, start)
return b,
@pytest.mark.parametrize('in_shape,axis,start', [
# out of bounds axis
((2, 3, 4), 3, _unspecified),
((2, 3, 4), -4, _unspecified),
# out of bounds start
((2, 3, 4), 2, 4),
((2, 3, 4), 2, -4),
# empty shape
((), 0, _unspecified),
((), -1, _unspecified),
])
def test_rollaxis_invalid(in_shape, axis, start):
a = array_utils.create_dummy_ndarray(chainerx, in_shape, 'float32')
with pytest.raises(chainerx.DimensionError):
if start is _unspecified:
chainerx.rollaxis(a, axis)
else:
chainerx.rollaxis(a, axis, start)
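# Informal reminder of rollaxis semantics (mirrors NumPy): the given axis
# is rolled until it lies just before the 'start' position (default 0).
# For an array of shape (2, 3, 4):
#
#   numpy.rollaxis(a, 2)     # shape becomes (4, 2, 3)
#   numpy.rollaxis(a, 0, 3)  # shape becomes (3, 4, 2)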
_reshape_shape = [
((), ()),
((0,), (0,)),
((1,), (1,)),
((5,), (5,)),
((2, 3), (2, 3)),
((1,), ()),
((), (1,)),
((1, 1), ()),
((), (1, 1)),
((6,), (2, 3)),
((2, 3), (6,)),
((2, 0, 3), (5, 0, 7)),
((5,), (1, 1, 5, 1, 1)),
((1, 1, 5, 1, 1), (5,)),
((2, 3), (3, 2)),
((2, 3, 4), (3, 4, 2)),
((2, 3, 4), (3, -1, 2)),
((2, 3, 4), (3, -3, 2)), # -3 is treated as a -1 and is valid.
((2, 0, 3), (-1,)), # Empty to inferred.
((2, 0, 3), (-1, 4)), # Empty to inferred.
((2, 0, 3), (4, -1)), # Empty to inferred.
((2, 0, 3), (4, -1, 5)), # Empty to inferred.
]
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('a_shape,b_shape', _reshape_shape)
@chainer.testing.parameterize_pytest('shape_type', [tuple, list])
@chainer.testing.parameterize_pytest('contiguous', ['C', None])
@chainer.testing.parameterize_pytest('is_module', [True, False])
class TestReshape(op_utils.NumpyOpTest):
def generate_inputs(self):
a = array_utils.shaped_arange(self.a_shape, 'float64')
return a,
def forward_xp(self, inputs, xp):
a, = inputs
b_shape = self.b_shape
shape_type = self.shape_type
if self.is_module:
b = xp.reshape(a, shape_type(b_shape))
else:
b = a.reshape(shape_type(b_shape))
return b,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('a_shape,b_shape', _reshape_shape)
@chainer.testing.parameterize_pytest('shape_type', [tuple, list])
@chainer.testing.parameterize_pytest('contiguous', ['C', None])
@chainer.testing.parameterize_pytest('is_module', [True, False])
class TestReshapeCopied(op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
a = array_utils.shaped_arange(self.a_shape, 'float64')
return a,
def forward_xp(self, inputs, xp):
a, = inputs
b_shape = self.b_shape
shape_type = self.shape_type
if self.is_module:
b = xp.reshape(a, shape_type(b_shape))
else:
b = a.reshape(shape_type(b_shape))
if xp is chainerx:
copied = (
a._debug_data_memory_address
!= b._debug_data_memory_address)
else:
copied = a.ctypes.data != b.ctypes.data
if copied:
if xp is chainerx:
assert b.is_contiguous
else:
assert b.flags.c_contiguous
return xp.asarray(copied),
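# Informal note: reshape returns a view whenever the requested shape is
# compatible with the existing strides and a contiguous copy otherwise;
# the test above compares data pointers to detect which case occurred and
# asserts that any copy is C-contiguous.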
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('is_module', [True, False])
@chainer.testing.parameterize_pytest('a_shape,b_shape', _reshape_shape)
@chainer.testing.parameterize_pytest('contiguous', ['C', None])
class TestReshapeArg(op_utils.NumpyOpTest):
forward_accept_errors = (TypeError, chainerx.ChainerxError)
def setup(self):
if self.is_module and len(self.b_shape) > 1:
            # Skip tests where the 'order' argument would unintentionally
            # receive a shape value: NumPy does not raise the error you
            # might expect in this case.
            raise unittest.SkipTest(
                'NumPy won\'t raise error for unintentional argument '
                'unpacking')
def generate_inputs(self):
a = array_utils.shaped_arange(self.a_shape, 'float64')
return a,
def forward_xp(self, inputs, xp):
a, = inputs
b_shape = self.b_shape
if self.is_module:
# TypeError/chainerx.ChainerxError in case b_shape is empty.
b = xp.reshape(a, *b_shape)
else:
# TypeError/chainerx.ChainerxError in case b_shape is empty.
b = a.reshape(*b_shape)
if xp is chainerx:
if self.contiguous == 'C':
assert b.is_contiguous
assert (a._debug_data_memory_address
== b._debug_data_memory_address), (
'Reshape must be done without copy')
return b,
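# Illustrative sketch of the pitfall skipped in setup above: the ndarray
# method accepts dimensions either as a tuple or as separate arguments,
# so unpacking a shape tuple cannot be distinguished from intentional
# per-dimension arguments:
#
#   a.reshape((3, 2))   # shape as a tuple
#   a.reshape(3, 2)     # equivalent; also what a.reshape(*(3, 2)) yields
#
# For the module-level function, the unpacked trailing element lands in
# the 'order' parameter slot instead, which NumPy does not reject in the
# way one might expect (per the SkipTest note above).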
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('a_shape,b_shape', _reshape_shape)
class TestFlatten(op_utils.NumpyOpTest):
forward_accept_errors = (TypeError, chainerx.ChainerxError)
check_numpy_strides_compliance = False
def generate_inputs(self):
a = array_utils.shaped_arange(self.a_shape, 'float64')
return a,
def forward_xp(self, inputs, xp):
a, = inputs
b = a.flatten()
return b,
@pytest.mark.parametrize('shape1,shape2', [
((), (0,)),
((), (2,)),
((), (1, 2,)),
((0,), (1,)),
((0,), (1, 1, 1)),
((2, 3), (2, 3, 2)),
((2, 3, 4), (2, 3, 5)),
])
def test_reshape_invalid(shape1, shape2):
def check(a_shape, b_shape):
a = array_utils.create_dummy_ndarray(chainerx, a_shape, 'float32')
with pytest.raises(chainerx.DimensionError):
a.reshape(b_shape)
check(shape1, shape2)
check(shape2, shape1)
@pytest.mark.parametrize('shape1,shape2', [
((2, 3, 4), (5, -1, 3)), # Not divisible.
((2, 3, 4), (-1, -1, 3)), # More than one dimension cannot be inferred.
((2, 3, 4), (-2, 4, -1)),
((2, 0, 4), (-1, 0)), # Empty to ambiguous.
((2, 0, 4), (0, -1)), # Empty to ambiguous.
((2, 0, 4), (0, -1, 2)), # Empty to ambiguous.
])
def test_reshape_invalid_cannot_infer(shape1, shape2):
a = array_utils.create_dummy_ndarray(chainerx, shape1, 'float32')
with pytest.raises(chainerx.DimensionError):
a.reshape(shape2)
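# Informal sketch of the -1 inference rule exercised above: at most one
# dimension may be -1, and it is inferred so that the total size matches.
# For example, reshaping a (2, 3, 4) array with (3, -1, 2) infers
# -1 == 24 // (3 * 2) == 4, while (5, -1, 3) fails because 24 is not
# divisible by 15.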
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,axis', [
((), None),
((0,), None),
((1,), None),
((1, 1), None),
((1, 0, 1), None),
((3,), None),
((3, 1), None),
((1, 3), None),
((2, 0, 3), None),
((2, 4, 3), None),
((2, 1, 3), 1),
((2, 1, 3), -2),
((1, 2, 1, 3, 1, 1, 4), None),
((1, 2, 1, 3, 1, 1, 4), (2, 0, 4)),
((1, 2, 1, 3, 1, 1, 4), (-2, 0, 4)),
])
@chainer.testing.parameterize_pytest('is_module', [True, False])
class TestSqueeze(op_utils.NumpyOpTest):
def generate_inputs(self):
a = array_utils.shaped_arange(self.shape, 'float32')
return a,
def forward_xp(self, inputs, xp):
a, = inputs
axis = self.axis
if self.is_module:
b = xp.squeeze(a, axis)
else:
b = a.squeeze(axis)
return b,
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('shape,axis', [
((2, 1, 3), 0),
((2, 1, 3), -1),
((2, 1, 3), (1, 2)),
((2, 1, 3), (1, -1)),
((2, 1, 3), (1, 1)),
])
def test_squeeze_invalid(is_module, xp, shape, axis):
a = xp.ones(shape, 'float32')
if is_module:
return xp.squeeze(a, axis)
else:
return a.squeeze(axis)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('src_shape,dst_shape', [
((), ()),
((1,), (2,)),
((1, 1), (2, 2)),
((1, 1), (1, 2)),
((2,), (3, 2)),
])
class TestBroadcastTo(op_utils.NumpyOpTest):
def generate_inputs(self):
a = array_utils.shaped_arange(self.src_shape, 'float32')
return a,
def forward_xp(self, inputs, xp):
a, = inputs
b = xp.broadcast_to(a, self.dst_shape)
return b,
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(chainerx.DimensionError, ValueError))
@pytest.mark.parametrize(('src_shape,dst_shape'), [
((3,), (2,)),
((3,), (3, 2)),
((1, 3), (3, 2)),
((3,), [2]), # shape as a list instead of tuple
])
def test_broadcast_to_invalid(xp, src_shape, dst_shape):
a = xp.ones(src_shape, 'float32')
return xp.broadcast_to(a, dst_shape)
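# Informal reminder of the broadcasting rule tested above: shapes are
# aligned from the trailing dimension, and each source dimension must
# either equal the destination dimension or be 1. So (1, 3) -> (2, 3) is
# valid, while (3,) -> (3, 2) is not (3 does not match the trailing 2).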
def _make_inputs(shapes, dtypes):
# Generates input ndarrays.
assert isinstance(shapes, (list, tuple))
assert isinstance(dtypes, (list, tuple))
assert len(shapes) == len(dtypes)
inputs = []
for i, (shape, dtype) in enumerate(zip(shapes, dtypes)):
size = array_utils.total_size(shape)
a = numpy.arange(i * 100, i * 100 + size)
a = a.reshape(shape)
a = a.astype(dtype)
inputs.append(a)
assert len(inputs) > 0
return tuple(inputs)
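# Illustrative sketch: _make_inputs gives each array a distinct value
# range (offset by i * 100) so that joined outputs reveal which input
# each element came from. For example:
#
#   _make_inputs([(2,), (2,)], ['int32', 'int32'])
#       -> (array([0, 1], dtype=int32), array([100, 101], dtype=int32))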
class JoinTestBase(op_utils.NumpyOpTest):
chx_expected_dtype = None
dtypes = None
def setup(self):
# Skip backward/double-backward tests for int dtypes
if any(numpy.dtype(dt).kind != 'f' for dt in self.dtypes):
self.skip_backward_test = True
self.skip_double_backward_test = True
# TODO(niboshi): Fix strides for 0-size inputs
if any(0 in shape for shape in self.shapes):
self.check_numpy_strides_compliance = False
if any(dt == 'float16' for dt in self.dtypes):
self.check_backward_options.update({'rtol': 1e-3, 'atol': 1e-3})
def generate_inputs(self):
return _make_inputs(self.shapes, self.dtypes)
def join(self, inputs, xp):
        # Call a joining routine (e.g. xp.concatenate, xp.stack or
        # xp.vstack) and return its result.
        raise NotImplementedError()
def forward_xp(self, inputs, xp):
b = self.join(inputs, xp)
if self.chx_expected_dtype is not None:
b = dtype_utils.cast_if_numpy_array(xp, b, self.chx_expected_dtype)
return b,
class ConcatenateTestBase(JoinTestBase):
axis = None
def join(self, inputs, xp):
if self.axis is _unspecified:
b = xp.concatenate(inputs)
else:
b = xp.concatenate(inputs, self.axis)
return b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shapes,axis', [
([(0,)], 0),
([(1,)], 0),
([(0,), (0,)], 0),
([(0,), (1,)], 0),
([(1,), (1,)], 0),
([(0, 0,), (0, 0,)], 0),
([(0, 0,), (0, 0,)], 1),
([(1, 0,), (1, 0,)], 0),
([(1, 0,), (1, 0,)], 1),
([(1, 0,), (1, 0,)], 2),
([(3, 4, 5)], 0),
([(2, 3, 1), (2, 3, 1)], 1),
([(2, 3, 2), (2, 4, 2), (2, 3, 2)], 1),
([(2, 3, 2), (2, 4, 2), (3, 3, 2)], 1),
([(4, 10), (5, 10)], 0),
([(4, 10), (4, 8)], 0),
([(4, 4), (5,)], 0),
([(4, 4), (4,)], 0),
([(2, 3), (2, 3)], 10),
([(2, 3), (2, 3)], -1),
([(2, 3), (2, 3)], None),
([(2, 3), (4, 5)], None),
([(2, 3), (4, 5, 1)], None),
([(2, 3), (4, 5, 1), (4,)], None),
([(2, 3), (2, 3)], _unspecified),
([(2, 3), (4, 5)], _unspecified),
])
class TestConcatenate(ConcatenateTestBase):
forward_accept_errors = (chainerx.DimensionError, ValueError)
def setup(self):
self.dtypes = ['float32'] * len(self.shapes)
super().setup()
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shapes,axis', [
([(0,), (0,)], 0),
([(0,), (1,)], 0),
([(1,), (1,)], 0),
([(0, 0,), (0, 0,)], 0),
([(0, 0,), (0, 0,)], 1),
([(1, 0,), (1, 0,)], 0),
([(1, 0,), (1, 0,)], 1),
([(2, 3, 1), (2, 3, 1)], 1),
([(4, 10), (5, 10)], 0),
([(2, 3), (2, 3)], None),
([(2, 3), (4, 5)], None),
([(2, 3), (4, 5, 1)], None),
([(2, 3), (2, 3)], _unspecified),
])
@chainer.testing.parameterize_pytest(
'dtypes,chx_expected_dtype', dtype_utils.result_dtypes_two_arrays)
class TestConcatenateTwoArraysMixedDtypes(ConcatenateTestBase):
pass
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shapes,axis', [
([(0,), (0,), (0,)], 0),
([(0,), (1,), (1,)], 0),
([(2, 3, 2), (2, 4, 2), (2, 3, 2)], 1),
([(2, 3), (4, 5), (4, 2)], None),
([(2, 3), (4, 5, 1), (4,)], None),
([(2, 3), (2, 3), (1, 3)], _unspecified),
])
@chainer.testing.parameterize_pytest(
'dtypes,chx_expected_dtype', dtype_utils.result_dtypes_three_arrays)
class TestConcatenateThreeArraysMixedDtypes(ConcatenateTestBase):
pass
def test_concatenate_insufficient_inputs():
with pytest.raises(chainerx.DimensionError):
chainerx.concatenate([])
class StackTestBase(JoinTestBase):
axis = None
def join(self, inputs, xp):
if self.axis is None:
b = xp.stack(inputs)
else:
b = xp.stack(inputs, self.axis)
return b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shapes,axis', [
([(0,)], -1),
([(0,)], 0),
([(0,)], 1),
([(0,)], 2),
([(1,)], -1),
([(1,)], 0),
([(1,)], 1),
([(1,)], 2),
([(0,), (0,)], 0),
([(0,), (0,)], 1),
([(0, 0,), (0, 0,)], 0),
([(0, 0,), (0, 0,)], 1),
([(1, 0,), (1, 0,)], 0),
([(1, 0,), (1, 0,)], 1),
([(1, 0,), (1, 0,)], 2),
([(2, 3,), (2, 3,)], None),
([(2, 3,), (2, 3,)], 1),
([(2, 3,), (2, 3,)], -1),
([(3, 4, 5), (3, 4, 5), (3, 4, 5)], None),
([(3, 4, 5), (3, 4, 5), (3, 4, 5)], 0),
([(3, 4, 5), (3, 4, 5), (3, 4, 5)], 1),
([(3, 4, 5), (3, 4, 5), (3, 4, 5)], 2),
([(3, 4, 5), (3, 4, 5), (3, 4, 5)], 3),
([(3, 4, 5), (3, 4, 5), (3, 4, 5)], 4),
([(3, 4, 5), (3, 4, 5), (3, 4, 5)], -1),
([(2, 3, 2), (2, 4, 2), (2, 3, 2)], 1),
])
class TestStack(StackTestBase):
forward_accept_errors = (chainerx.DimensionError, ValueError)
def setup(self):
self.dtypes = ['float32'] * len(self.shapes)
super().setup()
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,axis', [
((0,), 0),
((0,), 1),
((0, 0), 0),
((0, 0), 1),
((1, 0), 0),
((1, 0), 1),
((1, 0), 2),
((2, 3), None),
((2, 3), 1),
((2, 3), -1),
])
@chainer.testing.parameterize_pytest(
'dtypes,chx_expected_dtype', dtype_utils.result_dtypes_two_arrays)
class TestStackTwoArraysMixedDtypes(StackTestBase):
def setup(self):
self.shapes = (self.shape, self.shape)
super().setup()
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,axis', [
((3, 4, 5), None),
((3, 4, 5), 0),
((3, 4, 5), 1),
((3, 4, 5), 2),
((3, 4, 5), 3),
((3, 4, 5), -1),
])
@chainer.testing.parameterize_pytest(
'dtypes,chx_expected_dtype', dtype_utils.result_dtypes_three_arrays)
class TestStackThreeArraysMixedDtypes(StackTestBase):
def setup(self):
self.shapes = (self.shape, self.shape, self.shape)
super().setup()
def test_stack_insufficient_inputs():
with pytest.raises(chainerx.DimensionError):
chainerx.stack([])
with pytest.raises(chainerx.DimensionError):
chainerx.stack([], 0)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,indices_or_sections,axis', [
((2,), 1, 0),
((2,), [], 0),
((2,), [1, 2], 0),
((2,), [-5, -3], 0),
((2, 4), 1, 0),
((2, 4), 2, 1),
((2, 4), 2, -1),
((2, 4, 6), [], 0),
((2, 4, 6), [2, 4], 2),
((2, 4, 6), [2, -3], 2),
((2, 4, 6), [2, 8], 2),
((2, 4, 6), [4, 2], 2),
((2, 4, 6), [1, 3], -2),
((6,), numpy.array([1, 2]), 0), # indices with 1-d numpy array
((6,), numpy.array([2]), 0), # indices with (1,)-shape numpy array
((6,), numpy.array(2), 0), # sections numpy scalar
((6,), numpy.array(2.0), 0), # sections with numpy scalar, float
((6,), 2.0, 0), # float type sections, without fraction
    # indices given as an empty numpy array
((6,), numpy.array([], numpy.int32), 0),
((6,), numpy.array([], numpy.float64), 0),
])
class TestSplit(op_utils.NumpyOpTest):
def setup(self):
# TODO(niboshi): There's a bug in backward of split() in which the
# gradient shape differs from the input if indices are not in the
# sorted order. Fix this.
indices_or_sections = self.indices_or_sections
if (isinstance(indices_or_sections, list) and
sorted(indices_or_sections) != indices_or_sections):
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
a = array_utils.create_dummy_ndarray(numpy, self.shape, 'float32')
return a,
def forward_xp(self, inputs, xp):
a, = inputs
b = xp.split(a, self.indices_or_sections, self.axis)
assert isinstance(b, list)
return tuple(b)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('shape,indices_or_sections,axis', [
((7, 0), [2, 5], 0),
((0, 6), 3, 1),
])
def test_split_zero_sized_no_offset(device, shape, indices_or_sections, axis):
    # A (sub-)array of size 0 should always have offset 0.
a = chainerx.random.uniform(-1, 1, shape)
assert a.offset == 0 # Test pre-condition.
b = chainerx.split(a, indices_or_sections, axis)
assert all(bi.offset == 0 for bi in b)
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(
chainerx.DimensionError, IndexError, ValueError, TypeError,
ZeroDivisionError))
@pytest.mark.parametrize('shape,indices_or_sections,axis', [
((), 1, 0),
((2,), 3, 0),
((2, 4), 0, 0),
((2, 4), -1, 1),
((2, 4), 1, 2), # Axis out of range.
((2, 4), 3, 1), # Uneven split.
((6,), [2.0], 0), # float type indices
((6,), 2.1, 0), # float type sections, with fraction
# indices with (1,)-shape numpy array, float
((6,), numpy.array([2.0]), 0),
# sections with numpy scalar, float with fraction
((6,), numpy.array(2.1), 0),
((2,), [1, 2.0], 0), # indices with mixed type
((6,), '2', 0), # Invalid type
    # indices given as an empty numpy array
((6,), numpy.array([[], []], numpy.int32), 0),
((6,), numpy.array([[], []], numpy.float64), 0),
])
def test_split_invalid(xp, shape, indices_or_sections, axis):
a = array_utils.create_dummy_ndarray(xp, shape, 'float32')
return xp.split(a, indices_or_sections, axis)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,indices_or_sections', [
((2, 4, 6), []),
((2, 4, 6), [2, 4]),
((2, 4, 6), [2, -3]),
((2, 4, 6), [2, 8]),
((2, 4, 6), [4, 2]),
((2, 4, 6), [1, 3]),
((2, 4, 5), [1, -3]),
((2, 4, 8), [2, 5]),
((2, 4, 10), [1, 4]),
((2, 4, 6), numpy.array([1, 2])), # indices with 1-d numpy array
((2, 4, 6), numpy.array([2])), # indices with (1,)-shape numpy array
((2, 4, 8), numpy.array(2)), # sections numpy scalar
((2, 4, 6, 8), numpy.array(2.0)), # sections with numpy scalar, float
((2, 4, 6, 8), 2.0), # float type sections, without fraction
    # indices given as an empty numpy array
((2, 4, 8, 10), numpy.array([], numpy.int32)),
((2, 4, 5, 10), numpy.array([], numpy.float64)),
])
class TestDSplit(op_utils.NumpyOpTest):
def setup(self):
# TODO(niboshi): There's a bug in backward of split() in which the
# gradient shape differs from the input if indices are not in the
# sorted order. Fix this.
indices_or_sections = self.indices_or_sections
if (isinstance(indices_or_sections, list) and
sorted(indices_or_sections) != indices_or_sections):
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
a = array_utils.create_dummy_ndarray(numpy, self.shape, 'float32')
return a,
def forward_xp(self, inputs, xp):
a, = inputs
b = xp.dsplit(a, self.indices_or_sections)
assert isinstance(b, list)
return tuple(b)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,indices_or_sections', [
((6, 4, 2), [1, 2, 4]),
((6, 4, 2), [2, 4, 6]),
((6, 4, 2), [1, 5, 7]),
((6, 4, 2), [2, -3, -5]),
((6, 4, 2), [2, 8, 10]),
((8, 6, 4, 2), [2, 4, 4, 6]),
((8, 6, 4, 2), [2, 6, 6, 8]),
((8, 6, 4, 2), [1, 4, 4, 6]),
((8, 6, 4, 2), [1, 4, 5, 6]),
((8, 6, 6, 4, 4, 2), [1, 3, 5, 7]),
# indices with 1-d numpy array
((8, 6, 4, 2), numpy.array([1, 2, 3])),
# indices with (4,)-shape numpy array
((8, 6, 4, 2), numpy.array([1, 2, 3, 4])),
((8, 8, 6, 6, 4, 2), numpy.array([1, 2, 3], numpy.int32)),
])
class TestVSplit(op_utils.NumpyOpTest):
def setup(self):
# TODO(ishanrai05): There's a bug in backward of split() in which the
# gradient shape differs from the input if indices are not in the
# sorted order. Fix this.
indices_or_sections = self.indices_or_sections
if (isinstance(indices_or_sections, list) and
sorted(indices_or_sections) != indices_or_sections):
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
a = array_utils.create_dummy_ndarray(numpy, self.shape, 'float32')
return a,
def forward_xp(self, inputs, xp):
a, = inputs
b = xp.vsplit(a, self.indices_or_sections)
assert isinstance(b, list)
return tuple(b)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,indices_or_sections', [
((2,), []),
((2, 4), 2),
((2, 4), [2, -3]),
((2, 6, 4), [2, 8]),
((2, 8, 4), [2, 5]),
((2, 5, 4), [1, -3]),
((2, 10, 4), [1, 4]),
((2, 6, 4), [4, 2, 1]),
((2, 6, 4), [1, 3, -2]),
((2, 6, 4), numpy.array([1, 2])), # indices with 1-d numpy array
((2, 6, 4), numpy.array([2])), # indices with (1,)-shape numpy array
((2, 8, 4), numpy.array(2)), # sections numpy scalar
((2, 6, 4, 8), numpy.array(2.0)), # sections with numpy scalar, float
((2, 6, 4, 8), 2.0), # float type sections, without fraction
    # indices given as an empty numpy array
((2, 8, 4, 10), numpy.array([], numpy.int32)),
((2, 5, 4, 10), numpy.array([], numpy.float64)),
])
class TestHSplit(op_utils.NumpyOpTest):
def setup(self):
# TODO(ishanrai05): There's a bug in backward of split() in which the
# gradient shape differs from the input if indices are not in the
# sorted order. Fix this.
indices_or_sections = self.indices_or_sections
if (isinstance(indices_or_sections, list) and
sorted(indices_or_sections) != indices_or_sections):
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
a = array_utils.create_dummy_ndarray(numpy, self.shape, 'float32')
return a,
def forward_xp(self, inputs, xp):
a, = inputs
b = xp.hsplit(a, self.indices_or_sections)
assert isinstance(b, list)
return tuple(b)
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(
chainerx.DimensionError, IndexError, ValueError, TypeError,
ZeroDivisionError))
@pytest.mark.parametrize('shape,indices_or_sections', [
    ((), 1), # Empty shape
    ((2,), 0), # Zero sections
    ((2, 6, 4), -1), # Negative sections
    ((2, 6, 4), 3), # Uneven split
    ((2, 6, 4), [2.0]), # Float-type indices
    ((2, 6, 4), 3.1), # Float-type sections with a fraction
    ((2, 6, 4), '4'), # Invalid type
])
def test_hsplit_invalid(xp, shape, indices_or_sections):
a = array_utils.create_dummy_ndarray(xp, shape, 'float32')
return xp.hsplit(a, indices_or_sections)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,axis1,axis2', [
((1, 1), 0, 1),
((2, 4), -1, 1),
((1, 2, 2), 0, 1),
((1, 2, 3, 4), 0, 2),
((3, 2, 1, 2, 3), 0, 4),
((1, 2, 4, 3, 1), 0, -2),
((1, 2, 4, 2, 1), 0, 0),
((1, 3, 3, 1), -1, -4),
])
@chainer.testing.parameterize_pytest('is_module', [True, False])
class TestSwapaxes(op_utils.NumpyOpTest):
def setup(self, dtype):
# Skip backward/double-backward tests for int dtypes
if numpy.dtype(dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
self.dtype = dtype
def generate_inputs(self):
a = array_utils.create_dummy_ndarray(numpy, self.shape, self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if self.is_module:
b = xp.swapaxes(a, self.axis1, self.axis2)
else:
b = a.swapaxes(self.axis1, self.axis2)
return b,
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(
chainerx.DimensionError, numpy.AxisError))
@pytest.mark.parametrize('shape,axis1,axis2', [
# Axis out of range.
((), 1, 0),
((2,), 3, 0),
((2, 4), 1, 2),
((1, 1, 2), -1, -4)
])
def test_swap_invalid(xp, shape, axis1, axis2):
a = array_utils.create_dummy_ndarray(xp, shape, 'float32')
return xp.swapaxes(a, axis1, axis2)
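# Informal reminder of swapaxes semantics (mirrors NumPy): the two given
# axes are exchanged and all others are left in place, e.g. for shape
# (2, 3, 4):
#
#   numpy.swapaxes(a, 0, 2)  # shape becomes (4, 3, 2)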
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,repeats,axis', [
((4,), 1, None),
((4,), 2, None),
((4, 2), 2, None),
((4,), 1, 0),
((4,), 2, 0),
((4,), (1,), None),
((4,), (2,), None),
((4, 2), (2,), None),
((4,), (1,), 0),
((4,), (2,), 0),
((2,), (1, 2), 0),
((2,), (0, 2), 0),
((4, 2), 2, 0),
((4, 2), 2, 1),
((4, 2), 2, -1),
((4, 2), 2, -2),
((2, 4), (1, 2), 0),
((4, 2), (1, 2), 1),
((2, 4), [1, 2], 0),
((4, 2), [1, 2], 1),
((2, 4), numpy.array([1, 2]), numpy.array(0)),
((4, 2), numpy.array([1, 2]), numpy.array(1)),
])
@chainer.testing.parameterize_pytest('is_module', [True, False])
class TestRepeat(op_utils.NumpyOpTest):
def setup(self, dtype):
# Skip backward/double-backward tests for int dtypes
if numpy.dtype(dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
self.dtype = dtype
if dtype == 'float16':
self.check_backward_options.update({'rtol': 1e-3, 'atol': 1e-3})
def generate_inputs(self):
a = array_utils.create_dummy_ndarray(numpy, self.shape, self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if self.is_module:
b = xp.repeat(a, self.repeats, self.axis)
else:
b = a.repeat(self.repeats, self.axis)
return b,
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(
chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('shape,repeats,axis', [
# Axis out of range.
((1,), 1, 1),
((1, 1), (1, 2), 0),
((1, 1), (1, -2), 0),
((1, 1), (1, 2), -3),
((1, 1), (1, 2), 2),
])
def test_repeat_invalid(xp, shape, repeats, axis):
a = array_utils.create_dummy_ndarray(xp, shape, 'float32')
return xp.repeat(a, repeats, axis)
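# Informal reminder of repeat semantics (mirrors NumPy): a scalar repeats
# every element the same number of times, while a sequence gives a
# per-element count along the chosen axis:
#
#   numpy.repeat(numpy.array([1, 2]), 2)          -> [1, 1, 2, 2]
#   numpy.repeat(numpy.array([1, 2]), (1, 2), 0)  -> [1, 2, 2]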
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(2, 2, 2)],
'axis': [*range(3 + 1)] + [*range(-1, -3 - 1, -1)],
})
+ chainer.testing.product({
'shape': [(3, 3, 2, 3, 3)],
'axis': [*range(5 + 1)] + [*range(-1, -5 - 1, -1)],
})
+ chainer.testing.product({
'shape': [(3, 0, 2, 0, 3)],
'axis': [*range(5 + 1)] + [*range(-1, -5 - 1, -1)],
})
+ chainer.testing.product({
'shape': [(1, 2, 3, 1, 3, 3)],
'axis': [*range(6 + 1)] + [*range(-1, -6 - 1, -1)],
})
+ chainer.testing.product({
'shape': [(3, 4, 5, 2, 3, 5)],
'axis': [*range(6 + 1)] + [*range(-1, -6 - 1, -1)],
})
+ chainer.testing.product({
'shape': [(1,)],
'axis': [*range(1 + 1)] + [*range(-1, -1 - 1, -1)],
})
))
@chainer.testing.parameterize_pytest('is_contiguous', [True, False])
class TestExpandDims(op_utils.NumpyOpTest):
# TODO(kshitij12345): Remove this when fixed
check_numpy_strides_compliance = False
def setup(self, dtype):
# Skip backward/double-backward tests for int dtypes
if numpy.dtype(dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
self.dtype = dtype
if dtype == 'float16':
self.check_backward_options.update({'rtol': 1e-3, 'atol': 1e-3})
def generate_inputs(self):
a = array_utils.create_dummy_ndarray(numpy, self.shape, self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if self.is_contiguous:
a = a.copy()
y = xp.expand_dims(a, self.axis)
# Result should be a view, not a copy.
if xp is chainerx:
assert y.data_ptr == a.data_ptr
return y,
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(
chainerx.DimensionError, numpy.AxisError, DeprecationWarning))
@pytest.mark.parametrize('shape,axis', [
# Axis out of range.
((), 1),
((2,), 3),
((2,), -3),
((2, 4), 4),
((1, 1, 2), -4)
])
def test_expand_dims_invalid(xp, shape, axis):
with warnings.catch_warnings():
warnings.simplefilter('error', DeprecationWarning)
a = array_utils.create_dummy_ndarray(xp, shape, 'float32')
return xp.expand_dims(a, axis)
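# Informal note: for an input of ndim n, expand_dims accepts insertion
# axes in [-(n + 1), n]; the parameterizations above enumerate exactly
# that range (e.g. axes 0..3 and -1..-4 for a (2, 2, 2) input), while
# this test covers the out-of-range values.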
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Single axis and None
chainer.testing.product({
'shape': [()],
'axis': [*range(0)] + [None] + [*range(-0, -0, -1)],
})
+ chainer.testing.product({
'shape': [(0,)],
'axis': [*range(1)] + [None] + [*range(-1, -1, -1)],
})
+ chainer.testing.product({
'shape': [(2, 2, 2)],
'axis': [*range(3)] + [None] + [*range(-1, -3, -1)],
})
+ chainer.testing.product({
'shape': [(3, 3, 2, 3, 3)],
'axis': [*range(5)] + [None] + [*range(-1, -5, -1)],
})
+ chainer.testing.product({
'shape': [(3, 0, 2, 0, 3)],
'axis': [*range(5)] + [None] + [*range(-1, -5, -1)],
})
+ chainer.testing.product({
'shape': [(1, 2, 3, 1, 3, 3)],
'axis': [*range(6)] + [None] + [*range(-1, -6, -1)],
})
+ chainer.testing.product({
'shape': [(3, 4, 5, 2, 3, 5)],
'axis': [*range(6)] + [None] + [*range(-1, -6, -1)],
})
+ chainer.testing.product({
'shape': [(1,)],
'axis': [*range(1)] + [None] + [*range(-1, -1, -1)],
})
# Multiple axes
+ chainer.testing.product({
'shape': [(1, 3, 4)],
'axis': powerset([*range(3)]) + powerset([*range(-1, -3, -1)]),
})
+ chainer.testing.product({
'shape': [(3, 0, 2)],
'axis': powerset([*range(3)]) + powerset([*range(-1, -3, -1)]),
})
+ chainer.testing.product({
'shape': [(1,)],
'axis': powerset([*range(1)]) + powerset([*range(-1, -1, -1)]),
})
+ chainer.testing.product({
'shape': [(0,)],
'axis': powerset([*range(1)]) + powerset([*range(-1, -1, -1)]),
})
))
@chainer.testing.parameterize_pytest('contiguous', ['C', None])
class TestFlip(op_utils.NumpyOpTest):
def setup(self, dtype):
# TODO(kshitij12345) : Remove when #6621 is in.
if numpy.dtype(dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
self.dtype = dtype
if dtype == 'float16':
self.check_backward_options.update({'rtol': 1e-3, 'atol': 1e-3})
def generate_inputs(self):
a = array_utils.uniform(self.shape, self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
return xp.flip(a, self.axis),
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(
chainerx.DimensionError, numpy.AxisError))
@pytest.mark.parametrize('shape,axis', [
# Axis out of range.
((), 1),
((2,), 3),
((2,), -3),
((2, 4), 4),
((1, 1, 2), -4),
((1, 1, 2), (0, 4)),
((1, 1, 2), (0, -6)),
])
def test_flip_invalid(xp, shape, axis):
a = array_utils.create_dummy_ndarray(xp, shape, 'float32')
return xp.flip(a, axis)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(2, 2, 2)],
})
+ chainer.testing.product({
'shape': [(2, 1, 3)],
})
+ chainer.testing.product({
'shape': [(0, 1, 3, 4)],
})
+ chainer.testing.product({
'shape': [(1, 0, 3, 4)],
})
+ chainer.testing.product({
'shape': [(1, 0, 3, 4, 0)],
})
))
@chainer.testing.parameterize_pytest('contiguous', ['C', None])
@chainer.testing.parameterize_pytest('func_name', [
'fliplr',
'flipud'
])
class TestFlipLRUD(op_utils.NumpyOpTest):
def setup(self, dtype):
        # TODO(kshitij12345): Remove when #6621 is in.
if numpy.dtype(dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
self.dtype = dtype
if dtype == 'float16':
self.check_backward_options.update({'rtol': 1e-3, 'atol': 1e-3})
def generate_inputs(self):
a = array_utils.uniform(self.shape, self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if self.func_name == 'fliplr':
b = xp.fliplr(a)
elif self.func_name == 'flipud':
b = xp.flipud(a)
return b,
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(
chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('shape', [
(),
(1,),
(10,),
])
def test_fliplr_invalid(xp, shape):
a = array_utils.create_dummy_ndarray(xp, shape, 'float32')
return xp.fliplr(a)
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(
chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('shape', [
(),
])
def test_flipud_invalid(xp, shape):
a = array_utils.create_dummy_ndarray(xp, shape, 'float32')
return xp.flipud(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({'shapes': [
[(1,)],
[(0,), (0,)],
[(0, 0,), (0, 0,)],
[(1, 0,), (1, 0,)],
[(3, 4, 5), (3, 4, 5), (3, 4, 5)],
[(2, 3, 2), (2, 3, 2), (2, 3, 2)],
[(1, 0, 1), (1, 0, 1), (1, 0, 1)],
[(2, 0, 0), (2, 0, 0), (2, 0, 0)],
[(1, 0, 1, 0), (1, 0, 1, 0), (1, 0, 1, 0)],
[(0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0)],
[(2, 2, 2, 2), (2, 2, 2, 2), (2, 2, 2, 2)],
], 'func_name': [
'hstack', 'vstack', 'dstack'
],
'dtype': chainerx.testing.dtypes.all_dtypes
})
))
class TestHVDStack(op_utils.NumpyOpTest):
dtypes = None
def setup(self):
if numpy.dtype(self.dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
return _make_inputs(self.shapes, [self.dtype] * len(self.shapes))
def forward_xp(self, inputs, xp):
if self.func_name == 'hstack':
y = xp.hstack(inputs)
elif self.func_name == 'vstack':
y = xp.vstack(inputs)
elif self.func_name == 'dstack':
y = xp.dstack(inputs)
return y,
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(
chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('shape', [
[(2, 1), (1, 2)],
[(1, 1, 1), (2, 3, 4)],
[(2, 1, 4), (1, 4, 5)],
[(1, 1, 2), (3, 5, 8)]
])
@pytest.mark.parametrize('func_name', [
'hstack', 'vstack', 'dstack'
])
def test_hvdstack_invalid_shapes(func_name, xp, shape):
inputs = _make_inputs(shape, ['float32'] * len(shape))
inputs = [xp.array(a) for a in inputs]
if func_name == 'hstack':
b = xp.hstack(inputs)
elif func_name == 'vstack':
b = xp.vstack(inputs)
elif func_name == 'dstack':
b = xp.dstack(inputs)
return b
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(
chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('func_name', [
'hstack', 'vstack', 'dstack'
])
def test_hvdstack_invalid_empty(func_name, xp):
inputs = []
if func_name == 'hstack':
output = xp.hstack(inputs)
elif func_name == 'vstack':
output = xp.vstack(inputs)
elif func_name == 'dstack':
output = xp.dstack(inputs)
return output
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({'shapes': [
(1,),
(1, 1),
(1, 1, 1),
(2, 2, 2, 2),
],
'dtype': chainerx.testing.dtypes.all_dtypes
})
))
class TestAtLeast2d(op_utils.NumpyOpTest):
dtypes = None
def setup(self):
if numpy.dtype(self.dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
a = numpy.random.uniform(0, 1, self.shapes).astype(self.dtype)
return a,
    def forward_xp(self, inputs, xp):
        x, = inputs
y = xp.atleast_2d(x)
return y,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({'shapes': [
(1,),
(1, 1),
(1, 1, 1),
(2, 2, 2, 2),
],
'dtype': chainerx.testing.dtypes.all_dtypes
})
))
class TestAtLeast3d(op_utils.NumpyOpTest):
dtypes = None
def setup(self):
if numpy.dtype(self.dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
a = numpy.random.uniform(0, 1, self.shapes).astype(self.dtype)
return a,
    def forward_xp(self, inputs, xp):
        x, = inputs
y = xp.atleast_3d(x)
return y,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# sequence args
chainer.testing.product({'arr_shape': [
(1, 2, 3)
],
'src': list(itertools.permutations(range(3), 3)),
'dst': list(itertools.permutations(range(3), 3)),
'dtype': chainerx.testing.dtypes.all_dtypes
}) + chainer.testing.product({'arr_shape': [
(1, 2, 3)
],
'src': list(itertools.permutations(range(3), 2)),
'dst': list(itertools.permutations(range(3), 2)),
'dtype': chainerx.testing.dtypes.all_dtypes
})
+ chainer.testing.product({'arr_shape': [
(1, 2, 3)
],
'src': list(itertools.permutations(range(3), 1)),
'dst': list(itertools.permutations(range(3), 1)),
'dtype': chainerx.testing.dtypes.all_dtypes
})
# negative axis
+ chainer.testing.product({'arr_shape': [
(1, 2, 3)
],
'src': list(itertools.permutations(range(0, -3, -1), 3)),
'dst': list(itertools.permutations(range(0, -3, -1), 3)),
'dtype': chainerx.testing.dtypes.all_dtypes
}) + chainer.testing.product({'arr_shape': [
(1, 2, 3)
],
'src': list(itertools.permutations(range(0, -3, -1), 2)),
'dst': list(itertools.permutations(range(0, -3, -1), 2)),
'dtype': chainerx.testing.dtypes.all_dtypes
})
+ chainer.testing.product({'arr_shape': [
(1, 2, 3)
],
'src': list(itertools.permutations(range(0, -3, -1), 1)),
'dst': list(itertools.permutations(range(0, -3, -1), 1)),
'dtype': chainerx.testing.dtypes.all_dtypes
})
# empty
+ chainer.testing.product({'arr_shape': [
(1, 2, 3)
],
'src': [()],
'dst': [()],
'dtype': chainerx.testing.dtypes.all_dtypes
})
# integer args
+ chainer.testing.product({'arr_shape': [
(1, 2, 3)
],
'src': list(range(3)),
'dst': list(range(3)),
'dtype': chainerx.testing.dtypes.all_dtypes
})
))
class TestMoveaxis(op_utils.NumpyOpTest):
dtypes = None
def setup(self):
if numpy.dtype(self.dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
a = numpy.random.uniform(0, 1, self.arr_shape).astype(self.dtype)
return a,
    def forward_xp(self, inputs, xp):
        x, = inputs
y = xp.moveaxis(x, self.src, self.dst)
return y,
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(
chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('shape,source,dst', [
# differing src, dst
((1, 2, 3), (1, 2), (2,)),
# out of bounds
((1, 2, 3), (1, 4), (2, 0)),
((1, 2, 3), (1, 2), (-4, 0)),
# duplicate
((1, 2, 3), (1, 1), (2, 0)),
((1, 2, 3), (1, 2), (2, 2)),
])
def test_moveaxis_invalid(xp, shape, source, dst):
a = array_utils.uniform(shape, 'float')
a = xp.array(a)
return xp.moveaxis(a, source, dst)
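# Informal reminder of moveaxis semantics (mirrors NumPy): the source
# axes are moved to the destination positions and the remaining axes keep
# their relative order, e.g. for shape (2, 3, 4):
#
#   numpy.moveaxis(a, 0, -1)  # shape becomes (3, 4, 2)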
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({'dst_shape,src_shape,where_shape': [
# Same Shapes
((2, 3), (2, 3), (2, 3)),
# Broadcast Shapes
((2, 3), (1, 3), (1, 3)),
((2, 3), (2, 1), (1, 3)),
((2, 3), (2, 3), (1, 3)),
((4, 5), (4, 1), (1, 5)),
((1, 4, 5), (1, 4, 1), (1, 1, 5)),
((2, 3), (2, 3), (2, 3)),
# Omit where
((2, 3), (2, 3), None),
],
'in_dtypes,out_dtype': dtype_utils.result_numeric_dtypes_two_arrays,
'casting': ['no'],
})
))
class TestCopyTo(op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
check_numpy_strides_compliance = False
forward_accept_errors = (TypeError, chainerx.DtypeError)
def generate_inputs(self):
dst_dtype, src_dtype = self.in_dtypes
dst = array_utils.uniform(self.dst_shape, dst_dtype)
src = array_utils.uniform(self.src_shape, src_dtype)
where = array_utils.uniform(
self.where_shape if self.where_shape is not None else (1,),
'float32', 0, 1) > 0.5
return dst, src, where
def forward_xp(self, inputs, xp):
dst, src, where = inputs
if xp is chainerx:
dst = dst.as_grad_stopped().copy()
src = src.as_grad_stopped()
where = where.as_grad_stopped()
else:
dst = dst.copy()
kwargs = {}
if self.casting is not None:
kwargs['casting'] = self.casting
if self.where_shape is not None:
kwargs['where'] = where
xp.copyto(dst, src, **kwargs)
return dst,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'where': [True, False, 2, 1.2],
})
))
class TestCopyToScalarWhere(op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
check_numpy_strides_compliance = False
def generate_inputs(self):
dst = array_utils.uniform((2, 3), 'float32')
src = array_utils.uniform((2, 3), 'float32')
return dst, src
def forward_xp(self, inputs, xp):
dst, src = inputs
if xp is chainerx:
dst = dst.as_grad_stopped().copy()
src = src.as_grad_stopped()
else:
dst = dst.copy()
xp.copyto(dst, src, casting='no', where=self.where)
return dst,
def test_copyto_invalid_casting():
a = array_utils.create_dummy_ndarray(chainerx, (2, 3), 'float32')
b = array_utils.create_dummy_ndarray(chainerx, (3,), 'float32')
with pytest.raises(ValueError):
chainerx.copyto(a, b, casting='some_invalid_casting')
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('a_shape,b_shape', _reshape_shape)
@chainer.testing.parameterize_pytest('is_module', [True, False])
class TestRavel(op_utils.NumpyOpTest):
forward_accept_errors = (TypeError, chainerx.ChainerxError)
check_numpy_strides_compliance = False
def generate_inputs(self):
a = array_utils.shaped_arange(self.a_shape, 'float64')
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if self.is_module:
b = xp.ravel(a)
else:
b = a.ravel()
return b,
| 49,344
| 28.887947
| 79
|
py
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_misc.py
|
import chainer
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import math_utils
from chainerx_tests import op_utils
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSqrt(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.sqrt(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.numeric_dtypes),
'input': ['random'],
'contiguous': [None, 'C'],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.float_dtypes),
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSquare(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.square(a)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_square_invalid_dtypes(device):
shape = (3, 2)
bool_array = chainerx.array(array_utils.uniform(shape, 'bool_'))
with pytest.raises(chainerx.DtypeError):
chainerx.square(bool_array)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': ['random'],
'contiguous': [None, 'C'],
'is_module': [True, False],
})
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
'is_module': [True, False],
})
))
class TestAbs(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def func(self, xp, a):
# Check correct alias.
assert chainerx.abs is chainerx.absolute
# Check computed result.
if self.is_module:
return xp.abs(a)
else:
return abs(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [-2.5, -1.5, -0.1, 0.1, 1.5, 2.5],
'contiguous': [None, 'C'],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestFabs(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.fabs(a)
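# Note: numpy.fabs always returns a floating-point result (integer inputs
# are promoted), which is presumably why integer input dtypes are paired
# with float output dtypes via in_out_dtypes_math_functions above.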
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
numpy.asarray(0.5),
numpy.asarray(-1.2),
numpy.asarray(10.9),
numpy.asarray(-10.6),
numpy.asarray(0.),
numpy.asarray(float('inf')),
numpy.asarray(-float('inf')),
numpy.asarray(float('nan')),
numpy.full((), 2.1),
numpy.full((0,), 2),
numpy.full((2, 3), 0),
numpy.full((2, 3), 2.6),
numpy.full((1, 1), -1.01),
numpy.full((1, 1), 1.99),
])
@pytest.mark.parametrize('dtypes', [
(('int8',), 'int8'),
(('int16',), 'int16'),
(('int32',), 'int32'),
(('int64',), 'int64'),
(('float16',), 'float16'),
(('float32',), 'float32'),
(('float64',), 'float64'),
])
def test_sign(xp, device, input, dtypes):
(in_dtype, ), out_dtype = dtypes
a = xp.array(input.astype(in_dtype))
return xp.sign(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.all_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': dtype_utils.result_comparable_dtypes_two_arrays,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.all_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# TODO(aksub99): Add tests for inf and NaN.
))
class TestMaximum(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
    def generate_inputs(self):
        # maximum() has an ill-defined gradient where the operands tie, so
        # resample until the inputs are either integral or separated by
        # more than 0.01.
        for _ in range(10):
a, b = super().generate_inputs()
if ((a.dtype.kind in 'biu' and b.dtype.kind in 'biu') or
(numpy.abs(a - b) > 0.01).all()):
return a, b
assert False, 'Couldn\'t construct a test case.'
def func(self, xp, a, b):
return xp.maximum(a, b)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtype', chainerx.testing.numeric_dtypes)
def test_maximum_invalid_dtypes(device, dtype):
shape = (3, 2)
bool_array = chainerx.array(array_utils.uniform(shape, 'bool_'))
numeric_array = chainerx.array(array_utils.uniform(shape, dtype))
with pytest.raises(chainerx.DtypeError):
chainerx.maximum(bool_array, numeric_array)
with pytest.raises(chainerx.DtypeError):
chainerx.maximum(numeric_array, bool_array)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': (
dtype_utils.result_comparable_dtypes_array_scalar),
'input': ['random'],
'scalar_value': [0, 1],
'is_scalar_rhs': [False],
})
# Differentiable cases
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': (
dtype_utils.result_comparable_dtypes_array_scalar),
'input': [numpy.array([1, 3, 3, 4])],
'scalar_value': [0, 2, 5],
'is_scalar_rhs': [False, True],
})
# Non-differentiable cases
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': (
dtype_utils.result_comparable_dtypes_array_scalar),
'input': [numpy.array([1, 3, 3, 4])],
'scalar_value': [1, 3, 4],
'is_scalar_rhs': [False, True],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
# Special float values
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': (
dtype_utils.result_float_dtypes_array_scalar),
# TODO(imanishi): Add test for NaN.
'input': [numpy.array([0, float('inf'), -float('inf')])],
'scalar_value': [-1, 0, 1, float('inf'), -float('inf')],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestMaximumScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def func_scalar(self, xp, a, scalar):
if self.is_scalar_rhs:
return xp.maximum(a, scalar)
else:
return xp.maximum(scalar, a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': dtype_utils.result_comparable_dtypes_two_arrays,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.all_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# TODO(aksub99): Add tests for inf and NaN.
))
class TestMinimum(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
    def generate_inputs(self):
        # Same tie avoidance as in TestMaximum above: minimum() is not
        # differentiable where the operands tie.
        for _ in range(10):
a, b = super().generate_inputs()
if ((a.dtype.kind in 'biu' and b.dtype.kind in 'biu') or
(numpy.abs(a - b) > 0.01).all()):
return a, b
assert False, 'Couldn\'t construct a test case.'
def func(self, xp, a, b):
return xp.minimum(a, b)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtype', chainerx.testing.numeric_dtypes)
def test_minimum_invalid_dtypes(device, dtype):
shape = (3, 2)
bool_array = chainerx.array(array_utils.uniform(shape, 'bool_'))
numeric_array = chainerx.array(array_utils.uniform(shape, dtype))
with pytest.raises(chainerx.DtypeError):
chainerx.minimum(bool_array, numeric_array)
with pytest.raises(chainerx.DtypeError):
chainerx.minimum(numeric_array, bool_array)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': (
dtype_utils.result_comparable_dtypes_array_scalar),
'input': ['random'],
'scalar_value': [1],
'is_scalar_rhs': [False],
})
# Differentiable cases
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': (
dtype_utils.result_comparable_dtypes_array_scalar),
'input': [numpy.array([1, 3, 3, 4])],
'scalar_value': [0, 2, 5],
'is_scalar_rhs': [False, True],
})
# Non-differentiable cases
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': (
dtype_utils.result_comparable_dtypes_array_scalar),
'input': [numpy.array([1, 3, 3, 4])],
'scalar_value': [1, 3, 4],
'is_scalar_rhs': [False, True],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
# Special float values
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': (
dtype_utils.result_float_dtypes_array_scalar),
# TODO(imanishi): Add test for NaN.
'input': [numpy.array([0, float('inf'), -float('inf')])],
'scalar_value': [-1, 0, 1, float('inf'), -float('inf')],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestMinimumScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def func_scalar(self, xp, a, scalar):
if self.is_scalar_rhs:
return xp.minimum(a, scalar)
else:
return xp.minimum(scalar, a)
| 12,857
| 32.310881
| 79
|
py
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_rnn.py
|
import chainer
import chainerx
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import op_utils
n_step_lstm_dtypes_valid = dtype_utils._permutate_dtype_mapping([
# Floats.
(('float16', ), ()),
(('float32', ), ()),
(('float64', ), ()),
])
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'n_layers,hidden_size,input_size,batches', [
(2, 2, 1, (1, 1, 1)),
(2, 2, 3, (3, 2, 1)),
(3, 8, 4, (4, 2, 1)),
(4, 12, 4, (4, 3, 2)),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes, out_dtype', n_step_lstm_dtypes_valid)
])
))
class TestNStepLstm(op_utils.ChainerOpTest):
def setup(self):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 5e-3, 'atol': 5e-2})
if self.in_dtypes[0] == 'float16':
self.check_forward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_double_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
device = chainerx.get_default_device()
if device.backend.name == 'cuda':
if self.in_dtypes[0] != 'float32':
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers, self.batches[0], self.hidden_size)
dtype = self.in_dtypes[0]
h = array_utils.uniform(h_shape, dtype)
c = array_utils.uniform(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = [array_utils.uniform((self.batches[b], in_size), dtype)
for b in range(len(self.batches))]
def w_in(i, j):
return in_size if i == 0 and j < 4 else out_size
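        # Flat parameter layout (unpacked again in process_input below):
        # [h, c, x_0, ..., x_{T-1}, then per layer 8 weight matrices
        # followed by 8 bias vectors]. The 8 parameters per layer are the
        # four LSTM gates, each with an input-side and a recurrent-side
        # weight; only the first layer's input-side weights (j < 4) have
        # in_size columns. The GRU and vanilla-RNN tests below use the same
        # scheme with 6 and 2 parameters per layer, respectively.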
inputs = []
inputs.append(h)
inputs.append(c)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for i in range(8):
inputs.append(array_utils.uniform(
(out_size, w_in(n, i)), dtype))
for i in range(8):
inputs.append(array_utils.uniform((out_size,), dtype))
return tuple(inputs)
def process_input(self, inputs):
h = inputs[0]
c = inputs[1]
xs = inputs[2:2 + len(self.batches)]
ws = []
bs = []
index = 2 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 8])
bs.append(inputs[index + 8: index + 16])
index += 16
return h, c, ws, bs, xs
def forward_chainerx(self, inputs):
h, c, ws, bs, xs = self.process_input(inputs)
out = chainerx.n_step_lstm(self.n_layers, h, c, ws, bs, xs)
rets = []
rets.append(out[0])
rets.append(out[1])
for i in range(len(out[2])):
rets.append(out[2][i])
return tuple(rets)
def forward_chainer(self, inputs):
h, c, ws, bs, xs = self.process_input(inputs)
out = chainer.functions.n_step_lstm(
self.n_layers, 0.0, h, c, ws, bs, xs)
rets = []
rets.append(out[0])
rets.append(out[1])
for i in range(len(out[2])):
rets.append(out[2][i])
return tuple(rets)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'n_layers,hidden_size,input_size,batches', [
(1, 2, 1, (1, 1, 1)),
(2, 6, 8, (4, 2, 2)),
(3, 8, 4, (4, 2, 1)),
(4, 12, 4, (4, 3, 2)),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes,out_dtype', n_step_lstm_dtypes_valid)
])
))
class TestNStepBiLstm(op_utils.ChainerOpTest):
def setup(self):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 5e-3, 'atol': 5e-2})
if self.in_dtypes[0] == 'float16':
self.check_forward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_double_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
device = chainerx.get_default_device()
if device.backend.name == 'cuda':
if self.in_dtypes[0] != 'float32':
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers * 2, self.batches[0], self.hidden_size)
dtype = self.in_dtypes[0]
h = array_utils.uniform(h_shape, dtype)
c = array_utils.uniform(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = [array_utils.uniform((self.batches[b], in_size), dtype)
for b in range(len(self.batches))]
def w_in(i, j):
if i == 0 and j < 4:
return in_size
elif i > 0 and j < 4:
return out_size * 2
else:
return out_size
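        # Bidirectional layout: each layer stores two parameter sets (the
        # `direction` loop below), and every layer after the first consumes
        # the concatenated forward and backward hidden states, hence the
        # out_size * 2 input-side weights for i > 0.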
inputs = []
inputs.append(h)
inputs.append(c)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for direction in (0, 1):
for i in range(8):
inputs.append(array_utils.uniform(
(out_size, w_in(n, i)), dtype))
for i in range(8):
inputs.append(array_utils.uniform((out_size,), dtype))
return tuple(inputs)
def process_input(self, inputs):
h = inputs[0]
c = inputs[1]
xs = inputs[2:2 + len(self.batches)]
ws = []
bs = []
index = 2 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 8])
bs.append(inputs[index + 8: index + 16])
ws.append(inputs[index + 16: index + 24])
bs.append(inputs[index + 24: index + 32])
index += 32
return h, c, ws, bs, xs
def forward_chainerx(self, inputs):
h, c, ws, bs, xs = self.process_input(inputs)
out = chainerx.n_step_bilstm(self.n_layers, h, c, ws, bs, xs)
rets = []
rets.append(out[0])
rets.append(out[1])
for i in range(len(out[2])):
rets.append(out[2][i])
return tuple(rets)
def forward_chainer(self, inputs):
h, c, ws, bs, xs = self.process_input(inputs)
out = chainer.functions.n_step_bilstm(
self.n_layers, 0.0, h, c, ws, bs, xs)
rets = []
rets.append(out[0])
rets.append(out[1])
for i in range(len(out[2])):
rets.append(out[2][i])
return tuple(rets)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'n_layers,hidden_size,input_size,batches', [
(2, 2, 1, (1, 1, 1)),
(2, 2, 3, (3, 2, 1)),
(3, 8, 4, (4, 2, 1)),
(4, 6, 4, (4, 3, 2)),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes, out_dtype', n_step_lstm_dtypes_valid)
])
))
@op_utils.fix_random() # This test is unstable.
class TestNStepGru(op_utils.ChainerOpTest):
def setup(self):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 5e-3, 'atol': 5e-2})
if self.in_dtypes[0] == 'float16':
self.check_forward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_double_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
device = chainerx.get_default_device()
if device.backend.name == 'cuda':
if self.in_dtypes[0] != 'float32':
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers, self.batches[0], self.hidden_size)
dtype = self.in_dtypes[0]
h = array_utils.uniform(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = [array_utils.uniform((self.batches[b], in_size), dtype)
for b in range(len(self.batches))]
def w_in(i, j):
return in_size if i == 0 and j < 3 else out_size
inputs = []
inputs.append(h)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for i in range(6):
inputs.append(array_utils.uniform(
(out_size, w_in(n, i)), dtype))
for i in range(6):
inputs.append(array_utils.uniform((out_size,), dtype))
return tuple(inputs)
def process_input(self, inputs):
h = inputs[0]
xs = inputs[1:1 + len(self.batches)]
ws = []
bs = []
index = 1 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 6])
bs.append(inputs[index + 6: index + 12])
index += 12
return h, ws, bs, xs
def forward_chainerx(self, inputs):
h, ws, bs, xs = self.process_input(inputs)
out = chainerx.n_step_gru(self.n_layers, h, ws, bs, xs)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
def forward_chainer(self, inputs):
h, ws, bs, xs = self.process_input(inputs)
out = chainer.functions.n_step_gru(
self.n_layers, 0.0, h, ws, bs, xs)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'n_layers,hidden_size,input_size,batches', [
(2, 2, 1, (1, 1, 1)),
(2, 2, 3, (3, 2, 1)),
(3, 4, 4, (4, 2, 1)),
(4, 5, 4, (4, 3, 2)),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes,out_dtype', n_step_lstm_dtypes_valid)
])
))
class TestNStepBiGRU(op_utils.ChainerOpTest):
def setup(self):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 5e-2, 'atol': 5e-2})
if self.in_dtypes[0] == 'float16':
self.check_forward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_double_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
device = chainerx.get_default_device()
if device.backend.name == 'cuda':
if self.in_dtypes[0] != 'float32':
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers * 2, self.batches[0], self.hidden_size)
dtype = self.in_dtypes[0]
h = array_utils.uniform(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = [array_utils.uniform((self.batches[b], in_size), dtype)
for b in range(len(self.batches))]
def w_in(i, j):
if i == 0 and j < 3:
return in_size
elif i > 0 and j < 3:
return out_size * 2
else:
return out_size
inputs = []
inputs.append(h)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for direction in (0, 1):
for i in range(6):
inputs.append(array_utils.uniform(
(out_size, w_in(n, i)), dtype))
for i in range(6):
inputs.append(array_utils.uniform((out_size,), dtype))
return tuple(inputs)
def process_input(self, inputs):
h = inputs[0]
xs = inputs[1:1 + len(self.batches)]
ws = []
bs = []
index = 1 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 6])
bs.append(inputs[index + 6: index + 12])
ws.append(inputs[index + 12: index + 18])
bs.append(inputs[index + 18: index + 24])
index += 24
return h, ws, bs, xs
def forward_chainerx(self, inputs):
h, ws, bs, xs = self.process_input(inputs)
out = chainerx.n_step_bigru(self.n_layers, h, ws, bs, xs)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
def forward_chainer(self, inputs):
h, ws, bs, xs = self.process_input(inputs)
out = chainer.functions.n_step_bigru(
self.n_layers, 0.0, h, ws, bs, xs)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
@op_utils.op_test(['native:0', 'cuda:0'])
# The ReLU activation is unstable around 0 and apparently cannot be dodged
# automatically.
@op_utils.fix_random()
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'n_layers,hidden_size,input_size,batches,activation', [
(2, 2, 1, (1, 1, 1), "tanh"),
(2, 2, 1, (1, 1, 1), "relu"),
(2, 2, 3, (3, 2, 1), "tanh"),
(2, 2, 3, (3, 2, 1), "relu"),
(3, 4, 4, (4, 2, 1), "tanh"),
(3, 4, 4, (4, 2, 1), "relu"),
(4, 5, 4, (4, 3, 2), "tanh"),
(4, 5, 4, (4, 3, 2), "relu"),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes, out_dtype', n_step_lstm_dtypes_valid)
])
))
class TestNStepRNN(op_utils.ChainerOpTest):
check_numpy_strides_compliance = False
dodge_nondifferentiable = True
def setup(self):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 5e-2, 'atol': 5e-2})
if self.in_dtypes[0] == 'float16':
self.check_forward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_double_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
device = chainerx.get_default_device()
if device.backend.name == 'cuda':
if self.in_dtypes[0] != 'float32':
self.skip_forward_test = True
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers, self.batches[0], self.hidden_size)
dtype = self.in_dtypes[0]
h = array_utils.uniform(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = [array_utils.uniform((self.batches[b], in_size), dtype)
for b in range(len(self.batches))]
def w_in(i, j):
return in_size if i == 0 and j < 1 else out_size
inputs = []
inputs.append(h)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for i in range(2):
inputs.append(array_utils.uniform(
(out_size, w_in(n, i)), dtype))
for i in range(2):
inputs.append(array_utils.uniform((out_size,), dtype))
return tuple(inputs)
def process_input(self, inputs):
h = inputs[0]
xs = inputs[1:1 + len(self.batches)]
ws = []
bs = []
index = 1 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 2])
bs.append(inputs[index + 2: index + 4])
index += 4
return h, ws, bs, xs
def forward_chainerx(self, inputs):
h, ws, bs, xs = self.process_input(inputs)
out = chainerx.n_step_rnn(
self.n_layers, h, ws, bs, xs, self.activation)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
def forward_chainer(self, inputs):
h, ws, bs, xs = self.process_input(inputs)
out = chainer.functions.n_step_rnn(
self.n_layers, 0.0, h, ws, bs, xs, self.activation)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
@op_utils.op_test(['native:0', 'cuda:0'])
# The ReLU activation is unstable around 0 and apparently cannot be dodged
# automatically.
@op_utils.fix_random()
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'n_layers,hidden_size,input_size,batches,activation', [
(2, 2, 1, (1, 1, 1), "tanh"),
(2, 2, 1, (1, 1, 1), "relu"),
(2, 2, 3, (3, 2, 1), "tanh"),
(2, 2, 3, (3, 2, 1), "relu"),
(3, 4, 4, (4, 2, 1), "tanh"),
(3, 4, 4, (4, 2, 1), "relu"),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes,out_dtype', n_step_lstm_dtypes_valid)
])
))
class TestNStepBiRNN(op_utils.ChainerOpTest):
check_numpy_strides_compliance = False
dodge_nondifferentiable = True
def setup(self):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 5e-2, 'atol': 5e-2})
if self.in_dtypes[0] == 'float16':
self.check_forward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_double_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
device = chainerx.get_default_device()
if device.backend.name == 'cuda':
if self.in_dtypes[0] != 'float32':
self.skip_forward_test = True
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers * 2, self.batches[0], self.hidden_size)
dtype = self.in_dtypes[0]
low = -1.0
high = 1.0
if dtype == 'float16':
low = -0.5
high = 0.5
h = array_utils.uniform(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = [array_utils.uniform((self.batches[b], in_size),
dtype, low=low, high=high)
for b in range(len(self.batches))]
def w_in(i, j):
if i == 0 and j < 1:
return in_size
elif i > 0 and j < 1:
return out_size * 2
else:
return out_size
inputs = []
inputs.append(h)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for direction in (0, 1):
for i in range(2):
inputs.append(array_utils.uniform(
(out_size, w_in(n, i)), dtype, low=low, high=high))
for i in range(2):
inputs.append(array_utils.uniform(
(out_size,), dtype, low=low, high=high))
return tuple(inputs)
def process_input(self, inputs):
h = inputs[0]
xs = inputs[1:1 + len(self.batches)]
ws = []
bs = []
index = 1 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 2])
bs.append(inputs[index + 2: index + 4])
ws.append(inputs[index + 4: index + 6])
bs.append(inputs[index + 6: index + 8])
index += 8
return h, ws, bs, xs
def forward_chainerx(self, inputs):
h, ws, bs, xs = self.process_input(inputs)
out = chainerx.n_step_birnn(
self.n_layers, h, ws, bs, xs, self.activation)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
def forward_chainer(self, inputs):
h, ws, bs, xs = self.process_input(inputs)
out = chainer.functions.n_step_birnn(
self.n_layers, 0.0, h, ws, bs, xs, self.activation)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
| 22,430
| 33.037936
| 75
|
py
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_explog.py
|
import chainer
import chainerx
import numpy
from chainerx_tests import math_utils
from chainerx_tests import op_utils
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'shape', [
(2, 2),
(3, 3, 3),
(5, 5, 5),
(4, 1, 2, 4)
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes,out_dtype', math_utils.in_out_dtypes_math_functions)
])
))
class TestErf(op_utils.ChainerOpTest):
dodge_nondifferentiable = True
def setup(self, float_dtype):
dtype = float_dtype
if dtype == 'float16':
self.check_forward_options.update({'rtol': 1e-3, 'atol': 1e-3})
self.check_backward_options.update({'rtol': 5e-2, 'atol': 5e-2})
self.check_double_backward_options.update({
'rtol': 5e-2, 'atol': 5e-2})
self.dtype = dtype
def generate_inputs(self):
shape = self.shape
dtype = self.dtype
x = numpy.random.normal(-1, 1, shape).astype(dtype)
return x,
def forward_chainerx(self, inputs):
x, = inputs
y = chainerx.erf(x)
return y,
def forward_chainer(self, inputs):
x, = inputs
y = chainer.functions.erf(x)
return y,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [0, 2, -2],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
        'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [0, 2, -2],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestExp(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.exp(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [0, 2, -2],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
        'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [0, 2, -2],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestExpm1(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.expm1(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [0, 2, -2],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
        'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [0, 2, -2],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestExp2(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.exp2(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestLog(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.log(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestLog10(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.log10(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [1, 3],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [1, 3],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestLog2(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.log2(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [1, 3],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [1, 3],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestLog1p(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.log1p(a)
| 8,303
| 30.574144
| 77
|
py
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_indexing.py
|
import unittest
import numpy
import pytest
import chainer.testing
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import math_utils
from chainerx_tests import op_utils
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,indices', [
# empty indexing
((), ()),
((3,), ()),
((2, 2, 2), ()),
# integer indexing - non-tuple indexing
((3,), 0),
((3,), 1),
((3,), 2),
((3,), -1),
((2, 3), 0),
((2, 3), 1),
((2, 3), numpy.int8(-1)),
((2, 3), numpy.int32(0)),
((2, 3), numpy.uint64(1)),
    # integer indexing - tuple indexing
((3,), (0,)),
((3,), (1,)),
((3,), (2,)),
((3,), (-1,)),
((2, 3), (0,)),
((2, 3), (1,)),
((2, 3), (0, 0)),
((2, 3), (1, 1)),
((2, 3, 4), (0, -2, 3)),
((2, 3, 4), (1, 0)),
# slice indexing - non-tuple indexing
((3,), slice(None)),
((3,), slice(2)),
((3,), slice(0, 3)),
((3,), slice(0, 2)),
((3,), slice(1, 3)),
((3,), slice(0, 0)),
((3,), slice(0, 1)),
((3,), slice(2, 0, -1)),
((3,), slice(-2, -1)),
((3,), slice(2, None, -1)),
((3,), slice(None, 0, 1)),
((3,), slice(None, -1, -1)),
((3,), slice(None, -2, -1)),
((6,), slice(0, 6, 2)),
((6,), slice(1, 6, 2)),
((6,), slice(5, None, -2)),
((6,), slice(4, 10)),
((6,), slice(10, 5, -1)),
((6,), slice(5, -1)),
((6,), slice(5, -1, -1)),
((6,), slice(-1, 5)),
((6,), slice(-1, 5, -1)),
# slice indexing - tuple indexing
((3,), (slice(None),)),
((3,), (slice(2),)),
((3,), (slice(0, 3),)),
((3,), (slice(0, 2),)),
((3,), (slice(1, 3),)),
((3,), (slice(0, 0),)),
((3,), (slice(0, 1),)),
((3,), (slice(2, 0, -1),)),
((3,), (slice(-2, -1),)),
((3,), (slice(2, None, -1),)),
((3,), (slice(None, 0, 1),)),
((3,), (slice(None, -1, -1),)),
((3,), (slice(None, -2, -1),)),
((6,), (slice(0, 6, 2),)),
((6,), (slice(1, 6, 2),)),
((6,), (slice(5, None, -2),)),
((6,), (slice(50, 1, -1),)),
((6,), (slice(3, 3, 1),)),
((6,), (slice(3, 3, -2),)),
((6,), (slice(50, 50, 1),)),
((6,), (slice(50, 50, -2),)),
((6,), (slice(-50, -50, 1),)),
((6,), (slice(-50, -50, -2),)),
((2, 3), (slice(None), slice(None))),
((2, 3), (slice(1), slice(2))),
((2, 3), (slice(0, 2), slice(0, 3))),
((2, 3), (slice(0, 2), slice(0, -1))),
((2, 3), (slice(0, None, -1), slice(2, 3))),
((2, 3), (slice(0, None, None), slice(-2, 0, -1))),
((2, 3), (slice(1, 2), slice(0, 2))),
((2, 3), (slice(-2, None, -1), slice(0, 3))),
((2, 3), (slice(-2, None, -1), slice(-3, None, -1))),
((2, 3), (slice(-2, None, -1), slice(None, None, -2))),
((2, 3), (slice(1, 2), slice(None, None, 1))),
((2, 3), (slice(1, 2), slice(None, None, 2))),
((2, 3, 4), (slice(1), slice(-2, 3), slice(1, None, -1))),
# newaxis indexing - non-tuple indexing
((), chainerx.newaxis),
((3,), chainerx.newaxis),
# newaxis indexing - tuple indexing
((), (chainerx.newaxis,)),
((3,), (chainerx.newaxis,)),
((2, 3), (chainerx.newaxis, chainerx.newaxis)),
# ellipsis indexing - non-tuple indexing
((), Ellipsis),
((3,), Ellipsis),
# ellipsis indexing - tuple indexing
((), (Ellipsis,)),
((2, 3), (Ellipsis,)),
# mixed indexing - tuple indexing
((2, 3), (0, slice(1, 3))),
((4, 3), (slice(1, 3), 1)),
((2, 3, 4), (1, slice(2,), slice(1, 3))),
((2, 3), (1, chainerx.newaxis, slice(1, 3))),
((2, 3, 4), (slice(0, 1), slice(1, 2), slice(1, 3), chainerx.newaxis)),
((2, 3, 4), (slice(0, 1), slice(1, 2), chainerx.newaxis, slice(1, 3))),
((2, 3, 4), (slice(0, 1), chainerx.newaxis, slice(1, 2), slice(1, 3))),
((2, 3, 4), (chainerx.newaxis, slice(0, 1), slice(1, 2), slice(1, 3))),
((2, 3, 4),
(1, slice(2,), chainerx.newaxis, slice(1, 3), chainerx.newaxis)),
((2, 3, 4), (0, Ellipsis)),
((2, 3, 4), (Ellipsis, 2)),
((2, 3, 4), (1, Ellipsis, 2)),
((2, 3, 4), (1, Ellipsis, 2, 3)),
((2, 3, 4), (chainerx.newaxis, Ellipsis, chainerx.newaxis)),
((2, 3, 4), (1, Ellipsis, chainerx.newaxis, 3)),
((2, 3, 4), (1, Ellipsis, 2, chainerx.newaxis, 3)),
((2, 3, 4), (slice(0, 1), Ellipsis, slice(1, 3))),
])
class TestGetitem(op_utils.NumpyOpTest):
# TODO(niboshi): Remove this
check_numpy_strides_compliance = False
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype('float32')
return x,
def forward_xp(self, inputs, xp):
x, = inputs
y = x[self.indices]
return y,
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('shape,indices', [
((), 0),
((), (1,)),
((), (1, 0)),
((3,), 3),
((3,), (0, 1)),
((2, 3,), (2, 0)),
((2,), (2, chainerx.newaxis, 3)),
((2,), (2, Ellipsis, chainerx.newaxis, 3)),
((2,), (Ellipsis, Ellipsis)),
])
def test_getitem_index_error(device, shape, indices):
a = array_utils.create_dummy_ndarray(chainerx, shape, 'float32')
with pytest.raises(IndexError):
a[indices]
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_getitem_zero_sized_offsets(device):
a = chainerx.arange(6)
b = a[3:3]
# Test pre-conditions.
assert b.size == 0
assert b.offset == 12
# The offset of `c` should be the same as `b` since `b` is empty.
c = b[2:]
assert c.size == 0
assert c.offset == b.offset
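# The offsets compared here are byte offsets: b starts 3 elements into a,
# and 3 * a.itemsize == 12 for the 4-byte dtype that chainerx.arange
# evidently produces here.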
@op_utils.op_test(['native:0', 'cuda:0'])
# TODO(hvy): Add cases where axis=None, when supported.
@chainer.testing.parameterize_pytest('shape,indices,axis', [
# Valid parameters
((3,), [0], 0),
((3,), [1], 0),
((2, 3), [0], 0),
((2, 3), [0], 1),
((2, 3), [0], -1),
((2, 3), [1], 0),
((2, 3), [0, -1], 0),
((2, 3), [1, 0], 0),
((2, 3), [1, 2], 1),
((2, 3), [2, 1], 1),
((2, 3), [[0], [1]], 0),
# Take from a duplicate index
((3, 2), [1, 1], 0),
# Invalid: Axis out of bounds
((2, 3), [0], 2),
((2, 3), [0], -3),
])
@chainer.testing.parameterize_pytest('is_module', [True, False])
@chainer.testing.parameterize_pytest(
'indices_type', ['list', 'numpy', 'xp'])
# TODO(niboshi): indices_dtype is ignored if indices_type == 'list', which is
# wasteful.
@chainer.testing.parameterize_pytest(
'indices_dtype', chainerx.testing.integral_dtypes)
@chainer.testing.parameterize_pytest(
'mode', ['raise', 'wrap', 'clip'])
@chainer.testing.parameterize_pytest(
'a_dtype', chainerx.testing.all_dtypes)
class TestTake(op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
forward_accept_errors = (chainerx.DimensionError, numpy.AxisError)
def setup(self):
if (self.mode == 'raise'
and numpy.dtype(self.indices_dtype).kind == 'u'
and (numpy.array(self.indices, 'int64') < 0).any()):
            raise unittest.SkipTest(
                'Negative indices underflow for unsigned dtypes, so the '
                'out-of-bounds behavior cannot be tested.')
if self.a_dtype == 'float16':
self.check_backward_options.update(
{'rtol': 1e-3, 'atol': 1e-3})
self.check_double_backward_options.update(
{'rtol': 1e-3, 'atol': 1e-3})
def generate_inputs(self):
a = numpy.random.uniform(-1, 1, self.shape).astype(self.a_dtype)
return a,
def forward_xp(self, inputs, xp):
indices = self.indices
axis = self.axis
indices_type = self.indices_type
a, = inputs
if (xp is chainerx and self.mode == 'raise'
and 'cuda' in xp.get_default_device().name):
            pytest.skip('CUDA is not supported with mode="raise"')
assert isinstance(indices, list)
if indices_type == 'list':
pass
elif indices_type == 'numpy':
indices = numpy.array(indices).astype(self.indices_dtype)
elif indices_type == 'xp':
indices = xp.array(indices).astype(self.indices_dtype)
else:
assert False, indices_type
if self.is_module:
b = xp.take(a, indices, axis, mode=self.mode)
else:
b = a.take(indices, axis, mode=self.mode)
return b,
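# The three modes follow numpy.take: 'raise' errors on out-of-bounds
# indices, 'wrap' reduces them modulo the axis length and 'clip' saturates
# them to the valid range, e.g.
#
#   >>> numpy.take(numpy.arange(3), [4], mode='wrap')
#   array([1])
#   >>> numpy.take(numpy.arange(3), [4], mode='clip')
#   array([2])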
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('shape,indices,axis', [
# Invalid: Index out of bounds
((2, 3), [2], 0),
((2, 3), [-3], 0),
])
def test_take_index_error(device, shape, indices, axis):
a = array_utils.create_dummy_ndarray(chainerx, shape, 'float32')
indices = numpy.array(indices).astype(numpy.int32)
error = IndexError
if device.backend.name == 'cuda':
error = chainerx.BackendError # Not supported in CUDA
with pytest.raises(error):
chainerx.take(a, indices, axis, mode='raise')
def _random_condition(shape, dtype, *, random_state=None):
if random_state is None:
random_state = numpy.random.RandomState()
neg_mask = random_state.randint(0, 2, size=shape).astype('bool')
cond = array_utils.uniform(shape, dtype, random_state=random_state)
# Replace zeros with nonzero, making the average number of zero elements
# in cond independent of the dtype.
cond[cond == 0] = 1
cond[neg_mask] = 0
return cond
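# Minimal usage sketch (hypothetical helper, not used by the tests below):
# the condition arrays built above encode False as 0 and True as any
# nonzero value, so a dtype-independent boolean mask can be recovered with
# a comparison.
def _example_condition_mask(shape=(2, 3), dtype='float32'):
    cond = _random_condition(shape, dtype)
    return cond != 0  # roughly half True on average, independent of dtype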
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('shape,indices,axis', [
    # Valid indices; the arrays are made non-contiguous below
((2, 3), [1, 1], 0),
((2, 3, 4), [0, 1, 1], 1),
])
def test_take_non_contiguous(device, shape, indices, axis):
a = numpy.random.uniform(-1, 1, shape).astype('float32')
indices = numpy.array(indices).astype(numpy.int32)
chx_a = chainerx.array(a).astype('float32')
a = numpy.transpose(a, axes=range(chx_a.ndim)[::-1])
chx_a = chainerx.transpose(chx_a, axes=range(chx_a.ndim)[::-1])
    assert not chx_a.is_contiguous
chx_indices = chainerx.array(indices).astype(numpy.int32)
chx_out = chainerx.take(chx_a, chx_indices, axis)
np_out = numpy.take(a, indices, axis)
numpy.testing.assert_array_equal(chx_out, np_out)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'cond_shape,in_shapes': [
# Same Shapes
((2, 3), ((2, 3), (2, 3))),
# Broadcast Shapes
((2, 3), ((1, 3), (1, 3))),
((2, 3), ((2, 1), (1, 3))),
((2, 3), ((2, 3), (1, 3))),
((4, 5), ((3, 4, 1), (1, 5))),
((1, 4, 5), ((3, 4, 1), (3, 1, 5))),
],
'cond_dtype': ['bool_'],
'in_dtypes,out_dtype': dtype_utils.result_dtypes_two_arrays,
})
# Dtype combinations
+ chainer.testing.product({
'cond_shape,in_shapes': [((2, 3), ((2, 3), (2, 3)))],
'cond_dtype': chainerx.testing.all_dtypes,
'in_dtypes,out_dtype': dtype_utils.result_dtypes_two_arrays,
})
))
class TestWhere(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
dodge_nondifferentiable = True
input_lhs = 'random'
input_rhs = 'random'
def setup(self):
super().setup()
self.condition = _random_condition(self.cond_shape, self.cond_dtype)
def func(self, xp, x, y):
condition = xp.array(self.condition)
return xp.where(condition, x, y)
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(
chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('cond_shape,x_shape,y_shape', [
((2, 3), (3, 4), (2, 3)),
((2, 3), (2, 3), (3, 4)),
((2, 3), (1, 3), (2, 4))
])
def test_where_invalid_shapes(xp, cond_shape, x_shape, y_shape):
x = array_utils.create_dummy_ndarray(xp, x_shape, 'float32')
y = array_utils.create_dummy_ndarray(xp, y_shape, 'float32')
c = array_utils.create_dummy_ndarray(xp, cond_shape, 'float32')
return xp.where(c, x, y)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'cond_shape,shape': math_utils.shapes_combination_inplace_binary,
'cond_dtype': ['bool_'],
'in_dtypes,scalar_type,out_dtype': (
dtype_utils.result_dtypes_array_scalar),
'is_scalar_rhs': [True, False],
})
# Dtype combinations
+ chainer.testing.product({
'cond_shape,shape': [((2, 3), (2, 3))],
'cond_dtype': chainerx.testing.all_dtypes,
'in_dtypes,scalar_type,out_dtype': (
dtype_utils.result_dtypes_array_scalar),
'is_scalar_rhs': [True, False],
})
))
class TestWhereScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
input = 'random'
scalar_value = 3
def setup(self):
super().setup()
self.condition = _random_condition(self.cond_shape, self.cond_dtype)
def func_scalar(self, xp, a, scalar):
condition = xp.array(self.condition)
if self.is_scalar_rhs:
return xp.where(condition, a, scalar)
else:
return xp.where(condition, scalar, a)
_in_out_dtypes_where_scalar = [
((bool, bool), 'bool_'),
((bool, int), 'int32'),
((bool, float), 'float32'),
((int, bool), 'int32'),
((int, int), 'int32'),
((int, float), 'float32'),
((float, bool), 'float32'),
((float, int), 'float32'),
((float, float), 'float32'),
]
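# With two Python scalars there is no array dtype to inherit, so the
# expected outputs above fall back to default dtypes: bool_ for bool,
# int32 for int and float32 for float; bool mixed with a numeric scalar
# takes the numeric default.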
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('cond_shape', [(2, 3)])
@pytest.mark.parametrize('cond_dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('in_types,out_dtype', _in_out_dtypes_where_scalar)
def test_where_scalar_scalar(xp, cond_shape, cond_dtype, in_types, out_dtype):
cond = _random_condition(
cond_shape, cond_dtype, random_state=numpy.random.RandomState(seed=0))
cond = xp.array(cond)
x_type, y_type = in_types
x = x_type(0)
y = y_type(2)
out = xp.where(cond, x, y)
return dtype_utils.cast_if_numpy_array(xp, out, out_dtype)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'dtype': chainerx.testing.all_dtypes,
'input': [
[],
[[]],
[0],
[1],
[2, 0, 5],
[4, 0, 0, 0],
[0, 0, 0, 4],
[0, 0, 0, 0],
[[4, 0, 0, 1], [0, 0, 4, 1]],
[[4, 4, 1, 1], [4, 1, 4, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0]],
]
})
))
class TestNonzero(op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
x = numpy.asarray(self.input).astype(self.dtype)
return x,
def forward_xp(self, inputs, xp):
x, = inputs
return xp.nonzero(x)
| 14,971
| 31.406926
| 79
|
py
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_binary.py
|
import chainer
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import math_utils
from chainerx_tests import op_utils
_in_out_dtypes_bitwise = dtype_utils._permutate_dtype_mapping([
# Same dtypes
(('bool_', 'bool_'), 'bool_'),
(('int8', 'int8'), 'int8'),
(('int16', 'int16'), 'int16'),
(('int32', 'int32'), 'int32'),
(('int64', 'int64'), 'int64'),
(('uint8', 'uint8'), 'uint8'),
# Mixed dtypes
(('bool_', 'int8'), 'int8'),
(('bool_', 'int16'), 'int16'),
(('bool_', 'int32'), 'int32'),
(('bool_', 'int64'), 'int64'),
(('bool_', 'uint8'), 'uint8'),
(('int8', 'int16'), 'int16'),
(('int8', 'int32'), 'int32'),
(('int8', 'int64'), 'int64'),
(('int8', 'uint8'), 'int16'),
(('int16', 'int32'), 'int32'),
(('int16', 'int64'), 'int64'),
(('int16', 'uint8'), 'int16'),
(('int32', 'int64'), 'int64'),
(('int32', 'uint8'), 'int32'),
(('int64', 'uint8'), 'int64'),
])
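# The mixed-dtype rows mirror NumPy's type promotion; for example, int8 and
# uint8 have no common 8-bit supertype, so they widen to int16:
#
#   >>> numpy.result_type(numpy.int8, numpy.uint8)
#   dtype('int16')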
_in_out_dtypes_inplace_bitwise_invalid = [
(('bool_', 'int8'), 'int8'),
(('bool_', 'int16'), 'int16'),
(('bool_', 'int32'), 'int32'),
(('bool_', 'int64'), 'int64'),
(('bool_', 'uint8'), 'uint8'),
]
_in_out_dtypes_inplace_bitwise = [
dtypes for dtypes in _in_out_dtypes_bitwise
if dtypes not in _in_out_dtypes_inplace_bitwise_invalid
]
_in_out_dtypes_bitwise_scalar = [
# Bool scalar
# TODO(imanishi): Support bool in op_utils.NumpyOpTest
# (('bool_',), bool, 'bool_'),
(('int8',), bool, 'int8'),
(('int32',), bool, 'int32'),
(('uint8',), bool, 'uint8'),
(('uint8',), numpy.bool_, 'uint8'),
# Int scalar
# TODO(imanishi): Support bool in op_utils.NumpyOpTest
# (('bool_',), int, 'bool_'),
(('int8',), int, 'int8'),
(('int16',), int, 'int16'),
(('int32',), int, 'int32'),
(('int64',), int, 'int64'),
(('uint8',), int, 'uint8'),
(('int16',), numpy.int16, 'int16'),
(('uint8',), numpy.int8, 'uint8'),
]
_scalar_invalid_bitwise = [
(('float16',), int, 'float16'),
(('float32',), int, 'float32'),
(('float64',), int, 'float64'),
(('float64',), numpy.int8, 'float64'),
(('float16',), numpy.int64, 'float16'),
]
_in_out_dtypes_shift = [
(('int8', 'int8'), 'int8'),
(('int16', 'int16'), 'int16'),
(('int32', 'int32'), 'int32'),
(('int64', 'int64'), 'int64'),
(('uint8', 'uint8'), 'uint8'),
(('int8', 'int16'), 'int8'),
(('int8', 'int32'), 'int8'),
(('int8', 'int64'), 'int8'),
(('int8', 'uint8'), 'int8'),
(('int16', 'int32'), 'int16'),
(('int16', 'int64'), 'int16'),
(('int16', 'uint8'), 'int16'),
(('int32', 'int64'), 'int32'),
(('int32', 'uint8'), 'int32'),
(('int64', 'uint8'), 'int64'),
]
_in_out_dtypes_shift_array_scalar = [
(('int8',), int, 'int8'),
(('int16',), int, 'int16'),
(('int32',), int, 'int32'),
(('int64',), int, 'int64'),
(('uint8',), int, 'uint8'),
(('int16',), numpy.int16, 'int16'),
(('uint8',), numpy.int8, 'uint8'),
]
_in_out_dtypes_shift_scalar_array = [
(('int8',), int, 'int64'),
(('int16',), int, 'int64'),
(('int32',), int, 'int64'),
(('int64',), int, 'int64'),
(('uint8',), int, 'int64'),
(('int16',), numpy.int16, 'int64'),
(('uint8',), numpy.int8, 'int64'),
]
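# When a Python int scalar is the shifted operand (scalar << array), the
# expected result above is always int64, presumably because the scalar is
# promoted to the default int64 rather than to the array's dtype.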
_in_out_dtypes_shift_invalid = [
(('bool_', 'int8'), 'int8'),
(('bool_', 'int16'), 'int16'),
(('bool_', 'int32'), 'int32'),
(('bool_', 'int64'), 'int64'),
(('bool_', 'uint8'), 'uint8'),
(('float', 'int8'), 'int8'),
(('float', 'int16'), 'int16'),
(('float', 'int32'), 'int32'),
(('float', 'int64'), 'int64'),
(('float', 'uint8'), 'uint8'),
]
_params_bitwise = (
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.nonfloat_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_bitwise,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.nonfloat_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
)
_inplace_params_bitwise = (
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_inplace_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.nonfloat_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_inplace_bitwise,
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.nonfloat_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
})
)
_scalar_params_bitwise = (
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_bitwise_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [False],
})
# Type combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_bitwise_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_bitwise_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_bitwise_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2],
'is_module': [False],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
)
_inplace_scalar_params_bitwise = (
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_bitwise_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Dtype combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_bitwise_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_bitwise_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2],
})
)
_params_shift = (
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.integral_dtypes)),
'input_lhs': ['random'],
'input_rhs': [0, 1, 3],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_shift,
'input_lhs': ['random'],
'input_rhs': [0, 1, 3],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.integral_dtypes)),
'input_lhs': ['random'],
'input_rhs': [0, 1, 3],
'is_module': [True, False],
})
)
_inplace_params_shift = (
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_inplace_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.integral_dtypes)),
'input_lhs': ['random'],
'input_rhs': [0, 1, 3],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_shift,
'input_lhs': ['random'],
'input_rhs': [0, 1, 3],
})
)
_scalar_params_shift_scalar_array = (
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_shift_scalar_array,
'input': [0, 1, 3],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [False],
})
# Type combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_shift_scalar_array,
'input': [0, 1, 3],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_shift_scalar_array,
'input': [0, 1, 3],
'scalar_value': [1],
'is_module': [True, False],
'is_scalar_rhs': [False],
})
)
_scalar_params_shift_array_scalar = (
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_shift_array_scalar,
'input': [0, 1, 3],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True],
})
# Type combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_shift_array_scalar,
'input': [0, 1, 3],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_shift_array_scalar,
'input': [0, 1, 3],
'scalar_value': [1],
'is_module': [True, False],
'is_scalar_rhs': [True],
})
)
_inplace_scalar_params_shift = (
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_shift_array_scalar,
'input': [0, 1, 3],
'scalar_value': [1],
})
# Dtype combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_shift_array_scalar,
'input': [0, 1, 3],
'scalar_value': [1],
})
)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_params_bitwise)
class TestBitwiseAnd(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
if self.is_module:
return xp.bitwise_and(a, b)
else:
return a & b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_params_bitwise)
class TestBitwiseOr(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
if self.is_module:
return xp.bitwise_or(a, b)
else:
return a | b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_params_bitwise)
class TestBitwiseXor(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
if self.is_module:
return xp.bitwise_xor(a, b)
else:
return a ^ b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_params_shift)
class TestLeftShift(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
if self.is_module:
return xp.left_shift(a, b)
else:
return a << b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_params_shift)
class TestRightShift(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
if self.is_module:
return xp.right_shift(a, b)
else:
return a >> b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_inplace_params_bitwise)
class TestIBitwiseAnd(
math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
a &= b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_inplace_params_bitwise)
class TestIBitwiseOr(
math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
a |= b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_inplace_params_bitwise)
class TestIBitwiseXor(
math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
a ^= b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_inplace_params_shift)
class TestILeftShift(
math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
a <<= b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_inplace_params_shift)
class TestIRightShift(
math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
a >>= b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_inplace_bitwise_invalid)
def test_iand_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
a &= b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_inplace_bitwise_invalid)
def test_ior_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
a |= b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_inplace_bitwise_invalid)
def test_ixor_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
a ^= b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_shift_invalid)
def test_ileftshift_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
        a <<= b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_shift_invalid)
def test_irightshift_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
        a >>= b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_scalar_params_bitwise)
class TestBitwiseAndScalar(
math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
    def func_scalar(self, xp, a, scalar):
        if self.is_module:
            if self.is_scalar_rhs:
                return xp.bitwise_and(a, scalar)
            else:
                return xp.bitwise_and(scalar, a)
        else:
            if self.is_scalar_rhs:
                return a & scalar
            else:
                return scalar & a
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_scalar_params_bitwise)
class TestBitwiseOrScalar(
math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
    def func_scalar(self, xp, a, scalar):
        if self.is_module:
            if self.is_scalar_rhs:
                return xp.bitwise_or(a, scalar)
            else:
                return xp.bitwise_or(scalar, a)
        else:
            if self.is_scalar_rhs:
                return a | scalar
            else:
                return scalar | a
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_scalar_params_bitwise)
class TestBitwiseXorScalar(
math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
    def func_scalar(self, xp, a, scalar):
        if self.is_module:
            if self.is_scalar_rhs:
                return xp.bitwise_xor(a, scalar)
            else:
                return xp.bitwise_xor(scalar, a)
        else:
            if self.is_scalar_rhs:
                return a ^ scalar
            else:
                return scalar ^ a
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_scalar_params_shift_scalar_array)
class TestLeftShiftScalarArray(
math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
    def func_scalar(self, xp, a, scalar):
        if self.is_module:
            return xp.left_shift(scalar, a)
        else:
            return scalar << a
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_scalar_params_shift_array_scalar)
class TestLeftShiftArrayScalar(
math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
    def func_scalar(self, xp, a, scalar):
        if self.is_module:
            return xp.left_shift(a, scalar)
        else:
            return a << scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_scalar_params_shift_scalar_array)
class TestRightShiftScalarArray(
math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
    def func_scalar(self, xp, a, scalar):
        if self.is_module:
            return xp.right_shift(scalar, a)
        else:
            return scalar >> a
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_scalar_params_shift_array_scalar)
class TestRightShiftArrayScalar(
math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
    def func_scalar(self, xp, a, scalar):
        if self.is_module:
            return xp.right_shift(a, scalar)
        else:
            return a >> scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_inplace_scalar_params_bitwise)
class TestIBitwiseAndScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a &= scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_inplace_scalar_params_bitwise)
class TestIBitwiseOrScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a |= scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_inplace_scalar_params_bitwise)
class TestIBitwiseXorScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a ^= scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_inplace_scalar_params_shift)
class TestILeftShiftScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a <<= scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*_inplace_scalar_params_shift)
class TestIRightShiftScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a >>= scalar
| 21,101
| 29.36259
| 77
|
py
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_linalg.py
|
import numpy
import pytest
import chainer
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import op_utils
def _skip_if_native_and_lapack_unavailable(device):
if (device.backend.name == 'native'
and not chainerx.linalg._is_lapack_available()):
pytest.skip('LAPACK is not linked to ChainerX')
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('a_shape,b_shape', [
((), ()),
((), (2, 3)),
((0, 2), (2, 0)),
((2, 0), (0, 3)),
((0, 0), (0, 0)),
((2, 3), (3, 4)),
((1, 2, 3), (3, 4)),
((1, 2, 0), (0, 4)),
((1, 0, 3), (3, 0)),
((1, 0, 3), (3, 4)),
((1, 2, 3), (3, 0)),
((1, 2), (1, 2, 3)),
((1, 0), (1, 0, 3)),
((0, 2), (1, 2, 0)),
((0, 2), (1, 2, 3)),
((1, 2), (1, 2, 0)),
((4, 5, 2), (3, 2, 5)),
((2, 3, 4, 4), (3, 4, 2)),
((2, 2, 3, 1), (2, 1, 3, 1, 4)),
((2, 4, 3), (1, 2, 3, 2)),
((1, 2, 3, 0), (4, 0, 5)),
((1, 2, 0, 3), (4, 3, 0)),
((1, 2, 0, 3), (4, 3, 5))
])
@chainer.testing.parameterize_pytest(
'in_dtypes,chx_expected_dtype', dtype_utils.result_dtypes_two_arrays)
@chainer.testing.parameterize_pytest('is_module', [True, False])
class TestDot(op_utils.NumpyOpTest):
def setup(self):
device = chainerx.get_default_device()
a_dtype, b_dtype = self.in_dtypes
a_kind = numpy.dtype(a_dtype).kind
b_kind = numpy.dtype(b_dtype).kind
# TODO(beam2d): Remove the skip after supporting non-float dot on CUDA
if device.name == 'cuda:0' and (a_kind != 'f' and b_kind != 'f'):
pytest.skip('non-float dot is not supported on CUDA')
# Skip backward/double-backward tests for int dtypes
if a_kind != 'f' or b_kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
# Skip backward/double-backward tests if the output will be
# disconnected.
# TODO(niboshi): Remove this skip condition after enabling backward()
# for such cases.
if self.a_shape and self.a_shape[-1] == 0:
self.skip_backward_test = True
self.skip_double_backward_test = True
if a_dtype == 'float16' or b_dtype == 'float16':
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
def generate_inputs(self):
a_dtype, b_dtype = self.in_dtypes
a_shape = self.a_shape
b_shape = self.b_shape
a = numpy.random.uniform(-1, 1, a_shape).astype(a_dtype)
b = numpy.random.uniform(-1, 1, b_shape).astype(b_dtype)
return a, b
def forward_xp(self, inputs, xp):
a, b = inputs
if self.is_module:
y = xp.dot(a, b)
else:
y = a.dot(b)
y = dtype_utils.cast_if_numpy_array(xp, y, self.chx_expected_dtype)
return y,
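# A minimal sketch (plain NumPy) of the shape rule the stacked cases above
# exercise and that the chainerx output is compared against: dot contracts
# the last axis of `a` with the second-to-last axis of `b`.
_dot_a = numpy.zeros((4, 5, 2))
_dot_b = numpy.zeros((3, 2, 5))
assert numpy.dot(_dot_a, _dot_b).shape == (4, 5, 3, 5)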
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('a_shape,b_shape', [
((3, 2), (1, 3)),
((4, 3, 2, 5), (6, 4, 1, 2))
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_dot_invalid(is_module, xp, device, a_shape, b_shape, dtype):
# TODO(beam2d): Remove the skip after supporting non-float dot on CUDA
if device.name == 'cuda:0' and numpy.dtype(dtype).kind != 'f':
return chainerx.testing.ignore()
a = array_utils.create_dummy_ndarray(xp, a_shape, dtype)
b = array_utils.create_dummy_ndarray(xp, b_shape, dtype)
if is_module:
return xp.dot(a, b)
else:
return a.dot(b)
class NumpyLinalgOpTest(op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def setup(self):
super().setup()
device = chainerx.get_default_device()
_skip_if_native_and_lapack_unavailable(device)
self.check_forward_options.update({'rtol': 1e-4, 'atol': 1e-4})
self.check_backward_options.update({'rtol': 5e-3})
self.check_double_backward_options.update({'rtol': 5e-3})
_numpy_does_not_support_0d_input113 = \
numpy.lib.NumpyVersion(numpy.__version__) < '1.13.0'
_numpy_does_not_support_0d_input116 = \
numpy.lib.NumpyVersion(numpy.__version__) < '1.16.0'
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(0, 0), (1, 1), (3, 3)],
'b_columns': [(), (1,), (3,), (4,)],
'dtypes': [
('float32', 'float32'),
('float64', 'float64'),
('float64', 'float32'),
('float32', 'float64')]
})
))
class TestSolve(NumpyLinalgOpTest):
def generate_inputs(self):
sv = numpy.random.uniform(1, 2, size=self.shape[0])
a = chainer.testing.generate_matrix(
self.shape, dtype=self.dtypes[0], singular_values=sv)
b = numpy.random.random(
(self.shape[0], *self.b_columns)).astype(self.dtypes[1])
return a, b
def forward_xp(self, inputs, xp):
a, b = inputs
out = xp.linalg.solve(a, b)
return out,
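# A minimal sketch (plain NumPy) of the property under test: linalg.solve
# returns x such that a @ x == b up to floating point error.
_solve_a = numpy.array([[3., 1.], [1., 2.]])
_solve_b = numpy.array([9., 8.])
assert numpy.allclose(
    _solve_a.dot(numpy.linalg.solve(_solve_a, _solve_b)), _solve_b)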
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('shape', [(2, 3), (3, 2)])
def test_solve_invalid_shape(device, shape):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, shape, 'float32')
b = array_utils.create_dummy_ndarray(chainerx, shape, 'float32')
with pytest.raises(chainerx.DimensionError):
chainerx.linalg.solve(a, b)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_solve_invalid_dtype(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (3, 3), 'float16')
b = array_utils.create_dummy_ndarray(chainerx, (3, 3), 'float16')
with pytest.raises(chainerx.DtypeError):
chainerx.linalg.solve(a, b)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(0, 0), (1, 1), (3, 3)],
'dtype': ['float32', 'float64']
})
))
class TestInverse(NumpyLinalgOpTest):
# For zero sized input strides are different
check_numpy_strides_compliance = False
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
a = a * 10 + numpy.ones(self.shape)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
out = xp.linalg.inv(a)
return out,
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('shape', [(2, 3), (3, 2)])
def test_inv_invalid_shape(device, shape):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, shape, 'float32')
with pytest.raises(chainerx.DimensionError):
chainerx.linalg.inv(a)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_inv_invalid_dtype(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (3, 3), 'float16')
with pytest.raises(chainerx.DtypeError):
chainerx.linalg.inv(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(0, 0), (0, 3), (3, 0), (1, 1), (2, 3), (3, 2), (3, 3)],
'dtype': ['float32', 'float64'],
'full_matrices': [False],
'compute_uv': [True]
}) + chainer.testing.product({
'shape': [(0, 0), (0, 3), (3, 0), (1, 1), (2, 3), (3, 2), (3, 3)],
'dtype': ['float32', 'float64'],
'full_matrices': [True],
'compute_uv': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSVD(NumpyLinalgOpTest):
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if (_numpy_does_not_support_0d_input116 and a.size == 0):
pytest.skip('Older NumPy versions do not work with empty arrays')
out = xp.linalg.svd(a,
full_matrices=self.full_matrices,
compute_uv=self.compute_uv)
# NOTE: cuSOLVER's (CuPy's) and NumPy's outputs of u and v might
# differ in signs, which is not a problem mathematically
if self.compute_uv:
u, s, v = out
return xp.abs(u), s, xp.abs(v)
else:
s = out
return s,
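# A minimal sketch (plain NumPy) of the sign ambiguity compensated for
# above: negating matching columns of u and rows of v yields an equally
# valid SVD, so only s, |u| and |v| are comparable across backends.
_svd_m = numpy.array([[1., 0.], [0., 2.]])
_svd_u, _svd_s, _svd_v = numpy.linalg.svd(_svd_m)
assert numpy.allclose(
    (-_svd_u).dot(numpy.diag(_svd_s)).dot(-_svd_v), _svd_m)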
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_svd_invalid_shape(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (), 'float32')
with pytest.raises(chainerx.DimensionError):
chainerx.linalg.svd(a)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_svd_invalid_dtype(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (2, 3), 'float16')
with pytest.raises(chainerx.DtypeError):
chainerx.linalg.svd(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(0, 0), (0, 3), (3, 0), (1, 1), (2, 3), (3, 2), (3, 3)],
'rcond': [1e-15, 0.3, 0.5, 0.6],
'dtype': ['float32', 'float64']
})
))
class TestPseudoInverse(NumpyLinalgOpTest):
# For zero sized input strides are different
check_numpy_strides_compliance = False
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
a = a * 10 + numpy.ones(self.shape)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if (_numpy_does_not_support_0d_input113 and a.size == 0):
pytest.skip('Older NumPy versions do not work with empty arrays')
out = xp.linalg.pinv(a, rcond=self.rcond)
return out,
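# A minimal sketch (plain NumPy) of what rcond controls: singular values
# below rcond * max(singular_values) are treated as zero, lowering the
# rank of the pseudo-inverse.
_pinv_a = numpy.diag([1.0, 0.4])
assert numpy.allclose(
    numpy.linalg.pinv(_pinv_a, rcond=0.5), numpy.diag([1.0, 0.0]))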
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_pinv_invalid_shape(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (), 'float32')
with pytest.raises(chainerx.DimensionError):
chainerx.linalg.pinv(a)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_pinv_invalid_dtype(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (2, 3), 'float16')
with pytest.raises(chainerx.DtypeError):
chainerx.linalg.pinv(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# backward for 'r', 'raw' modes is not implemented
chainer.testing.product({
'shape': [(0, 3), (3, 0), (1, 1), (2, 3), (3, 2), (3, 3)],
'in_dtypes': ['float32', 'float64'],
'mode': ['r', 'raw'],
'skip_backward_test': [True],
'skip_double_backward_test': [True]
}) +
# backward for non-square `R` is not implemented
chainer.testing.product({
'shape': [(0, 3), (3, 0), (2, 3), (3, 2)],
'in_dtypes': ['float32', 'float64'],
'mode': ['complete', 'reduced'],
'skip_backward_test': [True],
'skip_double_backward_test': [True]
}) +
chainer.testing.product({
'shape': [(1, 1), (3, 3)],
'in_dtypes': ['float32', 'float64'],
'mode': ['reduced', 'complete']
}) + chainer.testing.product({
'shape': [(3, 2)],
'in_dtypes': ['float32', 'float64'],
'mode': ['reduced']
})
))
class TestQR(NumpyLinalgOpTest):
# For input with shape (N, 0) strides are different
check_numpy_strides_compliance = False
def generate_inputs(self):
singular_values = numpy.random.uniform(
low=0.1, high=1.5, size=min(self.shape))
a = chainer.testing.generate_matrix(
self.shape, self.in_dtypes, singular_values=singular_values)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
        if (_numpy_does_not_support_0d_input116 and a.size == 0):
pytest.skip('Older NumPy versions do not work with empty arrays')
out = xp.linalg.qr(a, mode=self.mode)
if self.mode == 'r':
r = out
return r,
if self.mode == 'raw':
if a.dtype.char == 'f':
return out[0].astype(xp.float64), out[1].astype(xp.float64)
return out
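# A minimal sketch (plain NumPy) of the mode shapes checked above: for an
# (m, n) input with k = min(m, n), 'reduced' yields q:(m, k) and r:(k, n),
# while 'complete' yields q:(m, m) and r:(m, n).
_qr_q, _qr_r = numpy.linalg.qr(numpy.eye(3, 2), mode='reduced')
assert _qr_q.shape == (3, 2) and _qr_r.shape == (2, 2)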
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_qr_invalid_mode(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (), 'float32')
with pytest.raises(ValueError):
chainerx.linalg.qr(a, mode='bad_mode')
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_qr_invalid_shape(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (), 'float32')
with pytest.raises(chainerx.DimensionError):
chainerx.linalg.qr(a)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_qr_invalid_dtype(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (2, 3), 'float16')
with pytest.raises(chainerx.DtypeError):
chainerx.linalg.qr(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(0, 0), (1, 1), (3, 3)],
'in_dtypes': ['float32', 'float64']
})
))
class TestCholesky(op_utils.NumpyOpTest):
# For input with shape (0, 0) strides are different
check_numpy_strides_compliance = False
dodge_nondifferentiable = True
def setup(self):
device = chainerx.get_default_device()
_skip_if_native_and_lapack_unavailable(device)
self.check_backward_options.update({
'eps': 1e-5, 'rtol': 1e-4, 'atol': 1e-4})
self.check_double_backward_options.update({
'eps': 1e-5, 'rtol': 1e-4, 'atol': 1e-4})
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.in_dtypes)
# Make random square matrix a symmetric positive definite one
a = numpy.array(a.T.dot(a)) + 1e-3 * numpy.eye(*self.shape)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if (_numpy_does_not_support_0d_input113 and a.size == 0):
pytest.skip('Older NumPy versions do not work with empty arrays')
# Input has to be symmetrized for backward test to work
a = (a + a.T)/2. + 1e-3 * xp.eye(*self.shape)
L = xp.linalg.cholesky(a)
return L,
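# A minimal sketch (plain NumPy) of the defining property: for a symmetric
# positive definite matrix a, linalg.cholesky returns a lower triangular L
# with L @ L.T == a, which is why the inputs above are symmetrized first.
_chol_a = numpy.array([[4., 2.], [2., 3.]])
_chol_l = numpy.linalg.cholesky(_chol_a)
assert numpy.allclose(_chol_l.dot(_chol_l.T), _chol_a)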
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_cholesky_invalid_not_positive_definite(device):
_skip_if_native_and_lapack_unavailable(device)
while True:
a = numpy.random.random((3, 3)).astype('float32')
try:
numpy.linalg.cholesky(a)
except numpy.linalg.LinAlgError:
break
a = chainerx.array(a)
with pytest.raises(chainerx.ChainerxError):
chainerx.linalg.cholesky(a)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_cholesky_invalid_semidefinite(device):
_skip_if_native_and_lapack_unavailable(device)
a = chainerx.array([[1, -2], [-2, 1]], dtype='float32')
with pytest.raises(chainerx.ChainerxError):
chainerx.linalg.cholesky(a)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_cholesky_invalid_shape(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (2, 3), 'float32')
with pytest.raises(chainerx.DimensionError):
chainerx.linalg.cholesky(a)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_cholesky_invalid_dtype(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (3, 3), 'float16')
with pytest.raises(chainerx.DtypeError):
chainerx.linalg.cholesky(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(0, 0), (1, 1), (3, 3)],
'in_dtypes': ['float32', 'float64'],
'UPLO': ['u', 'L']
})
))
class TestEigh(NumpyLinalgOpTest):
def setup(self):
device = chainerx.get_default_device()
_skip_if_native_and_lapack_unavailable(device)
self.check_backward_options.update({
'eps': 1e-5, 'rtol': 1e-3, 'atol': 1e-3})
self.check_double_backward_options.update({
'eps': 1e-5, 'rtol': 1e-3, 'atol': 1e-3})
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.in_dtypes)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if (_numpy_does_not_support_0d_input113 and a.size == 0):
pytest.skip('Older NumPy versions do not work with empty arrays')
# Input has to be symmetrized for backward test to work
a = (a + a.T)/2. + 1e-3 * xp.eye(*self.shape)
w, v = xp.linalg.eigh(a, UPLO=self.UPLO)
# The sign of eigenvectors is not unique,
# therefore absolute values are compared
return w, xp.abs(v)
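# A minimal sketch (plain NumPy) of the decomposition property behind the
# test: for symmetric a, a @ v[:, i] == w[i] * v[:, i]; eigenvector signs
# are arbitrary, hence the abs() comparison above.
_eigh_a = numpy.array([[2., 1.], [1., 2.]])
_eigh_w, _eigh_v = numpy.linalg.eigh(_eigh_a)
assert numpy.allclose(_eigh_a.dot(_eigh_v), _eigh_v * _eigh_w)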
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_eigh_invalid_uplo_type(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (3, 3), 'float32')
with pytest.raises(TypeError):
chainerx.linalg.eigh(a, UPLO=None)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_eigh_invalid_uplo_value(device):
_skip_if_native_and_lapack_unavailable(device)
# TODO(hvy): Update the test when the error types are unified to either.
a = array_utils.create_dummy_ndarray(chainerx, (3, 3), 'float32')
with pytest.raises(ValueError):
chainerx.linalg.eigh(a, UPLO='bad_UPLO')
a = array_utils.create_dummy_ndarray(chainerx, (3, 3), 'float32')
with pytest.raises(chainerx.ChainerxError):
chainerx.linalg.eigh(a, UPLO='A')
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_eigh_invalid_shape(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (2, 3), 'float32')
with pytest.raises(chainerx.DimensionError):
chainerx.linalg.eigh(a)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_eigh_invalid_dtype(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (3, 3), 'float16')
with pytest.raises(chainerx.DtypeError):
chainerx.linalg.eigh(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(0, 0), (1, 1), (3, 3)],
'in_dtypes': ['float32', 'float64'],
'UPLO': ['u', 'L'],
'skip_backward_test': [True],
'skip_double_backward_test': [True]
})
))
class TestEigvalsh(NumpyLinalgOpTest):
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.in_dtypes)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
if (_numpy_does_not_support_0d_input113 and a.size == 0):
pytest.skip('Older NumPy versions do not work with empty arrays')
w = xp.linalg.eigvalsh(a, UPLO=self.UPLO)
return w,
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_eigvalsh_invalid_uplo_type(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (3, 3), 'float32')
with pytest.raises(TypeError):
chainerx.linalg.eigvalsh(a, UPLO=None)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_eigvalsh_invalid_uplo_value(device):
_skip_if_native_and_lapack_unavailable(device)
# TODO(hvy): Update the test when the error types are unified to either.
a = array_utils.create_dummy_ndarray(chainerx, (3, 3), 'float32')
with pytest.raises(ValueError):
chainerx.linalg.eigvalsh(a, UPLO='wrong')
a = array_utils.create_dummy_ndarray(chainerx, (3, 3), 'float32')
with pytest.raises(chainerx.ChainerxError):
chainerx.linalg.eigvalsh(a, UPLO='A')
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_eigvalsh_invalid_shape(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (2, 3), 'float32')
with pytest.raises(chainerx.DimensionError):
chainerx.linalg.eigvalsh(a)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_eigvalsh_invalid_dtype(device):
_skip_if_native_and_lapack_unavailable(device)
a = array_utils.create_dummy_ndarray(chainerx, (3, 3), 'float16')
with pytest.raises(chainerx.DtypeError):
chainerx.linalg.eigvalsh(a)
| 21,161
| 31.758514
| 78
|
py
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/routines_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_connection.py
|
import unittest
import chainer
from chainer import functions as F
import numpy
import pytest
import chainerx
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import op_utils
# A special parameter object used to represent an unspecified argument.
class Unspecified(object):
pass
lstm_dtypes_valid = dtype_utils._permutate_dtype_mapping([
# Floats.
(('float16', 'float16'), ('float16', 'float16')),
(('float32', 'float32'), ('float32', 'float32')),
(('float64', 'float64'), ('float64', 'float64')),
])
lstm_dtypes_invalid = dtype_utils._permutate_dtype_mapping([
# Bools.
(('bool_', 'bool_'), ('bool_', 'bool_')),
# Floats.
(('float32', 'float16'), ('float32', 'float16')),
(('float64', 'float16'), ('float64', 'float16')),
(('float64', 'float32'), ('float64', 'float32')),
# Signed ints.
(('int8', 'int8'), ('int8', 'int8')),
(('int8', 'int16'), ('int8', 'int16')),
(('int8', 'int32'), ('int8', 'int32')),
(('int8', 'int64'), ('int8', 'int64')),
(('int16', 'int16'), ('int16', 'int16')),
(('int32', 'int32'), ('int32', 'int32')),
(('int64', 'int64'), ('int64', 'int64')),
(('int16', 'int32'), ('int16', 'int32')),
(('int16', 'int64'), ('int16', 'int64')),
(('int32', 'int64'), ('int32', 'int64')),
# Unsigned ints.
(('uint8', 'uint8'), ('uint8', 'uint8')),
# Signed int and unsigned int.
(('uint8', 'int8'), ('uint8', 'int8')),
(('uint8', 'int16'), ('uint8', 'int16')),
(('uint8', 'int32'), ('uint8', 'int32')),
# Int and float.
(('int8', 'float16'), ('int8', 'float16')),
(('uint8', 'float16'), ('uint8', 'float16')),
(('int16', 'float32'), ('int16', 'float32')),
(('int32', 'float32'), ('int32', 'float32')),
(('int64', 'float32'), ('int64', 'float32')),
# Bool and other.
(('bool_', 'uint8'), ('bool_', 'uint8')),
(('bool_', 'int8'), ('bool_', 'int8')),
(('bool_', 'int16'), ('bool_', 'int16')),
(('bool_', 'float16'), ('bool_', 'float16')),
(('bool_', 'float64'), ('bool_', 'float64')),
])
def _create_lstm_args(xp, device, c_shape, x_shape, float_dtype):
c = array_utils.create_dummy_ndarray(xp, c_shape, float_dtype[0])
x = array_utils.create_dummy_ndarray(xp, x_shape, float_dtype[1])
return c, x
def _create_conv_args(
xp, device, x_shape, w_shape, b_shape, stride, pad, cover_all,
float_dtype):
x = array_utils.create_dummy_ndarray(xp, x_shape, float_dtype)
w = array_utils.create_dummy_ndarray(xp, w_shape, float_dtype)
if b_shape is None:
b = None
else:
b = array_utils.create_dummy_ndarray(xp, b_shape, float_dtype)
if device.backend.name == 'cuda': # cover_all is not supported by CUDA.
cover_all = False
return x, w, b, stride, pad, cover_all
def _convert_to_nhwc_layout(array):
# Converts a contiguous array to NHWC data layout.
assert isinstance(array, numpy.ndarray)
assert array.flags.c_contiguous
shape = array.shape
    transposed_array = numpy.transpose(array, (0, 2, 3, 1))
    transposed_array = numpy.ascontiguousarray(transposed_array)
    array = numpy.transpose(transposed_array, (0, 3, 1, 2))
assert array.shape == shape
return array
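# A minimal sketch (plain NumPy) of the layout produced by the helper
# above: the logical shape stays NCHW, but memory is reordered so the
# channel axis becomes the innermost (fastest varying) one, i.e. the
# channel stride equals the item size.
_nhwc = _convert_to_nhwc_layout(
    numpy.zeros((2, 3, 4, 5), dtype=numpy.float32))
assert _nhwc.shape == (2, 3, 4, 5)
assert _nhwc.strides[1] == _nhwc.itemsize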
class _ConvTestBase(object):
def setup(self):
if len(self.in_dtypes) == 3:
x_dtype, w_dtype, b_dtype = self.in_dtypes
else:
(x_dtype, w_dtype), b_dtype = self.in_dtypes, None
x_kind = numpy.dtype(x_dtype).kind
w_kind = numpy.dtype(w_dtype).kind
b_kind = None if b_dtype is None else numpy.dtype(b_dtype).kind
device = chainerx.get_default_device()
if device.backend.name == 'cuda' and len(self.x_shape) <= 3:
# TODO(hvy): Support 1 dimensional convolution with CUDA.
pytest.skip('cudnn does not support 1-dim convolution')
if device.backend.name == 'cuda' and self.cover_all:
pytest.skip('cudnn does not support cover_all')
# Skip backward/double-backward tests for int dtypes
if (x_kind != 'f' and w_kind != 'f'
and (b_kind is None or b_kind != 'f')):
self.skip_backward_test = True
self.skip_double_backward_test = True
if (x_dtype == 'float16' or w_dtype == 'float16'
or b_dtype == 'float16'):
self.check_forward_options.update({'rtol': 5e-2, 'atol': 5e-3})
self.check_backward_options.update({
'eps': 2 ** -3, 'rtol': 1e-1, 'atol': 1e-2})
else:
self.check_forward_options.update({'rtol': 1e-3})
self.check_backward_options.update({
'eps': 1e-2, 'rtol': 1e-3, 'atol': 1e-4})
self.check_double_backward_options.update({
'rtol': 5e-2, 'atol': 5e-3})
def generate_inputs(self):
x_shape = self.x_shape
w_shape = self.w_shape
b_shape = self.b_shape
if len(self.in_dtypes) == 3:
x_dtype, w_dtype, b_dtype = self.in_dtypes
else:
(x_dtype, w_dtype), b_dtype = self.in_dtypes, None
x = array_utils.uniform(x_shape, x_dtype)
w = array_utils.uniform(w_shape, w_dtype)
if b_shape is None:
return x, w
else:
b = array_utils.uniform(b_shape, b_dtype)
return x, w, b
def forward_chainerx(self, inputs):
if len(inputs) == 2:
(x, w), b = inputs, None
else:
x, w, b = inputs
y = chainerx.conv(x, w, b, self.stride, self.pad, self.cover_all)
return y,
def forward_chainer(self, inputs):
if len(inputs) == 2:
(x, w), b = inputs, None
else:
x, w, b = inputs
if x.dtype.kind != 'f':
x = F.cast(x, 'float64')
if w.dtype.kind != 'f':
w = F.cast(w, 'float64')
if b is not None and b.dtype.kind != 'f':
b = F.cast(b, 'float64')
y = F.convolution_nd(
x, w, b, self.stride, self.pad, self.cover_all)
y = F.cast(y, self.out_dtype)
return y,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# without bias
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'x_shape,w_shape,b_shape,stride,pad', [
((1, 3), (5, 3), None, 1, 0),
((1, 3, 4), (5, 3, 2), None, 3, 2),
((2, 3, 4, 4), (2, 3, 3, 3), None, 2, (2, 0)),
((2, 3, 2, 6, 3), (2, 3, 1, 3, 2), None, (1, 2, 3), (2, 0, 1)),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes,out_dtype', dtype_utils.result_dtypes_two_arrays)
]) +
# with bias
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'x_shape,w_shape,b_shape,stride,pad', [
((1, 3), (5, 3), (5,), 1, 0),
((2, 3, 4), (5, 3, 1), (5,), 1, 0),
((1, 3, 4), (5, 3, 2), (5,), 3, 2),
((2, 3, 4, 4), (2, 3, 3, 3), (2,), 1, 0),
((1, 3, 4, 4), (2, 3, 3, 3), (2,), (1, 2), 1),
((1, 3, 4, 4), (2, 3, 3, 3), (2,), 2, (2, 0)),
((1, 3, 2, 6, 3), (2, 3, 1, 3, 2), (2,), 2, (2, 0, 1)),
((1, 3, 2, 6, 3), (2, 3, 1, 3, 2), (2,), (1, 2, 3), (2, 0, 1)),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes,out_dtype', dtype_utils.result_dtypes_three_arrays)
])
))
@chainer.testing.parameterize_pytest('cover_all', [True, False])
class TestConv(_ConvTestBase, op_utils.ChainerOpTest):
pass
# cudnnFindConvolutionForwardAlgorithmEx tends to choose an algorithm which
# uses TensorCore when:
# - The sizes of the input arrays are large
# - The input `x` is aligned with NHWC data layout
@op_utils.op_test(['cuda:0'])
class TestConvTensorCore(_ConvTestBase, op_utils.ChainerOpTest):
x_shape = (8, 3, 227, 227)
w_shape = (96, 3, 11, 11)
b_shape = (96,)
stride = 4
pad = 0
in_dtypes = ('float16', 'float16', 'float16')
out_dtype = 'float16'
cover_all = False
def __init__(self):
# TODO(imanishi): Skip only if compute_capability < 70.
        # Huge floating-point errors often arise because our CI
        # environment has no Tensor Cores.
raise unittest.SkipTest()
def generate_inputs(self):
x, w, b = super().generate_inputs()
return _convert_to_nhwc_layout(x), w, b
@pytest.mark.parametrize('x_shape,w_shape,b_shape,stride,pad', [
# Mismatched x and w input channels.
((1, 3, 4, 3), (5, 4, 2, 2), (5,), 3, 2),
# Mismatched x and w dimensions.
((2, 3, 4, 3), (5, 3, 2, 2, 1), (5,), 3, 2),
((1, 3, 4, 3), (5, 3, 2, 2), (6,), 1, 0), # Mismatched w and b.
((2, 3, 4, 3), (5, 3, 2, 2), None, (1,), 0), # Wrong number of strides.
((1, 3, 4, 3), (5, 3, 2, 2), None, 3, (2,)), # Wrong number of paddings.
])
@pytest.mark.parametrize('cover_all', [True, False])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_conv_invalid(
device, x_shape, w_shape, b_shape, stride, pad, cover_all,
float_dtype):
with pytest.raises(chainerx.DimensionError):
chainerx.conv(
*_create_conv_args(
chainerx, device, x_shape, w_shape, b_shape, stride, pad,
cover_all, float_dtype))
class _ConvTransposeTestBase(object):
def setup(self):
if len(self.in_dtypes) == 3:
x_dtype, w_dtype, b_dtype = self.in_dtypes
else:
(x_dtype, w_dtype), b_dtype = self.in_dtypes, None
x_kind = numpy.dtype(x_dtype).kind
w_kind = numpy.dtype(w_dtype).kind
b_kind = None if b_dtype is None else numpy.dtype(b_dtype).kind
device = chainerx.get_default_device()
if device.backend.name == 'cuda' and len(self.x_shape) <= 3:
# TODO(sonots): Support 1 dimensional convolution with CUDA.
pytest.skip(
'cuDNN does not support 1 dimensional convolution and throws '
'DimensionError')
if device.backend.name == 'cuda' and self.cover_all is True:
pytest.skip(
'outsize (for cover_all=True) is not supported by CUDA')
# Skip backward/double-backward tests for int dtypes
if (x_kind != 'f' and w_kind != 'f'
and (b_kind is None or b_kind != 'f')):
self.skip_backward_test = True
self.skip_double_backward_test = True
if (x_dtype == 'float16' or w_dtype == 'float16'
or b_dtype == 'float16'):
self.check_forward_options.update({'rtol': 5e-2, 'atol': 2e-2})
self.check_backward_options.update({
'eps': 2 ** -3, 'rtol': 1e-1, 'atol': 1e-2})
else:
self.check_forward_options.update({'rtol': 1e-3})
self.check_backward_options.update({
'eps': 1e-2, 'rtol': 1e-3, 'atol': 1e-4})
self.check_double_backward_options.update({
'rtol': 5e-2, 'atol': 5e-3})
# Determine outsize
cover_all = self.cover_all
if cover_all is None:
outsize = None
else:
x_shape = self.x_shape
w_shape = self.w_shape
stride = self.stride
pad = self.pad
in_dims = x_shape[2:]
kernel_size = w_shape[2:]
ndim = len(in_dims)
stride_tup = (
(stride,) * ndim if isinstance(stride, int) else stride)
pad_tup = (pad,) * ndim if isinstance(pad, int) else pad
outsize = tuple(
chainer.utils.conv.get_deconv_outsize(d, k, s, p, cover_all)
for (d, k, s, p)
in zip(in_dims, kernel_size, stride_tup, pad_tup))
self.outsize = outsize
def generate_inputs(self):
x_shape = self.x_shape
w_shape = self.w_shape
b_shape = self.b_shape
if len(self.in_dtypes) == 3:
x_dtype, w_dtype, b_dtype = self.in_dtypes
else:
(x_dtype, w_dtype), b_dtype = self.in_dtypes, None
x = array_utils.uniform(x_shape, x_dtype)
w = array_utils.uniform(w_shape, w_dtype)
if b_shape is None:
return x, w
else:
b = array_utils.uniform(b_shape, b_dtype)
return x, w, b
def forward_chainerx(self, inputs):
if len(inputs) == 3:
x, w, b = inputs
else:
(x, w), b = inputs, None
y = chainerx.conv_transpose(
x, w, b, self.stride, self.pad, self.outsize)
return y,
def forward_chainer(self, inputs):
if len(inputs) == 3:
x, w, b = inputs
else:
(x, w), b = inputs, None
if x.dtype.kind != 'f':
x = F.cast(x, 'float64')
if w.dtype.kind != 'f':
w = F.cast(w, 'float64')
if b is not None and b.dtype.kind != 'f':
b = F.cast(b, 'float64')
y = chainer.functions.deconvolution_nd(
x, w, b, self.stride, self.pad, self.outsize)
y = F.cast(y, self.out_dtype)
return y,
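# A minimal sketch of the formula behind the outsize computed in
# _ConvTransposeTestBase.setup: chainer.utils.conv.get_deconv_outsize
# returns s * (in - 1) + k - 2 * p, minus a further s - 1 when cover_all
# is True (assuming the default dilation of 1).
assert chainer.utils.conv.get_deconv_outsize(4, 3, 2, 1) == 7
assert chainer.utils.conv.get_deconv_outsize(4, 3, 2, 1, cover_all=True) == 6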
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# without bias
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'x_shape,w_shape,b_shape,stride,pad', [
((1, 3), (3, 5), None, 1, 0),
((1, 3, 4), (3, 5, 2), None, 3, 2),
((2, 3, 4, 4), (3, 2, 3, 3), None, 2, (2, 0)),
((2, 3, 5, 6, 3), (3, 2, 1, 3, 2), None, (1, 2, 3), (2, 0, 1)),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes,out_dtype', dtype_utils.result_dtypes_two_arrays)
]) +
# with bias
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'x_shape,w_shape,b_shape,stride,pad', [
((1, 3), (3, 5), (5,), 1, 0),
((2, 3, 4), (3, 5, 1), (5,), 1, 0),
((1, 3, 4), (3, 5, 2), (5,), 3, 2),
((2, 3, 4, 4), (3, 2, 3, 3), (2,), 1, 0),
((1, 3, 4, 4), (3, 2, 3, 3), (2,), (1, 2), 1),
((1, 3, 4, 4), (3, 2, 3, 3), (2,), 2, (2, 0)),
((1, 3, 5, 6, 3), (3, 2, 1, 3, 2), (2,), 2, (2, 0, 1)),
((1, 3, 5, 6, 3), (3, 2, 1, 3, 2), (2,), (1, 2, 3), (2, 0, 1)),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes,out_dtype', dtype_utils.result_dtypes_three_arrays)
])
))
# If None, outsize argument will be None.
@chainer.testing.parameterize_pytest('cover_all', [None, True, False])
class TestConvTranspose(_ConvTransposeTestBase, op_utils.ChainerOpTest):
pass
# cudnnFindConvolutionForwardAlgorithmEx tends to choose an algorithm which
# uses TensorCore when:
# - The sizes of the input arrays are large
# - The input `x` is aligned with NHWC data layout
@op_utils.op_test(['cuda:0'])
@chainer.testing.parameterize_pytest('cover_all', [None, False])
class TestConvTransposeTensorCore(
_ConvTransposeTestBase, op_utils.ChainerOpTest):
x_shape = (8, 3, 227, 227)
w_shape = (3, 8, 11, 11)
b_shape = (8,)
stride = 4
pad = 0
in_dtypes = ('float16', 'float16', 'float16')
out_dtype = 'float16'
def __init__(self):
# TODO(imanishi): Skip only if compute_capability < 70.
        # Huge floating-point errors often arise because our CI
        # environment has no Tensor Cores.
raise unittest.SkipTest()
def generate_inputs(self):
x, w, b = super().generate_inputs()
return _convert_to_nhwc_layout(x), w, b
@pytest.mark.parametrize('x_shape,w_shape,b_shape,stride,pad,outsize', [
# Mismatched x and w input channels.
((1, 3, 4, 3), (5, 4, 2, 2), (5,), 3, 2, None),
# Mismatched x and w dimensions.
((2, 3, 4, 3), (3, 5, 2, 2, 1), (5,), 3, 2, None),
((1, 3, 4, 3), (3, 5, 2, 2), (6,), 1, 0, None), # Mismatched w and b.
# Wrong number of strides.
((2, 3, 4, 3), (3, 5, 2, 2), None, (1,), 0, None),
# Wrong number of paddings.
((1, 3, 4, 3), (3, 5, 2, 2), None, 3, (2,), None),
((1, 3, 2, 6, 3), (3, 2, 1, 3, 2), (2,), 2, (2, 0, 1),
(-1, 13, 4)), # All output sizes must be non-negative
# All output sizes must be non-negative
((1, 3, 2, 6, 3), (3, 2, 1, 3, 2), (2,), 2, (2, 0, 1), None),
((2, 3, 4), (3, 5, 1), (5,), 1, 0, (5,)), # Output dims are inconsistent
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_conv_transpose_invalid(
device, x_shape, w_shape, b_shape, stride, pad, outsize, float_dtype):
dtype = float_dtype
x = array_utils.create_dummy_ndarray(chainerx, x_shape, dtype)
w = array_utils.create_dummy_ndarray(chainerx, w_shape, dtype)
if b_shape is None:
b = None
else:
b = array_utils.create_dummy_ndarray(chainerx, b_shape, float_dtype)
with pytest.raises(chainerx.DimensionError):
chainerx.conv_transpose(x, w, b, stride, pad, outsize)
@op_utils.op_test(['native:0', 'cuda:0'])
# TODO(imanishi): Add test cases for more than 2 ndim
@chainer.testing.parameterize(*(
# without bias
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'x_shape,w_shape,b_shape,n_batch_axes', [
((2, 3), (4, 3), None, Unspecified),
((5, 2, 3), (4, 3), None, 2),
((2, 3), (4, 3), Unspecified, Unspecified),
((5, 2, 3), (4, 6), Unspecified, Unspecified),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes,out_dtype', dtype_utils.result_dtypes_two_arrays)
]) +
# with bias
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'x_shape,w_shape,b_shape,n_batch_axes', [
((2, 3), (4, 3), (4,), Unspecified),
((2, 0), (3, 0), (3,), Unspecified),
((0, 2), (0, 2), (0,), Unspecified),
((0, 0), (0, 0), (0,), Unspecified),
((5, 2, 3), (4, 3), (4,), 2),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes,out_dtype', dtype_utils.result_dtypes_three_arrays)
])
))
class TestLinear(op_utils.OpTest):
def setup(self):
if len(self.in_dtypes) == 3:
x_dtype, w_dtype, b_dtype = self.in_dtypes
else:
(x_dtype, w_dtype), b_dtype = self.in_dtypes, None
x_kind = numpy.dtype(x_dtype).kind
w_kind = numpy.dtype(w_dtype).kind
b_kind = None if b_dtype is None else numpy.dtype(b_dtype).kind
device = chainerx.get_default_device()
        if device.backend.name == 'cuda' and (
                x_kind != 'f' or w_kind != 'f'
                or (b_kind is not None and b_kind != 'f')):
            raise unittest.SkipTest('CUDA dot does not support integers.')
# Skip backward/double-backward tests for int dtypes
if (x_kind != 'f' and w_kind != 'f'
and (b_kind is None or b_kind != 'f')):
self.skip_backward_test = True
self.skip_double_backward_test = True
# Skip backward/double-backward tests if the output will be
# disconnected.
# TODO(niboshi): Remove this skip condition after enabling backward()
# for such cases.
if 0 in self.x_shape or 0 in self.w_shape:
self.skip_backward_test = True
self.skip_double_backward_test = True
if (x_dtype == 'float16' or w_dtype == 'float16'
or b_dtype == 'float16'):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
def generate_inputs(self):
x_shape = self.x_shape
w_shape = self.w_shape
b_shape = self.b_shape
if len(self.in_dtypes) == 3:
x_dtype, w_dtype, b_dtype = self.in_dtypes
else:
(x_dtype, w_dtype), b_dtype = self.in_dtypes, None
x = array_utils.uniform(x_shape, x_dtype)
w = array_utils.uniform(w_shape, w_dtype)
if b_shape in (None, Unspecified):
return x, w
else:
b = array_utils.uniform(b_shape, b_dtype)
return x, w, b
def forward_chainerx(self, inputs):
if len(inputs) == 3:
x, w, b = inputs
else:
(x, w), b = inputs, None
n_batch_axes = self.n_batch_axes
if b is Unspecified:
y = chainerx.linear(x, w)
elif n_batch_axes is Unspecified:
y = chainerx.linear(x, w, b)
else:
y = chainerx.linear(x, w, b, n_batch_axes)
return y,
def forward_expected(self, inputs):
if len(inputs) == 3:
x, w, b = inputs
else:
(x, w), b = inputs, None
n_batch_axes = self.n_batch_axes
x_shape = self.x_shape
w_shape = self.w_shape
out_dtype = self.out_dtype
if n_batch_axes is Unspecified:
n_batch_axes = 1
y_shape = x_shape[:n_batch_axes] + (w_shape[0],)
x_ = x.reshape(numpy.prod(x_shape[:n_batch_axes]),
numpy.prod(x_shape[n_batch_axes:]))
x_ = x_.astype(out_dtype)
w_ = w.astype(out_dtype)
y = x_.dot(w_.T).reshape(y_shape)
if b is not None:
y += b
assert y.dtype == out_dtype
return y,
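# A minimal sketch (plain NumPy) of the n_batch_axes reshaping performed
# in forward_expected above: with n_batch_axes=2, x:(5, 2, 3) is flattened
# to (10, 3), multiplied with w:(4, 3) transposed, and the result reshaped
# back to the batch axes plus the output channel axis.
_lin_x = numpy.zeros((5, 2, 3), dtype=numpy.float32)
_lin_w = numpy.zeros((4, 3), dtype=numpy.float32)
assert _lin_x.reshape(10, 3).dot(_lin_w.T).reshape(5, 2, 4).shape == (5, 2, 4)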
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'c_shape,x_shape', [
((10, 3), (10, 12)),
((20, 32), (16, 128)),
((32, 100), (32, 400)),
((16, 20), (2, 80)),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes,out_dtype', lstm_dtypes_valid)
])
))
@chainer.testing.parameterize_pytest('cover_all', [True, False])
class TestLstm(op_utils.ChainerOpTest):
def setup(self):
c_dtype, x_dtype = self.in_dtypes
if 0 in self.c_shape or 0 in self.x_shape:
self.skip_backward_test = True
self.skip_double_backward_test = True
if (c_dtype == 'float16' or x_dtype == 'float16'):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
def generate_inputs(self):
c_shape = self.c_shape
x_shape = self.x_shape
c_dtype, x_dtype = self.in_dtypes
c = array_utils.uniform(c_shape, c_dtype)
x = array_utils.uniform(x_shape, x_dtype)
return c, x
def forward_chainerx(self, inputs):
c, x = inputs
c, h = chainerx.lstm(c, x)
return c, h,
def forward_chainer(self, inputs):
c, x = inputs
c, h = chainer.functions.lstm(c, x)
return c, h,
@pytest.mark.parametrize('c_shape,x_shape', [
    # x.shape[0] > c.shape[0]
((16, 20), (20, 80)),
((10, 3), (12, 12)),
    # x.shape[1] != 4 * c.shape[1]
((32, 100), (32, 150)),
((16, 50), (16, 48)),
((64, 200), (16, 200)),
])
@pytest.mark.parametrize('cover_all', [True, False])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_lstm_invalid_dimension(
device, c_shape, x_shape, cover_all,
float_dtype):
with pytest.raises(chainerx.DimensionError):
chainerx.lstm(
*_create_lstm_args(
chainerx, device, c_shape, x_shape,
(float_dtype, float_dtype)))
@pytest.mark.parametrize('c_shape,x_shape', [
((10, 3), (10, 12)),
((20, 32), (16, 128)),
((32, 100), (32, 400)),
((16, 20), (2, 80)),
])
@pytest.mark.parametrize('in_dtypes, out_dtype', lstm_dtypes_invalid)
@pytest.mark.parametrize('cover_all', [True, False])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_lstm_invalid_dtype(device, c_shape, x_shape, in_dtypes,
out_dtype, cover_all):
with pytest.raises(chainerx.DtypeError):
chainerx.lstm(
*_create_lstm_args(
chainerx, device, c_shape, x_shape, in_dtypes))
| 24,647
| 35.407681
| 79
|
py
|
chainer
|
chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_creation.py
|
import io
import sys
import tempfile
import chainer
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import op_utils
_array_params_list = [
-2,
1,
-1.5,
2.3,
True,
False,
numpy.array(1),
float('inf'),
float('nan'),
]
def _array_params(values):
    return values + [
        values,
        [values, values],
        (values, values),
        tuple(values),
        (tuple(values), tuple(values)),
        [tuple(values), tuple(values)],
    ]
# Traverses the entries in `obj` recursively and returns `True` if all of the
# entries are finite numbers.
def _is_all_finite(obj):
if isinstance(obj, (tuple, list)):
return all(_is_all_finite(o) for o in obj)
else:
return numpy.isfinite(obj)
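# A minimal sketch of the recursion above: nested tuples and lists are
# traversed, and any nan/inf entry makes the whole structure non-finite.
assert _is_all_finite([1, (2.5, [3])])
assert not _is_all_finite([1.0, float('nan')])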
def _get_default_dtype(value):
if isinstance(value, bool):
return 'bool_'
if isinstance(value, int):
return 'int32'
if isinstance(value, float):
return 'float32'
assert False
# A special parameter object used to represent an unspecified argument.
class Unspecified(object):
pass
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('obj', _array_params(_array_params_list))
@chainerx.testing.parametrize_dtype_specifier(
'dtype_spec', additional_args=(None, Unspecified))
def test_array_from_tuple_or_list(xp, obj, dtype_spec, device):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
# Skip nan/inf -> integer conversion that would cause a cast error.
if (not _is_all_finite(obj)
and dtype_spec not in (None, Unspecified)
and chainerx.dtype(dtype_spec).kind not in ('f', 'c')):
return chainerx.testing.ignore()
if dtype_spec is Unspecified:
return xp.array(obj)
else:
return xp.array(obj, dtype_spec)
@pytest.mark.parametrize('obj', _array_params(_array_params_list))
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_array_from_python_tuple_or_list_with_device(obj, device):
a = chainerx.array(obj, 'float32', device=device)
b = chainerx.array(obj, 'float32')
chainerx.testing.assert_array_equal_ex(a, b)
array_utils.check_device(a, device)
def _check_array_from_numpy_array(a_chx, a_np, device=None):
assert a_chx.offset == 0
array_utils.check_device(a_chx, device)
# recovered data should be equal
a_np_recovered = chainerx.to_numpy(a_chx)
chainerx.testing.assert_array_equal_ex(
a_chx, a_np_recovered, strides_check=False)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_array_from_numpy_array(xp, shape, dtype, device):
a_np = array_utils.create_dummy_ndarray(numpy, shape, dtype)
a_xp = xp.array(a_np)
if xp is chainerx:
_check_array_from_numpy_array(a_xp, a_np, device)
# test possibly freed memory
a_np_copy = a_np.copy()
del a_np
chainerx.testing.assert_array_equal_ex(
a_xp, a_np_copy, strides_check=False)
return a_xp
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_array_from_numpy_non_contiguous_array(xp, shape, dtype, device):
a_np = array_utils.create_dummy_ndarray(numpy, shape, dtype, padding=True)
a_xp = xp.array(a_np)
if xp is chainerx:
_check_array_from_numpy_array(a_xp, a_np, device)
# test possibly freed memory
a_np_copy = a_np.copy()
del a_np
chainerx.testing.assert_array_equal_ex(
a_xp, a_np_copy, strides_check=False)
return a_xp
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_array_from_numpy_positive_offset_array(xp, device):
a_np = array_utils.create_dummy_ndarray(numpy, (2, 3), 'int32')[1, 1:]
a_xp = xp.array(a_np)
if xp is chainerx:
_check_array_from_numpy_array(a_xp, a_np, device)
# test possibly freed memory
a_np_copy = a_np.copy()
del a_np
chainerx.testing.assert_array_equal_ex(a_xp, a_np_copy)
return a_xp
def _array_from_numpy_array_with_dtype(xp, shape, src_dtype, dst_dtype_spec):
if xp is numpy and isinstance(dst_dtype_spec, chainerx.dtype):
dst_dtype_spec = dst_dtype_spec.name
t = array_utils.create_dummy_ndarray(numpy, shape, src_dtype)
return xp.array(t, dtype=dst_dtype_spec)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('src_dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('dst_dtype', chainerx.testing.all_dtypes)
def test_array_from_numpy_array_with_dtype(
xp, shape, src_dtype, dst_dtype, device):
return _array_from_numpy_array_with_dtype(xp, shape, src_dtype, dst_dtype)
@chainerx.testing.numpy_chainerx_array_equal()
@chainerx.testing.parametrize_dtype_specifier(
'dst_dtype_spec', additional_args=(None,))
def test_array_from_numpy_array_with_dtype_spec(xp, shape, dst_dtype_spec):
return _array_from_numpy_array_with_dtype(
xp, shape, 'float32', dst_dtype_spec)
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_array_from_numpy_array_with_device(shape, device):
orig = array_utils.create_dummy_ndarray(numpy, (2, ), 'float32')
a = chainerx.array(orig, device=device)
b = chainerx.array(orig)
chainerx.testing.assert_array_equal_ex(a, b)
array_utils.check_device(a, device)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('copy', [True, False])
def test_array_from_chainerx_array(shape, dtype, copy, device):
t = array_utils.create_dummy_ndarray(chainerx, shape, dtype, device=device)
a = chainerx.array(t, copy=copy)
if not copy:
assert t is a
else:
assert t is not a
chainerx.testing.assert_array_equal_ex(a, t, strides_check=False)
assert a.device is t.device
assert a.is_contiguous
def _check_array_from_chainerx_array_with_dtype(
shape, src_dtype, dst_dtype_spec, copy, device=None):
t = array_utils.create_dummy_ndarray(
chainerx, shape, src_dtype, device=device)
a = chainerx.array(t, dtype=dst_dtype_spec, copy=copy)
src_dtype = chainerx.dtype(src_dtype)
dst_dtype = src_dtype if dst_dtype_spec is None else chainerx.dtype(
dst_dtype_spec)
device = chainerx.get_device(device)
if (not copy
and src_dtype == dst_dtype
and device is chainerx.get_default_device()):
assert t is a
else:
assert t is not a
chainerx.testing.assert_array_equal_ex(a, t.astype(dst_dtype))
assert a.dtype == dst_dtype
assert a.device is chainerx.get_default_device()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('src_dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('dst_dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('copy', [True, False])
def test_array_from_chainerx_array_with_dtype(
shape, src_dtype, dst_dtype, copy, device):
_check_array_from_chainerx_array_with_dtype(
shape, src_dtype, dst_dtype, copy, device)
@chainerx.testing.parametrize_dtype_specifier(
'dst_dtype_spec', additional_args=(None,))
@pytest.mark.parametrize('copy', [True, False])
def test_array_from_chainerx_array_with_dtype_spec(
shape, dst_dtype_spec, copy):
_check_array_from_chainerx_array_with_dtype(
shape, 'float32', dst_dtype_spec, copy)
@pytest.mark.parametrize('src_dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('dst_dtype', chainerx.testing.all_dtypes + (None, ))
@pytest.mark.parametrize('copy', [True, False])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize(
'dst_device_spec',
[None, 'native:1', chainerx.get_device('native:1'), 'native:0'])
def test_array_from_chainerx_array_with_device(
src_dtype, dst_dtype, copy, device, dst_device_spec):
t = array_utils.create_dummy_ndarray(
chainerx, (2,), src_dtype, device=device)
a = chainerx.array(t, dtype=dst_dtype, copy=copy, device=dst_device_spec)
dst_device = chainerx.get_device(dst_device_spec)
if (not copy
and (dst_dtype is None or src_dtype == dst_dtype)
and (dst_device_spec is None or device is dst_device)):
assert t is a
else:
assert t is not a
if dst_dtype is None:
dst_dtype = t.dtype
chainerx.testing.assert_array_equal_ex(
a, t.to_device(dst_device).astype(dst_dtype))
assert a.dtype == chainerx.dtype(dst_dtype)
assert a.device is dst_device
def test_asarray_from_python_tuple_or_list():
obj = _array_params_list
a = chainerx.asarray(obj, dtype='float32')
e = chainerx.array(obj, dtype='float32', copy=False)
chainerx.testing.assert_array_equal_ex(e, a)
assert e.device is a.device
def test_asarray_from_numpy_array_with_zero_copy():
obj = array_utils.create_dummy_ndarray(
numpy, (2, 3), 'float32', padding=True)
obj_refcount_before = sys.getrefcount(obj)
a = chainerx.asarray(obj, dtype='float32')
assert sys.getrefcount(obj) == obj_refcount_before + 1
chainerx.testing.assert_array_equal_ex(obj, a)
# test buffer is shared (zero copy)
a += a
chainerx.testing.assert_array_equal_ex(obj, a)
# test possibly freed memory
obj_copy = obj.copy()
del obj
chainerx.testing.assert_array_equal_ex(obj_copy, a, strides_check=False)
# test possibly freed memory (the other way)
obj = array_utils.create_dummy_ndarray(
numpy, (2, 3), 'float32', padding=True)
a = chainerx.asarray(obj, dtype='float32')
a_copy = a.copy()
del a
chainerx.testing.assert_array_equal_ex(a_copy, obj, strides_check=False)
def test_asarray_from_numpy_array_with_copy():
obj = array_utils.create_dummy_ndarray(numpy, (2, 3), 'int32')
a = chainerx.asarray(obj, dtype='float32')
e = chainerx.array(obj, dtype='float32', copy=False)
chainerx.testing.assert_array_equal_ex(e, a)
assert e.device is a.device
# test buffer is not shared
a += a
assert not numpy.array_equal(obj, chainerx.to_numpy(a))
@pytest.mark.parametrize('dtype', ['int32', 'float32'])
def test_asarray_from_chainerx_array(dtype):
obj = array_utils.create_dummy_ndarray(chainerx, (2, 3), 'int32')
a = chainerx.asarray(obj, dtype=dtype)
if a.dtype == obj.dtype:
assert a is obj
else:
assert a is not obj
e = chainerx.array(obj, dtype=dtype, copy=False)
chainerx.testing.assert_array_equal_ex(e, a)
assert e.device is a.device
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_asarray_with_device(device):
a = chainerx.asarray([0, 1], 'float32', device)
b = chainerx.asarray([0, 1], 'float32')
chainerx.testing.assert_array_equal_ex(a, b)
array_utils.check_device(a, device)
@pytest.mark.parametrize('src_dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('dst_dtype', chainerx.testing.all_dtypes + (None, ))
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize(
'dst_device_spec',
[None, 'native:1', chainerx.get_device('native:1'), 'native:0'])
def test_asarray_from_chainerx_array_with_device(
src_dtype, dst_dtype, device, dst_device_spec):
t = array_utils.create_dummy_ndarray(
chainerx, (2,), src_dtype, device=device)
a = chainerx.asarray(t, dtype=dst_dtype, device=dst_device_spec)
dst_device = chainerx.get_device(dst_device_spec)
if ((dst_dtype is None or src_dtype == dst_dtype)
and (dst_device_spec is None or device is dst_device)):
assert t is a
else:
assert t is not a
if dst_dtype is None:
dst_dtype = t.dtype
chainerx.testing.assert_array_equal_ex(
a, t.to_device(dst_device).astype(dst_dtype))
assert a.dtype == chainerx.dtype(dst_dtype)
assert a.device is dst_device
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('padding', [False, True])
def test_ascontiguousarray_from_chainerx_array(device, shape, dtype, padding):
np_arr = array_utils.create_dummy_ndarray(
numpy, shape, dtype, padding=padding)
obj = chainerx.testing._fromnumpy(np_arr, keepstrides=True, device=device)
a = chainerx.ascontiguousarray(obj)
if not padding and shape != (): # () will be reshaped to (1,)
assert a is obj
e = np_arr
chainerx.testing.assert_array_equal_ex(e, a, strides_check=False)
assert a.is_contiguous
assert e.dtype.name == a.dtype.name
def test_ascontiguousarray_from_chainerx_array_device():
with chainerx.using_device(chainerx.get_device('native:0')):
dev = chainerx.get_device('native:1') # Non default one
assert chainerx.get_default_device() is not dev
a = chainerx.arange(10, device=dev)
b = chainerx.ascontiguousarray(a)
assert b.is_contiguous is True
assert b.device is dev
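

# Illustrative sketch (not part of the test suite): ascontiguousarray
# copies only when the input is non-contiguous; a transposed view is a
# convenient way to produce one. Assumes the default native device.
def _sketch_ascontiguousarray():
    x = chainerx.arange(6, dtype='int32').reshape((2, 3))
    assert chainerx.ascontiguousarray(x) is x  # already contiguous
    y = x.T  # non-contiguous view
    z = chainerx.ascontiguousarray(y)
    assert z is not y and z.is_contiguous
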
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('padding', [False, True])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_ascontiguousarray_with_dtype(xp, device, shape, padding, dtype_spec):
obj = array_utils.create_dummy_ndarray(xp, shape, 'int32', padding=padding)
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
a = xp.ascontiguousarray(obj, dtype=dtype_spec)
if xp is chainerx:
assert a.is_contiguous
return a
def test_asanyarray_from_python_tuple_or_list():
obj = _array_params_list
a = chainerx.asanyarray(obj, dtype='float32')
e = chainerx.array(obj, dtype='float32', copy=False)
chainerx.testing.assert_array_equal_ex(e, a)
assert e.device is a.device
def test_asanyarray_from_numpy_array():
obj = array_utils.create_dummy_ndarray(numpy, (2, 3), 'int32')
a = chainerx.asanyarray(obj, dtype='float32')
e = chainerx.array(obj, dtype='float32', copy=False)
chainerx.testing.assert_array_equal_ex(e, a)
assert e.device is a.device
def test_asanyarray_from_numpy_subclass_array():
class Subclass(numpy.ndarray):
pass
obj = array_utils.create_dummy_ndarray(
numpy, (2, 3), 'int32').view(Subclass)
a = chainerx.asanyarray(obj, dtype='float32')
e = chainerx.array(obj, dtype='float32', copy=False)
chainerx.testing.assert_array_equal_ex(e, a)
assert e.device is a.device
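

# Note (assumption worth stating): ChainerX has no ndarray subclasses, so
# asanyarray cannot pass a NumPy subclass through the way numpy.asanyarray
# would; the subclass input above is simply converted to a chainerx array.
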
@pytest.mark.parametrize('dtype', ['int32', 'float32'])
def test_asanyarray_from_chainerx_array(dtype):
obj = array_utils.create_dummy_ndarray(chainerx, (2, 3), 'int32')
a = chainerx.asanyarray(obj, dtype=dtype)
if a.dtype == obj.dtype:
assert a is obj
else:
assert a is not obj
e = chainerx.array(obj, dtype=dtype, copy=False)
chainerx.testing.assert_array_equal_ex(e, a)
assert e.device is a.device
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_asanyarray_with_device(device):
a = chainerx.asanyarray([0, 1], 'float32', device)
b = chainerx.asanyarray([0, 1], 'float32')
chainerx.testing.assert_array_equal_ex(a, b)
array_utils.check_device(a, device)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier(
'dtype_spec', additional_args=(None, Unspecified))
def test_empty(xp, shape_as_sequence_or_int, dtype_spec, device):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
if dtype_spec is Unspecified:
a = xp.empty(shape_as_sequence_or_int)
else:
a = xp.empty(shape_as_sequence_or_int, dtype_spec)
a.fill(0)
if dtype_spec in (None, Unspecified):
a = dtype_utils.cast_if_numpy_array(xp, a, 'float32')
return a
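

# Note on the fill(0) above: empty returns uninitialized memory, so the
# tests overwrite it with a fixed value before the decorator compares the
# NumPy and ChainerX results element-wise.
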
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_empty_with_device(device):
a = chainerx.empty((2,), 'float32', device)
b = chainerx.empty((2,), 'float32')
array_utils.check_device(a, device)
assert a.dtype == b.dtype
assert a.shape == b.shape
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_empty_like(xp, shape, dtype, device):
t = xp.empty(shape, dtype)
a = xp.empty_like(t)
a.fill(0)
return a
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_empty_like_with_device(device):
t = chainerx.empty((2,), 'float32')
a = chainerx.empty_like(t, device)
b = chainerx.empty_like(t)
array_utils.check_device(a, device)
assert a.dtype == b.dtype
assert a.shape == b.shape
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier(
'dtype_spec', additional_args=(None, Unspecified))
def test_zeros(xp, shape_as_sequence_or_int, dtype_spec, device):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
if dtype_spec is Unspecified:
out = xp.zeros(shape_as_sequence_or_int)
else:
out = xp.zeros(shape_as_sequence_or_int, dtype_spec)
if dtype_spec in (None, Unspecified):
out = dtype_utils.cast_if_numpy_array(xp, out, 'float32')
return out
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_zeros_with_device(device):
a = chainerx.zeros((2,), 'float32', device=device)
b = chainerx.zeros((2,), 'float32')
chainerx.testing.assert_array_equal_ex(a, b)
array_utils.check_device(a, device)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_zeros_like(xp, shape, dtype, device):
t = xp.empty(shape, dtype)
return xp.zeros_like(t)
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_zeros_like_with_device(device):
t = chainerx.empty((2,), 'float32')
a = chainerx.zeros_like(t, device)
b = chainerx.zeros_like(t)
array_utils.check_device(a, device)
chainerx.testing.assert_array_equal_ex(a, b)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier(
'dtype_spec', additional_args=(None, Unspecified))
def test_ones(xp, shape_as_sequence_or_int, dtype_spec, device):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
if dtype_spec is Unspecified:
out = xp.ones(shape_as_sequence_or_int)
else:
out = xp.ones(shape_as_sequence_or_int, dtype_spec)
if dtype_spec in (None, Unspecified):
out = dtype_utils.cast_if_numpy_array(xp, out, 'float32')
return out
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_ones_with_device(device):
a = chainerx.ones((2,), 'float32', device)
b = chainerx.ones((2,), 'float32')
array_utils.check_device(a, device)
chainerx.testing.assert_array_equal_ex(a, b)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_ones_like(xp, shape, dtype, device):
t = xp.empty(shape, dtype)
return xp.ones_like(t)
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_ones_like_with_device(device):
t = chainerx.empty((2,), 'float32')
a = chainerx.ones_like(t, device)
b = chainerx.ones_like(t)
array_utils.check_device(a, device)
chainerx.testing.assert_array_equal_ex(a, b)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize(
'value', [True, False, -2, 0, 1, 2, 2.3, float('inf'), float('nan')])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_full(xp, shape_as_sequence_or_int, value, device):
out = xp.full(shape_as_sequence_or_int, value)
return dtype_utils.cast_if_numpy_array(xp, out, _get_default_dtype(value))
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize(
'value', [True, False, -2, 0, 1, 2, 2.3, float('inf'), float('nan')])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_full_with_dtype(xp, shape, dtype_spec, value, device):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
return xp.full(shape, value, dtype_spec)
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_full_with_device(device):
a = chainerx.full((2,), 1, 'float32', device)
b = chainerx.full((2,), 1, 'float32')
array_utils.check_device(a, device)
chainerx.testing.assert_array_equal_ex(a, b)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize(
'value', [True, False, -2, 0, 1, 2, 2.3, float('inf'), float('nan')])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_full_like(xp, shape, dtype, value, device):
t = xp.empty(shape, dtype)
return xp.full_like(t, value)
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_full_like_with_device(device):
t = chainerx.empty((2,), 'float32')
a = chainerx.full_like(t, 1, device)
b = chainerx.full_like(t, 1)
array_utils.check_device(a, device)
chainerx.testing.assert_array_equal_ex(a, b)
def _is_bool_spec(dtype_spec):
# Used in arange tests
if dtype_spec is None:
return False
return chainerx.dtype(dtype_spec) == chainerx.bool_
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('stop', [-2, 0, 0.1, 3, 3.2, False, True])
@pytest.mark.parametrize_device(['native:0'])
@chainerx.testing.parametrize_dtype_specifier(
'dtype_spec', additional_args=(None,))
def test_arange_stop(xp, stop, dtype_spec, device):
# TODO(hvy): xp.arange(True) should return an ndarray of type int64
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
    # Checked in test_arange_invalid_too_long_bool
if _is_bool_spec(dtype_spec) and stop > 2:
return chainerx.testing.ignore()
if isinstance(stop, bool) and dtype_spec is None:
# TODO(niboshi): This pattern needs dtype promotion.
return chainerx.testing.ignore()
out = xp.arange(stop, dtype=dtype_spec)
if dtype_spec in (None, Unspecified):
expected_dtype = _get_default_dtype(stop)
out = dtype_utils.cast_if_numpy_array(xp, out, expected_dtype)
return out
@chainerx.testing.numpy_chainerx_allclose(
atol=1e-7, float16_rtol=5e-3, float16_atol=5e-3)
@pytest.mark.parametrize('start,stop', [
(0, 0),
(0, 3),
(-3, 2),
(2, 0),
(-2.2, 3.4),
(True, True),
(False, False),
(True, False),
(False, True),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier(
'dtype_spec', additional_args=(None,))
def test_arange_start_stop(xp, start, stop, dtype_spec, device):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
    # Checked in test_arange_invalid_too_long_bool
if _is_bool_spec(dtype_spec) and abs(stop - start) > 2:
return chainerx.testing.ignore()
if ((isinstance(start, bool)
or isinstance(stop, bool))
and dtype_spec is None):
# TODO(niboshi): This pattern needs dtype promotion.
return chainerx.testing.ignore()
out = xp.arange(start, stop, dtype=dtype_spec)
if dtype_spec in (None, Unspecified):
expected_dtype = _get_default_dtype(stop)
out = dtype_utils.cast_if_numpy_array(xp, out, expected_dtype)
return out
@chainerx.testing.numpy_chainerx_allclose(float16_rtol=1e-3)
@pytest.mark.parametrize('start,stop,step', [
(0, 3, 1),
(0, 0, 2),
(0, 1, 2),
(3, -1, -2),
(-1, 3, -2),
(3., 2., 1.2),
(2., -1., 1.),
(1, 4, -1.2),
# (4, 1, -1.2), # TODO(niboshi): Fix it (or maybe NumPy bug?)
(False, True, True),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier(
'dtype_spec', additional_args=(None,))
def test_arange_start_stop_step(xp, start, stop, step, dtype_spec, device):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
    # Checked in test_arange_invalid_too_long_bool
if _is_bool_spec(dtype_spec) and abs((stop - start) / step) > 2:
return chainerx.testing.ignore()
if ((isinstance(start, bool)
or isinstance(stop, bool)
or isinstance(step, bool))
and dtype_spec is None):
# TODO(niboshi): This pattern needs dtype promotion.
return chainerx.testing.ignore()
out = xp.arange(start, stop, step, dtype=dtype_spec)
if dtype_spec in (None, Unspecified):
expected_dtype = _get_default_dtype(step)
out = dtype_utils.cast_if_numpy_array(xp, out, expected_dtype)
return out
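

# Note (illustrative): when dtype is omitted, the expected dtype is derived
# from the Python type of the range arguments via _get_default_dtype, and
# dtype_utils.cast_if_numpy_array reconciles NumPy's platform defaults with
# ChainerX's defaults before the comparison.
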
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_arange_with_device(device):
def check(*args, **kwargs):
a = chainerx.arange(*args, device=device, **kwargs)
b = chainerx.arange(*args, **kwargs)
array_utils.check_device(a, device)
chainerx.testing.assert_array_equal_ex(a, b)
check(3)
check(3, dtype='float32')
check(0, 3)
check(0, 3, dtype='float32')
check(0, 3, 2)
check(0, 3, 2, dtype='float32')
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_arange_invalid_too_long_bool(device):
def check(xp, err):
with pytest.raises(err):
xp.arange(3, dtype='bool_')
with pytest.raises(err):
xp.arange(1, 4, 1, dtype='bool_')
# Should not raise since the size is <= 2.
xp.arange(1, 4, 2, dtype='bool_')
check(chainerx, chainerx.DtypeError)
check(numpy, ValueError)
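

# Illustrative sketch (not part of the test suite): a bool arange can hold
# at most the two values False and True, so longer ranges raise, as the
# checks above assert. Assumes the default native device.
def _sketch_arange_bool_limit():
    chainerx.arange(2, dtype='bool_')  # ok: [False, True]
    try:
        chainerx.arange(3, dtype='bool_')
    except chainerx.DtypeError:
        pass  # expected
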
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_arange_invalid_zero_step(device):
def check(xp, err):
with pytest.raises(err):
xp.arange(1, 3, 0)
check(chainerx, chainerx.ChainerxError)
check(numpy, ZeroDivisionError)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
@pytest.mark.parametrize('n', [0, 1, 2, 257])
def test_identity(xp, n, dtype_spec, device):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
out = xp.identity(n, dtype_spec)
if dtype_spec in (None, Unspecified):
out = dtype_utils.cast_if_numpy_array(xp, out, 'float32')
return out
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_identity_with_device(device):
a = chainerx.identity(3, 'float32', device)
b = chainerx.identity(3, 'float32')
array_utils.check_device(a, device)
chainerx.testing.assert_array_equal_ex(a, b)
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(ValueError, chainerx.DimensionError))
@pytest.mark.parametrize('device', ['native:1', 'native:0'])
def test_identity_invalid_negative_n(xp, device):
xp.identity(-1, 'float32')
@chainerx.testing.numpy_chainerx_array_equal(accept_error=(TypeError,))
@pytest.mark.parametrize('device', ['native:1', 'native:0'])
def test_identity_invalid_n_type(xp, device):
xp.identity(3.0, 'float32')
# TODO(hvy): Add tests with non-ndarray but array-like inputs when supported.
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('N,M,k', [
(0, 0, 0),
(0, 0, 1),
(2, 1, -2),
(2, 1, -1),
(2, 1, 0),
(2, 1, 1),
(2, 1, 2),
(3, 4, -4),
(3, 4, -1),
(3, 4, 1),
(3, 4, 4),
(6, 3, 1),
(6, 3, -1),
(3, 6, 1),
(3, 6, -1),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_eye(xp, N, M, k, dtype_spec, device):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
out = xp.eye(N, M, k, dtype_spec)
if dtype_spec in (None, Unspecified):
out = dtype_utils.cast_if_numpy_array(xp, out, 'float32')
return out
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('N,M,k', [
(3, None, 1),
(3, 4, None),
(3, None, None),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_eye_with_default(xp, N, M, k, dtype_spec, device):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
if M is None and k is None:
return xp.eye(N, dtype=dtype_spec)
elif M is None:
return xp.eye(N, k=k, dtype=dtype_spec)
elif k is None:
return xp.eye(N, M=M, dtype=dtype_spec)
assert False
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_eye_with_device(device):
a = chainerx.eye(1, 2, 1, 'float32', device)
b = chainerx.eye(1, 2, 1, 'float32')
array_utils.check_device(a, device)
chainerx.testing.assert_array_equal_ex(a, b)
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(ValueError, chainerx.DimensionError))
@pytest.mark.parametrize('N,M', [
(-1, 2),
(1, -1),
(-2, -1),
])
@pytest.mark.parametrize('device', ['native:1', 'native:0'])
def test_eye_invalid_negative_N_M(xp, N, M, device):
xp.eye(N, M, 1, 'float32')
@chainerx.testing.numpy_chainerx_array_equal(accept_error=(TypeError,))
@pytest.mark.parametrize('N,M,k', [
(1.0, 2, 1),
(2, 1.0, 1),
(2, 3, 1.0),
(2.0, 1.0, 1),
])
@pytest.mark.parametrize('device', ['native:1', 'native:0'])
def test_eye_invalid_NMk_type(xp, N, M, k, device):
xp.eye(N, M, k, 'float32')
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('k', [0, -2, -1, 1, 2, -5, 4])
@pytest.mark.parametrize('shape', [(4,), (2, 3), (6, 5)])
@pytest.mark.parametrize('transpose', [False, True])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_diag(xp, k, shape, transpose, device):
v = xp.arange(array_utils.total_size(shape), dtype='int32').reshape(shape)
if transpose: # Test non-contiguous inputs for multi-dimensional shapes.
v = v.T
return xp.diag(v, k)
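

# Illustrative sketch (not part of the test suite): diag is two-way. A 1-D
# input builds a square matrix with the input on the k-th diagonal; a 2-D
# input extracts that diagonal, so the round trip recovers the vector.
def _sketch_diag_roundtrip():
    v = chainerx.arange(3, dtype='int32')
    m = chainerx.diag(v)  # 3x3 matrix with v on the main diagonal
    assert numpy.array_equal(
        chainerx.to_numpy(chainerx.diag(m)), chainerx.to_numpy(v))
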
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(ValueError, chainerx.DimensionError))
@pytest.mark.parametrize('k', [0, -2, -1, 1, 2, -5, 4])
@pytest.mark.parametrize('shape', [(), (2, 1, 2), (2, 0, 1)])
@pytest.mark.parametrize('device', ['native:1', 'native:0'])
def test_diag_invalid_ndim(xp, k, shape, device):
v = xp.arange(array_utils.total_size(shape), dtype='int32').reshape(shape)
return xp.diag(v, k)
# TODO(hvy): Add tests with non-ndarray but array-like inputs when supported.
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('k', [0, -2, -1, 1, 2, -5, 4])
@pytest.mark.parametrize('shape', [(), (4,), (2, 3), (6, 5), (2, 0)])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_diagflat(xp, k, shape, device):
v = xp.arange(array_utils.total_size(shape), dtype='int32').reshape(shape)
return xp.diagflat(v, k)
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(ValueError, chainerx.DimensionError))
@pytest.mark.parametrize('k', [0, -2, -1, 1, 2, -5, 4])
@pytest.mark.parametrize('shape', [(2, 1, 2), (2, 0, 1)])
@pytest.mark.parametrize('device', ['native:1', 'native:0'])
def test_diagflat_invalid_ndim(xp, k, shape, device):
v = xp.arange(array_utils.total_size(shape), dtype='int32').reshape(shape)
return xp.diagflat(v, k)
@chainerx.testing.numpy_chainerx_allclose(float16_rtol=1e-3)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('start,stop', [
(0, 0),
(0, 1),
(1, 0),
(-1, 0),
(0, -1),
(1, -1),
(-13.3, 352.5),
(13.3, -352.5),
])
@pytest.mark.parametrize('num', [0, 1, 2, 257])
@pytest.mark.parametrize('endpoint', [True, False])
@pytest.mark.parametrize('range_type', [float, int])
def test_linspace(xp, start, stop, num, endpoint, range_type, dtype, device):
start = range_type(start)
stop = range_type(stop)
return xp.linspace(start, stop, num, endpoint=endpoint, dtype=dtype)
# Check only for closeness to NumPy, not the dtype,
# since the default float dtypes of NumPy and ChainerX may differ.
@chainerx.testing.numpy_chainerx_allclose(dtype_check=False, float16_rtol=1e-7)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('start,stop', [
(0, 0),
(0, 1),
(1, 0),
(-1, 0),
(0, -1),
(1, -1),
(-13.3, 352.5),
(13.3, -352.5),
])
@pytest.mark.parametrize('num', [0, 1, 2, 257])
@pytest.mark.parametrize('endpoint', [True, False])
@pytest.mark.parametrize('range_type', [float, int])
def test_linspace_default_dtype(xp, start, stop, num, endpoint,
range_type, device):
start = range_type(start)
stop = range_type(stop)
return xp.linspace(start, stop, num, endpoint=endpoint)
@chainerx.testing.numpy_chainerx_allclose()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_linspace_dtype_spec(xp, dtype_spec, device):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
return xp.linspace(3, 5, 10, dtype=dtype_spec)
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_linspace_with_device(device):
a = chainerx.linspace(3, 5, 10, dtype='float32', device=device)
b = chainerx.linspace(3, 5, 10, dtype='float32')
array_utils.check_device(a, device)
chainerx.testing.assert_array_equal_ex(a, b)
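

# Illustrative sketch (not part of the test suite): endpoint controls
# whether stop itself is included, matching NumPy's linspace.
def _sketch_linspace_endpoint():
    a = chainerx.linspace(0, 1, 5)                  # 0, .25, .5, .75, 1
    b = chainerx.linspace(0, 1, 5, endpoint=False)  # 0, .2, .4, .6, .8
    assert chainerx.to_numpy(a)[-1] == 1.0
    assert abs(chainerx.to_numpy(b)[-1] - 0.8) < 1e-6
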
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(ValueError, chainerx.ChainerxError))
@pytest.mark.parametrize('device', ['native:1', 'native:0'])
def test_linspace_invalid_num(xp, device):
xp.linspace(2, 4, -1)
@pytest.mark.parametrize_device(['native:0'])
def test_frombuffer_from_numpy_array(device):
obj = array_utils.create_dummy_ndarray(
numpy, (2, 3), 'int32', padding=False)
a_chx = chainerx.frombuffer(obj, obj.dtype)
a_np = numpy.frombuffer(obj, obj.dtype)
chainerx.testing.assert_array_equal_ex(a_np, a_chx)
assert a_chx.device is chainerx.get_device(device)
# test buffer is shared
obj += obj
chainerx.testing.assert_array_equal_ex(obj.ravel(), a_chx)
# test possibly freed memory
obj_copy = obj.copy()
del obj
chainerx.testing.assert_array_equal_ex(obj_copy.ravel(), a_chx)
@pytest.mark.parametrize_device(['cuda:0'])
def test_frombuffer_from_numpy_array_with_cuda(device):
obj = array_utils.create_dummy_ndarray(numpy, (2, 3), 'int32')
with pytest.raises(chainerx.ChainerxError):
chainerx.frombuffer(obj, obj.dtype)
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(ValueError, chainerx.ChainerxError))
def test_frombuffer_from_numpy_array_with_noncontiguous(xp):
obj = array_utils.create_dummy_ndarray(
numpy, (2, 3), 'int32', padding=True)
return xp.frombuffer(obj, obj.dtype)
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(ValueError, chainerx.ChainerxError))
@pytest.mark.parametrize('count', [-1, 0, 1, 3, 4])
@pytest.mark.parametrize('offset', [-1, 0, 1, 4, 3 * 4, 3 * 4 + 4])
def test_frombuffer_from_numpy_array_with_offset_count(xp, count, offset):
obj = array_utils.create_dummy_ndarray(numpy, (3,), 'int32')
return xp.frombuffer(obj, obj.dtype, count=count, offset=offset)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_frombuffer_from_device_buffer(device):
dtype = 'int32'
device_buffer = chainerx.testing._DeviceBuffer(
[1, 2, 3, 4, 5, 6], (2, 3), dtype)
a = chainerx.frombuffer(device_buffer, dtype)
e = chainerx.array([1, 2, 3, 4, 5, 6], dtype)
chainerx.testing.assert_array_equal_ex(e, a)
assert a.device is chainerx.get_device(device)
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_frombuffer_with_device(device):
obj = array_utils.create_dummy_ndarray(
numpy, (2, 3), 'int32', padding=False)
a = chainerx.frombuffer(obj, obj.dtype, device=device)
b = chainerx.frombuffer(obj, obj.dtype)
chainerx.testing.assert_array_equal_ex(a, b)
array_utils.check_device(a, device)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('count', [-1, 0, 2])
@pytest.mark.parametrize('sep', ['', 'a'])
@pytest.mark.parametrize('device', ['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_fromfile(xp, count, sep, dtype_spec, device):
# Skip if bool_ dtype and text mode
if numpy.dtype(dtype_spec) == numpy.bool_ and sep == 'a':
pytest.skip(
'numpy.fromfile does not work with bool_ dtype and text mode')
# Write array data to temporary file.
if isinstance(dtype_spec, chainerx.dtype):
numpy_dtype_spec = dtype_spec.name
else:
numpy_dtype_spec = dtype_spec
data = numpy.arange(2, dtype=numpy_dtype_spec)
f = tempfile.TemporaryFile()
data.tofile(f, sep=sep)
# Read file.
f.seek(0)
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = numpy_dtype_spec
return xp.fromfile(f, dtype=dtype_spec, count=count, sep=sep)
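

# Note (illustrative): for fromfile/tofile an empty sep selects binary
# mode, while a non-empty sep reads string-separated text; the bool_ skip
# above exists because NumPy's text mode cannot parse booleans.
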
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('device', ['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_loadtxt(xp, dtype_spec, device):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
txt = '''// Comment to be ignored.
1 2 3 4
5 6 7 8
'''
txt = io.StringIO(txt)
    # Converter that adds 1 to each element in column 3 (0-based).
def converter(element_str):
return float(element_str) + 1
return xp.loadtxt(
txt, dtype=dtype_spec, comments='//', delimiter=' ',
converters={3: converter}, skiprows=2, usecols=(1, 3), unpack=False,
ndmin=2, encoding='bytes')
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('count', [-1, 0, 5])
@pytest.mark.parametrize('device', ['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_fromiter(xp, count, dtype_spec, device):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
iterable = (x * x for x in range(5))
return xp.fromiter(iterable, dtype=dtype_spec, count=count)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('count', [-1, 0, 3])
@pytest.mark.parametrize('sep', [' ', 'a'])
@pytest.mark.parametrize('device', ['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_fromstring(xp, count, sep, dtype_spec, device):
if isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
elements = ['1', '2', '3']
string = sep.join(elements)
return xp.fromstring(string, dtype=dtype_spec, count=count, sep=sep)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('device', ['native:0', 'cuda:0'])
@pytest.mark.parametrize('shape', [(2, 2), [2, 2]])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_fromfunction(xp, shape, dtype_spec, device):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
def function(i, j, addend):
return i * j + addend
    # addend must be forwarded to function as a keyword argument.
return xp.fromfunction(function, shape, dtype=dtype_spec, addend=2)
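

# Illustrative sketch (not part of the test suite): fromfunction forwards
# extra keyword arguments to the callable, which the test above relies on.
def _sketch_fromfunction_kwargs():
    out = numpy.fromfunction(
        lambda i, j, addend: i * j + addend, (2, 2), addend=2)
    assert out[1][1] == 3
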
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_copy(xp, shape, dtype, device, is_module):
a = array_utils.create_dummy_ndarray(xp, shape, dtype)
if is_module:
return xp.copy(a)
else:
return a.copy()
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('shape,k', [
((2,), -1),
((2,), 0),
((2,), 1),
((3, 3), -1),
((3, 3), 0),
((3, 3), 1),
((4, 3), -1),
((4, 3), 0),
((4, 3), 1),
((4, 3), 5),
((4, 3), -5),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_tri(xp, shape, k, dtype_spec, device):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
out = xp.tri(*shape, k=k, dtype=dtype_spec)
if dtype_spec in (None, Unspecified):
out = dtype_utils.cast_if_numpy_array(xp, out, 'float32')
return out
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('N,M,k', [
(3, None, 1),
(3, 4, None),
(3, None, None),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_tri_with_default(xp, N, M, k, dtype_spec, device):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
if M is None and k is None:
return xp.tri(N, dtype=dtype_spec)
elif M is None:
return xp.tri(N, k=k, dtype=dtype_spec)
elif k is None:
return xp.tri(N, M=M, dtype=dtype_spec)
assert False
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
def test_tri_with_device(device):
a = chainerx.tri(1, 2, 1, 'float32', device)
b = chainerx.tri(1, 2, 1, 'float32')
array_utils.check_device(a, device)
chainerx.testing.assert_array_equal_ex(a, b)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(3, 3), (4, 3), (2, 3, 4)],
'k': [0, 1, -1, 5, -5]
}) +
chainer.testing.product({
'shape': [(3,)],
'k': [0, 1, -1, 5, -5],
'skip_backward_test': [True],
'skip_double_backward_test': [True]
})
))
class TestTrilTriu(op_utils.NumpyOpTest):
def setup(self, float_dtype):
self.dtype = float_dtype
        # The float16 backward test sometimes fails with the default
        # tolerances.
if self.dtype == 'float16':
self.check_backward_options.update({'rtol': 2e-3, 'atol': 2e-3})
self.check_double_backward_options.update(
{'rtol': 2e-3, 'atol': 2e-3})
def generate_inputs(self):
a = numpy.random.random(self.shape).astype(self.dtype)
return a,
def forward_xp(self, inputs, xp):
a, = inputs
tril = xp.tril(a, self.k)
triu = xp.triu(a, self.k)
return tril, triu,
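

# Note (illustrative): for tril/triu, k selects the bounding diagonal of
# the kept triangle; k=0 is the main diagonal, k>0 diagonals above it and
# k<0 diagonals below it, matching NumPy.
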
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('indexing', ['xy', 'ij'])
@chainer.testing.parameterize_pytest('input_lens', [
# Test up to 4 inputs to check `indexing` behaviors
# 'xy': (1, 0, 2, ..., N-1)
# 'ij': (0, 1, 2, ..., N-1)
(7,),
(6, 4),
(2, 3, 5),
(4, 3, 2, 5),
])
class TestMeshgrid(op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
def setup(self, dtype):
if numpy.dtype(dtype).kind != 'f':
self.skip_backward_test = True
self.skip_double_backward_test = True
self.dtype = dtype
if dtype == 'float16':
self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
def generate_inputs(self):
return tuple(
numpy.linspace(
numpy.random.uniform(-5, -1),
numpy.random.uniform(1, 5),
size)
.astype(self.dtype)
for size in self.input_lens)
def forward_xp(self, inputs, xp):
return tuple(xp.meshgrid(*inputs, indexing=self.indexing))
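

# Illustrative sketch (not part of the test suite) of the indexing modes
# listed above: for 1-D inputs of lengths M and N, 'ij' yields (M, N)
# outputs while 'xy' swaps the first two axes to (N, M), matching NumPy.
def _sketch_meshgrid_indexing():
    x = numpy.arange(3)
    y = numpy.arange(4)
    xv, _ = numpy.meshgrid(x, y, indexing='ij')
    assert xv.shape == (3, 4)
    xv, _ = numpy.meshgrid(x, y, indexing='xy')
    assert xv.shape == (4, 3)
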
@chainerx.testing.numpy_chainerx_array_equal()
def test_meshgrid_no_array(xp):
return xp.meshgrid()
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('indexing', ['xy', 'ij'])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_meshgrid_one_array_multi_dim(xp, indexing, dtype_spec):
if xp is numpy and isinstance(dtype_spec, chainerx.dtype):
dtype_spec = dtype_spec.name
return xp.meshgrid(xp.ones((3, 3), dtype=dtype_spec), indexing=indexing)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('indexing', ['xy', 'ij'])
def test_meshgrid_no_array_indexing(xp, indexing):
return xp.meshgrid(indexing=indexing)
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(
chainerx.ChainerxError, ValueError))
def test_meshgrid_invalid_kwarg_value(xp):
a = array_utils.create_dummy_ndarray(xp, (10,), 'float32')
return xp.meshgrid(a, a, indexing='xz')
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(
chainerx.ChainerxError, TypeError))
def test_meshgrid_invalid_kwarg(xp):
a = array_utils.create_dummy_ndarray(xp, (10,), 'float32')
return xp.meshgrid(a, a, indexing='xy', invalid_arg=0)