| repo (string, length 2-99) | file (string, length 13-225) | code (string, length 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
| chainer | chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_pooling.py |
import functools
from operator import mul
import unittest
import chainer
import numpy
import pytest
import chainerx
from chainerx_tests import op_utils
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('x_shape,ksize,stride,pad', [
((2, 3, 4), (1,), 1, 0),
((1, 3, 4), (2,), 3, 2),
((2, 3, 4, 4), (3, 3), 1, 0),
((2, 3, 4, 4), (3, 3), None, 0),
((1, 3, 4, 4), (3, 3), (1, 2), 1),
((1, 3, 4, 4), (3, 3), 2, (2, 0)),
((1, 3, 2, 6, 3), (1, 3, 2), 2, (2, 0, 1)),
((1, 3, 2, 6, 3), (1, 3, 2), (1, 2, 3), (2, 0, 1)),
((2, 3, 2, 6, 3), (1, 3, 2), (1, 2, 3), (2, 0, 1)),
((1, 3, 2, 6, 3, 2), (1, 3, 2, 2), 2, 2),
])
@chainer.testing.parameterize_pytest('cover_all', [True, False])
class TestMaxPool(op_utils.ChainerOpTest):
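# dodge_nondifferentiable regenerates inputs and retries when the gradient
# check hits a nondifferentiable point (e.g. ties in max pooling).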
dodge_nondifferentiable = True
def setup(self, float_dtype):
dtype = float_dtype
ksize = self.ksize
device = chainerx.get_default_device()
if (device.backend.name == 'cuda'
and len(ksize) != 2
and len(ksize) != 3):
raise unittest.SkipTest(
'cuDNN supports only 2 and 3 spatial dimensions')
if dtype == 'float16':
self.check_backward_options.update({'rtol': 5e-2, 'atol': 1e-3})
self.check_double_backward_options.update({
'rtol': 5e-2, 'atol': 1e-3})
self.dtype = dtype
def generate_inputs(self):
x_shape = self.x_shape
dtype = self.dtype
if self.test_name in ('test_backward', 'test_double_backward'):
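# Use distinct, evenly spaced values for gradient tests so that max
# pooling has no ties; ties are nondifferentiable and break numeric
# gradient checks.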
x = numpy.arange(functools.reduce(mul, x_shape), dtype=dtype)
x = x.reshape(x_shape)
x = 2 * x / x.size - 1
else:
x = numpy.random.randn(*x_shape).astype(dtype, copy=False)
return x,
def forward_chainerx(self, inputs):
x, = inputs
y = chainerx.max_pool(
x, ksize=self.ksize, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
# This function can return -inf (or huge negative numbers in case of
# CUDA) around boundaries.
# Convert them to finite numbers in order to properly calculate numeric
# gradients.
y = chainerx.maximum(y, -1e4)
return y,
def forward_chainer(self, inputs):
x, = inputs
y = chainer.functions.max_pooling_nd(
x, ksize=self.ksize, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
# Convert -inf to finite numbers.
y = chainer.functions.maximum(y, numpy.full_like(y.array, -1e4))
return y,
@pytest.mark.parametrize('x_shape,ksize,stride,pad', [
((1, 3), (), 1, 0), # Requires at least one spatial dimension
((2, 3, 4, 3), (2, 2, 1), 3, 2), # Wrong number of ksize.
((2, 3, 4, 3), (2, 2), (1,), 0), # Wrong number of strides.
((1, 3, 4, 3), (2, 2), 3, (2,)), # Wrong number of paddings.
((4, 4, 2, 2), 5, 3, 0), # Output size should be positive.
])
@pytest.mark.parametrize('cover_all', [True, False])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_max_pool_invalid(
device, x_shape, ksize, stride, pad, cover_all, float_dtype):
x = numpy.random.uniform(-1, 1, x_shape).astype(float_dtype)
x = chainerx.array(x)
with pytest.raises(chainerx.DimensionError):
chainerx.max_pool(
x, ksize=ksize, stride=stride, pad=pad, cover_all=cover_all)
def _get_pad_mode_kwargs(pad_mode, is_chainerx):
# ChainerX
if is_chainerx:
if pad_mode is None:
return {}
return {'pad_mode': pad_mode}
# Chainer
# chainerx `pad_mode` defaults to 'ignore', whereas chainer's default is
# pad_value=0.
if pad_mode == 'zero':
return {'pad_value': 0}
if pad_mode in ('ignore', None):
return {'pad_value': None}
assert False, pad_mode
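# Illustrative sanity check of the mapping above; added for clarity and not
# part of the original suite.
def test_get_pad_mode_kwargs_mapping():
    assert _get_pad_mode_kwargs('zero', True) == {'pad_mode': 'zero'}
    assert _get_pad_mode_kwargs(None, True) == {}
    assert _get_pad_mode_kwargs('zero', False) == {'pad_value': 0}
    assert _get_pad_mode_kwargs('ignore', False) == {'pad_value': None}
    assert _get_pad_mode_kwargs(None, False) == {'pad_value': None}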
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('x_shape,ksize,stride,pad', [
((2, 3, 4), (1,), 1, 0),
((1, 3, 4), (2,), 3, 2),
((2, 3, 4, 4), (3, 3), 1, 0),
((2, 3, 4, 4), (3, 3), None, 0),
((1, 3, 4, 4), (3, 3), (1, 2), 1),
((1, 3, 4, 4), (3, 3), 2, (2, 0)),
((1, 3, 2, 6, 3), (1, 3, 2), 2, (2, 0, 1)),
((1, 3, 2, 6, 3), (1, 3, 2), (1, 2, 3), (2, 0, 1)),
((2, 3, 2, 6, 3), (1, 3, 2), (1, 2, 3), (2, 0, 1)),
((1, 3, 2, 6, 3, 2), (1, 3, 1, 1), 1, 1),
])
@chainer.testing.parameterize_pytest('pad_mode', ['zero', 'ignore', None])
# Ignore the warning that occurs in Chainer when pad_value is None.
@pytest.mark.filterwarnings('ignore:invalid value encountered in true_divide')
class TestAveragePool(op_utils.ChainerOpTest):
def setup(self, float_dtype):
dtype = float_dtype
ksize = self.ksize
device = chainerx.get_default_device()
if (device.backend.name == 'cuda'
and len(ksize) != 2
and len(ksize) != 3):
raise unittest.SkipTest(
'cuDNN supports only 2 and 3 spatial dimensions.')
# TODO(niboshi): average_pool can return nan if pad_mode is 'ignore',
# and numeric gradients cannot be calculated.
# If chainerx.where is implemented, we can replace nans and remove
# this skip.
if self.pad_mode in ('ignore', None):
self.skip_backward_test = True
self.skip_double_backward_test = True
self.check_double_backward_options.update({'rtol': 5e-3, 'atol': 5e-3})
if dtype == 'float16':
self.check_forward_options.update({'rtol': 5e-3, 'atol': 5e-4})
self.check_backward_options.update({'rtol': 5e-2, 'atol': 5e-3})
else:
self.check_backward_options.update({'rtol': 5e-3, 'atol': 5e-3, })
self.dtype = dtype
def generate_inputs(self):
x_shape = self.x_shape
dtype = self.dtype
x = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
return x,
def forward_chainerx(self, inputs):
x, = inputs
pad_mode_kwargs = _get_pad_mode_kwargs(self.pad_mode, True)
y = chainerx.average_pool(
x, ksize=self.ksize, stride=self.stride, pad=self.pad,
**pad_mode_kwargs)
return y,
def forward_chainer(self, inputs):
x, = inputs
pad_value_kwargs = _get_pad_mode_kwargs(self.pad_mode, False)
y = chainer.functions.average_pooling_nd(
x, ksize=self.ksize, stride=self.stride, pad=self.pad,
**pad_value_kwargs)
return y,
@pytest.mark.parametrize('x_shape,ksize,stride,pad', [
((1, 3), (), 1, 0), # Requires at least one spatial dimension
((2, 3, 4, 3), (2, 2, 1), 3, 2), # Wrong number of ksize.
((2, 3, 4, 3), (2, 2), (1,), 0), # Wrong number of strides.
((1, 3, 4, 3), (2, 2), 3, (2,)), # Wrong number of paddings.
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('pad_mode', ['zero', 'ignore', None])
def test_average_pool_invalid(
device, x_shape, ksize, stride, pad, pad_mode, float_dtype):
x = numpy.random.uniform(-1, 1, x_shape).astype(float_dtype)
x = chainerx.array(x)
pad_mode_kwargs = _get_pad_mode_kwargs(pad_mode, True)
with pytest.raises(chainerx.DimensionError):
chainerx.average_pool(
x, ksize=ksize, stride=stride, pad=pad, **pad_mode_kwargs)
| 7,499 | 35.585366 | 79 | py |
| chainer | chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_sorting.py |
import unittest
import chainer
import numpy
import chainerx.testing
from chainerx_tests import op_utils
_min_max_single_axis_params = [
# input, axis
# valid params
(numpy.asarray(0), None),
(numpy.asarray(-1), None),
(numpy.asarray(float('inf')), None),
(numpy.asarray(float('nan')), None),
(numpy.asarray(-float('inf')), None),
(numpy.asarray([4, 1, 4, 1]), None),
(numpy.asarray([4, 1, 4, 1]), 0),
(numpy.asarray([[4, 4, 1, 1], [4, 1, 4, 1]]), 0),
(numpy.asarray([[4, 4, 1, 1], [4, 1, 4, 1]]).T, 1),
(numpy.asarray([-2, -3, -1]), 0),
(numpy.asarray([-0.0, +0.0, +0.0, -0.0]), None),
(numpy.asarray([[True, True, False, False],
[True, False, True, False]]), 0),
(numpy.ones((2, 0, 3)), 2),
(numpy.ones((2, 3)), 1),
(numpy.ones((2, 3)), -2),
# invalid params
(numpy.ones((0,)), None),
(numpy.ones((2, 0, 3)), 1),
(numpy.ones((2, 0, 3)), None),
(numpy.ones((2, 3)), 2),
(numpy.ones((2, 3)), -3),
]
_count_nonzero_params = [
# input, axis
# valid params
(numpy.asarray(0), None),
(numpy.asarray(-1), None),
(numpy.asarray(float('inf')), None),
(numpy.asarray(float('nan')), None),
(numpy.asarray(-float('inf')), None),
(numpy.asarray([4, 0, 0, 0]), None),
(numpy.asarray([0, 0, 0, 0]), 0),
(numpy.asarray([[4, 0, 0, 1], [0, 0, 4, 1]]), 0),
(numpy.asarray([[4, 4, 1, 1], [4, 1, 4, 1]]).T, 1),
(numpy.asarray([-2, -3, 0]), 0),
(numpy.asarray([-0.0, +0.0, +0.0, -0.0]), None),
(numpy.asarray([[True, 0, False, False],
[True, 0, True, False]]), 0),
(numpy.ones((2, 0, 3)), 2),
(numpy.ones((2, 3)), 1),
(numpy.ones((2, 3)), -2),
# invalid params
(numpy.ones((0,)), None),
(numpy.ones((2, 0, 3)), 1),
(numpy.ones((2, 0, 3)), None),
(numpy.ones((2, 3)), 2),
(numpy.ones((2, 3)), -3),
# tuple axis
(numpy.asarray([[4, 0, 0, 1], [0, 0, 4, 1]]), (0, 1)),
(numpy.asarray([[4, 0, 0, 0], [0, 0, 4, 1]]), (0,)),
(numpy.ones((2, 3)), (1, 0)),
(numpy.ones((2, 3, 4)), (2, 0)),
]
_nan_min_max_single_axis_params = [
# input, axis
# valid params
(numpy.asarray(0), None),
(numpy.asarray(-1), None),
(numpy.asarray(float('inf')), None),
(numpy.asarray(-float('inf')), None),
(numpy.asarray([4, 1, 4, numpy.nan]), None),
(numpy.asarray([numpy.nan, 1, numpy.nan, 1]), 0),
(numpy.asarray([[4, 4, 1, 1], [4, 1, 4, 1]]), 0),
(numpy.asarray([[4, 4, 1, 1], [numpy.nan, numpy.nan, numpy.nan, 1]]).T, 1),
(numpy.asarray([-2, -3, -1]), 0),
(numpy.asarray([-0.0, +0.0, +0.0, -0.0]), None),
(numpy.asarray([[True, True, False, False],
[True, False, True, numpy.nan]]), 0),
(numpy.ones((2, 0, 3)), 2),
(numpy.ones((2, 3)), 1),
(numpy.ones((2, 3)), -2),
# invalid params
(numpy.ones((0,)), None),
(numpy.ones((2, 0, 3)), 1),
(numpy.ones((2, 0, 3)), None),
(numpy.ones((2, 3)), 2),
(numpy.ones((2, 3)), -3),
]
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('input,axis', _min_max_single_axis_params)
@chainer.testing.parameterize_pytest('is_module', [True, False])
class TestArgmax(op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
forward_accept_errors = (ValueError, chainerx.DimensionError)
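# The "invalid params" cases above raise; forward_accept_errors makes the
# test pass when the forward computation raises one of these errors.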
def setup(self, dtype):
try:
a_np = self.input.astype(dtype)
except (ValueError, OverflowError):
raise unittest.SkipTest('invalid combination of data and dtype')
self.a_np = a_np
def generate_inputs(self):
return self.a_np,
def forward_xp(self, inputs, xp):
a, = inputs
axis = self.axis
if self.is_module:
b = xp.argmax(a, axis)
else:
b = a.argmax(axis)
return b,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('input,axis', _min_max_single_axis_params)
@chainer.testing.parameterize_pytest('is_module', [True, False])
class TestArgmin(op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
forward_accept_errors = (ValueError, chainerx.DimensionError)
def setup(self, dtype):
try:
a_np = self.input.astype(dtype)
except (ValueError, OverflowError):
raise unittest.SkipTest('invalid combination of data and dtype')
self.a_np = a_np
def generate_inputs(self):
return self.a_np,
def forward_xp(self, inputs, xp):
a, = inputs
axis = self.axis
if self.is_module:
b = xp.argmin(a, axis)
else:
b = a.argmin(axis)
return b,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('input,axis', _count_nonzero_params)
class TestCountNonzero(op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
skip_backward_test = True
skip_double_backward_test = True
forward_accept_errors = (ValueError, chainerx.DimensionError)
def setup(self, dtype):
try:
a_np = self.input.astype(dtype)
except (ValueError, OverflowError):
raise unittest.SkipTest('invalid combination of data and dtype')
self.a_np = a_np
def generate_inputs(self):
return self.a_np,
def forward_xp(self, inputs, xp):
a, = inputs
axis = self.axis
b = xp.count_nonzero(a, axis)
return b,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('input,axis',
_nan_min_max_single_axis_params)
class TestNanArgmax(op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
forward_accept_errors = (ValueError, chainerx.DimensionError)
def setup(self, dtype):
try:
a_np = self.input.astype(dtype)
except (ValueError, OverflowError):
raise unittest.SkipTest('invalid combination of data and dtype')
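# Force a float dtype so the NaN entries in the parameter list are
# representable.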
self.a_np = a_np.astype(numpy.float32)
def generate_inputs(self):
return self.a_np,
def forward_xp(self, inputs, xp):
a, = inputs
axis = self.axis
b = xp.nanargmax(a, axis)
return b,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('input,axis',
_nan_min_max_single_axis_params)
class TestNanArgmin(op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
forward_accept_errors = (ValueError, chainerx.DimensionError)
def setup(self, dtype):
try:
a_np = self.input.astype(dtype)
except (ValueError, OverflowError):
raise unittest.SkipTest('invalid combination of data and dtype')
self.a_np = a_np.astype(numpy.float32)
def generate_inputs(self):
return self.a_np,
def forward_xp(self, inputs, xp):
a, = inputs
axis = self.axis
b = xp.nanargmin(a, axis)
return b,
| 7,116 | 28.903361 | 79 | py |
| chainer | chainer-master/tests/chainerx_tests/unit_tests/routines_tests/test_trigonometric_hyperbolic.py |
import chainer
import chainerx
import chainerx.testing
from chainerx_tests import dtype_utils
from chainerx_tests import math_utils
from chainerx_tests import op_utils
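# Shared parameters for the trigonometric/hyperbolic tests below: the first
# product covers differentiable inputs over assorted shapes; the second covers
# values near singularities and nonfinite inputs, with backward tests skipped.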
_trigonometric_hyperbolic_params = (
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [-2, 0, 2],
'contiguous': [None, 'C'],
}) + chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [1.57, 2, 3.14, float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
)
def _make_inverse_trig_params(name):
# Makes test parameters for inverse trigonometric functions
inverse_trig_differentiable_inputs = {
'arcsin': [-0.9, 0, 0.9],
'arccos': [-0.9, 0, 0.9],
'arctan': [-3, -0.2, 0, 0.2, 3],
'arcsinh': [-3, -0.2, 0, 0.2, 3],
'arccosh': [1.2, 3],
'arctanh': [-0.9, 0, 0.9],
}
inverse_trig_nondifferentiable_inputs = {
'arcsin': [-3, -1, 1, 3],
'arccos': [-3, -1, 1, 3],
'arctan': [],
'arcsinh': [],
'arccosh': [-3, 0, 0.2, 1],
'arctanh': [-3, -1, 1, 3],
}
nonfinite_numbers = [float('inf'), -float('inf'), float('nan')]
return (
# Various shapes and differentiable inputs
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': inverse_trig_differentiable_inputs[name],
'contiguous': [None, 'C'],
})
+
# Nondifferentiable inputs
chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': (
math_utils.in_out_float_dtypes_math_functions),
'input': (
inverse_trig_nondifferentiable_inputs[name]
+ nonfinite_numbers),
'skip_backward_test': [True],
'skip_double_backward_test': [True],
}))
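# Illustrative note (not in the original file): each generated entry is a
# plain parameter dict, e.g. the first differentiable arcsin case looks
# roughly like
#     {'shape': (), 'in_dtypes': ..., 'out_dtype': ...,
#      'input': -0.9, 'contiguous': None}
# with the exact dtype fields supplied by math_utils.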
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
_trigonometric_hyperbolic_params
))
class TestSin(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.sin(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
_trigonometric_hyperbolic_params
))
class TestCos(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.cos(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
_trigonometric_hyperbolic_params
))
class TestTan(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
check_backward_options = {'atol': 3e-5}
def func(self, xp, a):
return xp.tan(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
_make_inverse_trig_params('arcsin')
))
class TestArcsin(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.arcsin(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
_make_inverse_trig_params('arccos')
))
class TestArccos(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.arccos(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
_make_inverse_trig_params('arctan')
))
class TestArctan(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.arctan(a)
# The gradient of arctan2 is quite flaky for small values,
# especially with `float16`.
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': [1],
'input_rhs': [2],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
# Differentiable points
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': [-3, -0.75, 0.75, 3],
'input_rhs': [-3, -0.75, 0.75, 3],
})
# Mixed dtypes
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_binary_functions,
'input_lhs': [-1.],
'input_rhs': [-1.],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan'),
+0.0, -0.0],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan'),
+0.0, -0.0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestArctan2(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
return xp.arctan2(a, b)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
_trigonometric_hyperbolic_params
))
class TestSinh(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.sinh(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
_trigonometric_hyperbolic_params
))
class TestCosh(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.cosh(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
_trigonometric_hyperbolic_params
))
class TestTanh(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.tanh(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
_make_inverse_trig_params('arcsinh')
))
class TestArcsinh(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.arcsinh(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
_make_inverse_trig_params('arccosh')
))
class TestArccosh(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.arccosh(a)
| 6,751 | 27.854701 | 78 | py |
| chainer | chainer-master/tests/onnx_chainer_tests/conftest.py |
import chainer
import onnx
import pytest
import onnx_chainer
def pytest_addoption(parser):
parser.addoption(
'--value-check-runtime',
dest='value-check-runtime', default='onnxruntime',
choices=['skip', 'onnxruntime', 'mxnet'], help='select test runtime')
parser.addoption(
'--opset-versions', dest='opset-versions', default=None,
help='select opset versions, select from "min", "latest", '
'or a list of numbers like "9,10"')
@pytest.fixture(scope='function')
def disable_experimental_warning():
org_config = chainer.disable_experimental_feature_warning
chainer.disable_experimental_feature_warning = True
try:
yield
finally:
chainer.disable_experimental_feature_warning = org_config
@pytest.fixture(scope='function')
def check_model_expect(request):
selected_runtime = request.config.getoption('value-check-runtime')
if selected_runtime == 'onnxruntime':
from onnx_chainer.testing.test_onnxruntime import check_model_expect # NOQA
_checker = check_model_expect
elif selected_runtime == 'mxnet':
from onnx_chainer.testing.test_mxnet import check_model_expect
_checker = check_model_expect
else:
def empty_func(*args, **kwargs):
pass
_checker = empty_func
return _checker
@pytest.fixture(scope='function')
def target_opsets(request):
opsets = request.config.getoption('opset-versions')
min_version = onnx_chainer.MINIMUM_OPSET_VERSION
max_version = min(
onnx.defs.onnx_opset_version(), onnx_chainer.MAXIMUM_OPSET_VERSION)
if opsets is None:
return list(range(min_version, max_version + 1))
elif opsets == 'min':
return [min_version]
elif opsets == 'latest':
return [max_version]
else:
try:
versions = [int(i) for i in opsets.split(',')]
except ValueError:
raise ValueError('cannot convert {} to versions list'.format(
opsets))
return versions
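# Illustrative invocations: `pytest --opset-versions=min` tests only the
# minimum supported opset, and `pytest --opset-versions=9,10` tests
# opsets 9 and 10.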
| 2,049 | 31.03125 | 84 | py |
| chainer | chainer-master/tests/onnx_chainer_tests/test_external_converter.py |
import os
import chainer
from chainer import testing
import numpy as np
import onnx
import pytest
from onnx_chainer import export_testcase
from onnx_chainer import onnx_helper
from onnx_chainer.testing import input_generator
def test_export_external_converters_overwrite(tmpdir, check_model_expect):
path = str(tmpdir)
model = chainer.Sequential(chainer.functions.sigmoid)
x = input_generator.positive_increasing(2, 5)
def custom_converter(params):
return onnx_helper.make_node(
'Tanh', params.input_names, params.output_names),
addon_converters = {'Sigmoid': custom_converter}
export_testcase(model, x, path, external_converters=addon_converters)
tanh_outputs = chainer.functions.tanh(x).array
output_path = os.path.join(path, 'test_data_set_0', 'output_0.pb')
onnx_helper.write_tensor_pb(output_path, '', tanh_outputs) # overwrite
check_model_expect(path)
@pytest.mark.parametrize('domain,version', [(None, 0), ('domain', 0)])
def test_export_external_converters_custom_op(tmpdir, domain, version):
path = str(tmpdir)
class Dummy(chainer.FunctionNode):
def forward_cpu(self, inputs):
self.x = inputs[0]
return np.ones_like(inputs[0]),
def backward(self, indexes, grad_outputs):
return chainer.Variable(np.zeros_like(self.x)),
def dummy_function(x):
return Dummy().apply((x,))[0]
model = chainer.Sequential(dummy_function)
x = input_generator.increasing(2, 5)
def custom_converter(params):
return onnx_helper.make_node(
'Dummy', params.input_names, params.output_names, domain=domain),
addon_converters = {'Dummy': custom_converter}
external_opset_imports = {}
is_set_domain = domain is not None
if is_set_domain:
external_opset_imports[domain] = version
if is_set_domain and onnx_helper.is_support_non_standard_domain():
export_testcase(
model, x, path, external_converters=addon_converters,
external_opset_imports=external_opset_imports)
else:
with testing.assert_warns(UserWarning):
export_testcase(
model, x, path, external_converters=addon_converters,
external_opset_imports=external_opset_imports)
output_path = os.path.join(path, 'test_data_set_0', 'output_0.pb')
assert os.path.isfile(output_path)
output = onnx.numpy_helper.to_array(onnx.load_tensor(output_path))
expected_output = np.ones_like(x)
np.testing.assert_allclose(output, expected_output, rtol=1e-5, atol=1e-5)
| 2,603 | 32.384615 | 77 | py |
| chainer | chainer-master/tests/onnx_chainer_tests/test_inout.py |
import unittest
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import testing
import numpy as np
import pytest
from onnx_chainer import export
from onnx_chainer.export import RetainInputHook
from onnx_chainer.testing import input_generator
from onnx_chainer_tests.helper import ONNXModelTest
@testing.parameterize(
{'condition': 'tuple'},
{'condition': 'tuple_with_name', 'input_names': ['x', 'y', 'z']},
{'condition': 'list', 'in_type': 'list'},
{'condition': 'list_with_names', 'in_type': 'list',
'input_names': ['x', 'y', 'z']},
{'condition': 'var', 'in_type': 'variable'},
{'condition': 'var_with_names', 'in_type': 'variable',
'input_names': ['x', 'y', 'z']},
{'condition': 'varlist', 'in_type': 'variable_list'},
{'condition': 'varlist_with_names', 'in_type': 'variable_list',
'input_names': ['x', 'y', 'z']},
{'condition': 'dict', 'in_type': 'dict'},
{'condition': 'dict_with_names', 'in_type': 'dict',
'input_names': {'x': 'in_x', 'y': 'in_y', 'z': 'in_z'}},
{'condition': 'dict_with_name_list', 'in_type': 'dict',
'input_names': ['x', 'y', 'z']},
{'condition': 'vardict', 'in_type': 'variable_dict'},
{'condition': 'vardict_with_names', 'in_type': 'variable_dict',
'input_names': {'x': 'in_x', 'y': 'in_y', 'z': 'in_z'}},
)
class TestMultipleInputs(ONNXModelTest):
def get_model(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
with self.init_scope():
self.prelu = L.PReLU()
def __call__(self, x, y, z):
return F.relu(x) + self.prelu(y) * z
return Model()
def get_x(self, in_type=None):
base_x = (input_generator.increasing(1, 5),
input_generator.increasing(1, 5)*1.1,
input_generator.increasing(1, 5)*1.2)
names = ['x', 'y', 'z']
if in_type is None:
return base_x
elif in_type == 'list':
return list(base_x)
elif in_type == 'variable':
return tuple(chainer.Variable(v) for v in base_x)
elif in_type == 'variable_list':
return [chainer.Variable(v) for v in base_x]
elif in_type == 'dict':
return {names[i]: v for i, v in enumerate(base_x)}
elif in_type == 'variable_dict':
return {names[i]: chainer.Variable(v)
for i, v in enumerate(base_x)}
def test_multiple_inputs(self):
model = self.get_model()
x = self.get_x(getattr(self, 'in_type', None))
name = 'multipleinputs_' + self.condition
input_names = getattr(self, 'input_names', None)
self.expect(model, x, name=name, input_names=input_names)
class TestImplicitInput(ONNXModelTest):
def test_implicit_param(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
self.frac = chainer.Parameter(np.array(2, dtype=np.float32))
def forward(self, x):
return x / self.frac
x = chainer.Variable(np.array(1, dtype=np.float32))
self.expect(Model(), x, name='implicit_param')
def test_implicit_param_ndarray(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
self.frac = np.array(2, dtype=np.float32)
def forward(self, x):
return x / self.frac
x = chainer.Variable(np.array(1, dtype=np.float32))
self.expect(Model(), x, name='implicit_param_ndarray')
def test_implicit_temporary_input(self):
class Model(chainer.Chain):
def forward(self, x):
return x + chainer.Variable(np.array(3, dtype=np.float32))
x = np.array(5, dtype=np.float32)
self.expect(Model(), x, name='implicit_temp_input')
def test_implicit_temporary_input_ndarray(self):
class Model(chainer.Chain):
def forward(self, x):
return x + np.array(3, dtype=np.float32)
x = np.array(5, dtype=np.float32)
self.expect(Model(), x, name='implicit_temp_input_ndarray')
class TestRetainInputHook(object):
def get_x(self, test_type):
if test_type == 'list':
return [
chainer.Variable(np.array(3, dtype=np.float32)),
chainer.Variable(np.array(5, dtype=np.float32))]
elif test_type == 'dict':
return {'x': chainer.Variable(np.array(3, dtype=np.float32))}
elif test_type == 'array':
return np.array(3, dtype=np.float32)
else:
assert test_type == 'variable'
return chainer.Variable(np.array(3, dtype=np.float32))
@pytest.mark.parametrize(
'test_type', ['variable', 'list', 'dict', 'array'])
def test_hook_for_funcnode(self, test_type):
class Model(chainer.Chain):
def forward(self, x):
if test_type in ['variable', 'array']:
x = [chainer.as_variable(x)]
elif test_type == 'dict':
x = list(x.values())
x.append(chainer.Variable(np.array(7, np.float32)))
return F.stack(x)
model = Model()
x = self.get_x(test_type)
with RetainInputHook() as h:
model(x)
expected_count = 1
if test_type == 'array':
# The input is an ndarray and is not checked in forward_preprocess,
# so one more input is retained.
expected_count += 1
assert len(h.retain_inputs) == expected_count
@pytest.mark.parametrize('test_type', ['array'])
def test_hook_for_childlink(self, test_type):
# TODO(disktnk): test_type='variable' is failed
class ChildModel(chainer.Chain):
def forward(self, x, h):
if test_type in ['variable', 'array']:
h = [chainer.as_variable(h)]
elif test_type == 'dict':
h = list(h.values())
h.append(x)
return F.stack(h)
class ParentModel(chainer.Chain):
def __init__(self, get_x):
super().__init__()
self.get_x = get_x
with self.init_scope():
self.m = ChildModel()
def forward(self, x):
h = self.get_x(test_type)
return self.m(x, h)
model = ParentModel(self.get_x)
x = self.get_x('variable')
with RetainInputHook() as h:
model(x)
assert len(h.retain_inputs) == 1
@testing.parameterize(
{'use_bn': True, 'out_type': 'dict', 'condition': 'bn_out_dict'},
{'use_bn': False, 'out_type': 'dict', 'condition': 'out_dict'},
{'use_bn': True, 'out_type': 'dict', 'condition': 'bn_out_dict_with_name',
'output_names': {'tanh': 'out_tanh', 'sigmoid': 'out_sigmoid'}},
{'use_bn': True, 'out_type': 'dict',
'condition': 'bn_out_dict_with_name_list',
'output_names': ('out_tanh', 'out_sigmoid')},
{'use_bn': True, 'out_type': 'tuple', 'condition': 'bn_out_tuple'},
{'use_bn': True, 'out_type': 'tuple',
'condition': 'bn_out_tuple_with_name',
'output_names': ['out_tanh', 'out_sigmoid']},
{'use_bn': True, 'out_type': 'list', 'condition': 'bn_out_list'},
{'use_bn': True, 'out_type': 'list', 'condition': 'bn_out_list_with_name',
'output_names': ['out_tanh', 'out_sigmoid']},
)
class TestMultipleOutput(ONNXModelTest):
def get_model(self, use_bn=False, out_type=None):
class Model(chainer.Chain):
def __init__(self, use_bn=False, out_type=None):
super(Model, self).__init__()
self._use_bn = use_bn
self._out_type = out_type
with self.init_scope():
self.conv = L.Convolution2D(None, 32, ksize=3, stride=1)
if self._use_bn:
self.bn = L.BatchNormalization(32)
def __call__(self, x):
h = self.conv(x)
if self._use_bn:
h = self.bn(h)
o1 = F.tanh(h)
o2 = F.sigmoid(h)
if self._out_type == 'dict':
return {
'tanh': o1,
'sigmoid': o2
}
elif self._out_type == 'tuple':
return o1, o2
elif self._out_type == 'list':
return [o1, o2]
return Model(use_bn=use_bn, out_type=out_type)
def test_multiple_outputs(self):
model = self.get_model(use_bn=self.use_bn, out_type=self.out_type)
x = np.zeros((1, 3, 32, 32), dtype=np.float32)
name = 'multipleoutput_' + self.condition
output_names = getattr(self, 'output_names', None)
self.expect(model, x, name=name, output_names=output_names)
class TestIntermediateOutput(ONNXModelTest):
def get_model(self):
class Model(chainer.Chain):
def __init__(self):
super().__init__()
with self.init_scope():
self.l1 = L.Linear(4)
self.l2 = L.Linear(5, initial_bias=0.1)
def __call__(self, x):
y = self.l1(x)
z = self.l2(y)
return y, z
return Model()
def test_outputs(self):
model = self.get_model()
x = np.ones((1, 3), dtype=np.float32)
self.expect(model, x, output_names=['y', 'z'])
@testing.parameterize(
{'out_kind': 'var'},
{'out_kind': 'array'},
{'out_kind': 'array_in_tuple'},
{'out_kind': 'list_in_tuple'},
)
class TestOutputTypeCheck(unittest.TestCase):
def test_output_type_check(self):
class Model(chainer.Chain):
def __init__(self, out_kind):
super().__init__()
self.out_kind = out_kind
def __call__(self, x):
if self.out_kind == 'array':
return x.array
elif self.out_kind == 'array_in_tuple':
return x, x.array
elif self.out_kind == 'list_in_tuple':
return ([x]),
else:
assert self.out_kind == 'var'
return x
model = Model(self.out_kind)
x = np.ones((1, 3, 4, 5), dtype=np.float32)
if self.out_kind == 'var':
export(model, (x,)) # should be no error
elif self.out_kind == 'array':
with self.assertRaises(RuntimeError) as e:
export(model, (x,))
assert 'Unexpected output type' in e.exception.args[0]
else:
with self.assertRaises(ValueError) as e:
export(model, (x,))
assert 'must be Chainer Variable' in e.exception.args[0]
class TestUnusedLink(ONNXModelTest):
# When some links are under the init scope but not used in forwarding,
# their params are never initialized. This means the exporter cannot
# convert them to ONNX tensors because shape and other info are missing.
def test_outputs(self):
class MLP(chainer.Chain):
def __init__(self, n_units, n_out):
super(MLP, self).__init__()
with self.init_scope():
self.l1 = L.Linear(None, n_units)
self.l2 = L.Linear(None, n_units)
self.l3 = L.Linear(None, n_out)
def __call__(self, x):
h1 = F.relu(self.l1(x))
# Intentionally left unused, so its params are not initialized.
# h2 = F.relu(self.l2(h1))
return self.l3(h1)
model = MLP(100, 10)
x = np.random.rand(1, 768).astype(np.float32)
with testing.assert_warns(UserWarning):
self.expect(model, x)
@testing.parameterize(
{
'x_shape': (10, 3, 28, 28), 'shape_option': ('b', 3, 28, 28),
},
{
'x_shape': (10, 3, 28, 28),
'shape_option': [('b', 3, 28, 28)],
'condition': 'var_list'
},
{
'x_shape': [(10, 3, 28, 28), (8, 3, 28, 28)],
'shape_option': [('b', 3, 28, 28), ('b', 3, 28, 28)],
'condition': 'list_list'
},
{
'x_shape': {'1': (10, 3, 28, 28), '2': (8, 3, 28, 28)},
'shape_option': {'2': ('b', 3, 28, 28), '1': ('b', 3, 28, 28)},
'condition': 'dict_dict'
},
{
'x_shape': {'1': (10, 3, 28, 28), '2': (8, 3, 28, 28)},
'shape_option': [('b', 3, 28, 28), ('b', 3, 28, 28)],
'condition': 'dict_list'
},
)
class TestCustomizedInputShape(ONNXModelTest):
def test_output(self):
class Model(chainer.Chain):
def __init__(self):
super().__init__()
with self.init_scope():
self.l1 = L.Convolution2D(None, 16, 5, 1, 2)
self.l2 = L.Convolution2D(16, 8, 5, 1, 2)
def forward(self, *xs, **kwxs):
if kwxs:
h = F.vstack(list(kwxs.values()))
elif len(xs) > 1:
h = F.vstack(xs)
else:
h = xs[0]
h2 = self.l1(h)
h3 = F.relu(h2)
h4 = self.l2(h3)
return F.relu(h4)
def check_input_shape(onnx_model, path):
# Every graph input and output should carry the symbolic batch dim 'b'.
assert all(v.type.tensor_type.shape.dim[0].dim_param == 'b'
for v in onnx_model.graph.input)
assert all(v.type.tensor_type.shape.dim[0].dim_param == 'b'
for v in onnx_model.graph.output)
if isinstance(self.x_shape, tuple):
xs = np.zeros(self.x_shape, dtype=np.float32)
elif isinstance(self.x_shape, list):
xs = tuple(
np.zeros(shape, dtype=np.float32) for shape in self.x_shape)
else:
assert isinstance(self.x_shape, dict)
xs = {k: np.zeros(shape, dtype=np.float32) for
k, shape in self.x_shape.items()}
name = 'customized_input_shape'
if hasattr(self, 'condition'):
name += '_{}'.format(self.condition)
self.expect(
Model(), xs, name=name, input_shapes=self.shape_option,
custom_model_test_func=check_input_shape)
@pytest.mark.parametrize('x_shape,shape_option', [
((10, 5), '?'), # not tuple
((10, 5), ('?', 5, 5)), # shape length error
((10, 5), [('?', 5), ('?', 5)]), # not single
([(10, 5), (10, 5)], [('?', 5), ('?', 5), ('?', 5)]), # list length error
([(10, 5), (10, 5)], [('?', 5), ('?', 5, 5)]), # shape length error
({'a': (10, 5), 'b': (10, 5)}, {'a': ('?', 5), 'c': ('?', 5)}), # NOQA not key found
({'a': (10, 5), 'b': (10, 5)}, [('?', 5), ('?', 5), ('?', 5)]), # NOQA list length error
({'a': (10, 5), 'b': (10, 5)}, {'a': ('?', 5), 'b': ('?', 5, 5)}), # NOQA shape length error
])
def test_invalid_customized_input_shape(x_shape, shape_option):
model = chainer.Sequential(F.relu)
if isinstance(x_shape, tuple):
xs = np.zeros(x_shape, dtype=np.float32)
elif isinstance(x_shape, list):
xs = tuple(
np.zeros(shape, dtype=np.float32) for shape in x_shape)
else:
assert isinstance(x_shape, dict)
xs = {k: np.zeros(shape, dtype=np.float32) for
k, shape in x_shape.items()}
with pytest.raises(ValueError):
export(model, xs, input_shapes=shape_option)
| 15,620 | 34.421769 | 93 | py |
| chainer | chainer-master/tests/onnx_chainer_tests/helper.py |
import glob
import os
import unittest
import chainer
import numpy as np
import onnx
import pytest
from onnx_chainer.testing.get_test_data_set import gen_test_data_set
def load_input_data(data_dir):
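# Load every input_*.pb tensor of the first test data set as a numpy
# array, sorted by file name so the order matches the model's inputs.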
input_data = []
for pb in sorted(glob.glob(os.path.join(
data_dir, 'test_data_set_0', 'input_*.pb'))):
tensor = onnx.load_tensor(pb)
ndarray = onnx.numpy_helper.to_array(tensor)
input_data.append(ndarray)
return input_data
class ONNXModelChecker(object):
"""Base class to check outputs.
Some configurations are set by fixture, for example, target opset versions,
output directory name, and so on.
Example:
>>> class TestForSomething(ONNXModelChecker):
... def test_output(self):
... model, x = self.setup() # setup for target test
... self.expect(model, x) # to check outputs
This class supports ``pytest.mark.parametrize``.
Example:
>>> class TestForSomething(ONNXModelChecker):
... @pytest.mark.parametrize('param', [True,False])
... def test_output(self, param):
... model, x = self.setup(param) # use a test case parameter
... self.expect(model, x)
This class is **not** a subclass of ``unittest.TestCase``, so it does not
support ``chainer.testing.parameterize``. If a test requires it, see the
``ONNXModelTest`` class.
"""
@pytest.fixture(autouse=True)
def set_config(self, disable_experimental_warning, target_opsets):
self.target_opsets = target_opsets
@pytest.fixture(autouse=True, scope='function')
def set_name(self, request, check_model_expect):
cls_name = request.cls.__name__
self.default_name = cls_name[len('Test'):].lower()
self.check_out_values = check_model_expect
def expect(self, model, args, name=None, skip_opset_version=None,
skip_outvalue_version=None, custom_model_test_func=None,
expected_num_initializers=None, **kwargs):
"""Compare model output and test runtime output.
Make an ONNX model from the target model with ``args`` and write it to the
output directory. Then the test runtime loads the model and compares outputs.
Arguments:
model (~chainer.Chain): The target model.
args (list or dict): Arguments of the target model.
name (str): Name of the test. Defaults to the class name.
skip_opset_version (list): Opset versions for which the test is skipped.
skip_outvalue_version (list): Opset versions for which the output value
check is skipped.
custom_model_test_func (func): A function to check the generated model.
It is called before checking output values, with the ONNX model passed
as an argument.
expected_num_initializers (int): The expected number of
initializers in the output ONNX model.
**kwargs (dict): Keyword arguments for ``onnx_chainer.export``.
"""
test_name = name
if test_name is None:
test_name = self.default_name
for opset_version in self.target_opsets:
if skip_opset_version is not None and\
opset_version in skip_opset_version:
continue
dir_name = 'test_' + test_name
test_path = gen_test_data_set(
model, args, dir_name, opset_version, **kwargs)
onnx_model_path = os.path.join(test_path, 'model.onnx')
assert os.path.isfile(onnx_model_path)
with open(onnx_model_path, 'rb') as f:
onnx_model = onnx.load_model(f)
check_all_connected_from_inputs(onnx_model)
if expected_num_initializers is not None:
actual_num_initializers = len(onnx_model.graph.initializer)
assert expected_num_initializers == actual_num_initializers
graph_input_names = _get_graph_input_names(onnx_model)
if kwargs.get('input_names', {}):
input_names = kwargs['input_names']
if isinstance(input_names, dict):
expected_names = list(sorted(input_names.values()))
else:
expected_names = list(sorted(input_names))
assert list(sorted(graph_input_names)) == expected_names
if kwargs.get('output_names', {}):
output_names = kwargs['output_names']
if isinstance(output_names, dict):
expected_names = list(sorted(output_names.values()))
else:
expected_names = list(sorted(output_names))
graph_output_names = [v.name for v in onnx_model.graph.output]
assert list(sorted(graph_output_names)) == expected_names
# Input data is generated from the `network_inputs` dict, which can
# introduce unexpected conversions. Check the values in the input PB
# files against the test args.
if isinstance(args, (tuple, list)):
flat_args = args
elif isinstance(args, dict):
flat_args = args.values()
else:
flat_args = [args]
input_data = load_input_data(test_path)
assert len(input_data) == len(flat_args)
for i, arg in enumerate(flat_args):
array = arg.array if isinstance(arg, chainer.Variable) else arg
array = chainer.cuda.to_cpu(array)
np.testing.assert_allclose(
array, input_data[i], rtol=1e-5, atol=1e-5)
if custom_model_test_func is not None:
custom_model_test_func(onnx_model, test_path)
if skip_outvalue_version is not None and\
opset_version in skip_outvalue_version:
continue
# The export function can add unexpected inputs. Collect inputs from the
# ONNX model and compare them with the input list obtained from the test
# runtime.
if self.check_out_values is not None:
self.check_out_values(test_path, input_names=graph_input_names)
def to_gpu(self, model, x):
model = model.copy()
model.to_device('@cupy:0')
x = chainer.cuda.to_gpu(x)
return model, x
class ONNXModelTest(ONNXModelChecker, unittest.TestCase):
"""Base class to check outputs.
This class enables ``chainer.testing.parameterize``
Example:
>>> @chainer.testing.parameterize({'param': True},{'param': False})
... class TestForSomething(ONNXModelTest):
... def test_output(self):
... model, x = self.setup(self.param) # use a parameter
... self.expect(model, x)
"""
pass
def check_all_connected_from_inputs(onnx_model):
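# Treat initializers and graph inputs as known edges and walk graph.node
# once; a node none of whose inputs are known is an orphan. A single pass
# suffices because the ONNX IR requires nodes to be topologically sorted.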
edge_names = get_initializer_names(onnx_model) |\
_get_input_names(onnx_model)
# Nodes which are not connected from the network inputs.
orphan_nodes = []
for node in onnx_model.graph.node:
if not node.input:
for output_name in node.output:
edge_names.add(output_name)
continue
if not edge_names.intersection(node.input):
orphan_nodes.append(node)
for output_name in node.output:
edge_names.add(output_name)
assert not orphan_nodes, '{}'.format(orphan_nodes)
def get_initializer_names(onnx_model):
return {i.name for i in onnx_model.graph.initializer}
def _get_input_names(onnx_model):
return {i.name for i in onnx_model.graph.input}
def _get_graph_input_names(onnx_model):
return list(
_get_input_names(onnx_model) - get_initializer_names(onnx_model))
| 7,723 | 36.862745 | 79 | py |
| chainer | chainer-master/tests/onnx_chainer_tests/test_export_testcase.py |
import os
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
import onnx
import pytest
from onnx_chainer import export_testcase
from onnx_chainer import export
@pytest.fixture(scope='function')
def model():
return chainer.Sequential(
L.Convolution2D(None, 16, 5, 1, 2),
F.relu,
L.Convolution2D(16, 8, 5, 1, 2),
F.relu,
L.Convolution2D(8, 5, 5, 1, 2),
F.relu,
L.Linear(None, 100),
L.BatchNormalization(100),
F.relu,
L.Linear(100, 10)
)
@pytest.fixture(scope='function')
def x():
return np.zeros((10, 3, 28, 28), dtype=np.float32)
@pytest.mark.parametrize('in_names,out_names',
[(None, None), (['x'], ['y'])])
def test_export_testcase(
tmpdir, model, x, disable_experimental_warning, in_names, out_names):
# Just check the existence of pb files
path = str(tmpdir)
export_testcase(model, (x,), path,
input_names=in_names, output_names=out_names)
assert os.path.isfile(os.path.join(path, 'model.onnx'))
input_pb_path = os.path.join(path, 'test_data_set_0', 'input_0.pb')
assert os.path.isfile(input_pb_path)
input_tensor = onnx.load_tensor(input_pb_path)
assert input_tensor.name == (in_names[0] if in_names else 'Input_0')
output_pb_path = os.path.join(path, 'test_data_set_0', 'output_0.pb')
assert os.path.isfile(output_pb_path)
output_tensor = onnx.load_tensor(output_pb_path)
assert output_tensor.name == (
out_names[0] if out_names else 'LinearFunction_1')
@pytest.mark.parametrize('train', [True, False])
def test_output_grad(tmpdir, model, x, train, disable_experimental_warning):
path = str(tmpdir)
export_testcase(model, (x,), path, output_grad=True, train=train)
model_filename = os.path.join(path, 'model.onnx')
assert os.path.isfile(model_filename)
assert os.path.isfile(os.path.join(path, 'test_data_set_0', 'input_0.pb'))
assert os.path.isfile(os.path.join(path, 'test_data_set_0', 'output_0.pb'))
onnx_model = onnx.load(model_filename)
initializer_names = {i.name for i in onnx_model.graph.initializer}
# 12 gradient files should be there, one per initialized parameter
for i in range(12):
tensor_filename = os.path.join(
path, 'test_data_set_0', 'gradient_{}.pb'.format(i))
assert os.path.isfile(tensor_filename)
tensor = onnx.load_tensor(tensor_filename)
assert tensor.name.startswith('param_')
assert tensor.name in initializer_names
assert not os.path.isfile(
os.path.join(path, 'test_data_set_0', 'gradient_12.pb'))
def test_check_warning(tmpdir, model, x):
path = str(tmpdir)
with pytest.warns(None):
export_testcase(model, (x,), os.path.join(path, "with_testcase"))
with pytest.warns(None):
export(
model, (x,),
os.path.join(path, 'no_testcase.onnx'),
no_testcase=True)
with pytest.warns(DeprecationWarning):
export(model, (x,), os.path.join(path, 'model.onnx'))
| 3,087 | 32.565217 | 79 | py |
| chainer | chainer-master/tests/onnx_chainer_tests/test_replace_func.py |
import os
import warnings
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import testing
import numpy as np
import onnx
import pytest
from onnx_chainer import export
from onnx_chainer import export_testcase
from onnx_chainer import onnx_helper
from onnx_chainer.replace_func import as_funcnode
from onnx_chainer.replace_func import fake_as_funcnode
from onnx_chainer.testing import input_generator
from onnx_chainer_tests.helper import ONNXModelChecker
from onnx_chainer_tests.helper import ONNXModelTest
def test_fake_as_funcnode_without_replace():
class Model(chainer.Chain):
def __init__(self):
super().__init__()
def add(self, xs, value=0.01):
return xs.array + value
def __call__(self, xs):
return F.sigmoid(self.add(xs))
model = Model()
x = input_generator.increasing(3, 4)
onnx_model = export(model, x)
sigmoid_nodes = [
node for node in onnx_model.graph.node if node.op_type == 'Sigmoid']
assert len(sigmoid_nodes) == 1
# The Sigmoid node would be expected to connect to the input, but the
# connection is cut because the `add` method takes a raw array.
assert not sigmoid_nodes[0].input[0] == 'Input_0'
class TestReplaceNumpyFullToConstantOfShape(ONNXModelTest):
# This test case is a real-world example, to handle np.full
def test_output(self):
class Model(chainer.Chain):
def __init__(self, value):
super().__init__()
self.value = value
@as_funcnode('NumpyFull')
def full(self, xs, value=0):
# `def full(self, xs_shape, value=0)` is not supported; a wrapped
# function node cannot handle a shape directly yet.
return np.full(xs.array.shape, value, dtype=np.float32)
def __call__(self, xs):
return F.sigmoid(self.full(xs, value=self.value))
model = Model(value=5)
x = input_generator.increasing(2, 3, 4)
def numpy_full_converter(params):
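# Lower the fake NumpyFull node to standard ONNX ops: Shape obtains the
# input's runtime shape, and ConstantOfShape fills a tensor of that
# shape with the given value.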
gb = onnx_helper.GraphBuilder()
output = gb.op('Shape', params.input_names)
value = onnx.helper.make_tensor(
'value', onnx.TensorProto.FLOAT, [1], [params.func.value])
gb.op_output_named(
'ConstantOfShape', [output], params.output_names, value=value)
return gb.nodes()
addon_converters = {'NumpyFull': numpy_full_converter}
self.expect(
model, x, skip_opset_version=[7, 8],
external_converters=addon_converters)
class TestReplaceWithOutputGrad(ONNXModelChecker):
def get_model(self):
class Model(chainer.Chain):
def __init__(self):
super().__init__()
with self.init_scope():
self.l = L.Linear(None, 2)
def half(self, xs, value=0.5):
return xs * value
def forward(self, xs):
h = self.l(xs)
h = self.half(h)
return F.sum(chainer.as_variable(h))
return Model()
def test_grad_error(self):
model = self.get_model()
# This alternative function does not return a chainer.Variable, so
# backward propagation will fail.
model.half = fake_as_funcnode(
lambda xs, value=0.5: xs.array * value, 'MulConstant')
x = input_generator.increasing(2, 5)
with pytest.raises(ValueError):
self.expect(model, x, output_grad=True)
def test_output(self, tmpdir):
# first, make expected gradients to temp directory
expected_result_path = str(tmpdir)
model = self.get_model()
x = input_generator.increasing(2, 5)
export_testcase(model, x, expected_result_path, output_grad=True)
data_set_name = 'test_data_set_0'
expected_gradients = [os.path.join(
expected_result_path, data_set_name, 'gradient_{}.pb').format(i)
for i in range(2)]
assert all([os.path.isfile(path) for path in expected_gradients])
# model.half returns a chainer.Variable, so backward stays enabled
# regardless of the replacement.
model.half = fake_as_funcnode(model.half, 'MulConstant')
x = input_generator.increasing(2, 5)
def gradient_check(model, path):
actual_gradients = [os.path.join(
path, data_set_name, 'gradient_{}.pb').format(i)
for i in range(2)]
assert all([os.path.isfile(path) for path in actual_gradients])
def load_tensor(path):
tensor = onnx.load_tensor(path)
return onnx.numpy_helper.to_array(tensor)
for e_path, a_path in zip(expected_gradients, actual_gradients):
expected = load_tensor(e_path)
actual = load_tensor(a_path)
np.testing.assert_allclose(expected, actual)
self.expect(
model, x, output_grad=True, custom_model_test_func=gradient_check)
class TestReplaceFuncBackward(ONNXModelTest):
def _test_replace_func(self, fn, xs, set_grad=False):
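# Run `fn` directly and wrapped with as_funcnode, then check that the
# forward outputs and the gradients from chainer.grad agree between the two.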
def make_list(v):
if isinstance(v, (list, tuple)):
return list(v)
else:
return [v]
xvs = [x for x in xs if isinstance(x, chainer.Variable)]
rfn = as_funcnode('fn')(fn)
eys = make_list(fn(*xs))
egxs = chainer.grad(eys, xvs, set_grad=set_grad)
ays = make_list(rfn(*xs))
agxs = chainer.grad(ays, xvs, set_grad=set_grad)
assert len(eys) == len(ays)
for ay, ey in zip(ays, eys):
np.testing.assert_allclose(ay.array, ey.array)
assert len(egxs) == len(agxs)
for agx, egx in zip(agxs, egxs):
if egx is None:
assert egx is None
else:
np.testing.assert_allclose(agx.array, egx.array)
def test_backward_simple(self):
self._test_replace_func(lambda a, b: a * b,
[chainer.Variable(np.array(2.3)),
chainer.Variable(np.array(4.2))])
def test_backward_partially_differentiable(self):
self._test_replace_func(lambda a, b: a * b.array,
[chainer.Variable(np.array(2.3)),
chainer.Variable(np.array(4.2))])
def test_backward_multi_outputs(self):
self._test_replace_func(lambda a, b, c: (a * b, a / b, a * b * c),
[chainer.Variable(np.array(2.3)),
chainer.Variable(np.array(4.2)),
5])
def test_backward_no_side_effect(self):
a = chainer.Variable(np.array(2.3))
b = chainer.Variable(np.array(4.2))
x0 = a * b
x1 = chainer.Variable(np.array(3.7))
self._test_replace_func(lambda a, b: a * b, [x0, x1])
# No side-effect to `grad`.
assert x0.grad is None
assert x1.grad is None
assert a.grad is None
assert b.grad is None
# Gradient computation must stop at `x0` and `x1`.
self._test_replace_func(lambda a, b: a * b, [x0, x1], set_grad=True)
assert x0.grad is not None
assert x1.grad is not None
assert a.grad is None
assert b.grad is None
@testing.parameterize(
{'func_kind': 'list', 'in_shape': (2, 3, 4), 'op_type': 'Add'},
{'func_kind': 'list_kwargs', 'in_shape': (2, 3, 4), 'op_type': 'Add'},
{'func_kind': 'var_with_deco', 'in_shape': (3, 4),
'op_type': 'AddConstant'},
{'func_kind': 'var_kwargs', 'in_shape': (3, 4), 'op_type': 'AddConstant'},
{'func_kind': 'var', 'in_shape': (3, 4), 'op_type': 'AddConstant'},
)
class TestReplaceFunc(ONNXModelTest):
def get_model(self, target_func, input_converter):
class Model(chainer.Chain):
def __init__(self, target_func, input_converter):
super().__init__()
self.input_converter = input_converter
self.fn = target_func
def __call__(self, xs):
args, kwargs = self.input_converter(xs)
h = self.fn(*args, **kwargs)
return F.sigmoid(h)
return Model(target_func, input_converter)
def test_output(self):
attr = None
is_deco = False
if self.func_kind == 'list':
def input_converter(xs):
return ([xs[0], xs[1]],), {}
def target_func(xs):
return xs[0].array + xs[1].array
elif self.func_kind == 'list_kwargs':
def input_converter(xs):
return (), {'xs': [xs[0], xs[1]]}
def target_func(xs=None):
assert xs is not None
return xs[0].array + xs[1].array
elif self.func_kind == 'var_with_deco':
def input_converter(xs):
return (xs,), {}
@as_funcnode('AddConstant', rename_attributes=[('b', 'value')])
def target_func(x, b=0.01):
return x.array + b
is_deco = True
elif self.func_kind == 'var_kwargs':
def input_converter(xs):
return (), {'x': xs, 'value': 0.02}
def target_func(x=None, value=0.01):
assert x is not None
return x.array + value
else:
assert self.func_kind == 'var'
def input_converter(xs):
return (xs, 0.01), {}
def target_func(x, value):
return x.array + value
attr = [(1, 'value')]
model = self.get_model(target_func, input_converter)
x = input_generator.increasing(*self.in_shape)
if not is_deco:
model.fn = fake_as_funcnode(
model.fn, self.op_type, rename_attributes=attr)
name = 'replace_func_' + self.func_kind
self.expect(model, x, name=name)
@pytest.mark.parametrize('return_type', ['list', 'dict'])
def test_replace_func_collection_return(tmpdir, return_type):
path = str(tmpdir)
class Model(chainer.Chain):
def __init__(self, return_type):
super().__init__()
self.return_type = return_type
def tiled_array(self, xs, n=5):
if self.return_type == 'list':
return [xs.array * i for i in range(1, 1+n)]
else:
assert self.return_type == 'dict'
return {str(i): xs.array * i for i in range(1, 1+n)}
def __call__(self, xs):
return self.tiled_array(xs)
model = Model(return_type)
x = input_generator.increasing(1, 5)
with warnings.catch_warnings(record=True):
model.tiled_array = fake_as_funcnode(model.tiled_array, 'xTiledArray')
def tiled_array_converter(params):
return onnx_helper.make_node(
'xTiledArray', params.input_names, params.output_names),
addon_converters = {'xTiledArray': tiled_array_converter}
with testing.assert_warns(UserWarning):
export_testcase(model, x, path, external_converters=addon_converters)
model_filepath = os.path.join(path, 'model.onnx')
assert os.path.isfile(model_filepath)
onnx_model = onnx.load(model_filepath)
node_names = [n.name for n in onnx_model.graph.node]
assert len(node_names) == 1
assert node_names[0] == 'xTiledArray_0'
output_names = [n.name for n in onnx_model.graph.output]
assert len(output_names) == 5
for i, name in enumerate(output_names):
assert name == 'xTiledArray_0_{:d}'.format(i)
def test_fake_as_funcnode_keep_structure(tmpdir):
path = str(tmpdir)
class Model(chainer.Chain):
def __init__(self):
super().__init__()
def f(self, x):
return {'a': (x, x+1), 'b': [x+2, x+3, x+4]}
def __call__(self, x):
ret = self.f(x)
return ret['a'][0] + ret['b'][1]
model = Model()
x = input_generator.increasing(2, 3)
with warnings.catch_warnings(record=True):
model.f = fake_as_funcnode(model.f, 'xF')
def f_converter(params):
return onnx_helper.make_node(
'xF', params.input_names, params.output_names),
addon_converters = {'xF': f_converter}
with testing.assert_warns(UserWarning):
export_testcase(model, x, path, external_converters=addon_converters)
model_filepath = os.path.join(path, 'model.onnx')
assert os.path.isfile(model_filepath)
onnx_model = onnx.load(model_filepath)
node_names = [n.name for n in onnx_model.graph.node]
assert len(node_names) == 2
assert node_names[0] == 'xF_0'
assert len(onnx_model.graph.node[0].output) == 5
assert len(onnx_model.graph.output) == 1
| 12,849 | 32.994709 | 78 | py |
| chainer | chainer-master/tests/onnx_chainer_tests/__init__.py |  | 0 | 0 | 0 | py |
| chainer | chainer-master/tests/onnx_chainer_tests/functions_tests/test_arrays.py |
import chainer
import chainer.functions as F
from chainer import testing
import numpy as np
import onnx
import pytest
from onnx_chainer import export
from onnx_chainer.testing import input_generator
from onnx_chainer_tests.helper import ONNXModelChecker
from onnx_chainer_tests.helper import ONNXModelTest
@testing.parameterize(
# cast
# {'ops': 'cast', 'input_shape': (1, 5),
# 'input_argname': 'x',
# 'args': {'typ': np.float16}},
{'ops': 'cast', 'input_shape': (1, 5),
'input_argname': 'x',
'args': {'typ': np.float64}},
# depth2space
{'ops': 'depth2space', 'input_shape': (1, 12, 6, 6),
'input_argname': 'X',
'args': {'r': 2}},
# pad
{'ops': 'pad', 'input_shape': (1, 2, 3, 4),
'input_argname': 'x',
'args': {'pad_width': ((0, 0), (0, 0), (2, 2), (2, 2)),
'mode': 'constant'},
'name': 'pad_constant'},
{'ops': 'pad', 'input_shape': (1, 2, 3, 4),
'input_argname': 'x',
'args': {'pad_width': ((0, 0), (0, 0), (2, 2), (2, 2)),
'mode': 'reflect'},
'name': 'pad_reflect'},
{'ops': 'pad', 'input_shape': (1, 2, 3, 4),
'input_argname': 'x',
'args': {'pad_width': ((0, 0), (0, 0), (2, 2), (2, 2)),
'mode': 'edge'},
'name': 'pad_edge'},
{'ops': 'pad', 'input_shape': (1, 2, 3, 4),
'input_argname': 'x',
'args': {'pad_width': ((1, 3), (2, 0), (7, 1), (4, 4)),
'mode': 'constant'},
'name': 'pad_imbalance_pad_width'},
{'ops': 'pad', 'input_shape': (1, 2, 3, 4),
'input_argname': 'x',
'args': {'pad_width': ((0, 0), (0, 0), (2, 2), (2, 2)),
'mode': 'constant',
'constant_values': -1},
'name': 'pad_with_constant_values'},
{'ops': 'pad', 'input_shape': (1, 2, 3, 4),
'input_argname': 'x',
'args': {'pad_width': 2,
'mode': 'constant'},
'name': 'pad_scalar_pad_width'},
# reshape
{'ops': 'reshape', 'input_shape': (1, 6),
'input_argname': 'x',
'args': {'shape': (1, 2, 1, 3)}},
# space2depth
{'ops': 'space2depth', 'input_shape': (1, 12, 6, 6),
'input_argname': 'X',
'args': {'r': 2}},
# split_axis
{'ops': 'split_axis', 'input_shape': (1, 6),
'input_argname': 'x',
'args': {'indices_or_sections': 2,
'axis': 1, 'force_tuple': True},
'name': 'split_axis_force_tuple_true'},
{'ops': 'split_axis', 'input_shape': (1, 6),
'input_argname': 'x',
'args': {'indices_or_sections': 2,
'axis': 1, 'force_tuple': False},
'name': 'split_axis_force_tuple_false'},
{'ops': 'split_axis', 'input_shape': (1, 6),
'input_argname': 'x',
'args': {'indices_or_sections': [1, 2], 'axis': 1},
'name': 'split_axis_list'},
# squeeze
{'ops': 'squeeze', 'input_shape': (1, 3, 1, 2),
'input_argname': 'x',
'args': {'axis': None},
'name': 'squeeze_axis_none'},
{'ops': 'squeeze', 'input_shape': (1, 3, 1, 2, 1),
'input_argname': 'x',
'args': {'axis': (2, 4)}},
# swapaxes
{'ops': 'swapaxes', 'input_shape': (2, 3, 4, 5),
'input_argname': 'x',
'args': {'axis1': 1, 'axis2': 2}},
{'ops': 'swapaxes', 'input_shape': (2, 3, 4, 5),
'input_argname': 'x',
'args': {'axis1': -3, 'axis2': -1}},
# tile
{'ops': 'tile', 'input_shape': (1, 5),
'input_argname': 'x',
'args': {'reps': (1, 2)}},
# transpose
{'ops': 'transpose', 'input_shape': (1, 5),
'input_argname': 'x',
'args': {'axes': None}},
# copy
{'ops': 'copy', 'input_shape': (1, 5),
'input_argname': 'x',
'args': {'dst': -1}},
# get_item
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
'args': {'slices': slice(0, 2)},
'name': 'get_item_0to2'},
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
     'args': {'slices': slice(1)},
'name': 'get_item_to1'},
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
     'args': {'slices': slice(1, None)},
'name': 'get_item_1tonone'},
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
'args': {'slices': 0},
'name': 'get_item_0'},
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
'args': {'slices': -1},
'name': 'get_item_minus_1'},
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
'args': {'slices': np.array(0)},
'name': 'get_item_npscalar0'},
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
'args': {'slices': (None, slice(0, 2))},
'name': 'get_item_none_0to2'},
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
'args': {'slices': (Ellipsis, slice(0, 2))},
'name': 'get_item_ellipsis_0to2'},
# get_item, combine newaxis, slice, single index, ellipsis
{'ops': 'get_item', 'input_shape': (2, 2, 3, 3, 3, 4),
'input_argname': 'x',
'args': {'slices': (0, None, Ellipsis, 0, None, slice(0, 2), None, 0)},
'name': 'get_item_complicated'},
{'ops': 'get_item', 'input_shape': (2, 2, 3),
'input_argname': 'x',
'args': {'slices': (slice(None), slice(0, 1), slice(None, 2))},
'name': 'get_item_start_from_none'},
# expand_dims
{'ops': 'expand_dims', 'input_shape': (3,),
'input_argname': 'x', 'args': {'axis': 0},
'name': 'expand_dims_0'},
{'ops': 'expand_dims', 'input_shape': (3,),
'input_argname': 'x', 'args': {'axis': 1},
'name': 'expand_dims_1'},
{'ops': 'expand_dims', 'input_shape': (3,),
'input_argname': 'x', 'args': {'axis': -2},
'name': 'expand_dims_minus2'},
# repeat
{'ops': 'repeat', 'input_shape': (3,),
'input_argname': 'x', 'args': {'repeats': 2},
'name': 'repeat_ndim1'},
{'ops': 'repeat', 'input_shape': (2, 3),
'input_argname': 'x', 'args': {'repeats': 2, 'axis': 1},
'name': 'repeat_with_axis'},
{'ops': 'repeat', 'input_shape': (2, 3),
'input_argname': 'x', 'args': {'repeats': 2},
'name': 'repeat_default_axis'},
# separate
{'ops': 'separate', 'input_shape': (2, 3),
'input_argname': 'x', 'args': {}, 'name': 'separate_axis0'},
{'ops': 'separate', 'input_shape': (2, 3),
'input_argname': 'x', 'args': {'axis': 1}, 'name': 'separate_axis1'},
{'ops': 'separate', 'input_shape': (1, 2, 3),
'input_argname': 'x', 'args': {}, 'name': 'separate_single_output'},
# moveaxis
{'ops': 'moveaxis', 'input_shape': (2, 3, 4, 5),
'input_argname': 'x', 'args': {'source': 0, 'destination': -1}},
{'ops': 'moveaxis', 'input_shape': (2, 3, 4, 5),
'input_argname': 'x', 'args': {'source': (0, 3), 'destination': (2, 0)}},
# rollaxis
{'ops': 'rollaxis', 'input_shape': (2, 3, 4, 5),
'input_argname': 'x', 'args': {'axis': 2, 'start': 0}},
)
class TestArrayOperators(ONNXModelTest):
def setUp(self):
class Model(chainer.Chain):
def __init__(self, ops, args, input_argname):
super(Model, self).__init__()
self.ops = getattr(F, ops)
self.args = args
self.input_argname = input_argname
def __call__(self, x):
self.args[self.input_argname] = x
return self.ops(**self.args)
self.model = Model(self.ops, self.args, self.input_argname)
self.x = input_generator.increasing(*self.input_shape)
def test_output(self):
name = self.ops
if hasattr(self, 'name'):
name = self.name
self.expect(
self.model, self.x, name=name, expected_num_initializers=0)
class TestGetItem(ONNXModelChecker):
    # When chainer.testing.parameterize is used with a list or ndarray
    # parameter, it causes a regex warning. To avoid this, use pytest's
    # parametrize instead.
@pytest.mark.parametrize(
'name,slices', [
('gather_axis0', ([[0, 1], [0, 1]],)),
('gather_axis1', (slice(None), [[0, 1], [1, 2]], slice(None))),
('gather_axis2', (slice(None), slice(None), [[0, 1], [1, 2]])),
('gather_ndarray', (
Ellipsis, np.array([[0, 1], [1, 2]], dtype=np.int64))),
('gather_before_squeezed', (slice(None), 0, [[0, 1], [2, 3]])),
('gather_after_squeezed', (slice(None), [[0, 1], [1, 2]], 0)),
('gather_unsqueezed', (
slice(None), None, [[0, 1], [1, 2]], slice(None))),
('gathernd', [[0, 1], [1, 2]]),
('gathernd_slice_none', [[0, 1], [0, 1], slice(None)]),
('gathernd_full_idx', [[0, 1], [0, 1], [2, 3]]),
('gathernd_before_slice', [0, [0, 1], [2, 3]]),
('gathernd_after_slice', [[0, 1], [0, 2], 0]),
('gathernd_unsqueezed', [[0, 1], [0, 2], None])
])
def test_get_item_gather(self, name, slices):
skip_opsets = None
if name.startswith('gathernd'):
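            # GatherND is only defined from ONNX opset 11, so earlier
            # opsets are skipped.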
skip_opsets = tuple(range(7, 11))
name = 'get_item_' + name
model = chainer.Sequential(
lambda x: F.get_item(x, slices=slices))
x = input_generator.increasing(2, 3, 4)
self.expect(
model, x, name=name, expected_num_initializers=0,
skip_opset_version=skip_opsets)
@pytest.mark.parametrize(
'name,slices', [
('step1', [slice(1, None, 1)]),
('step2', [slice(None, None, None), slice(None, 4, 2)]),
('step_neg1', [slice(None, None, -1)]),
('step_neg2', [slice(None, None, None), slice(4, None, -2)]),
])
def test_get_item_slice_step(self, name, slices):
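        # Exporting slices with explicit steps needs a newer Slice op
        # (presumably the opset 11 Slice), so opsets 7-10 are skipped.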
skip_opsets = tuple(range(7, 11))
name = 'get_item_' + name
model = chainer.Sequential(
lambda x: F.get_item(x, slices=slices))
x = input_generator.increasing(2, 3, 4)
self.expect(
model, x, name=name, expected_num_initializers=0,
skip_opset_version=skip_opsets)
class TestGetItemError(object):
@pytest.mark.parametrize('slices', [
[[0, 1], [1, 2]], [slice(None, None, 2)]
])
def test_get_item_unsupported(self, slices):
model = chainer.Sequential(
lambda x: F.get_item(x, slices=slices))
x = input_generator.increasing(2, 3, 4)
with pytest.raises(ValueError):
export(model, x, opset_version=7)
@pytest.mark.skipif(
onnx.defs.onnx_opset_version() < 11, reason='not support GatherND')
@pytest.mark.parametrize(
'slices', [
[[0, 1], 0, [0, 1]],
[slice(None), [0, 1], [0, 1]],
[None, [0, 1], [0, 1]]
]
)
def test_get_item_unsupported_advanced_index(self, slices):
model = chainer.Sequential(
lambda x: F.get_item(x, slices=slices))
x = input_generator.increasing(2, 3, 4)
with pytest.raises(ValueError):
export(model, x)
class TestConcat(ONNXModelTest):
def setUp(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
def __call__(self, x1, x2):
return F.concat((x1, x2))
self.model = Model()
self.x1 = input_generator.increasing(2, 5)
self.x2 = input_generator.increasing(2, 4)
def test_output(self):
self.expect(self.model, (self.x1, self.x2))
class TestWhere(ONNXModelTest):
def test_output(self):
model = chainer.Sequential(
F.where
)
        cond = np.array([[1, 0, 0], [0, 1, 0]], dtype=bool)
x = input_generator.increasing(2, 3)
y = np.zeros((2, 3), np.float32)
self.expect(model, (cond, x, y), skip_opset_version=[7, 8])
class TestResizeImages(ONNXModelTest):
def setUp(self):
class Model(chainer.Chain):
def __init__(self, ops, args, input_argname):
super(Model, self).__init__()
self.ops = ops
self.args = args
self.input_argname = input_argname
def __call__(self, x):
self.args[self.input_argname] = x
return self.ops(**self.args)
# (batch, channel, height, width) = (1, 1, 2, 2)
self.x = np.array([[[[64, 32], [64, 32]]]], np.float32)
# 2x upsampling
args = {'output_shape': (4, 4)}
self.model = Model(F.resize_images, args, 'x')
def test_output(self):
        # FIXME(syoyo): Currently this test fails because Chainer and
        # onnxruntime implement bilinear interpolation differently, so the
        # output value check is disabled for now.
#
# Currently Chainer will give [64, 53.333336, 42.666668, 32]
# (same result with tensorflow r1.13.1 with `align_corners=True`),
# while onnxruntime gives [64, 48, 32, 32]
# (same result with tensorflow r1.13.1 with `align_corners=False`)
#
# However, the correct behavior will be [64, 54, 40, 32].
# (cv2.resize and tensorflow master(r1.14 or r2.0) after this fix:
# https://github.com/tensorflow/tensorflow/issues/6720)
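        # A minimal worked illustration of the two coordinate conventions
        # for W_in=2 -> W_out=4 (linear interpolation with edge clamping
        # assumed):
        #   align_corners=True:  src_i = i * (W_in-1) / (W_out-1)
        #       -> 0, 1/3, 2/3, 1             -> 64, 53.33, 42.67, 32
        #   asymmetric (onnxruntime): src_i = i * W_in / W_out
        #       -> 0, 0.5, 1.0, 1.5 (clamped) -> 64, 48, 32, 32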
self.check_out_values = None # Skip output value check
with testing.assert_warns(UserWarning):
self.expect(self.model, self.x, expected_num_initializers=0)
@testing.parameterize(
{'ops': 'stack', 'in_shapes': [(3, 4), (3, 4)], 'kwargs': {},
'name': 'stack_default'},
{'ops': 'stack', 'in_shapes': [(3, 4), (3, 4)], 'kwargs': {'axis': 1},
'name': 'stack_axis1'},
{'ops': 'stack', 'in_shapes': [(3, 4), (3, 4)], 'kwargs': {'axis': 2},
'name': 'stack_axis2'},
{'ops': 'stack', 'in_shapes': [(3, 4), (3, 4)], 'kwargs': {'axis': -1},
'name': 'stack_axis_neg'},
{'ops': 'vstack', 'inputs': [2, 3], 'kwargs': {},
'name': 'vstack_ndim0'},
{'ops': 'vstack', 'in_shapes': [(3,), (3,)], 'kwargs': {},
'name': 'vstack_ndim1'},
{'ops': 'vstack', 'in_shapes': [(3, 4), (2, 4)], 'kwargs': {},
'name': 'vstack_ndim2'},
{'ops': 'hstack', 'inputs': [2, 3], 'kwargs': {},
'name': 'hstack_ndim0'},
{'ops': 'hstack', 'in_shapes': [(3,), (3,)], 'kwargs': {},
'name': 'hstack_ndim1'},
{'ops': 'hstack', 'in_shapes': [(3, 4), (3, 2)], 'kwargs': {},
'name': 'hstack_ndim2'},
{'ops': 'dstack', 'inputs': [2, 3], 'kwargs': {},
'name': 'dstack_ndim0'},
{'ops': 'dstack', 'in_shapes': [(3,), (3,)], 'kwargs': {},
'name': 'dstack_ndim1'},
{'ops': 'dstack', 'in_shapes': [(3, 2), (3, 2)], 'kwargs': {},
'name': 'dstack_ndim2'},
{'ops': 'dstack', 'in_shapes': [(3, 2, 2), (3, 2, 1)], 'kwargs': {},
'name': 'dstack_ndim3'},
)
class TestStack(ONNXModelTest):
def test_output(self):
class Model(chainer.Chain):
def __init__(self, ops, kwargs):
super(Model, self).__init__()
self.ops = getattr(F, ops)
self.kwargs = kwargs
def __call__(self, *xs):
return self.ops(xs, **self.kwargs)
model = Model(ops=self.ops, kwargs=self.kwargs)
if hasattr(self, 'inputs'):
xs = [np.array(value, dtype=np.float32) for value in self.inputs]
else:
xs = [input_generator.increasing(*shape) for
shape in self.in_shapes]
self.expect(model, xs, name=self.name)
class TestShape(ONNXModelTest):
def test_output(self):
from onnx_chainer.replace_func import as_funcnode
class Model(chainer.Chain):
def __init__(self):
super().__init__()
@as_funcnode('Shape')
def shape(self, x):
                # The ONNX Shape operator is constrained to return int64.
return np.array(x.shape, dtype=np.int64)
def forward(self, x):
                # Use the shape method instead of x.shape so the graph
                # stays connected.
return self.shape(x)
model = Model()
x = input_generator.increasing(3, 4, 5)
self.expect(model, (x,))
class TestDynamicReshape(ONNXModelTest):
def test_output(self):
from onnx_chainer.replace_func import as_funcnode
class Model(chainer.Chain):
def __init__(self):
super().__init__()
@as_funcnode('Reshape')
def dynamic_reshape(self, x, shape):
                # shape is expected to be passed as a chainer.Variable
return F.reshape(x, tuple(shape.array))
def forward(self, x, shape):
return self.dynamic_reshape(x, shape)
model = Model()
x = input_generator.increasing(3, 4, 5)
shape = np.array([12, 5], dtype=np.int64)
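        # Because the target shape is fed at run time, it must remain a
        # plain graph input; the check below asserts that no 'param'-named
        # initializer input was created for it.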
def check_no_param(onnx_model, path):
assert not any(['param' in v.name for v in onnx_model.graph.input])
self.expect(model, (x, shape), custom_model_test_func=check_no_param)
@testing.parameterize(
{'kwargs': {}, 'name': 'permutate'},
{'kwargs': {'inv': True}, 'name': 'permutate_inv'},
{'kwargs': {'axis': 1}, 'name': 'permutate_axis1'},
{'kwargs': {'axis': 1, 'inv': True}, 'name': 'permutate_axis1_inv'},
)
class TestPermutate(ONNXModelTest):
def test_output(self):
class Model(chainer.Chain):
def __init__(self, kwargs):
super(Model, self).__init__()
self.kwargs = kwargs
def forward(self, x, indices):
return F.permutate(x, indices, **self.kwargs)
model = Model(kwargs=self.kwargs)
x = np.arange(6).reshape((3, 2)).astype(np.float32)
if self.kwargs.get('axis') == 1:
indices = np.array([1, 0], np.int32)
else:
indices = np.array([2, 0, 1], np.int32)
self.expect(model, (x, indices), name=self.name,
skip_opset_version=[7, 8])
@testing.parameterize(
{'in_shapes': [(3, 4)], 'name': 'transpose_sequence_single_input'},
{'in_shapes': [(1, 3), (1, 3)],
'name': 'transpose_sequence_single_output'},
{'in_shapes': [(2, 3), (2, 3), (2, 3), (2, 3)],
'name': 'transpose_sequence_same_shape'},
)
class TestTransposeSequence(ONNXModelTest):
def test_output(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
def __call__(self, *xs):
return F.transpose_sequence(xs)
model = Model()
xs = [input_generator.increasing(*shape) for
shape in self.in_shapes]
self.expect(model, xs, name=self.name)
class TestSelectItem(ONNXModelTest):
def test_output(self):
class Model(chainer.Chain):
def forward(self, x, t):
return F.select_item(x, t)
model = Model()
x = input_generator.increasing(3, 5)
t = np.array([4, 1, 0], dtype=np.int32)
self.expect(
model, (x, t), expected_num_initializers=0,
skip_opset_version=list(range(1, 9)))
| 19,110
| 32.705467
| 79
|
py
|
chainer
|
chainer-master/tests/onnx_chainer_tests/functions_tests/test_loss.py
|
import unittest
import chainer
from chainer import testing
import numpy as np
from onnx_chainer_tests.helper import ONNXModelTest
@testing.parameterize(
{'in_shape': (3, 5), 'name': 'softmax_cross_entropy'},
)
@unittest.skipUnless(
int(chainer.__version__.split('.')[0]) >= 6,
"SoftmaxCrossEntropy is supported from Chainer v6")
class TestSoftmaxCrossEntropy(ONNXModelTest):
def setUp(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
def __call__(self, x, t):
return chainer.functions.softmax_cross_entropy(x, t)
self.model = Model()
self.x = np.random.uniform(size=self.in_shape).astype('f')
self.t = np.random.randint(size=self.in_shape[0], low=0,
high=self.in_shape[1]).astype(np.int32)
def test_output(self):
self.expect(self.model, [self.x, self.t], name=self.name,
skip_opset_version=[7, 8])
| 1,014
| 28
| 74
|
py
|
chainer
|
chainer-master/tests/onnx_chainer_tests/functions_tests/test_normalizations.py
|
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import testing
import numpy as np
from onnx_chainer.testing import input_generator
from onnx_chainer_tests.helper import get_initializer_names
from onnx_chainer_tests.helper import ONNXModelTest
@testing.parameterize(
{
'name': 'local_response_normalization',
'input_argname': 'x',
'args': {'k': 1, 'n': 3, 'alpha': 1e-4, 'beta': 0.75},
'opset_version': 1
},
{
'name': 'normalize',
'input_argname': 'x',
'args': {'axis': 1},
'opset_version': 1
}
)
class TestNormalizations(ONNXModelTest):
def setUp(self):
class Model(chainer.Chain):
def __init__(self, ops, args, input_argname):
super(Model, self).__init__()
self.ops = ops
self.args = args
self.input_argname = input_argname
def __call__(self, x):
self.args[self.input_argname] = x
return self.ops(**self.args)
ops = getattr(F, self.name)
self.model = Model(ops, self.args, self.input_argname)
self.x = input_generator.increasing(2, 5, 3, 3)
def test_output(self):
self.expect(self.model, self.x, name=self.name)
@testing.parameterize(
{'kwargs': {}},
{'kwargs': {'use_beta': False}, 'condition': 'use_beta_false'},
{'kwargs': {'use_gamma': False}, 'condition': 'use_gamma_false'},
{'train': True, 'kwargs': {}},
{'train': True,
'kwargs': {'use_beta': False}, 'condition': 'use_beta_false'},
{'train': True,
'kwargs': {'use_gamma': False}, 'condition': 'use_gamma_false'},
{'train': True,
'kwargs': {'initial_avg_mean': 0.5}, 'condition': 'init_avg_mean'},
)
class TestBatchNormalization(ONNXModelTest):
def setUp(self):
class Model(chainer.Chain):
def __init__(self, **kwargs):
super(Model, self).__init__()
with self.init_scope():
self.bn = L.BatchNormalization(5, **kwargs)
def __call__(self, x):
return self.bn(x)
self.model = Model(**self.kwargs)
self.x = input_generator.increasing(2, 5)
def test_output(self):
train = getattr(self, 'train', False)
name = 'batch_normalization'
if not train:
name = 'fixed_' + name
if hasattr(self, 'condition'):
name += '_' + self.condition
def test_input_names(onnx_model, path):
initializer_names = get_initializer_names(onnx_model)
assert len(initializer_names) == 4
assert 'param_bn_avg_mean' in initializer_names
assert 'param_bn_avg_var' in initializer_names
self.expect(
self.model, self.x, name=name, train=train,
custom_model_test_func=test_input_names)
class TestGroupNormalization(ONNXModelTest):
def get_model(self):
class Model(chainer.Chain):
def __init__(self):
super().__init__()
with self.init_scope():
self.gn = L.GroupNormalization(2)
def forward(self, x):
return self.gn(x)
return Model()
def test_output(self):
model = self.get_model()
x = np.zeros((10, 4, 256, 256), dtype=np.float32)
self.expect(model, x, train=True)
class TestBatchNormalizationFunction(ONNXModelTest):
def setUp(self):
class Model(chainer.Chain):
def __call__(self, x):
gamma = np.ones(x.shape[1:], dtype=x.dtype)
beta = np.zeros(x.shape[1:], dtype=x.dtype)
return F.batch_normalization(x, gamma, beta)
self.model = Model()
self.x = input_generator.increasing(2, 5)
def test_output(self):
def test_input_names(onnx_model, path):
initializer_names = get_initializer_names(onnx_model)
assert len(initializer_names) == 4
assert 'BatchNormalization_0_param_avg_mean' in initializer_names
assert 'BatchNormalization_0_param_avg_var' in initializer_names
self.expect(
self.model, self.x, custom_model_test_func=test_input_names)
class TestFixedBatchNormalizationFunctionImplicitInputs(ONNXModelTest):
def setUp(self):
class Model(chainer.Chain):
def __call__(self, x):
mean = x.array.mean(axis=0)
var = x.array.var(axis=0)
gamma = np.ones_like(mean, dtype=x.dtype)
beta = np.zeros_like(mean, dtype=x.dtype)
return F.fixed_batch_normalization(x, gamma, beta, mean, var)
self.model = Model()
self.x = input_generator.increasing(2, 5)
def test_output(self):
def test_input_names(onnx_model, path):
initializer_names = get_initializer_names(onnx_model)
assert len(initializer_names) == 4
assert 'FixedBatchNormalization_0_param_avg_mean' in\
initializer_names
assert 'FixedBatchNormalization_0_param_avg_var' in\
initializer_names
self.expect(
self.model, self.x, custom_model_test_func=test_input_names)
class TestFixedBatchNormalizationFunctionExplicitInputs(ONNXModelTest):
def setUp(self):
class Model(chainer.Chain):
def __call__(self, x, gamma, beta, mean, var):
return F.fixed_batch_normalization(x, gamma, beta, mean, var)
self.model = Model()
self.x = input_generator.increasing(2, 5)
self.mean = self.x.mean(axis=0)
self.var = self.x.var(axis=0)
self.gamma = np.ones_like(self.mean, dtype=self.x.dtype)
self.beta = np.zeros_like(self.mean, dtype=self.x.dtype)
def test_output(self):
self.expect(
self.model, [self.x, self.gamma, self.beta, self.mean, self.var])
| 5,991
| 30.046632
| 77
|
py
|
chainer
|
chainer-master/tests/onnx_chainer_tests/functions_tests/test_poolings.py
|
import chainer
import chainer.functions as F
from chainer import testing
import numpy as np
from onnx_chainer.testing import input_generator
from onnx_chainer_tests.helper import ONNXModelTest
@testing.parameterize(
{'op_name': 'average_pooling_2d',
'in_shape': (1, 3, 6, 6), 'args': [2, 1, 0], 'cover_all': None},
{'op_name': 'average_pooling_2d', 'condition': 'pad1',
'in_shape': (1, 3, 6, 6), 'args': [3, 2, 1], 'cover_all': None},
{'op_name': 'average_pooling_nd',
'in_shape': (1, 3, 6, 6, 6), 'args': [2, 1, 1], 'cover_all': None},
{'op_name': 'max_pooling_2d',
'in_shape': (1, 3, 6, 6), 'args': [2, 1, 1], 'cover_all': False},
{'op_name': 'max_pooling_2d', 'condition': 'coverall',
'in_shape': (1, 3, 6, 5), 'args': [3, (2, 1), 1], 'cover_all': True},
{'op_name': 'max_pooling_nd',
'in_shape': (1, 3, 6, 6, 6), 'args': [2, 1, 1], 'cover_all': False},
{'op_name': 'max_pooling_nd', 'condition': 'coverall',
'in_shape': (1, 3, 6, 5, 4), 'args': [3, 2, 1], 'cover_all': True},
{'op_name': 'unpooling_2d',
'in_shape': (1, 3, 6, 6), 'args': [3, None, 0], 'cover_all': False},
    # TODO(disktnk): when cover_all=True, interpolation results differ
    # between Chainer and ONNXRuntime, so the output value check is skipped.
{'op_name': 'unpooling_2d', 'condition': 'coverall',
'in_shape': (1, 3, 6, 6), 'args': [3, None, 0], 'cover_all': True,
'skip_check_ver': True},
)
class TestPoolings(ONNXModelTest):
def setUp(self):
ops = getattr(F, self.op_name)
self.model = Model(ops, self.args, self.cover_all)
self.x = input_generator.increasing(*self.in_shape)
def test_output(self):
name = self.op_name
if hasattr(self, 'condition'):
name += '_' + self.condition
skip_out_check = getattr(self, 'skip_check_ver', None)
if skip_out_check is not None:
skip_out_check = self.target_opsets
self.expect(
self.model, self.x, name=name,
skip_outvalue_version=skip_out_check, expected_num_initializers=0)
class Model(chainer.Chain):
def __init__(self, ops, args, cover_all):
super(Model, self).__init__()
self.ops = ops
self.args = args
self.cover_all = cover_all
def __call__(self, x):
if self.cover_all is not None:
return self.ops(*([x] + self.args), cover_all=self.cover_all)
else:
return self.ops(*([x] + self.args))
class TestROIPooling2D(ONNXModelTest):
def setUp(self):
        # These parameters are taken from the corresponding Chainer test.
in_shape = (3, 3, 12, 8)
self.x = input_generator.positive_increasing(*in_shape)
        # In the Chainer test, x is shuffled and given a normalization-like
        # conversion; those steps are skipped here. If x contains negative
        # values, the result does not match onnxruntime's output. You can
        # reproduce the issue by changing `positive_increasing` to
        # `increasing`.
self.rois = np.array([
[0, 1, 1, 6, 6],
[2, 6, 2, 7, 11],
[1, 3, 1, 5, 10],
[0, 3, 3, 3, 3]], dtype=np.float32)
kwargs = {
'outh': 3,
'outw': 7,
'spatial_scale': 0.6
}
class Model(chainer.Chain):
def __init__(self, kwargs):
super(Model, self).__init__()
self.kwargs = kwargs
def __call__(self, x, rois):
return F.roi_pooling_2d(x, rois, **self.kwargs)
self.model = Model(kwargs)
def test_output(self):
with testing.assert_warns(UserWarning):
self.expect(self.model, [self.x, self.rois])
| 3,725
| 35.529412
| 78
|
py
|
chainer
|
chainer-master/tests/onnx_chainer_tests/functions_tests/test_connections.py
|
import chainer
import chainer.links as L
from chainer import testing
import numpy as np
from onnx_chainer.testing import input_generator
from onnx_chainer_tests.helper import ONNXModelTest
@testing.parameterize(
# Convolution2D
{'link': L.Convolution2D, 'in_shape': (1, 3, 5, 5), 'in_type': np.float32,
'args': [None, 3, 3, 1, 1],
'kwargs': {}},
{'link': L.Convolution2D, 'in_shape': (1, 3, 5, 5), 'in_type': np.float32,
'args': [None, 3, 3, 1, 2, True],
'kwargs': {}, 'name': 'Convolution2D_pad2_bias'},
{'link': L.Convolution2D, 'in_shape': (1, 3, 5, 5), 'in_type': np.float32,
'args': [None, 3, 3, 1, 1],
'kwargs': {'groups': 3}, 'name': 'Convolution2D_groups3'},
# ConvolutionND
{'link': L.ConvolutionND, 'in_shape': (1, 2, 3, 5), 'in_type': np.float32,
'args': [2, 2, 4, 3, 1, 0],
'kwargs': {}},
{'link': L.ConvolutionND, 'in_shape': (1, 2, 3, 5), 'in_type': np.float32,
'args': [2, 2, 4, 3, 1, 0, True],
'kwargs': {}, 'name': 'ConvolutionND_bias'},
{'link': L.ConvolutionND, 'in_shape': (1, 3, 5, 5, 5),
'in_type': np.float32,
'args': [3, 3, 4, 3, 1, 0],
'kwargs': {}, 'name': 'ConvolutionND_ndim3'},
{'link': L.ConvolutionND, 'in_shape': (1, 6, 5, 5, 5),
'in_type': np.float32, 'args': [3, 6, 4, 3, 1, 0],
'kwargs': {'groups': 2}, 'name': 'ConvolutionND_group2'},
# DilatedConvolution2D
{'link': L.DilatedConvolution2D, 'in_shape': (1, 3, 5, 5),
'in_type': np.float32, 'args': [None, 3, 3, 1, 1, 2],
'kwargs': {}},
{'link': L.DilatedConvolution2D, 'in_shape': (1, 3, 5, 5),
'in_type': np.float32, 'args': [None, 3, 3, 1, 1, 2, True],
'kwargs': {}, 'name': 'DilatedConvolution2D_bias'},
# Deconvolution2D
{'link': L.Deconvolution2D, 'in_shape': (1, 3, 5, 5),
'in_type': np.float32, 'args': [None, 3, 4, 2, 0],
'kwargs': {}},
{'link': L.Deconvolution2D, 'in_shape': (1, 3, 5, 5),
'in_type': np.float32, 'args': [None, 3, 4, 2, 0, True],
'kwargs': {}, 'name': 'Deconvolution2D_bias'},
{'link': L.Deconvolution2D, 'in_shape': (1, 4, 5, 5),
'in_type': np.float32, 'args': [None, 6, 2, 2, 0, True],
'kwargs': {'groups': 2}, 'name': 'Deconvolution2D_group3'},
# DeconvolutionND
# NOTE(disktnk): ONNX runtime accepts only 4-dimensional input X
{'link': L.DeconvolutionND, 'in_shape': (1, 3, 5, 5),
'in_type': np.float32, 'args': [2, 3, 3, 2, 2, 0, True],
'kwargs': {}, 'name': 'DeconvolutionND'},
{'link': L.DeconvolutionND, 'in_shape': (1, 6, 5, 5),
'in_type': np.float32, 'args': [2, 6, 4, 2, 2, 0, True],
'kwargs': {'groups': 2}, 'name': 'DeconvolutionND_group3'},
# EmbedID
    {'link': L.EmbedID, 'in_shape': (1, 10), 'in_type': int,
'args': [5, 8],
'kwargs': {}},
# Linear
{'link': L.Linear, 'in_shape': (1, 10), 'in_type': np.float32,
'args': [None, 8],
'kwargs': {}},
{'link': L.Linear, 'in_shape': (1, 10), 'in_type': np.float32,
'args': [None, 8, True],
'kwargs': {}, 'name': 'Linear_bias'},
)
class TestConnections(ONNXModelTest):
def setUp(self):
class Model(chainer.Chain):
def __init__(self, link, args, kwargs):
super(Model, self).__init__()
with self.init_scope():
self.l1 = link(*args, **kwargs)
def __call__(self, x):
return self.l1(x)
self.model = Model(self.link, self.args, self.kwargs)
if self.link is L.EmbedID:
self.x = np.random.randint(0, self.args[0], size=self.in_shape)
self.x = self.x.astype(self.in_type)
else:
self.x = input_generator.increasing(
*self.in_shape, dtype=self.in_type)
def test_output(self):
name = self.link.__name__.lower()
if hasattr(self, 'name'):
name = self.name.lower()
self.expect(self.model, self.x, name=name)
| 3,974
| 36.857143
| 78
|
py
|
chainer
|
chainer-master/tests/onnx_chainer_tests/functions_tests/test_rnn.py
|
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import testing
import numpy as np
from onnx_chainer import onnx_helper
from onnx_chainer.testing import input_generator
from onnx_chainer_tests.helper import ONNXModelTest
@testing.parameterize(
{'n_layers': 1, 'name': 'n_step_gru_1_layer'},
{'n_layers': 2, 'name': 'n_step_gru_2_layer'},
)
class TestNStepGRU(ONNXModelTest):
def test_output(self):
n_layers = self.n_layers
dropout_ratio = 0.0
batch_size = 3
input_size = 4
hidden_size = 5
seq_length = 6
class Model(chainer.Chain):
def __init__(self):
super().__init__()
def __call__(self, hx, ws1, ws2, ws3, bs, xs):
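                # n_step_gru expects 6 weight matrices and 6 bias vectors
                # per layer (3 gates x input/hidden side); layer 0 arrives
                # split into ws1 (input side) and ws2 (hidden side), later
                # layers come packed in ws3.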
ws = [F.separate(ws1) + F.separate(ws2)]
if n_layers > 1:
ws.extend([F.separate(w) for w in F.separate(ws3)])
bs = [F.separate(b) for b in F.separate(bs)]
xs = F.separate(xs)
hy, ys = F.n_step_gru(n_layers, dropout_ratio,
hx, ws, bs, xs)
return hy, F.stack(ys, axis=0)
model = Model()
hx = input_generator.increasing(n_layers, batch_size, hidden_size)
ws1 = input_generator.increasing(3, hidden_size, input_size)
ws2 = input_generator.increasing(3, hidden_size, hidden_size)
ws3 = input_generator.increasing(
n_layers - 1, 6, hidden_size, hidden_size)
bs = input_generator.increasing(n_layers, 6, hidden_size)
xs = input_generator.increasing(seq_length, batch_size, input_size)
self.expect(model, (hx, ws1, ws2, ws3, bs, xs))
def convert_Permutate(params):
gb = onnx_helper.GraphBuilder()
# indices_name = params.context.get_name(func.indices)
indices_name = params.context.add_const(params.func.indices,
'indices') # XXX
if params.func.inv:
empty = params.context.add_const(
np.zeros(dtype=np.int64, shape=params.func.indices.shape), 'empty')
r = params.context.add_const(
np.arange(len(params.func.indices), dtype=np.int64),
'range')
        # Scatter was replaced by ScatterElements from opset 11.
        op = 'ScatterElements' if params.opset_version >= 11 else 'Scatter'
indices_name = gb.op(op, [empty, indices_name, r])
params.input_names.append(indices_name)
gb.op_output_named('Gather', params.input_names, params.output_names,
axis=params.func.axis)
return gb.nodes()
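# Note on the inv=True branch above: Scatter(zeros, indices, range(n))
# computes out[indices[j]] = j, i.e. the inverse permutation, which the
# final Gather then consumes. For example, indices [2, 0, 1] yield the
# inverse [1, 2, 0].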
@testing.parameterize(
{'n_layers': 1, 'name': 'TestNStepGRU_1_layer'},
{'n_layers': 2, 'name': 'TestNStepGRU_2_layer'},
)
class TestNStepGRULink(ONNXModelTest):
def test_output(self):
n_layers = self.n_layers
dropout_ratio = 0.0
batch_size = 3
input_size = 4
hidden_size = 5
seq_length = 6
class Model(chainer.Chain):
def __init__(self):
super().__init__()
with self.init_scope():
self.gru = L.NStepGRU(
n_layers, input_size, hidden_size, dropout_ratio)
def __call__(self, *xs):
hy, ys = self.gru(None, xs)
return [hy] + ys
model = Model()
xs = [input_generator.increasing(seq_length, input_size)
for i in range(batch_size)]
# NOTE(msakai): Replace Permutate converter for avoiding error like:
# ValidationError: Nodes in a graph must be topologically sorted, \
# however input 'v330' of node:
# input: "Permutate_0_const_empty" input: "v330" \
# input: "Permutate_0_const_range" output: "Permutate_0_tmp_0" \
# name: "Permutate_0_tmp_0" op_type: "Scatter"
# is not output of any previous nodes.
addon_converters = {
'Permutate': convert_Permutate,
}
self.expect(model, xs, skip_opset_version=[7, 8],
external_converters=addon_converters)
| 4,059
| 35.576577
| 79
|
py
|
chainer
|
chainer-master/tests/onnx_chainer_tests/functions_tests/test_noises.py
|
import chainer
import chainer.functions as F
from chainer import testing
import numpy as np
from onnx_chainer_tests.helper import ONNXModelTest
@testing.parameterize(
{'name': 'dropout', 'ops': lambda x: F.dropout(x, ratio=0.5)},
)
class TestNoises(ONNXModelTest):
def setUp(self):
class Model(chainer.Chain):
def __init__(self, ops):
super(Model, self).__init__()
self.ops = ops
def __call__(self, x):
with chainer.using_config('train', True):
y = self.ops(x)
return y
self.model = Model(self.ops)
self.x = np.zeros((1, 5), dtype=np.float32)
def test_output(self):
self.expect(self.model, self.x, name=self.name)
| 775
| 23.25
| 66
|
py
|
chainer
|
chainer-master/tests/onnx_chainer_tests/functions_tests/test_activations.py
|
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import testing
from onnx_chainer.testing import input_generator
from onnx_chainer_tests.helper import ONNXModelTest
@testing.parameterize(
{'name': 'clipped_relu'},
{'name': 'elu'},
{'name': 'hard_sigmoid'},
{'name': 'leaky_relu'},
{'name': 'log_softmax'},
{'name': 'log_softmax',
'args': {'axis': 0}, 'test_name': 'log_softmax_axis0'},
{'name': 'log_softmax',
'args': {'axis': 2}, 'test_name': 'log_softmax_axis2'},
{'name': 'relu'},
{'name': 'selu'},
{'name': 'sigmoid'},
{'name': 'softmax'},
{'name': 'softmax',
'args': {'axis': 0}, 'test_name': 'softmax_axis0'},
{'name': 'softmax',
'args': {'axis': 2}, 'test_name': 'softmax_axis2'},
{'name': 'softplus'},
{'name': 'tanh'},
)
class TestActivations(ONNXModelTest):
def setUp(self):
class Model(chainer.Chain):
def __init__(self, ops, args):
super(Model, self).__init__()
self.ops = ops
self.args = args
def __call__(self, x):
return self.ops(x, **self.args)
ops = getattr(F, self.name)
args = {}
if hasattr(self, 'args'):
args = self.args
self.model = Model(ops, args)
self.x = input_generator.increasing(2, 5, 3)
def test_output(self):
test_name = getattr(self, 'test_name', self.name)
self.expect(self.model, self.x, test_name)
class TestPReLU(ONNXModelTest):
def setUp(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
with self.init_scope():
self.prelu = L.PReLU()
def __call__(self, x):
return self.prelu(x)
self.model = Model()
self.x = input_generator.increasing(2, 5)
def test_output(self):
self.expect(self.model, self.x)
| 2,005
| 25.394737
| 60
|
py
|
chainer
|
chainer-master/tests/onnx_chainer_tests/functions_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/onnx_chainer_tests/functions_tests/test_maths.py
|
import chainer
import chainer.functions as F
from chainer import testing
from chainer.testing import attr
import numpy as np
import pytest
from onnx_chainer.testing import input_generator
from onnx_chainer_tests.helper import ONNXModelTest
@testing.parameterize(
{'op_name': 'Neg', 'ops': '-a'},
{'op_name': 'Absolute', 'ops': 'abs(a)'},
{'op_name': 'Arccos', 'ops': 'chainer.functions.arccos(a)'},
{'op_name': 'Arcsin', 'ops': 'chainer.functions.arcsin(a)'},
{'op_name': 'Arctan', 'ops': 'chainer.functions.arctan(a)'},
{'op_name': 'Cos', 'ops': 'chainer.functions.cos(a)'},
{'op_name': 'Cosh', 'ops': 'chainer.functions.cosh(a)'},
{'op_name': 'Clip', 'ops': 'chainer.functions.clip(a, 0.1, 0.2)'},
{'op_name': 'Exp', 'ops': 'chainer.functions.exp(a)'},
{'op_name': 'Sqrt', 'ops': 'chainer.functions.sqrt(a)'},
{'op_name': 'RSqrt', 'ops': 'chainer.functions.rsqrt(a)'},
{'op_name': 'PowVarConst',
'ops': 'a ** 2.3'},
{'op_name': 'PowConstVar',
'ops': '2.3 ** a'},
{'op_name': 'Sum', 'ops': 'chainer.functions.sum(a)'},
{'op_name': 'Sum', 'ops': 'chainer.functions.sum(a, axis=1)',
'condition': 'axis1'},
{'op_name': 'Sum', 'ops': 'chainer.functions.sum(a, keepdims=True)',
'condition': 'keepdims'},
{'op_name': 'AddConstant', 'ops': 'a + 1'},
{'op_name': 'MulConstant', 'ops': 'a * 2'},
{'op_name': 'SubFromConstant', 'ops': '1 - a'},
{'op_name': 'DivFromConstant', 'ops': '2 / a'},
{'op_name': 'Max', 'ops': 'chainer.functions.max(a)'},
{'op_name': 'Max', 'ops': 'chainer.functions.max(a, axis=0)',
'condition': 'axis0'},
{'op_name': 'Max', 'ops': 'chainer.functions.max(a, keepdims=True)',
'condition': 'keepdims'},
{'op_name': 'Mean', 'ops': 'chainer.functions.mean(a)'},
{'op_name': 'Mean', 'ops': 'chainer.functions.mean(a, axis=0)',
'condition': 'axis0'},
{'op_name': 'Mean', 'ops': 'chainer.functions.mean(a, keepdims=True)',
'condition': 'keepdims'},
{'op_name': 'Min', 'ops': 'chainer.functions.min(a)'},
{'op_name': 'Min', 'ops': 'chainer.functions.min(a, axis=0)',
'condition': 'axis0'},
{'op_name': 'Min', 'ops': 'chainer.functions.min(a, keepdims=True)',
'condition': 'keepdims'},
{'op_name': 'Prod', 'ops': 'chainer.functions.prod(a)'},
{'op_name': 'Prod', 'ops': 'chainer.functions.prod(a, axis=0)',
'condition': 'axis0'},
{'op_name': 'Prod', 'ops': 'chainer.functions.prod(a, keepdims=True)',
'condition': 'keepdims'},
{'op_name': 'Log', 'ops': 'chainer.functions.log(a)'},
{'op_name': 'LogSumExp', 'ops': 'chainer.functions.logsumexp(a)'},
{'op_name': 'LogSumExp', 'ops': 'chainer.functions.logsumexp(a, axis=0)',
'condition': 'axis0'},
{'op_name': 'Sin', 'ops': 'chainer.functions.sin(a)'},
{'op_name': 'Sinh', 'ops': 'chainer.functions.sinh(a)'},
{'op_name': 'Square', 'ops': 'chainer.functions.square(a)'},
{'op_name': 'Tan', 'ops': 'chainer.functions.tan(a)'},
{'op_name': 'BroadcastTo',
'ops': 'chainer.functions.broadcast_to(a, (2,2,3))'},
{'op_name': 'Sign', 'ops': 'chainer.functions.sign(a)'},
)
class TestUnaryMathOperators(ONNXModelTest):
@pytest.fixture(autouse=True)
def setup_test_case(self):
class Model(chainer.Chain):
def __init__(self, ops):
super(Model, self).__init__()
self.ops = ops
def __call__(self, a):
if not isinstance(a, chainer.Variable):
a = chainer.Variable(a)
return eval(self.ops)
self.model = Model(self.ops)
self.a = input_generator.positive_increasing(2, 3) / 10.0
name = self.op_name.lower()
if hasattr(self, 'condition'):
name += '_' + self.condition
self.name = name
skip_opset_version = []
        if self.op_name in ('Cosh', 'Sinh', 'Sign'):
            skip_opset_version.extend([7, 8])
if self.op_name == 'BroadcastTo':
skip_opset_version.append(7)
self.skip_opset_version = skip_opset_version
def test_output(self):
self.expect(self.model, self.a, name=self.name,
skip_opset_version=self.skip_opset_version,
expected_num_initializers=0)
@attr.gpu
def test_output_gpu(self):
model, a = self.to_gpu(self.model, self.a)
# test outputs are overwritten
self.expect(model, a, name=self.name,
skip_opset_version=self.skip_opset_version,
expected_num_initializers=0)
@testing.parameterize(
{'op_name': 'Add', 'ops': 'a + b'},
{'op_name': 'Sub', 'ops': 'a - b'},
{'op_name': 'Mul', 'ops': 'a * b'},
{'op_name': 'Div', 'ops': 'a / b'},
{'op_name': 'MatMul_transa',
'ops': 'chainer.functions.matmul(a, b, transa=True)'},
{'op_name': 'MatMul_transb',
'ops': 'chainer.functions.matmul(a, b, transb=True)'},
{'op_name': 'Maximum', 'ops': 'chainer.functions.maximum(a, b)'},
{'op_name': 'Minimum', 'ops': 'chainer.functions.minimum(a, b)'},
{'op_name': 'PowVarVar', 'ops': 'a ** b'},
)
class TestBinaryMathOperators(ONNXModelTest):
def get_model(self):
class Model(chainer.Chain):
def __init__(self, ops):
super(Model, self).__init__()
self.ops = ops
def __call__(self, a, b):
return eval(self.ops)
return Model(self.ops)
def test_output(self):
test_cases = ['matrix', 'vector']
if not self.op_name.startswith('MatMul'):
test_cases.append('scalar')
for tc in test_cases:
xs = getattr(self, tc)()
name = '{}_{}'.format(self.op_name.lower(), tc)
self.expect(
self.get_model(), xs, name=name, expected_num_initializers=0)
def matrix(self):
a = chainer.Variable(input_generator.positive_increasing(5, 2, 3))
b = chainer.Variable(input_generator.nonzero_increasing(5, 2, 3) * 0.3)
return (a, b)
def vector(self):
a = chainer.Variable(input_generator.positive_increasing(2,))
b = chainer.Variable(input_generator.nonzero_increasing(2,) * 0.3)
return (a, b)
def scalar(self):
a = chainer.Variable(np.array(7, dtype=np.float32))
b = chainer.Variable(np.array(2, dtype=np.float32))
return (a, b)
@testing.parameterize(
{'op_name': 'LinearInterpolate',
'ops': 'chainer.functions.linear_interpolate(a, b, c)'},
)
class TestTernaryMathOperators(ONNXModelTest):
def setUp(self):
class Model(chainer.Chain):
def __init__(self, ops):
super(Model, self).__init__()
self.ops = ops
def __call__(self, a, b, c):
if not isinstance(a, chainer.Variable):
a = chainer.Variable(a)
if not isinstance(b, chainer.Variable):
b = chainer.Variable(b)
if not isinstance(c, chainer.Variable):
c = chainer.Variable(c)
return eval(self.ops)
self.model = Model(self.ops)
a = chainer.Variable(input_generator.increasing(2, 3))
b = chainer.Variable(input_generator.increasing(2, 3) * 0.3)
c = chainer.Variable(input_generator.increasing(2, 3) * 0.7)
self.x = (a, b, c)
def test_output(self):
name = self.op_name.lower()
self.expect(self.model, self.x, name=name,
expected_num_initializers=0)
@testing.parameterize(
{'op': 'argmax'},
{'op': 'argmax', 'axis': 0},
{'op': 'argmax', 'axis': 1},
{'op': 'argmin'},
{'op': 'argmin', 'axis': 0},
{'op': 'argmin', 'axis': 1},
)
class TestArgMaxArgMin(ONNXModelTest):
def test_output(self):
class Model(chainer.Chain):
def __init__(self, op, axis):
self.axis = axis
self.op = getattr(F, op)
super(Model, self).__init__()
def forward(self, x):
return self.op(x, axis=self.axis)
x = np.random.rand(2, 3).astype(np.float32)
axis = getattr(self, 'axis', None)
model = Model(self.op, axis)
name = self.op
if axis is not None:
name += '_axis_{}'.format(self.axis)
self.expect(model, x, name=name, expected_num_initializers=0)
| 8,544
| 35.517094
| 79
|
py
|
chainer
|
chainer-master/tests/chainermn_tests/conftest.py
|
import multiprocessing
import platform
import pytest
def dummy_func():
pass
@pytest.fixture(scope='session', autouse=True)
def scope_session():
if int(platform.python_version_tuple()[0]) >= 3:
multiprocessing.set_start_method('forkserver')
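        # Starting a short-lived dummy process warms up the forkserver
        # before any test initializes MPI or CUDA, so later forked workers
        # do not inherit those handles (a likely rationale; the original
        # gives none).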
p = multiprocessing.Process(target=dummy_func)
p.start()
p.join()
yield
| 360
| 19.055556
| 54
|
py
|
chainer
|
chainer-master/tests/chainermn_tests/communicator_tests/test_mpi.py
|
import os
try:
import queue
except ImportError:
import Queue as queue
import re
import signal
from subprocess import CalledProcessError
from subprocess import PIPE
from subprocess import Popen
import sys
import threading
import unittest
import numpy as np
import pytest
import chainer.testing
import chainer.testing.attr
import chainermn
from chainermn.communicators import _memory_utility
class _TimeoutThread(threading.Thread):
def __init__(self, queue, rank):
super(_TimeoutThread, self).__init__()
self.queue = queue
self.rank = rank
def run(self):
try:
self.queue.get(timeout=60)
except queue.Empty:
# Show error message and information of the problem
try:
p = Popen(['ompi_info', '--all', '--parsable'], stdout=PIPE)
out, err = p.communicate()
                if isinstance(out, bytes):
out = out.decode('utf-8')
m = re.search(r'ompi:version:full:(\S+)', out)
version = m.group(1)
msg = "\n\n" \
"***** ERROR: bcast test deadlocked. " \
"One of the processes " \
"***** crashed or you encountered a known bug of " \
"Open MPI." \
"***** The following Open MPI versions have a bug\n" \
"***** that cause MPI_Bcast() deadlock " \
"when GPUDirect is used:\n" \
"***** 3.0.0, 3.0.1, 3.0.2, 3.1.0, 3.1.1, 3.1.2\n" \
"***** Your Open MPI version: {}\n".format(version)
if self.rank == 1:
# Rank 1 prints the error message.
# This is because rank 0 is the root of Bcast(), and it
# may finish Bcast() immediately
# without deadlock, depending on the timing.
                    print(msg)
sys.stdout.flush()
os.kill(os.getpid(), signal.SIGKILL)
except CalledProcessError:
pass
class TestBcastDeadlock(unittest.TestCase):
def setup(self, gpu):
if gpu:
self.communicator = chainermn.create_communicator('flat')
self.device = self.communicator.intra_rank
chainer.cuda.get_device_from_id(self.device).use()
        else:
            # CPU fallback so that self.communicator is always defined.
            self.communicator = chainermn.create_communicator('naive')
            self.device = -1
if self.communicator.size < 2:
pytest.skip('This test is for at least two processes')
self.queue = queue.Queue(maxsize=1)
def tearDown(self):
pass
@chainer.testing.attr.gpu
def test_bcast_gpu_large_buffer_deadlock(self):
"""Regression test of Open MPI's issue #3972"""
self.setup(True)
buf_size = 10000
mpi_comm = self.communicator.mpi_comm
if self.communicator.rank == 0:
array = np.arange(buf_size, dtype=np.float32)
else:
array = np.empty(buf_size, dtype=np.float32)
array = chainer.cuda.to_gpu(array, device=self.device)
ptr = _memory_utility.array_to_buffer_object(array)
        # This Bcast() causes a deadlock if the underlying MPI has the bug.
th = _TimeoutThread(self.queue, self.communicator.rank)
th.start()
mpi_comm.Bcast(ptr, root=0)
mpi_comm.barrier()
self.queue.put(True)
assert True
| 3,453
| 31.584906
| 76
|
py
|
chainer
|
chainer-master/tests/chainermn_tests/communicator_tests/test_communicator.py
|
import mock
import mpi4py.MPI
import numpy as np
import pytest
import unittest
import chainer
import chainer.initializers
import chainer.links
import chainer.testing
import chainer.testing.attr
import chainermn
import chainerx
from chainermn.communicators import _communication_utility
from chainermn.communicators.flat_communicator \
import FlatCommunicator
from chainermn.communicators.naive_communicator \
import NaiveCommunicator
from chainermn.communicators.non_cuda_aware_communicator \
import NonCudaAwareCommunicator
from chainermn.communicators.pure_nccl_communicator \
import PureNcclCommunicator
from chainermn import nccl
import chainermn.testing
class ExampleModel(chainer.Chain):
def __init__(self, dtype=None):
W = None
bias = None
if dtype is not None:
self.dtype = dtype
W = chainer.initializers.Normal(dtype=self.dtype)
bias = chainer.initializers.Zero(dtype=self.dtype)
super(ExampleModel, self).__init__()
with self.init_scope():
self.a = chainer.links.Linear(2, 3, initialW=W, initial_bias=bias)
self.b = chainer.links.Linear(3, 4, initialW=W, initial_bias=bias)
self.c = chainer.links.Linear(None, 5, initialW=W,
initial_bias=bias)
class ExampleMixedModel(chainer.Chain):
def __init__(self):
W16 = chainer.initializers.Normal(dtype=np.float16)
W32 = chainer.initializers.Normal(dtype=np.float32)
bias16 = chainer.initializers.Zero(dtype=np.float16)
bias32 = chainer.initializers.Zero(dtype=np.float32)
super(ExampleMixedModel, self).__init__()
with self.init_scope():
self.a = chainer.links.Linear(2, 3, initialW=W32,
initial_bias=bias32)
self.b = chainer.links.Linear(3, 4, initialW=W16,
initial_bias=bias16)
self.c = chainer.links.Linear(None, 5, initialW=W16,
initial_bias=bias32)
class Param(object):
def __init__(self, param):
self.gpu = False
self.nccl1 = False
self.model_dtype = None
self.allreduce_grad_dtype = None
self.batched_copy = True
self.global_dtype = None
self.__dict__.update(param)
def __repr__(self):
import pprint
return pprint.pformat(self.__dict__)
cpu_params = [Param(p) for p in [
{
'communicator_class': NaiveCommunicator,
'multi_node': True,
}]]
gpu_params = [Param(p) for p in [
{
'communicator_class': NaiveCommunicator,
'multi_node': True,
}, {
'communicator_class': NaiveCommunicator,
'model_dtype': np.float16,
'multi_node': True,
}, {
'communicator_class': FlatCommunicator,
'multi_node': True,
}, {
'communicator_class': FlatCommunicator,
'model_dtype': np.float16,
'multi_node': True,
}, {
'communicator_class': NonCudaAwareCommunicator,
'multi_node': True,
}, {
'communicator_class': NonCudaAwareCommunicator,
'model_dtype': np.float16,
'multi_node': False,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'allreduce_grad_dtype': np.float16,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float16,
'allreduce_grad_dtype': np.float16,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float64,
'allreduce_grad_dtype': np.float64,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float16,
'allreduce_grad_dtype': np.float16,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float16,
'allreduce_grad_dtype': np.float32,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float32,
'allreduce_grad_dtype': np.float32,
}, {
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'nccl1': False,
'model_dtype': np.float32,
'allreduce_grad_dtype': np.float16,
}]]
gpu_mixed_dtype_params = [Param(p) for p in [
{
'communicator_class': NonCudaAwareCommunicator,
'multi_node': True,
}, {
'communicator_class': NaiveCommunicator,
'multi_node': True,
}, {
'communicator_class': FlatCommunicator,
'multi_node': True,
}
]]
for global_dtype in [np.float32, np.float16, chainer.mixed16, None]:
for allreduce_dtype in [np.float32, np.float16, None]:
if global_dtype is None and allreduce_dtype is None:
continue
for batched_copy in [True, False]:
gpu_mixed_dtype_params.append(Param({
'communicator_class': PureNcclCommunicator,
'multi_node': True,
'global_dtype': global_dtype,
'allreduce_grad_dtype': allreduce_dtype,
'batched_copy': batched_copy,
}))
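# The loop above appends 22 PureNcclCommunicator configurations:
# (4 global dtypes x 3 allreduce dtypes - 1 skipped all-None combination)
# x 2 batched_copy values.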
mpi_comm = mpi4py.MPI.COMM_WORLD
def create_communicator(param, use_gpu, use_chx):
if not param.multi_node:
ranks = _communication_utility.init_ranks(mpi_comm)
inter_size = ranks[4]
if inter_size > 1:
pytest.skip('This test is for single node only')
if use_gpu and not param.nccl1 and nccl.get_build_version() < 2000:
pytest.skip('This test requires NCCL version >= 2.0')
communicator = param.communicator_class(mpi_comm)
communicator.set_config('batched_copy', param.batched_copy)
value = communicator.get_config('batched_copy')
assert param.batched_copy == value
with pytest.raises(ValueError):
communicator.set_config('blah blah blah')
if param.communicator_class is PureNcclCommunicator:
communicator.set_config('allreduce_grad_dtype',
param.allreduce_grad_dtype)
value = communicator.get_config('allreduce_grad_dtype')
assert param.allreduce_grad_dtype == value
if use_gpu:
chainermn.testing.get_device(communicator.intra_rank, use_chx).use()
return communicator
def check_send_and_recv(communicator, *shape):
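    # Each rank receives from rank - 1 and sends to rank + 1, forming a
    # single pass down a chain (rank 0 only sends, the last rank only
    # receives).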
if communicator.size < 2:
pytest.skip('This test is for multiple nodes')
if communicator.rank > 0:
rank_prev = (communicator.rank - 1) % communicator.size
data_recv = communicator.recv(source=rank_prev, tag=0)
chainer.testing.assert_allclose(
data_recv, rank_prev * np.ones((shape)))
if communicator.rank < communicator.size - 1:
rank_next = (communicator.rank + 1) % communicator.size
data_send = communicator.rank * \
np.ones((shape)).astype(np.float32)
communicator.send(data_send, dest=rank_next, tag=0)
def check_send_and_recv_tuple(communicator, data):
if communicator.size < 2:
pytest.skip('This test is for multiple nodes')
if communicator.rank > 0:
rank_prev = (communicator.rank - 1) % communicator.size
data_recv = communicator.recv(source=rank_prev, tag=0)
for array0, array1 in zip(data, data_recv):
chainer.testing.assert_allclose(array0, array1)
if communicator.rank < communicator.size - 1:
rank_next = (communicator.rank + 1) % communicator.size
communicator.send(data, dest=rank_next, tag=0)
def check_bcast_data(communicator, model):
model.a.W.data[:] = communicator.rank
model.b.W.data[:] = communicator.rank + 1
model.c.b.data[:] = communicator.rank + 2
communicator.bcast_data(model)
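    # bcast_data broadcasts rank 0's parameters, so every rank must end up
    # with the values 0, 1 and 2 regardless of its own rank.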
chainer.testing.assert_allclose(model.a.W.data, 0 * np.ones((3, 2)))
chainer.testing.assert_allclose(model.b.W.data, 1 * np.ones((4, 3)))
chainer.testing.assert_allclose(model.c.b.data, 2 * np.ones((5, )))
def check_multi_node_mean_grad(communicator, model):
    # Repeat twice to catch regressions in the lazy initialization of
    # sub-communicators.
for _ in range(2):
model.a.W.grad[:] = communicator.rank
model.b.W.grad[:] = communicator.rank + 1
model.c.b.grad[:] = communicator.rank + 2
communicator.multi_node_mean_grad(model)
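        # Averaging the rank values 0..size-1 gives (size - 1) / 2, hence
        # `base` below.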
base = (communicator.size - 1.0) / 2
chainer.testing.assert_allclose(model.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(model.b.W.grad,
(base + 1) * np.ones((4, 3)))
chainer.testing.assert_allclose(model.c.b.grad,
(base + 2) * np.ones((5, )))
def check_multi_node_mean_grad_empty(communicator, model):
    # Repeat twice to catch regressions in the lazy initialization of
    # sub-communicators.
for _ in range(2):
model.a.W.grad[:] = communicator.rank
model.b.W.grad[:] = communicator.rank + 1
model.c.b.grad = None
communicator.multi_node_mean_grad(model)
base = (communicator.size - 1.0) / 2
chainer.testing.assert_allclose(model.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(model.b.W.grad,
(base + 1) * np.ones((4, 3)))
def check_multi_node_mean_grad_empty_half(communicator, model):
    # Repeat twice to catch regressions in the lazy initialization of
    # sub-communicators.
for _ in range(2):
model.a.W.data[:] = communicator.rank
model.b.W.data[:] = communicator.rank + 1
model.c.b.data[:] = communicator.rank + 2
model.a.W.grad[:] = communicator.rank
model.b.W.grad[:] = communicator.rank + 1
if communicator.rank % 2 == 0:
model.c.b.grad[:] = communicator.rank + 2
else:
model.c.b.grad = None
communicator.multi_node_mean_grad(model, zero_fill=True)
base = (communicator.size - 1.0) / 2
chainer.testing.assert_allclose(model.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(model.b.W.grad,
(base + 1) * np.ones((4, 3)))
v = 0.0
for i in range(communicator.size):
if i % 2 == 0:
v += i + 2
v /= communicator.size
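        # Worked example for size == 4: only even ranks contribute
        # (values 2 and 4), so v = (2 + 4) / 4 = 1.5; zero_fill counts the
        # missing gradients on odd ranks as zeros.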
chainer.testing.assert_allclose(model.c.b.grad,
v * np.ones((5, )))
def check_send_recv(param, use_gpu, use_chx=False):
communicator = create_communicator(param, use_gpu, use_chx)
assert mpi_comm.Get_rank() == communicator.rank
assert mpi_comm.Get_size() == communicator.size
check_send_and_recv(communicator, 50)
check_send_and_recv(communicator, 50, 20)
check_send_and_recv(communicator, 50, 20, 5)
check_send_and_recv(communicator, 50, 20, 5, 3)
data = [np.ones((50)).astype(np.float32)]
check_send_and_recv_tuple(communicator, data)
data = [
np.ones((50)).astype(np.float32),
np.ones((50, 20)).astype(np.float32),
np.ones((50, 20, 5)).astype(np.float32)]
check_send_and_recv_tuple(communicator, data)
communicator.finalize()
def check_multi_node_mean_grad_mixed_dtype(param, model, use_gpu, use_chx):
# Checks the actual allreduce communication is performed
# in the correct data type (FP16 or FP32)
comm_class = param.communicator_class
if not param.multi_node:
ranks = _communication_utility.init_ranks(mpi_comm)
inter_size = ranks[4]
if inter_size > 1:
pytest.skip('This test is for single node only')
communicator = comm_class(mpi_comm)
communicator.set_config('batched_copy', param.batched_copy)
if comm_class is PureNcclCommunicator:
communicator.set_config('allreduce_grad_dtype',
param.allreduce_grad_dtype)
value = communicator.get_config('allreduce_grad_dtype')
assert param.allreduce_grad_dtype == value
value = communicator.allreduce_grad_dtype
assert param.allreduce_grad_dtype == value
mpi_comm.barrier()
    # expected dtype: see the documentation of `create_communicator`
global_dtype = param.global_dtype
allreduce_dtype = param.allreduce_grad_dtype
# assert test configuration.
assert chainer.get_dtype() == global_dtype
answer_dtype = None
if allreduce_dtype == np.float16:
answer_dtype = np.float16
elif allreduce_dtype == np.float32:
answer_dtype = np.float32
else:
if global_dtype == np.float32:
answer_dtype = np.float32
else:
answer_dtype = np.float16
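    # In short: an explicit allreduce_grad_dtype wins; otherwise the
    # reduction runs in FP32 only when the global dtype is FP32, and in
    # FP16 for every other global dtype.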
if use_gpu:
device = chainermn.testing.get_device(communicator.intra_rank,
use_chainerx=use_chx)
model.to_device(device)
model.a.W.grad[:] = communicator.rank
model.b.W.grad[:] = communicator.rank + 1
model.c.b.grad[:] = communicator.rank + 2
if isinstance(communicator, PureNcclCommunicator):
communicator._init_comms()
with mock.patch.object(communicator, 'nccl_comm',
wraps=communicator.nccl_comm) as mc:
answer_dtype = _communication_utility._get_nccl_type_id(
answer_dtype)
communicator.multi_node_mean_grad(model)
# dtype that was used in the actual communication,
# which is nccl_comm.allReduce
call_args = mc.allReduce.call_args[0]
actual_dtype = call_args[3]
assert answer_dtype == actual_dtype
else:
# For other MPI-based communicators,
# all communication should happen in FP32 as of now, so
# here we just check the results are correct for
# 16-32 mixed models.
communicator.multi_node_mean_grad(model)
base = (communicator.size - 1.0) / 2
chainer.testing.assert_allclose(model.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(model.b.W.grad,
(base + 1) * np.ones((4, 3)))
mpi_comm.barrier()
communicator.finalize()
def check_collective_communication(param, use_gpu, use_chx):
communicator = create_communicator(param, use_gpu, use_chx)
mpi_comm.barrier()
model = ExampleModel(param.model_dtype)
if use_gpu:
device = chainermn.testing.get_device(communicator.intra_rank, use_chx)
else:
device = chainermn.testing.get_device(use_chainerx=use_chx)
model.to_device(device)
check_bcast_data(communicator, model)
model = ExampleModel(param.model_dtype)
model.to_device(device)
check_multi_node_mean_grad(communicator, model)
model = ExampleModel(param.model_dtype)
model.to_device(device)
check_multi_node_mean_grad_empty(communicator, model)
model = ExampleModel(param.model_dtype)
model.to_device(device)
check_multi_node_mean_grad_empty_half(communicator, model)
# Check allreduce debug mode
model = ExampleModel()
model.to_device(device)
# The example model includes some nan parameters so the debug mode
# must detect it.
chainer.set_debug(True)
with pytest.raises(ValueError, match=r'.* diverged .*'):
check_multi_node_mean_grad(communicator, model)
chainer.set_debug(False)
    # barrier() is required before the destructor of PureNcclCommunicator
    # because communication may not have finished yet.
mpi_comm.barrier()
communicator.finalize()
# chainer.testing.parameterize cannot be applied to plain functions
@pytest.mark.parametrize('param', cpu_params)
@pytest.mark.parametrize('use_chx', [True, False])
def test_communicator_cpu(param, use_chx):
check_send_recv(param, False, use_chx)
check_collective_communication(param, False, use_chx)
@pytest.mark.parametrize('param', gpu_params)
@pytest.mark.parametrize('use_chx', [True, False])
@chainer.testing.attr.gpu
def test_communicator_gpu(param, use_chx):
check_send_recv(param, True)
check_collective_communication(param, True, use_chx)
@pytest.mark.parametrize('param', gpu_mixed_dtype_params)
@pytest.mark.parametrize('use_chx', [True, False])
@chainer.testing.attr.gpu
def test_mixed_dtype_communicator_gpu(param, use_chx):
model = ExampleMixedModel()
with chainer.using_config('dtype', param.global_dtype):
check_multi_node_mean_grad_mixed_dtype(param, model, True, use_chx)
class TestPureNcclCommunicator(unittest.TestCase):
def setUp(self):
if nccl.get_build_version() < 2000:
pytest.skip('This test requires NCCL version >= 2.0')
self.mpi_comm = mpi4py.MPI.COMM_WORLD
@chainer.testing.attr.gpu
def test_invalid_allreduce_grad_dtype(self):
with self.assertRaises(ValueError):
comm = PureNcclCommunicator(self.mpi_comm)
comm.set_config('allreduce_grad_dtype', np.int32)
@chainer.testing.attr.gpu
def test_finalize(self):
communicator = PureNcclCommunicator(self.mpi_comm)
communicator._init_comms()
communicator.finalize()
self.assertIsNone(communicator.nccl_comm)
class TestNonCudaAwareCommunicator(unittest.TestCase):
def setUp(self):
if nccl.get_build_version() < 2000:
pytest.skip('This test requires NCCL version >= 2.0')
self.mpi_comm = mpi4py.MPI.COMM_WORLD
@chainer.testing.attr.gpu
def test_finalize(self):
communicator = NonCudaAwareCommunicator(self.mpi_comm)
communicator._init_comms()
communicator.finalize()
self.assertIsNone(communicator.intra_nccl_comm)
class TestDifferentDtype(unittest.TestCase):
def setup(self, gpu):
if gpu:
self.communicator = chainermn.create_communicator('flat')
self.device = self.communicator.intra_rank
chainer.cuda.get_device_from_id(self.device).use()
else:
self.communicator = chainermn.create_communicator('naive')
self.device = -1
if self.communicator.size != 2:
pytest.skip('This test is for two processes')
# dtypes to be tested
# DO NOT USE chainer.testing.parameterize
        # (because the running order of generated test cases is not
        # deterministic)
self.dtypes = [np.int32, np.int64, np.float32, np.float64]
def teardown(self):
if self.communicator:
self.communicator.finalize()
def check_send_recv(self, x):
if self.communicator.rank == 0:
self.communicator.send(x, dest=1, tag=0)
y = x
elif self.communicator.rank == 1:
y = self.communicator.recv(source=0, tag=0)
chainer.testing.assert_allclose(y, x)
def test_send_recv_cpu(self):
self.setup(False)
for dtype in self.dtypes:
x = np.arange(18).astype(dtype)
self.check_send_recv(x)
x = np.array(1).astype(dtype)
self.check_send_recv(x)
self.teardown()
@chainer.testing.attr.gpu
def test_send_recv_gpu(self):
self.setup(True)
for dtype in self.dtypes:
x = np.arange(18).astype(dtype)
x = chainer.cuda.to_gpu(x, device=self.device)
self.check_send_recv(x)
self.teardown()
def check_alltoall(self, xs):
x = xs[self.communicator.rank]
ys = self.communicator.alltoall(
tuple([x for _ in range(self.communicator.size)]))
for x, y in zip(xs, ys):
chainer.testing.assert_allclose(x, y)
def test_alltoall_cpu(self):
self.setup(False)
for dtype in self.dtypes:
xs = np.arange(4 * self.communicator.size) \
.reshape(self.communicator.size, 4) \
.astype(dtype)
xs = np.split(xs, self.communicator.size)
self.check_alltoall(xs)
xs = [np.array(1).astype(dtype)] * 4
self.check_alltoall(xs)
self.teardown()
@chainer.testing.attr.gpu
def test_alltoall_gpu(self):
self.setup(True)
for dtype in self.dtypes:
xs = np.arange(4 * self.communicator.size) \
.reshape(self.communicator.size, 4) \
.astype(dtype)
xs = np.split(xs, self.communicator.size)
xs = [chainer.cuda.to_gpu(x, device=self.device) for x in xs]
self.check_alltoall(xs)
xs = [np.array(1).astype(dtype)] * 4
xs = [chainer.cuda.to_gpu(x, device=self.device) for x in xs]
self.check_alltoall(xs)
self.teardown()
def check_allgather(self, xs):
x = xs[self.communicator.rank]
ys = self.communicator.allgather(x)
for x, y in zip(xs, ys):
chainer.testing.assert_allclose(x, y)
def test_allgather_cpu(self):
self.setup(False)
for dtype in self.dtypes:
xs = np.arange(4 * self.communicator.size) \
.reshape(self.communicator.size, 4) \
.astype(dtype)
xs = np.split(xs, self.communicator.size)
self.check_allgather(xs)
x = np.array(1).astype(dtype)
ys = self.communicator.allgather(x)
for y in ys:
chainer.testing.assert_allclose(x, y)
self.teardown()
@chainer.testing.attr.gpu
def test_allgather_gpu(self):
self.setup(True)
for dtype in self.dtypes:
xs = np.arange(4 * self.communicator.size) \
.reshape(self.communicator.size, 4) \
.astype(dtype)
xs = np.split(xs, self.communicator.size)
xs = [chainer.cuda.to_gpu(x, device=self.device) for x in xs]
self.check_allgather(xs)
x = np.array(1).astype(dtype)
x = chainer.cuda.to_gpu(x, device=self.device)
ys = self.communicator.allgather(x)
for y in ys:
chainer.testing.assert_allclose(x, y)
self.teardown()
def check_bcast(self, x):
if self.communicator.rank == 0:
y = self.communicator.bcast(x, root=0)
else:
y = self.communicator.bcast(None, root=0)
chainer.testing.assert_allclose(x, y)
def test_bcast_cpu(self):
self.setup(False)
for dtype in self.dtypes:
x = np.arange(4).astype(dtype)
self.check_bcast(x)
x = np.array(42).astype(dtype)
y = self.communicator.bcast(x)
chainer.testing.assert_allclose(x, y)
self.teardown()
@chainer.testing.attr.gpu
def test_bcast_gpu(self):
self.setup(True)
for dtype in self.dtypes:
x = np.arange(4).astype(dtype)
x = chainer.cuda.to_gpu(x, device=self.device)
self.check_bcast(x)
x = np.array(42).astype(dtype)
x = chainer.cuda.to_gpu(x, device=self.device)
y = self.communicator.bcast(x)
chainer.testing.assert_allclose(x, y)
self.teardown()
def check_gather(self, xs, x1, ans):
x = xs[self.communicator.rank]
ys = self.communicator.gather(x, root=0)
if self.communicator.rank == 0:
for x, y in zip(xs, ys):
chainer.testing.assert_allclose(x, y)
ys = self.communicator.gather(x1, root=0)
if self.communicator.rank == 0:
for a, y in zip(ans, ys):
chainer.testing.assert_allclose(a, y)
def test_gather_cpu(self):
self.setup(False)
for dtype in self.dtypes:
xs = np.arange(4 * self.communicator.size) \
.reshape(self.communicator.size, 4) \
.astype(dtype)
xs = np.split(xs, self.communicator.size)
x = np.array(self.communicator.rank).astype(dtype)
ans = np.arange(self.communicator.size, dtype=dtype)
self.check_gather(xs, x, ans)
self.teardown()
@chainer.testing.attr.gpu
def test_gather_gpu(self):
self.setup(True)
for dtype in self.dtypes:
xs = np.arange(4 * self.communicator.size) \
.reshape(self.communicator.size, 4) \
.astype(dtype)
xs = np.split(xs, self.communicator.size)
xs = [chainer.cuda.to_gpu(x, device=self.device) for x in xs]
x = np.array(self.communicator.rank).astype(dtype)
x = chainer.cuda.to_gpu(x, device=self.device)
ans = np.arange(self.communicator.size, dtype=dtype)
self.check_gather(xs, x, ans)
self.teardown()
def check_scatter(self, xs):
x = xs[self.communicator.rank]
if self.communicator.rank == 0:
y = self.communicator.scatter(xs, root=0)
else:
y = self.communicator.scatter(None, root=0)
chainer.testing.assert_allclose(x, y)
def test_scatter_cpu(self):
self.setup(False)
for dtype in self.dtypes:
xs = np.arange(4 * self.communicator.size) \
.reshape(self.communicator.size, 4) \
.astype(dtype)
xs = np.split(xs, self.communicator.size)
self.check_scatter(xs)
x = np.array(42).astype(dtype)
xs = [x] * self.communicator.size
y = self.communicator.scatter(xs)
chainer.testing.assert_allclose(x, y)
self.teardown()
@chainer.testing.attr.gpu
def test_scatter_gpu(self):
self.setup(True)
for dtype in self.dtypes:
xs = np.arange(4 * self.communicator.size) \
.reshape(self.communicator.size, 4) \
.astype(dtype)
xs = np.split(xs, self.communicator.size)
xs = [chainer.cuda.to_gpu(x, device=self.device) for x in xs]
self.check_scatter(xs)
x = np.array(42).astype(dtype)
x = chainer.cuda.to_gpu(x, device=self.device)
xs = [x] * self.communicator.size
y = self.communicator.scatter(xs)
chainer.testing.assert_allclose(x, y)
self.teardown()
def check_allreduce(self, x, dtype, n):
x = self.communicator.allreduce(x)
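        # Each rank contributed (arange(n) + rank), so the element-wise sum
        # over all ranks is size * arange(n) + sum(range(size)); the expected
        # array below is built from exactly that identity.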
s = sum(range(self.communicator.size))
y = np.arange(n) * self.communicator.size + s
y = y.astype(dtype)
chainer.testing.assert_allclose(y, x)
def test_allreduce_cpu(self):
self.setup(False)
for dtype in self.dtypes:
for n in [1, 18, 32]:
x = np.arange(n) + self.communicator.rank
x = x.astype(dtype)
self.check_allreduce(x, dtype, n)
x = np.array(1).astype(dtype)
y = self.communicator.allreduce(x)
a = x * self.communicator.size
chainer.testing.assert_allclose(a, y)
self.teardown()
@chainer.testing.attr.gpu
def test_allreduce_gpu(self):
self.setup(True)
for dtype in self.dtypes:
x = np.arange(18) + self.communicator.rank
x = x.astype(dtype)
x = chainer.cuda.to_gpu(x, device=self.device)
self.check_allreduce(x, dtype, 18)
x = np.array(1).astype(dtype)
y = self.communicator.allreduce(x)
a = x * self.communicator.size
chainer.testing.assert_allclose(a, y)
self.teardown()
class TestNonContiguousArray(unittest.TestCase):
def setup(self, gpu):
if gpu:
self.communicator = chainermn.create_communicator('flat')
self.device = self.communicator.intra_rank
chainer.cuda.get_device_from_id(self.device).use()
else:
self.communicator = chainermn.create_communicator('naive')
self.device = -1
if self.communicator.size != 2:
pytest.skip('This test is for two processes')
def teardown(self):
if self.communicator:
self.communicator.finalize()
def check_send(self):
if self.communicator.rank == 0:
x = np.arange(18).reshape(3, 3, 2).astype(np.float32)
            # the slicing operator destroys both C- and Fortran-contiguity
self.communicator.send(x[:, 1, :], dest=1, tag=0)
elif self.communicator.rank == 1:
self.communicator.recv(source=0, tag=0)
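        # A minimal sketch of why the slice above is non-contiguous
        # (illustrative values, not part of the test):
        #   x = np.arange(18).reshape(3, 3, 2).astype(np.float32)
        #   x[:, 1, :].flags['C_CONTIGUOUS']  # -> False
        #   x[:, 1, :].flags['F_CONTIGUOUS']  # -> False
        # The communicator is expected to cope with such buffers, e.g. by
        # copying them into a contiguous temporary before the MPI call.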
def test_send_cpu(self):
self.setup(False)
self.check_send()
self.teardown()
@chainer.testing.attr.gpu
def test_send_gpu(self):
self.setup(True)
self.check_send()
self.teardown()
def check_alltoall(self):
self.setup(False)
x = np.arange(18).reshape(3, 3, 2).astype(np.float32)
        # the slicing operator destroys both C- and Fortran-contiguity
x = x[:, 1, :]
xs = (x, x)
self.communicator.alltoall(xs)
self.teardown()
def test_alltoall_cpu(self):
self.setup(False)
self.check_alltoall()
self.teardown()
@chainer.testing.attr.gpu
def test_alltoall_gpu(self):
self.setup(True)
self.check_alltoall()
self.teardown()
class TestMpiCommunicatorBase(unittest.TestCase):
def setup(self):
self.communicator = chainermn.create_communicator('naive')
if self.communicator.size != 2:
pytest.skip('This test is for two processes')
def teardown(self):
if self.communicator:
self.communicator.finalize()
def check_send_recv_obj(self, x, tag=0,
use_any_recv=True, use_status=False):
if self.communicator.rank == 0:
self.communicator.send_obj(x, dest=1, tag=tag)
y = x
elif self.communicator.rank == 1:
status = None
if use_status:
status = mpi4py.MPI.Status()
if use_any_recv:
y = self.communicator.recv_obj(source=0,
status=status)
else:
y = self.communicator.recv_obj(source=0,
tag=tag,
status=status)
if use_status:
status_src = status.Get_source()
self.assertEqual(0, status_src)
status_tag = status.Get_tag()
self.assertEqual(tag, status_tag)
self.assertEqual(x, y)
def test_send_recv_obj(self):
self.setup()
self.check_send_recv_obj(0)
self.check_send_recv_obj(1, tag=1)
self.check_send_recv_obj(2, tag=2, use_any_recv=False)
self.check_send_recv_obj(3, use_status=True)
self.check_send_recv_obj(4, tag=4, use_status=True)
self.check_send_recv_obj(5, tag=5, use_any_recv=False, use_status=True)
self.teardown()
def test_send_recv_obj_chx_cpu(self):
self.setup()
with chainerx.using_device("native"):
chx_array = chainerx.array([0])
self.check_send_recv_obj(chx_array)
chx_array = chainerx.array([1])
self.check_send_recv_obj(chx_array, tag=1)
chx_array = chainerx.array([2])
self.check_send_recv_obj(chx_array, tag=2, use_any_recv=False)
self.teardown()
@chainer.testing.attr.gpu
def test_send_obj_chx_gpu(self):
self.setup()
rank_next = (self.communicator.rank + 1) % self.communicator.size
with chainerx.using_device("cuda"):
chx_array = chainerx.array([0])
with pytest.raises(ValueError):
self.communicator.send_obj(chx_array, dest=rank_next)
chx_array_list = [[0], chainerx.array([1])]
with pytest.raises(ValueError):
self.communicator.send_obj(chx_array_list, dest=rank_next)
chx_array_tuple = (0, chainerx.array([2]))
with pytest.raises(ValueError):
self.communicator.send_obj(chx_array_tuple, dest=rank_next)
chx_array_dict_value = {0: chainerx.array([2])}
with pytest.raises(ValueError):
self.communicator.send_obj(chx_array_dict_value,
dest=rank_next)
chx_array_dict_key = {chainerx.array([2]): 0}
with pytest.raises(ValueError):
self.communicator.send_obj(chx_array_dict_key, dest=rank_next)
chx_array_dict_set = {chainerx.array([2]), 0}
with pytest.raises(ValueError):
self.communicator.send_obj(chx_array_dict_set, dest=rank_next)
self.teardown()
@chainer.testing.attr.gpu
def test_collective_obj_chx_gpu(self):
self.setup()
test_function_list = [self.communicator.gather_obj,
self.communicator.bcast_obj,
self.communicator.allreduce_obj]
with chainerx.using_device("cuda"):
for func in test_function_list:
chx_array = chainerx.array([0])
with pytest.raises(ValueError):
func(chx_array)
chx_array_list = [[0], chainerx.array([1])]
with pytest.raises(ValueError):
func(chx_array_list)
chx_array_tuple = (0, chainerx.array([2]))
with pytest.raises(ValueError):
func(chx_array_tuple)
chx_array_dict_value = {0: chainerx.array([2])}
with pytest.raises(ValueError):
func(chx_array_dict_value)
chx_array_dict_key = {chainerx.array([2]): 0}
with pytest.raises(ValueError):
func(chx_array_dict_key)
chx_array_dict_set = {chainerx.array([2]), 0}
with pytest.raises(ValueError):
func(chx_array_dict_set)
self.teardown()
def test_config(self):
self.setup()
assert self.communicator.batched_copy
assert self.communicator.get_config('batched_copy')
self.communicator.set_config('batched_copy', False)
assert not self.communicator.batched_copy
assert not self.communicator.get_config('batched_copy')
self.communicator.set_config('batched_copy')
assert self.communicator.batched_copy
assert self.communicator.get_config('batched_copy')
def test_config_context(self):
self.setup()
        # Although this is not an external interface, it should be tested
with self.communicator.config_scope():
self.communicator.foobar = '0xdeadbeef'
assert '0xdeadbeef' == self.communicator._configs['foobar']
| 35,331 | 33.537634 | 79 | py | chainer | chainer-master/tests/chainermn_tests/communicator_tests/test_communication_utility.py |
import mpi4py
import numpy as np
import pytest
import unittest
from chainermn.communicators._communication_utility import chunked_bcast_obj # NOQA
from chainermn.communicators._communication_utility import INT_MAX # NOQA
from chainermn.communicators.naive_communicator import NaiveCommunicator
class TestCommunicationUtility(unittest.TestCase):
def setUp(self):
self.mpi_comm = mpi4py.MPI.COMM_WORLD
self.communicator = NaiveCommunicator(self.mpi_comm)
def test_chunked_bcast_objs(self):
# success
for (s, l) in [(10, 1), (1024, 7), (355678, 2378), (234, INT_MAX - 1)]:
self.check_chunked_bcast_obj(s, l)
# fail
for (s, l) in [(200, -1), (23, INT_MAX)]:
with pytest.raises(AssertionError):
self.check_chunked_bcast_obj(s, l)
def check_chunked_bcast_obj(self, data_size, max_buf_len):
root = 0
obj = np.arange(data_size)
src = None
if self.communicator.rank == root:
src = obj
dst = chunked_bcast_obj(src, self.communicator.mpi_comm,
max_buf_len, root)
assert len(dst) == len(obj)
for i in range(len(obj)):
assert dst[i] == obj[i]
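# A sketch of the chunking contract exercised above (inferred from the test
# parameters, not from the implementation): the serialized object is
# broadcast in pieces of at most max_buf_len bytes, so max_buf_len must be
# a positive value below INT_MAX; out-of-range values trigger the
# AssertionError checked in test_chunked_bcast_objs.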
| 1,250 | 32.810811 | 84 | py | chainer | chainer-master/tests/chainermn_tests/communicator_tests/test_node_aware_communicator_base.py |
import collections
import os
import unittest
import mpi4py.MPI
import pytest
from chainermn.communicators.mpi_communicator_base import MpiCommunicatorBase
class NodeAwareNaiveCommunicator(MpiCommunicatorBase):
def __init__(self, mpi_comm):
super(NodeAwareNaiveCommunicator, self).__init__(mpi_comm)
def multi_node_mean_grad(self, model):
raise NotImplementedError()
class TestMpiCommunicatorBase(unittest.TestCase):
def setUp(self):
self.mpi_comm = mpi4py.MPI.COMM_WORLD
self.communicator = NodeAwareNaiveCommunicator(self.mpi_comm)
def test_intra_rank_with_env(self):
if 'MV2_COMM_WORLD_LOCAL_RANK' in os.environ: # MVAPICH
expected = int(os.environ['MV2_COMM_WORLD_LOCAL_RANK'])
elif 'OMPI_COMM_WORLD_LOCAL_RANK' in os.environ: # OpenMPI
expected = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
else:
pytest.skip('No MPI specified')
self.assertEqual(self.communicator.intra_rank, expected)
def test_intra_size_with_env(self):
        if 'MV2_COMM_WORLD_LOCAL_SIZE' in os.environ:  # MVAPICH
            expected = int(os.environ['MV2_COMM_WORLD_LOCAL_SIZE'])
        elif 'OMPI_COMM_WORLD_LOCAL_SIZE' in os.environ:  # OpenMPI
            expected = int(os.environ['OMPI_COMM_WORLD_LOCAL_SIZE'])
        else:
            pytest.skip('No MPI specified')
        self.assertEqual(self.communicator.intra_size, expected)
def test_inter_rank_and_size(self):
ranks_and_sizes = self.mpi_comm.gather((
self.communicator.inter_rank, self.communicator.inter_size))
if self.mpi_comm.rank == 0:
for inter_rank, inter_size in ranks_and_sizes:
self.assertTrue(0 <= inter_rank < inter_size)
sizes = list(set(x[1] for x in ranks_and_sizes))
self.assertEqual(len(sizes), 1)
size = sizes[0]
ranks = list(sorted(set(x[0] for x in ranks_and_sizes)))
self.assertEqual(ranks, list(range(size)))
def test_intra_rank_and_size(self):
ranks_and_sizes = self.mpi_comm.gather((
self.communicator.intra_rank, self.communicator.intra_size,
self.communicator.inter_rank, self.communicator.inter_size))
if self.mpi_comm.rank == 0:
for intra_rank, intra_size, _, _ in ranks_and_sizes:
self.assertTrue(0 <= intra_rank < intra_size)
inter_rank_to_intra_ranks = collections.defaultdict(list)
for intra_rank, _, inter_rank, _ in ranks_and_sizes:
inter_rank_to_intra_ranks[inter_rank].append(intra_rank)
for ranks in inter_rank_to_intra_ranks.values():
ranks.sort()
for _, intra_size, inter_rank, _ in ranks_and_sizes:
self.assertEqual(
inter_rank_to_intra_ranks[inter_rank],
list(range(intra_size)))
| 2,939 | 35.296296 | 77 | py | chainer | chainer-master/tests/chainermn_tests/functions_tests/test_collective_communication.py |
import chainer
import chainer.testing
import chainer.testing.attr
import numpy
import pytest
import chainermn
import chainermn.functions
class Param(object):
def __init__(self, param):
self.dtype = None
self.__dict__.update(param)
params = [Param(p) for p in [
{
'dtype': numpy.float16,
}, {
'dtype': numpy.float32,
}, {
'dtype': chainer.mixed16,
}]]
def get_communicator(gpu):
numpy.random.seed(42)
if gpu:
communicator = chainermn.create_communicator('flat')
device = communicator.intra_rank
chainer.cuda.get_device_from_id(device).use()
else:
communicator = chainermn.create_communicator('naive')
if communicator.size < 2:
pytest.skip('This test is for multinode')
return communicator
def check_all_gather(xs, communicator):
x = xs[communicator.rank]
ys = chainermn.functions.allgather(communicator, x)
e = 0
for i, y in enumerate(ys):
e += chainer.functions.mean_squared_error(y, xs[i])
e.backward()
    # Check that backward does not deadlock and that the error is 0.
assert 0 == e.data
assert None is not x.grad
@pytest.mark.parametrize('param', params)
def test_all_gather_cpu(param):
communicator = get_communicator(False)
with chainer.using_config('dtype', param.dtype):
xs = [chainer.Variable(
numpy.random.normal(size=(10, i + 1)).astype(param.dtype))
for i in range(communicator.size)]
check_all_gather(xs, communicator)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_all_gather_gpu(param):
communicator = get_communicator(True)
with chainer.using_config('dtype', param.dtype):
xs = [chainer.Variable(
numpy.random.normal(size=(10, i + 1)).astype(param.dtype))
for i in range(communicator.size)]
for x in xs:
x.to_gpu()
check_all_gather(xs, communicator)
def check_all_to_all(xs, communicator):
ys = chainermn.functions.alltoall(communicator, xs)
y = chainer.functions.sum(ys[0])
for _y in ys[1:]:
y += chainer.functions.sum(_y)
y.backward()
# Check if gradients are passed back without deadlock.
assert None is not xs[0].grad
@pytest.mark.parametrize('param', params)
def test_all_to_all_cpu(param):
communicator = get_communicator(False)
with chainer.using_config('dtype', param.dtype):
data = [
chainer.Variable(numpy.zeros(
(communicator.rank, i), dtype=param.dtype))
for i in range(communicator.size)]
check_all_to_all(data, communicator)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_all_to_all_gpu(param):
communicator = get_communicator(True)
with chainer.using_config('dtype', param.dtype):
data = [chainer.Variable(
numpy.zeros((communicator.rank + 1, i + 1), dtype=param.dtype))
for i in range(communicator.size)]
for x in data:
x.to_gpu()
check_all_to_all(data, communicator)
def check_bcast(x, communicator):
root = 0
if communicator.rank == root:
y = chainermn.functions.bcast(
communicator, x, root)
else:
y = chainermn.functions.bcast(
communicator, None, root)
e = chainer.functions.mean_squared_error(y, x)
e.backward()
    # Check that backward does not deadlock and that the error is 0 on root.
if communicator.rank == root:
assert 0 == e.data
assert None is not x.grad
@pytest.mark.parametrize('param', params)
def test_bcast_cpu(param):
communicator = get_communicator(False)
with chainer.using_config('dtype', param.dtype):
x = chainer.Variable(
numpy.random.normal(size=(100, 100)).astype(param.dtype))
check_bcast(x, communicator)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_bcast_gpu(param):
communicator = get_communicator(True)
with chainer.using_config('dtype', param.dtype):
x = chainer.Variable(
numpy.random.normal(size=(100, 100)).astype(param.dtype))
x.to_gpu()
check_bcast(x, communicator)
def check_gather(xs, communicator):
root = 0
# All processes receive the same xs since seed is fixed.
x = xs[communicator.rank]
if communicator.rank == root:
ys = chainermn.functions.gather(
communicator, x, root)
e = 0
for i, y in enumerate(ys):
e += chainer.functions.mean_squared_error(y, xs[i])
e.backward()
# Check error = 0.
assert 0 == e.data
else:
phi = chainermn.functions.gather(
communicator, x, root)
phi.backward()
        # Check that backward does not deadlock.
assert None is not x.grad
@pytest.mark.parametrize('param', params)
def test_gather_cpu(param):
communicator = get_communicator(False)
with chainer.using_config('dtype', param.dtype):
xs = [chainer.Variable(
numpy.random.normal(size=(100, 100)).astype(param.dtype))
for _ in range(communicator.size)]
check_gather(xs, communicator)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_gather_gpu(param):
communicator = get_communicator(True)
with chainer.using_config('dtype', param.dtype):
xs = [chainer.Variable(
numpy.random.normal(size=(100, 100)).astype(param.dtype))
for _ in range(communicator.size)]
for x in xs:
x.to_gpu()
check_gather(xs, communicator)
@pytest.mark.parametrize('param', params)
def test_gatherv_cpu(param):
communicator = get_communicator(False)
with chainer.using_config('dtype', param.dtype):
xs = [chainer.Variable(
numpy.random.normal(size=(i + 1, i + 1)).astype(param.dtype))
for i in range(communicator.size)]
check_gather(xs, communicator)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_gatherv_gpu(param):
communicator = get_communicator(True)
with chainer.using_config('dtype', param.dtype):
xs = [chainer.Variable(
numpy.random.normal(size=(i + 1, i + 1)).astype(param.dtype))
for i in range(communicator.size)]
for x in xs:
x.to_gpu()
check_gather(xs, communicator)
def check_scatter(xs, communicator):
# All processes receive the same xs since seed is fixed.
root = 0
y = chainermn.functions.scatter(
communicator,
xs if communicator.rank == root else None,
root)
x = xs[communicator.rank]
e = chainer.functions.mean_squared_error(y, x)
e.backward()
    # Check that backward does not deadlock and that the error is 0.
assert 0 == e.data
assert None is not x.grad
@pytest.mark.parametrize('param', params)
def test_scatter_cpu(param):
communicator = get_communicator(False)
with chainer.using_config('dtype', param.dtype):
xs = [chainer.Variable(
numpy.random.normal(size=(100, 100)).astype(param.dtype))
for _ in range(communicator.size)]
check_scatter(xs, communicator)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_scatter_gpu(param):
communicator = get_communicator(True)
with chainer.using_config('dtype', param.dtype):
xs = [chainer.Variable(
numpy.random.normal(size=(100, 100)).astype(param.dtype))
for _ in range(communicator.size)]
for x in xs:
x.to_gpu()
check_scatter(xs, communicator)
@pytest.mark.parametrize('param', params)
def test_scatterv_cpu(param):
communicator = get_communicator(False)
with chainer.using_config('dtype', param.dtype):
xs = [chainer.Variable(
numpy.random.normal(size=(i + 1, i + 1)).astype(param.dtype))
for i in range(communicator.size)]
check_scatter(xs, communicator)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_scatterv_gpu(param):
communicator = get_communicator(True)
with chainer.using_config('dtype', param.dtype):
xs = [chainer.Variable(
numpy.random.normal(size=(i + 1, i + 1)).astype(param.dtype))
for i in range(communicator.size)]
for x in xs:
x.to_gpu()
check_scatter(xs, communicator)
| 8,507 | 27.454849 | 79 | py | chainer | chainer-master/tests/chainermn_tests/functions_tests/test_point_to_point_communication.py |
import functools
import chainer
import copy
from chainer.backends.cuda import cupy
import chainer.testing
import chainer.testing.attr
import numpy
import pytest
import chainermn
from chainer.functions import sigmoid
from chainer.functions import mean_squared_error as mse
class Param(object):
def __init__(self, param):
self.dtype = None
self.__dict__.update(param)
params = [Param(p) for p in [
{
'dtype': numpy.float16,
}, {
'dtype': numpy.float32,
}, {
'dtype': chainer.mixed16,
}]]
function = sigmoid
evaluation = mse
def create_communicator(gpu, param):
if gpu:
communicator = chainermn.create_communicator('flat')
device = communicator.intra_rank
chainer.cuda.get_device_from_id(device).use()
else:
communicator = chainermn.create_communicator('naive')
if communicator.size < 2:
pytest.skip('This test is for multinode')
return communicator
def create_x(gpu, param, communicator):
x = chainer.Variable(
numpy.arange(10).reshape(1, 10).astype(param.dtype) / 10)
if gpu:
x.to_gpu()
return x
def create_models(gpu, param, communicator):
model = chainer.links.Linear(
10, 10, initialW=_init_w(communicator.rank))
entire_model = [chainer.links.Linear(
10, 10, initialW=_init_w(l))
for l in range(communicator.size)]
if gpu:
device = cupy.cuda.Device()
model.to_device(device)
for model_ in entire_model:
model_.to_device(device)
return (model, entire_model)
def _init_w(l):
return 1.0 * numpy.arange(100).reshape(10, 10) \
/ ((l + 1) * 100)
def check_communication(gpu, param):
with chainer.using_config('dtype', param.dtype):
communicator = create_communicator(gpu, param)
rank_send = (communicator.rank + 1) % communicator.size
rank_recv = (communicator.rank - 1) % communicator.size
x = create_x(gpu, param, communicator)
(model, entire_model) = create_models(gpu, param, communicator)
if communicator.rank == 0:
# Input process.
y = function(model(x))
err = chainermn.functions.send(
y, communicator, rank_send)
err.backward()
grad = model.W.grad
# Compute the expected gradient.
x_ = x
for l in range(communicator.size):
x_ = function(entire_model[l](x_))
err_ = evaluation(x_, x)
err_.backward()
grad_expected = entire_model[0].W.grad
chainer.testing.assert_allclose(grad, grad_expected)
elif communicator.rank == communicator.size - 1:
# Output process.
x_ = chainermn.functions.recv(communicator, rank_recv)
y = function(model(x_))
err = evaluation(y, x)
err.backward()
# Compute the expected output.
x_ = x
for l in range(communicator.size):
x_ = function(entire_model[l](x_))
y_expect = x_
chainer.testing.assert_allclose(y.data, y_expect.data)
else:
# Intermediate processes.
x_ = chainermn.functions.recv(communicator, rank_recv)
y = function(model(x_))
err = chainermn.functions.send(y, communicator, rank_send)
err.backward()
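# Data-flow sketch for check_communication above (ranks form a chain):
#   rank 0:              x -> model -> send
#   rank 0 < k < N - 1:  recv -> model -> send
#   rank N - 1:          recv -> model -> loss against x
# The backward pass travels the same chain in reverse through the variables
# returned by send/recv, which is what the gradient comparison verifies.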
@pytest.mark.parametrize('param', params)
def test_communication_cpu(param):
check_communication(False, param)
@chainer.testing.attr.gpu
@pytest.mark.parametrize('param', params)
def test_communication_gpu(param):
check_communication(True, param)
def check_retain(gpu, param):
with chainer.using_config('dtype', param.dtype):
communicator = create_communicator(gpu, param)
rank_send = (communicator.rank + 1) % communicator.size
rank_recv = (communicator.rank - 1) % communicator.size
x = create_x(gpu, param, communicator)
(model, entire_model) = create_models(gpu, param, communicator)
if communicator.rank == 0:
# Starting process.
t = copy.copy(x)
y = function(model(x))
dlg = chainermn.functions.send(
y, communicator, rank_send)
# Unless delegate_variable is used, backprop would stop here.
x_ = chainermn.functions.recv(communicator, rank_recv,
delegate_variable=dlg)
err = evaluation(x_, t)
err.backward()
            # x.grad is None if backprop stops in the middle.
assert x.grad is not None
else:
# Intermediate processes.
x_ = chainermn.functions.recv(communicator, rank_recv)
y = function(model(x_))
err = chainermn.functions.send(y, communicator, rank_send)
err.backward()
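# Note on the pattern above: send() returns a delegate variable carrying the
# backward dependency of the outgoing communication, and handing it to
# recv() chains the two operations, so err.backward() can reach the
# parameters on rank 0 instead of stopping at the recv boundary.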
@pytest.mark.parametrize('param', params)
def test_retain_cpu(param):
check_retain(False, param)
@chainer.testing.attr.gpu
@pytest.mark.parametrize('param', params)
def test_retain_gpu(param):
check_retain(True, param)
def check_tuple_communication(length, gpu, param):
with chainer.using_config('dtype', param.dtype):
communicator = create_communicator(gpu, param)
rank_send = (communicator.rank + 1) % communicator.size
rank_recv = (communicator.rank - 1) % communicator.size
x = create_x(gpu, param, communicator)
(model, entire_model) = create_models(gpu, param, communicator)
if communicator.rank == 0:
y = []
for i in range(length):
_y = function(model(x))
y.append(_y)
err = chainermn.functions.send(y, communicator, rank_send)
err.backward()
elif communicator.rank == communicator.size - 1:
y = chainermn.functions.recv(
communicator, rank_recv, force_tuple=True)
assert isinstance(y, tuple)
z = functools.reduce(lambda x, y: x + y, y)
err = evaluation(z, x)
err.backward()
else:
y = chainermn.functions.recv(communicator, rank_recv)
err = chainermn.functions.send(y, communicator, rank_send)
err.backward()
lengths = [1, 2]
@pytest.mark.parametrize('length', lengths)
@pytest.mark.parametrize('param', params)
def test_tuple_communication_cpu(length, param):
check_tuple_communication(length, False, param)
@chainer.testing.attr.gpu
@pytest.mark.parametrize('length', lengths)
@pytest.mark.parametrize('param', params)
def test_tuple_communication_gpu(length, param):
check_tuple_communication(length, True, param)
@pytest.mark.parametrize('param', params)
def test_non_variable_send(param):
"""Checks if backward will be called even if inputs are not Variable.
This test confirms whether deadlock occurs when numpy/cupy array is
given as an input of send.
In this case, the input will be converted to chainer Variable without
``requires_grad``, thus ``backward`` will not be called without any
modification.
"""
communicator = chainermn.create_communicator('naive')
if communicator.size < 2:
pytest.skip('This test is for multinode')
rank_send = (communicator.rank + 1) % communicator.size
rank_recv = (communicator.rank - 1) % communicator.size
if communicator.rank == 0:
x = numpy.ones((1, 10)).astype(param.dtype)
phi = chainermn.functions.send(
x, communicator, rank=rank_send)
x, = chainermn.functions.pseudo_connect(phi, x)
y = chainer.functions.sum(x)
t = numpy.array(0).astype(param.dtype)
z = chainer.functions.mean_squared_error(y, t)
z.backward()
elif communicator.rank == communicator.size - 1:
x = chainermn.functions.recv(communicator, rank=rank_recv)
y = chainer.functions.sum(x)
t = numpy.array(0).astype(param.dtype)
z = chainer.functions.mean_squared_error(y, t)
z.backward()
else:
x = chainermn.functions.recv(communicator, rank=rank_recv)
phi = chainermn.functions.send(
x, communicator, rank=rank_send)
phi.backward()
| 8,296 | 29.843866 | 73 | py | chainer | chainer-master/tests/chainermn_tests/functions_tests/test_pseudo_connect.py |
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
import chainermn.functions
@testing.parameterize(*testing.product({
'shape_x': [[(4, 5), (3, 2)], [(3, 2)], [()]],
'shape_delegate': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64, chainer.mixed16],
}))
class TestPseudoConnect(unittest.TestCase):
def setUp(self):
self.delegate = numpy.random.uniform(-1, 1, self.shape_delegate)\
.astype(self.dtype)
self.x = tuple([
numpy.random.uniform(-1, 1, shape).astype(self.dtype)
for shape in self.shape_x])
self.gy = tuple([
numpy.random.uniform(-1, 1, shape).astype(self.dtype)
for shape in self.shape_x])
def check_forward(self, delegate_data, x_data):
delegate_variable = chainer.Variable(delegate_data)
x = tuple([chainer.Variable(data) for data in x_data])
y = chainermn.functions.pseudo_connect(delegate_variable, *x)
for _y in y:
self.assertEqual(_y.data.dtype, self.dtype)
for _x, _y in zip(self.x, y):
y_expect = _x.copy()
testing.assert_allclose(y_expect, _y.data)
def test_forward_cpu(self):
self.check_forward(self.delegate, self.x)
@attr.gpu
def test_forward_gpu(self):
x = tuple([cuda.to_gpu(_x) for _x in self.x])
self.check_forward(cuda.to_gpu(self.delegate), x)
def check_backward(self, delegate_data, x_data, y_grad):
gradient_check.check_backward(
chainermn.functions.pseudo_connect,
(delegate_data, ) + x_data, y_grad,
dtype=numpy.float64)
def test_backward_cpu(self):
self.check_backward(self.delegate, self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
x = tuple([cuda.to_gpu(_x) for _x in self.x])
gy = tuple([cuda.to_gpu(_gy) for _gy in self.gy])
self.check_backward(cuda.to_gpu(self.delegate), x, gy)
| 2,079 | 32.015873 | 76 | py | chainer | chainer-master/tests/chainermn_tests/extensions_tests/test_checkpoint.py |
import os
import tempfile
import unittest
import warnings
import numpy as np
import chainer
from chainer.dataset import convert
import chainer.functions as F
import chainer.links as L
import chainer.testing
from chainer import training
import chainermn
from chainermn.extensions.checkpoint import _CheckpointStats
from chainermn.extensions.checkpoint import create_multi_node_checkpointer
from chainermn.testing.device import get_device
import chainerx as chx
class MLP(chainer.Chain):
def __init__(self, n_units, n_out):
super(MLP, self).__init__()
with self.init_scope():
self.l1 = L.Linear(784, n_units)
self.l2 = L.Linear(n_units, n_units)
self.l3 = L.Linear(n_units, n_out)
def __call__(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
class TestCheckpoint(unittest.TestCase):
def setUp(self):
self.communicator = chainermn.create_communicator('naive')
warnings.filterwarnings(action='always', category=DeprecationWarning)
def test_stats(self):
stats = _CheckpointStats()
for i in range(1024):
stats.start()
stats.end()
assert isinstance(stats.report(), str)
stats = _CheckpointStats()
assert isinstance(stats.report(), str)
def check_filename_converters(self, xp):
checkpointer = create_multi_node_checkpointer(name='hoge',
comm=self.communicator,
cp_interval=23,
gc_interval=32)
nums = [int(xp.random.uniform() * 4096) for _ in range(234)]
filenames = checkpointer._filenames(nums)
nums2 = []
for n, r, i in checkpointer._parse_filenames(filenames):
assert self.communicator.rank == r
assert 'hoge' == n
nums2.append(i)
assert set(nums) == set(nums2)
filenames2 = checkpointer._filenames(nums2)
assert set(filenames) == set(filenames2)
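        # The round trip above checks that _filenames and _parse_filenames
        # are inverses of each other: iteration numbers survive conversion
        # to per-rank file names and back, for both numpy and chainerx.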
def test_filename_converters(self):
self.check_filename_converters(np)
self.check_filename_converters(chx)
def setup_mnist_trainer(self, display_log=False, use_chx=False):
batchsize = 100
n_units = 100
comm = self.communicator
model = L.Classifier(MLP(n_units, 10))
model.to_device(get_device(None, use_chx))
optimizer = chainermn.create_multi_node_optimizer(
chainer.optimizers.Adam(), comm)
optimizer.setup(model)
if comm.rank == 0:
train, test = chainer.datasets.get_mnist()
else:
train, test = None, None
train = chainermn.scatter_dataset(train, comm, shuffle=True)
test = chainermn.scatter_dataset(test, comm, shuffle=True)
train_iter = chainer.iterators.SerialIterator(train, batchsize)
test_iter = chainer.iterators.SerialIterator(test, batchsize,
repeat=False,
shuffle=False)
updater = training.StandardUpdater(
train_iter,
optimizer
)
return updater, optimizer, train_iter, test_iter, model
@chainer.testing.attr.slow
def test_mnist_simple(self, display_log=True):
self.check_mnist_simple(display_log, False)
self.check_mnist_simple(display_log, True)
def check_mnist_simple(self, display_log=True, use_chx=False):
updater, optimizer, train_iter, _, model = \
            self.setup_mnist_trainer(use_chx=use_chx)
path = tempfile.mkdtemp(dir='/tmp', prefix=__name__ + '-tmp-')
if display_log:
print('temporary file:', path)
checkpointer = create_multi_node_checkpointer(name=__name__,
comm=self.communicator,
path=path)
checkpointer.maybe_load(updater, optimizer)
sum_accuracy = 0
sum_loss = 0
stop = 5
train_count = len(train_iter.dataset)
while train_iter.epoch < stop:
batch = train_iter.next()
x_array, t_array = convert.concat_examples(batch, -1)
x = chainer.Variable(x_array)
t = chainer.Variable(t_array)
optimizer.update(model, x, t)
sum_loss += float(model.loss.data) * len(t.data)
sum_accuracy += float(model.accuracy.data) * len(t.data)
if train_iter.is_new_epoch:
if display_log:
print(updater.iteration, train_iter.epoch,
sum_loss / train_count, sum_accuracy / train_count)
sum_loss = 0
sum_accuracy = 0
checkpointer.save(updater, updater.iteration)
if display_log:
print(self.communicator.rank, checkpointer.get_stats())
        # Allocate a totally different set of training tools to avoid leakage
        data_2 = self.setup_mnist_trainer(use_chx=use_chx)
updater2, optimizer2, train_iter2, test_iter2, model2 = data_2
checkpointer2 = create_multi_node_checkpointer(
name=__name__, comm=self.communicator, path=path)
checkpointer2.maybe_load(updater2, optimizer2)
# Check data properly resumed
self.assertEqual(updater.epoch, updater2.epoch)
self.assertEqual(updater.iteration, updater2.iteration)
# TODO(kuenishi): find a simple way to assure model equality
# in terms of float matrix
# self.assertEqual(model, model2)
# Restart training
while train_iter2.epoch < stop * 2:
batch = train_iter2.next()
x_array, t_array = convert.concat_examples(batch, -1)
x = chainer.Variable(x_array)
t = chainer.Variable(t_array)
optimizer2.update(model2, x, t)
sum_loss += float(model2.loss.data) * len(t.data)
sum_accuracy += float(model2.accuracy.data) * len(t.data)
if train_iter2.is_new_epoch:
print(updater2.iteration, train_iter2.epoch,
sum_loss / train_count, sum_accuracy / train_count)
sum_loss = 0
sum_accuracy = 0
checkpointer2.save(updater2, updater2.iteration)
if display_log:
print(self.communicator.rank, checkpointer2.get_stats())
checkpointer2.finalize()
checkpointer.finalize()
# Validate training
sum_accuracy = 0
sum_loss = 0
test_count = len(test_iter2.dataset)
for batch in test_iter2:
x_array, t_array = convert.concat_examples(batch, -1)
x = chainer.Variable(x_array)
t = chainer.Variable(t_array)
loss = model2(x, t)
sum_loss += float(loss.data) * len(t.data)
sum_accuracy += float(model2.accuracy.data) * len(t.data)
if display_log:
print('test mean loss: {}, accuracy: {}'.format(
sum_loss / test_count, sum_accuracy / test_count))
self.assertGreaterEqual(sum_accuracy / test_count, 0.95)
os.removedirs(path)
| 7,333 | 34.090909 | 77 | py | chainer | chainer-master/tests/chainermn_tests/extensions_tests/test_allreduce_persistent.py |
import chainer
import chainer.testing
import chainer.testing.attr
import unittest
import chainermn
from chainermn.testing.device import get_device
class ExampleModel(chainer.Chain):
def __init__(self, n_in=3, n_units=5, n_out=2):
super(ExampleModel, self).__init__()
with self.init_scope():
self.l1 = chainer.links.Linear(n_in, n_units, nobias=True)
self.bn1 = chainer.links.BatchNormalization(n_units)
self.l2 = chainer.links.Linear(n_units, n_units, nobias=True)
self.bn2 = chainer.links.BatchNormalization(n_units)
self.l3 = chainer.links.Linear(n_units, n_out)
class TestAllreducePersistent(unittest.TestCase):
def _test(self, comm, model, use_gpu, use_chx):
if use_gpu:
            # Use CuPy's Device class to force a call to cudaSetDevice()
chainer.cuda.get_device_from_id(comm.intra_rank).use()
device = get_device(comm.intra_rank if use_gpu else None, use_chx)
model.to_device(device)
rank = comm.rank
model.bn1.avg_mean.fill(rank * 1)
model.bn2.avg_mean.fill(rank * 2)
model.bn1.avg_var.fill(rank * 3)
model.bn2.avg_var.fill(rank * 4)
allreduce_persistent = \
chainermn.extensions.AllreducePersistent(model, comm)
allreduce_persistent()
avg_rank = (comm.size - 1) / 2.0
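        # The persistent values were filled with rank * c above, so the
        # allreduce mean should equal c * mean(0, ..., size - 1), i.e.
        # c * (size - 1) / 2, which is the avg_rank factor used below.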
chainer.testing.assert_allclose(model.bn1.avg_mean, avg_rank * 1)
chainer.testing.assert_allclose(model.bn2.avg_mean, avg_rank * 2)
chainer.testing.assert_allclose(model.bn1.avg_var, avg_rank * 3)
chainer.testing.assert_allclose(model.bn2.avg_var, avg_rank * 4)
def test_allreduce_persistent_cpu(self):
comm = chainermn.create_communicator('naive')
model = ExampleModel()
self._test(comm, model, False, False) # CPU test (numpy)
self._test(comm, model, False, True) # CPU test (ChainerX)
@chainer.testing.attr.gpu
def test_allreduce_persistent_gpu(self):
comm = chainermn.create_communicator('flat')
model = ExampleModel()
self._test(comm, model, True, False) # GPU test (CuPy)
self._test(comm, model, True, True) # GPU test (ChainerX)
| 2,231 | 36.830508 | 74 | py | chainer | chainer-master/tests/chainermn_tests/extensions_tests/test_multi_node_evaluator.py |
import chainer
import chainer.testing
from chainer.datasets import TupleDataset
from chainer.iterators import SerialIterator
from chainermn import create_communicator
from chainermn.extensions import GenericMultiNodeEvaluator
class ExampleModel(chainer.Chain):
def forward(self, a, b, c):
return a + b + c
def check_generic(comm, length, bs):
assert bs > 0
assert length > 0
a = list(range(comm.rank, length, comm.size))
b = list(range(comm.rank, length, comm.size))
c = list(range(comm.rank, length, comm.size))
model = ExampleModel()
dataset = TupleDataset(a, b, c)
iterator = SerialIterator(dataset, bs, shuffle=False, repeat=False)
evaluator = GenericMultiNodeEvaluator(comm, iterator, model)
results = evaluator(None)
# Make expected answer
iterator.reset()
s = [[aa + bb + cc # Same calculation as model
for aa, bb, cc in batch] for batch in iterator]
s = comm.gather_obj(s)
if comm.rank == 0:
# flatten list of lists gathered
expected = []
for e in zip(*s):
expected.extend(e)
for e, r in zip(expected, results):
chainer.testing.assert_allclose(e, r)
else:
assert results is None
def test_generic():
comm = create_communicator('naive')
try:
check_generic(comm, 97, 7)
check_generic(comm, 9, 77)
finally:
comm.finalize()
class CustomMultiNodeEvaluator(GenericMultiNodeEvaluator):
def __init__(self, *args, **kwargs):
super(CustomMultiNodeEvaluator, self).__init__(*args, **kwargs)
def calc_local(self, *args, **kwargs):
assert len(args) == 3
return 2
def aggregate(self, results):
for result in results:
assert 2 == result
return sum(results)
def check_custom(comm, length, bs):
assert bs > 0
assert length > 0
a = list(range(comm.rank, length, comm.size))
b = list(range(comm.rank, length, comm.size))
c = list(range(comm.rank, length, comm.size))
model = ExampleModel()
dataset = TupleDataset(a, b, c)
iterator = SerialIterator(dataset, bs, shuffle=False, repeat=False)
evaluator = CustomMultiNodeEvaluator(comm, iterator, model)
result = evaluator(None)
iterator.reset()
expected = comm.allreduce_obj(sum(2 for batch in iterator))
if comm.rank == 0:
assert expected == result
else:
assert result is None
def test_custom():
comm = create_communicator('naive')
try:
check_custom(comm, 97, 7)
check_custom(comm, 9, 77)
finally:
comm.finalize()
| 2,635 | 24.843137 | 71 | py | chainer | chainer-master/tests/chainermn_tests/extensions_tests/test_observation_aggregator.py |
from __future__ import division
import pytest
import numpy as np
import chainer
import chainer.testing
from chainer.training import extension
from chainer.backend import cuda
import chainermn
from chainermn.testing import get_device
from chainermn.extensions import ObservationAggregator
import chainerx
class DummyChain(chainer.Chain):
def __init__(self):
super(DummyChain, self).__init__()
with self.init_scope():
self.l = chainer.links.Linear(None, 1)
def forward(self, x):
return chainer.functions.sum(self.l(x))
@pytest.mark.parametrize('use_chainer_variable', [False, True])
@pytest.mark.parametrize('communicate_interval', [1, 2])
@pytest.mark.parametrize('xp', [chainerx, np])
def test_observation_aggregator_cpu(use_chainer_variable,
communicate_interval,
xp):
communicator = chainermn.create_communicator('naive')
run_test_observation_aggregator(communicator, xp,
use_chainer_variable,
communicate_interval,
use_gpu=False)
@pytest.mark.parametrize('use_chainer_variable', [False, True])
@pytest.mark.parametrize('communicate_interval', [1, 2])
@chainer.testing.attr.gpu
def test_observation_aggregator_gpu_chainerx(use_chainer_variable,
communicate_interval):
xp = chainerx
communicator = chainermn.create_communicator('pure_nccl')
device = get_device(communicator.intra_rank, True)
with chainerx.using_device(device.device):
if use_chainer_variable:
run_test_observation_aggregator(communicator, xp,
use_chainer_variable,
communicate_interval,
use_gpu=True)
else:
with pytest.raises(ValueError):
run_test_observation_aggregator(communicator, xp,
use_chainer_variable,
communicate_interval,
use_gpu=True)
@pytest.mark.parametrize('use_chainer_variable', [True, False])
@pytest.mark.parametrize('communicate_interval', [1, 2])
@chainer.testing.attr.gpu
def test_observation_aggregator_gpu_cupy(use_chainer_variable,
communicate_interval):
communicator = chainermn.create_communicator('pure_nccl')
run_test_observation_aggregator(communicator, cuda.cupy,
use_chainer_variable,
communicate_interval,
use_gpu=True)
def run_test_observation_aggregator(comm, xp,
use_chainer_variable,
communicate_interval,
use_gpu):
model = DummyChain()
if use_gpu:
        # Use CuPy's Device class to force a call to cudaSetDevice()
chainer.cuda.get_device_from_id(comm.intra_rank).use()
device = get_device(comm.intra_rank if use_gpu else None, xp == chainerx)
if xp == chainerx:
train = xp.array(np.random.rand(10, 1).astype(np.float32))
else:
train = xp.random.rand(10, 1).astype(np.float32)
model.to_device(device)
train_iter = chainer.iterators.SerialIterator(train,
batch_size=1,
repeat=True,
shuffle=True)
optimizer = chainermn.create_multi_node_optimizer(
chainer.optimizers.Adam(), comm)
optimizer.setup(model)
updater = chainer.training.StandardUpdater(train_iter, optimizer,
device=device)
trainer = chainer.training.Trainer(updater, (1, 'epoch'))
@extension.make_extension(
trigger=(1, 'iteration'), priority=extension.PRIORITY_WRITER)
def rank_reporter(trainer_):
tmp = xp.asarray(comm.rank, dtype=np.float32)
if use_chainer_variable:
tmp = chainer.Variable(tmp)
trainer_.observation['rank'] = tmp
@extension.make_extension(
trigger=(communicate_interval, 'iteration'),
priority=extension.PRIORITY_READER)
def aggregated_rank_checker(trainer_):
actual = trainer_.observation['rank-aggregated']
if use_chainer_variable:
actual = actual.data
expected = (comm.size - 1) / 2
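        # The aggregated observation should be the mean of the reported
        # ranks: (0 + 1 + ... + (size - 1)) / size = (size - 1) / 2.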
chainer.testing.assert_allclose(actual, expected)
trainer.extend(rank_reporter)
trainer.extend(ObservationAggregator(
comm, 'rank', 'rank-aggregated',
comm_trigger=(communicate_interval, 'iteration')))
trainer.extend(aggregated_rank_checker)
trainer.run()
| 4,968 | 36.08209 | 77 | py | chainer | chainer-master/tests/chainermn_tests/extensions_tests/test_multi_node_snapshot.py |
import mock
import tempfile
import pytest
import chainer
import chainer.links as L
import chainer.functions as F
from chainer.training import extensions
from chainer.training import StandardUpdater
from chainer.training import Trainer
import chainermn
from chainermn import create_communicator
from chainermn.extensions import multi_node_snapshot
from chainermn.extensions import _multi_node_snapshot
@pytest.mark.parametrize('rs,size,expected', [
([0], 4, [{0}, {1, 2, 3}]),
([0, 1], 4, [{0}, {1}, {2, 3}]),
([[0, 1], [2, 3]], 4, [{0, 1}, {2, 3}]),
([], 4, [{0, 1, 2, 3}]),
([range(0, 16, 2), range(1, 16, 2)], 16,
[set(range(0, 16, 2)), set(range(1, 16, 2))]),
([range(0, 16, 2)], 16, [set(range(0, 16, 2)), set(range(1, 16, 2))]),
([], 8, [set(range(8))]),
])
def test_parser(rs, size, expected):
sets = _multi_node_snapshot._parse_replica_sets(rs, size)
assert expected == sets
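# The cases above suggest the parsing contract: ranks not mentioned in `rs`
# are collected into one trailing replica set, which is why e.g. [0] with
# size 4 yields [{0}, {1, 2, 3}].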
def test_smoke_wrapper():
rs = [[0, 1], ]
comm = create_communicator('naive')
if comm.size < 2:
pytest.skip()
snapshot = extensions.snapshot()
filename = '{}.{}'.format(snapshot.filename, comm.rank)
replica_sets = rs
mn_snapshot = multi_node_snapshot(comm, snapshot, replica_sets)
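    # With replica_sets=[[0, 1]] and four ranks, the partition is {0, 1} and
    # {2, 3}; the assertions below indicate that the smallest rank of each
    # replica set acts as the master that actually writes the snapshot.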
if comm.rank == 0:
assert mn_snapshot.is_master
assert filename == mn_snapshot.snapshot.filename
elif comm.rank == 1:
assert not mn_snapshot.is_master
elif comm.rank == 2:
assert mn_snapshot.is_master
assert filename == mn_snapshot.snapshot.filename
else:
assert not mn_snapshot.is_master
comm.finalize()
def test_callable_filename():
rs = [[0, 1], ]
comm = create_communicator('naive')
if comm.size < 2:
pytest.skip()
def filename_fun(t):
return 'deadbeef-{.updater.iteration}'.format(t)
snapshot = extensions.snapshot(filename=filename_fun)
trainer = mock.MagicMock()
filename = '{}.{}'.format(filename_fun(trainer), comm.rank)
replica_sets = rs
mn_snapshot = multi_node_snapshot(comm, snapshot, replica_sets)
if comm.rank == 0:
assert mn_snapshot.is_master
assert filename == mn_snapshot.snapshot.filename(trainer)
elif comm.rank == 1:
assert not mn_snapshot.is_master
elif comm.rank == 2:
assert mn_snapshot.is_master
assert filename == mn_snapshot.snapshot.filename(trainer)
else:
assert not mn_snapshot.is_master
comm.finalize()
def test_smoke_multinode_snapshot():
t = mock.MagicMock()
c = mock.MagicMock(side_effect=[True, False])
w = mock.MagicMock()
snapshot = extensions.snapshot(target=t, condition=c, writer=w)
trainer = mock.MagicMock()
comm = create_communicator('naive')
replica_sets = []
mn_snapshot = multi_node_snapshot(comm, snapshot, replica_sets)
mn_snapshot.initialize(trainer)
mn_snapshot(trainer)
mn_snapshot(trainer)
mn_snapshot.finalize()
if comm.rank == 0:
assert mn_snapshot.is_master
assert c.call_count == 2
assert w.call_count == 1
else:
assert not mn_snapshot.is_master
assert c.call_count == 0
assert w.call_count == 0
comm.finalize()
class MLP(chainer.Chain):
def __init__(self, n_units, n_out):
super(MLP, self).__init__()
with self.init_scope():
self.l1 = L.Linear(784, n_units)
self.l2 = L.Linear(n_units, n_units)
self.l3 = L.Linear(n_units, n_out)
def __call__(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
def _prepare_multinode_snapshot(n, result):
n_units = 100
batchsize = 10
comm = create_communicator('naive')
model = L.Classifier(MLP(n_units, 10))
optimizer = chainermn.create_multi_node_optimizer(
chainer.optimizers.Adam(), comm)
optimizer.setup(model)
if comm.rank == 0:
train, _ = chainer.datasets.get_mnist()
else:
train, _ = None, None
train = chainermn.scatter_dataset(train, comm, shuffle=True)
train_iter = chainer.iterators.SerialIterator(train, batchsize)
updater = StandardUpdater(train_iter, optimizer)
trainer = Trainer(updater, out=result)
snapshot = extensions.snapshot(target=updater, autoload=True)
replica_sets = []
mn_snapshot = multi_node_snapshot(comm, snapshot, replica_sets)
mn_snapshot.initialize(trainer)
for _ in range(n):
updater.update()
return updater, mn_snapshot, trainer
def test_multinode_autoload():
n = 3
with tempfile.TemporaryDirectory() as tempd:
result = tempd
updater0, snapshot, trainer0 = _prepare_multinode_snapshot(n, result)
assert n == updater0.iteration
snapshot(trainer0)
updater, _, _ = _prepare_multinode_snapshot(0, result)
assert n == updater.iteration
| 4,902 | 27.505814 | 77 | py | chainer | chainer-master/tests/chainermn_tests/extensions_tests/test_multi_node_early_stopping_trigger.py |
from __future__ import division
import unittest
import numpy as np
import chainer
from chainer import testing
import chainerx
import chainermn
import chainermn.testing
from chainermn.extensions import MultiNodeEarlyStoppingTrigger
from chainer.training import util
from chainer.backend import cuda
def _test_trigger(self, trigger, key, accuracies, expected):
trainer = testing.training.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=2)
for accuracy, expected in zip(accuracies, expected):
trainer.updater.update()
trainer.observation = {key: accuracy}
self.assertEqual(trigger(trainer), expected)
class TestMultiNodeEarlyStoppingTrigger(unittest.TestCase):
def test_early_stopping_trigger_with_accuracy_cpu(self):
self.communicator = chainermn.create_communicator('naive')
self.xp = np
self.run_test_early_stopping_trigger_with_accuracy()
def test_early_stopping_trigger_with_accuracy_cpu_chx(self):
self.communicator = chainermn.create_communicator('naive')
self.xp = chainerx
self.run_test_early_stopping_trigger_with_accuracy()
@chainer.testing.attr.gpu
def test_early_stopping_trigger_with_accuracy_gpu(self):
self.communicator = chainermn.create_communicator('pure_nccl')
self.xp = cuda.cupy
cuda.Device(self.communicator.intra_rank).use()
self.run_test_early_stopping_trigger_with_accuracy()
@chainer.testing.attr.gpu
def test_early_stopping_trigger_with_accuracy_gpu_chx(self):
self.communicator = chainermn.create_communicator('pure_nccl')
self.xp = chainerx
chainermn.testing.get_device(self.communicator.intra_rank, True).use()
with chainerx.using_device("cuda", self.communicator.intra_rank):
self.run_test_early_stopping_trigger_with_accuracy()
def run_test_early_stopping_trigger_with_accuracy(self):
comm = self.communicator
key = 'main/accuracy'
trigger = MultiNodeEarlyStoppingTrigger(comm, monitor=key, patience=3,
check_trigger=(1, 'epoch'),
verbose=False)
trigger = util.get_trigger(trigger)
accuracies = [0.5, 0.5, 0.5, 0.5, 0.6, 0.6, 0.7,
0.7, 0.6, 0.6, 0.4, 0.4, 0.3, 0.3, 0.2, 0.2]
accuracies = [x * (1 - comm.rank / comm.size) for x in accuracies]
accuracies = [
chainer.Variable(self.xp.asarray(acc, dtype=np.float32))
for acc in accuracies]
expected = [False, False, False, False, False, False,
False, False, False, False, False, False,
False, True, False, True]
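        # With iter_per_epoch=2 and check_trigger=(1, 'epoch'), the monitor
        # is evaluated every second iteration; patience=3 means the trigger
        # fires once the aggregated accuracy has failed to improve for three
        # consecutive epoch-end checks, hence the True values near the end.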
_test_trigger(self, trigger, key, accuracies, expected)
| 2,824 | 37.69863 | 78 | py | chainer | chainer-master/tests/chainermn_tests/datasets_tests/test_empty_dataset.py |
import unittest
import numpy as np
from chainermn.datasets import create_empty_dataset
import chainerx as chx
class TestEmptyDataset(unittest.TestCase):
def setUp(self):
pass
def check_create_empty_dataset(self, original_dataset):
empty_dataset = create_empty_dataset(original_dataset)
self.assertEqual(len(original_dataset), len(empty_dataset))
for i in range(len(original_dataset)):
self.assertEqual((), empty_dataset[i])
def test_empty_dataset_numpy(self):
self.check_empty_dataset(np)
def test_empty_dataset_chx(self):
self.check_empty_dataset(chx)
def check_empty_dataset(self, xp):
n = 10
self.check_create_empty_dataset([])
self.check_create_empty_dataset([0])
self.check_create_empty_dataset(list(range(n)))
self.check_create_empty_dataset(list(range(n * 5 - 1)))
self.check_create_empty_dataset(xp.array([]))
self.check_create_empty_dataset(xp.array([0]))
self.check_create_empty_dataset(xp.arange(n))
self.check_create_empty_dataset(xp.arange(n * 5 - 1))
| 1,128 | 28.710526 | 67 | py | chainer | chainer-master/tests/chainermn_tests/datasets_tests/test_scatter.py |
from __future__ import with_statement
import itertools
import unittest
import mpi4py.MPI
import numpy as np
import pytest
from chainer import testing
import chainermn
from chainermn.communicators.flat_communicator import FlatCommunicator
from chainermn.communicators.naive_communicator import NaiveCommunicator
import chainerx as chx
class TestDataset(unittest.TestCase):
def setUp(self):
self.mpi_comm = mpi4py.MPI.COMM_WORLD
self.communicator = NaiveCommunicator(self.mpi_comm)
def check_scatter_dataset(self, original_dataset, shuffle=False, root=0):
if self.communicator.rank != root:
original_dataset = None
my_dataset = chainermn.scatter_dataset(
original_dataset, self.communicator,
shuffle=shuffle, root=root)
sub_datasets = self.communicator.gather_obj(my_dataset, root=root)
if self.communicator.rank == root:
# Test the sizes
sub_sizes = [len(sub_dataset) for sub_dataset in sub_datasets]
self.assertEqual(len(set(sub_sizes)), 1)
sub_size = sub_sizes[0]
self.assertLessEqual(
len(original_dataset), sub_size * self.mpi_comm.size)
self.assertGreater(
len(original_dataset), (sub_size - 1) * self.mpi_comm.size)
# Test the content of scattered datasets
joined_dataset = sum((sub_dataset[:]
for sub_dataset in sub_datasets), [])
            # NOTE: The values in `original_dataset` and
            # `joined_dataset` must be cast to int before comparison.
            # There are two reasons for this.
            #
            # (1) numpy and cupy/chainerx behave differently on 1-element
            # arrays: numpy implicitly converts a 1-element array to
            # a scalar value.
# type(numpy.array([1])[0])
# => <class 'numpy.int64'> # Scalar
# type(chainerx.array([1])[0])
# => <class 'chainerx.ndarray'> # array of one element
#
# (2) Two different ChainerX arrays are never identical in the
# context of `set()`.
# set([chainerx.array([0]), chainerx.array([0])])
# => {array([0], shape=(1,), dtype=int64, device='native:0'),
# array([0], shape=(1,), dtype=int64, device='native:0')}
joined_dataset = [int(e) for e in joined_dataset]
original_dataset = [int(e) for e in original_dataset]
self.assertEqual(set(joined_dataset), set(original_dataset))
def test_scatter_dataset(self):
n = self.communicator.size
for shuffle in [True, False]:
for root in range(self.communicator.size):
self.check_scatter_dataset([], shuffle, root)
self.check_scatter_dataset([0], shuffle, root)
self.check_scatter_dataset(list(range(n)), shuffle, root)
self.check_scatter_dataset(list(range(n * 5 - 1)),
shuffle, root)
self.check_scatter_dataset(np.array([]), shuffle, root)
self.check_scatter_dataset(np.array([0]), shuffle, root)
self.check_scatter_dataset(np.arange(n), shuffle, root)
self.check_scatter_dataset(np.arange(n * 5 - 1), shuffle, root)
self.check_scatter_dataset(chx.array([]), shuffle, root)
self.check_scatter_dataset(chx.array([0]), shuffle, root)
self.check_scatter_dataset(chx.arange(n), shuffle, root)
self.check_scatter_dataset(
chx.arange(n * 5 - 1), shuffle, root)
def scatter_large_data(communicator):
data = []
if communicator.rank == 0:
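        # Presumably ~2e9 elements so that the scattered payload exceeds the
        # 32-bit count limit of a single MPI message (hence the slow marker).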
data = ['test'] * 2000000000
data = chainermn.scatter_dataset(data, communicator)
assert len(data) > 0
@testing.attr.slow
def test_scatter_large_dataset_naive():
mpi_comm = mpi4py.MPI.COMM_WORLD
communicator = NaiveCommunicator(mpi_comm)
# This test only runs when comm.size >= 2.
if communicator.size == 1:
pytest.skip('This test is for multinode')
scatter_large_data(communicator)
@testing.attr.gpu
@testing.attr.slow
def test_scatter_large_dataset_flat():
mpi_comm = mpi4py.MPI.COMM_WORLD
communicator = FlatCommunicator(mpi_comm)
# This test only runs when comm.size >= 2.
if communicator.size == 1:
pytest.skip('This test is for multinode')
scatter_large_data(communicator)
def test_scatter_index_one():
it = chainermn.datasets.scatter._scatter_index(10, 3, False)
split = [(0, 0, 4), (1, 4, 7), (2, 7, 10)]
for lhs, rhs in zip(split, it):
assert lhs == rhs
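# A sketch (an assumption about `_scatter_index`, not its actual code): one
# balanced-partition rule consistent with the expected split above is to
# give the first (length % size) ranks one extra element each.
def _reference_scatter_index(length, size):
    base, rem = divmod(length, size)
    begin = 0
    for rank in range(size):
        end = begin + base + (1 if rank < rem else 0)
        yield rank, begin, end
        begin = end
# e.g. list(_reference_scatter_index(10, 3))
# == [(0, 0, 4), (1, 4, 7), (2, 7, 10)]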
@pytest.mark.parametrize('combination', [
[10, 3], [1244, 23], [2, 1], [230945, 237]])
def test_scatter_index(combination):
length, size = combination
it = chainermn.datasets.scatter._scatter_index(length, size, False)
union = set()
total = []
subsets = []
for (_, b, e) in it:
subset = list(range(b, e))
subsets.append(subset)
total.extend(subset)
for x in subset:
union.add(x)
    assert length == len(total)  # no duplication
    assert length == len(union)  # no duplication & nothing missing
    for lhs, rhs in itertools.combinations(subsets, 2):
        assert set(lhs).isdisjoint(set(rhs))  # subsets must not overlap
        assert abs(len(lhs) - len(rhs)) <= 1  # sizes balanced within one
| 5,470
| 35.966216
| 79
|
py
|
chainer
|
chainer-master/tests/chainermn_tests/datasets_tests/test_mnist.py
|
# coding: utf-8
import os
import pytest
import sys
import tempfile
import warnings
import chainer
import chainer.functions as F
import chainer.links as L
import chainer.testing
from chainer import training
from chainer.training import extensions
import chainermn
from chainermn.testing import get_device
from chainermn.extensions.checkpoint import create_multi_node_checkpointer
class MLP(chainer.Chain):
def __init__(self, n_units, n_out):
super(MLP, self).__init__()
with self.init_scope():
self.l1 = L.Linear(784, n_units)
self.l2 = L.Linear(n_units, n_units)
self.l3 = L.Linear(n_units, n_out)
def __call__(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
def check_mnist(use_gpu, use_chx, display_log=True):
epoch = 5
batchsize = 100
n_units = 100
warnings.filterwarnings(action='always', category=DeprecationWarning)
model = L.Classifier(MLP(n_units, 10))
comm = chainermn.create_communicator('naive')
if use_gpu:
# Call CuPy's `Device.use()` to force cudaSetDevice()
chainer.cuda.get_device_from_id(comm.intra_rank).use()
device = get_device(comm.intra_rank if use_gpu else None, use_chx)
model.to_device(device)
optimizer = chainermn.create_multi_node_optimizer(
chainer.optimizers.Adam(), comm)
optimizer.setup(model)
if comm.rank == 0:
train, test = chainer.datasets.get_mnist()
else:
train, test = None, None
train = chainermn.scatter_dataset(train, comm, shuffle=True)
test = chainermn.scatter_dataset(test, comm, shuffle=True)
train_iter = chainer.iterators.SerialIterator(train, batchsize)
test_iter = chainer.iterators.SerialIterator(test, batchsize,
repeat=False,
shuffle=False)
updater = training.StandardUpdater(
train_iter,
optimizer,
device=device
)
trainer = training.Trainer(updater, (epoch, 'epoch'))
# Wrap standard Chainer evaluators by MultiNodeEvaluator.
evaluator = extensions.Evaluator(test_iter, model, device=device)
evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
trainer.extend(evaluator)
# Add checkpointer. This is just to check checkpointing runs
# without errors
path = tempfile.mkdtemp(dir='/tmp', prefix=__name__ + '-tmp-')
checkpointer = create_multi_node_checkpointer(name=__name__, comm=comm,
path=path)
trainer.extend(checkpointer, trigger=(1, 'epoch'))
# Some display and output extensions are necessary only for one worker.
# (Otherwise, there would just be repeated outputs.)
if comm.rank == 0 and display_log:
trainer.extend(extensions.LogReport(trigger=(1, 'epoch')),
trigger=(1, 'epoch'))
trainer.extend(extensions.PrintReport(['epoch',
'main/loss',
'validation/main/loss',
'main/accuracy',
'validation/main/accuracy',
'elapsed_time'],
out=sys.stderr),
trigger=(1, 'epoch'))
trainer.run()
    accuracy = evaluator()['validation/main/accuracy']
    assert accuracy > 0.95
    # Check that the checkpointer successfully finalized the snapshot
    # directory (all snapshot files should have been cleaned up).
assert [] == os.listdir(path)
os.removedirs(path)
@pytest.mark.parametrize("use_chx", [True, False])
@chainer.testing.attr.slow
def test_mnist(use_chx):
check_mnist(False, use_chx)
@pytest.mark.parametrize("use_chx", [True, False])
@chainer.testing.attr.gpu
def test_mnist_gpu(use_chx):
check_mnist(True, use_chx)
if __name__ == '__main__':
    # test_mnist is pytest-parametrized, so pass use_chx explicitly when
    # running this file directly.
    test_mnist(use_chx=False)
| 3,997
| 31.241935
| 75
|
py
|
chainer
|
chainer-master/tests/chainermn_tests/testing_tests/test_device.py
|
import pytest
from chainermn import testing
import chainer.testing.attr
# use_chainerx, expected
test_data_cpu = [
(False, "@numpy"),
(True, "native:0"),
]
# gpu_id, use_chainerx, expected
test_data_gpu = [
(0, False, "@cupy:0"),
(1, False, "@cupy:1"),
(0, True, "cuda:0"),
(1, True, "cuda:1"),
]
@pytest.mark.parametrize("use_chainerx,expected", test_data_cpu)
def test_get_device_cpu(use_chainerx, expected):
device = testing.get_device(use_chainerx=use_chainerx)
assert device.name == expected
@chainer.testing.attr.gpu
@pytest.mark.parametrize("gpu_id,use_chainerx,expected", test_data_gpu)
def test_get_device(gpu_id, use_chainerx, expected):
device = testing.get_device(gpu_id, use_chainerx)
assert device.name == expected
| 776
| 23.28125
| 71
|
py
|
chainer
|
chainer-master/tests/chainermn_tests/optimizer_tests/test_multi_node_optimizer.py
|
import chainer
import chainer.testing
import chainer.testing.attr
import chainermn
import chainermn.testing
import mock
import numpy as np
import pytest
class ExampleModel(chainer.Chain):
def __init__(self):
super(ExampleModel, self).__init__()
with self.init_scope():
self.a = chainer.links.Linear(2, 3)
self.b = chainer.links.Linear(3, 4)
self.c = chainer.links.Linear(4, 5)
class TestMultiNodeOptimizer(object):
def setup_cpu(self):
self.comm = chainermn.create_communicator('naive')
self.target = ExampleModel()
self.target.a.W.data[:] = self.comm.rank
self.target.b.W.data[:] = self.comm.rank + 1
self.target.c.W.data[:] = self.comm.rank + 2
self.target.a.W.grad[:] = 0
self.target.b.W.grad[:] = 0
self.target.c.W.grad[:] = 0
self.actual_optimizer = chainer.GradientMethod()
self.actual_optimizer.create_update_rule = mock.MagicMock
def setup_gpu(self, use_chx=False):
self.comm = chainermn.create_communicator('flat')
self.target = ExampleModel()
self.device = chainermn.testing.get_device(self.comm.intra_rank,
use_chx)
chainer.cuda.get_device_from_id(self.comm.intra_rank).use()
self.target.to_device(self.device)
self.target.a.W.data[:] = self.comm.rank
self.target.b.W.data[:] = self.comm.rank + 1
self.target.c.W.data[:] = self.comm.rank + 2
self.target.a.W.grad[:] = 0
self.target.b.W.grad[:] = 0
self.target.c.W.grad[:] = 0
self.actual_optimizer = chainer.GradientMethod()
self.actual_optimizer.create_update_rule = mock.MagicMock
def test_update_with_cpu(self):
self.setup_cpu()
self.optimizer = chainermn.create_multi_node_optimizer(
self.actual_optimizer, self.comm)
opt = self.optimizer.setup(self.target)
assert opt is self.optimizer
self.optimizer.update()
assert self.actual_optimizer.t == 0
self.optimizer.target.a.W.grad[:] = self.comm.rank
self.optimizer.target.b.W.grad[:] = self.comm.rank + 1
self.optimizer.target.c.W.grad[:] = self.comm.rank + 2
self.optimizer.update()
assert self.actual_optimizer.t == 1
self.optimizer.target.a.W.update_rule.update.assert_called_once_with(
self.optimizer.target.a.W)
self.optimizer.target.b.W.update_rule.update.assert_called_once_with(
self.optimizer.target.b.W)
self.optimizer.target.c.W.update_rule.update.assert_called_once_with(
self.optimizer.target.c.W)
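        # The multi-node optimizer averages gradients over ranks; since
        # rank r set grad = r + k above, the mean is (size - 1) / 2 + k.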
base = (self.comm.size - 1.0) / 2
chainer.testing.assert_allclose(self.optimizer.target.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(self.optimizer.target.b.W.grad,
(base + 1) * np.ones((4, 3)))
chainer.testing.assert_allclose(self.optimizer.target.c.W.grad,
(base + 2) * np.ones((5, 4)))
@chainer.testing.attr.gpu
@pytest.mark.parametrize("use_chx", [True, False])
def test_update_with_gpu(self, use_chx):
self.setup_gpu(use_chx)
self.optimizer = chainermn.create_multi_node_optimizer(
self.actual_optimizer, self.comm)
opt = self.optimizer.setup(self.target)
assert opt is self.optimizer
self.optimizer.update()
assert self.actual_optimizer.t == 0
self.optimizer.target.a.W.grad[:] = self.comm.rank
self.optimizer.target.b.W.grad[:] = self.comm.rank + 1
self.optimizer.target.c.W.grad[:] = self.comm.rank + 2
self.optimizer.update()
assert self.actual_optimizer.t == 1
self.optimizer.target.a.W.update_rule.update.assert_called_once_with(
self.optimizer.target.a.W)
self.optimizer.target.b.W.update_rule.update.assert_called_once_with(
self.optimizer.target.b.W)
self.optimizer.target.c.W.update_rule.update.assert_called_once_with(
self.optimizer.target.c.W)
base = (self.comm.size - 1.0) / 2
chainer.testing.assert_allclose(self.optimizer.target.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(self.optimizer.target.b.W.grad,
(base + 1) * np.ones((4, 3)))
chainer.testing.assert_allclose(self.optimizer.target.c.W.grad,
(base + 2) * np.ones((5, 4)))
class DynamicExampleModel(chainer.Chain):
def __init__(self):
super(DynamicExampleModel, self).__init__()
with self.init_scope():
self.a = chainer.links.Linear(2, 3)
self.b = chainer.links.Linear(3, 4)
class TestMultiNodeOptimizerWithDynamicModel(object):
def setup_cpu(self):
self.comm = chainermn.create_communicator('naive')
self.target = DynamicExampleModel()
self.target.a.W.data[:] = self.comm.rank
self.target.b.W.data[:] = self.comm.rank + 1
self.target.a.W.grad[:] = 0
self.target.b.W.grad[:] = 0
self.actual_optimizer = chainer.GradientMethod()
self.actual_optimizer.create_update_rule = mock.MagicMock
def setup_gpu(self, use_chx=False):
self.comm = chainermn.create_communicator('flat')
self.target = DynamicExampleModel()
self.device = chainermn.testing.get_device(self.comm.intra_rank,
use_chx)
chainer.cuda.get_device_from_id(self.comm.intra_rank).use()
self.target.to_device(self.device)
self.target.a.W.data[:] = self.comm.rank
self.target.b.W.data[:] = self.comm.rank + 1
self.target.a.W.grad[:] = 0
self.target.b.W.grad[:] = 0
self.actual_optimizer = chainer.GradientMethod()
self.actual_optimizer.create_update_rule = mock.MagicMock
def test_update_with_cpu(self):
self.setup_cpu()
self.optimizer = chainermn.create_multi_node_optimizer(
self.actual_optimizer, self.comm)
opt = self.optimizer.setup(self.target)
assert opt is self.optimizer
self.optimizer.update()
assert self.actual_optimizer.t == 0
with self.target.init_scope():
self.target.c = chainer.links.Linear(4, 4)
if self.comm.rank == 0:
self.target.c.W.data[:] = self.comm.rank + 2
self.optimizer.setup(self.target)
self.optimizer.update()
assert self.actual_optimizer.t == 0
send_buf = chainer.cuda.to_cpu(self.optimizer.target.c.W.data)
recv_buf = self.comm.mpi_comm.allgather(send_buf)
for i in range(1, self.comm.size):
chainer.testing.assert_allclose(recv_buf[0], recv_buf[i])
self.optimizer.target.a.W.grad[:] = self.comm.rank
self.optimizer.target.b.W.grad[:] = self.comm.rank + 1
self.optimizer.target.c.W.grad[:] = self.comm.rank + 2
self.optimizer.update()
assert self.actual_optimizer.t == 1
self.optimizer.target.a.W.update_rule.update.assert_called_once_with(
self.optimizer.target.a.W)
self.optimizer.target.b.W.update_rule.update.assert_called_once_with(
self.optimizer.target.b.W)
self.optimizer.target.c.W.update_rule.update.assert_called_once_with(
self.optimizer.target.c.W)
base = (self.comm.size - 1.0) / 2
chainer.testing.assert_allclose(self.optimizer.target.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(self.optimizer.target.b.W.grad,
(base + 1) * np.ones((4, 3)))
chainer.testing.assert_allclose(self.optimizer.target.c.W.grad,
(base + 2) * np.ones((4, 4)))
@chainer.testing.attr.gpu
@pytest.mark.parametrize("use_chx", [True, False])
def test_update_with_gpu(self, use_chx):
self.setup_gpu(use_chx)
self.optimizer = chainermn.create_multi_node_optimizer(
self.actual_optimizer, self.comm)
opt = self.optimizer.setup(self.target)
assert opt is self.optimizer
self.optimizer.update()
assert self.actual_optimizer.t == 0
with self.target.init_scope():
c = chainer.links.Linear(4, 4)
c.to_device(self.device)
self.target.c = c
if self.comm.rank == 0:
self.target.c.W.data[:] = self.comm.rank + 2
self.optimizer.setup(self.target)
self.optimizer.update()
assert self.actual_optimizer.t == 0
send_buf = chainer.cuda.to_cpu(self.optimizer.target.c.W.data)
recv_buf = self.comm.mpi_comm.allgather(send_buf)
for i in range(1, self.comm.size):
chainer.testing.assert_allclose(recv_buf[0], recv_buf[i])
self.optimizer.target.a.W.grad[:] = self.comm.rank
self.optimizer.target.b.W.grad[:] = self.comm.rank + 1
self.optimizer.target.c.W.grad[:] = self.comm.rank + 2
self.optimizer.update()
assert self.actual_optimizer.t == 1
self.optimizer.target.a.W.update_rule.update.assert_called_once_with(
self.optimizer.target.a.W)
self.optimizer.target.b.W.update_rule.update.assert_called_once_with(
self.optimizer.target.b.W)
self.optimizer.target.c.W.update_rule.update.assert_called_once_with(
self.optimizer.target.c.W)
base = (self.comm.size - 1.0) / 2
chainer.testing.assert_allclose(self.optimizer.target.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(self.optimizer.target.b.W.grad,
(base + 1) * np.ones((4, 3)))
chainer.testing.assert_allclose(self.optimizer.target.c.W.grad,
(base + 2) * np.ones((4, 4)))
| 10,174
| 42.114407
| 77
|
py
|
chainer
|
chainer-master/tests/chainermn_tests/optimizer_tests/test_double_buffering_optimizer.py
|
import chainer
from chainer.backends.cuda import cupy
import chainer.testing
import chainer.testing.attr
import chainermn
from chainermn import nccl
import mock
import numpy as np
import pytest
import unittest
class ExampleModel(chainer.Chain):
def __init__(self):
super(ExampleModel, self).__init__()
with self.init_scope():
self.a = chainer.links.Linear(2, 3)
self.b = chainer.links.Linear(3, 4)
self.c = chainer.links.Linear(4, 5)
class TestDoubleBufferingOptimizer(unittest.TestCase):
def setup(self, batched_copy):
if nccl.get_build_version() < 2000:
pytest.skip('This test requires NCCL version >= 2.0')
self.comm = chainermn.create_communicator('pure_nccl',
batched_copy=batched_copy)
device = self.comm.intra_rank
chainer.cuda.get_device_from_id(device).use()
self.target = ExampleModel()
self.target.to_device(cupy.cuda.Device())
self.target.a.W.data[:] = self.comm.rank
self.target.b.W.data[:] = self.comm.rank + 1
self.target.c.W.data[:] = self.comm.rank + 2
self.target.a.W.grad[:] = 0
self.target.b.W.grad[:] = 0
self.target.c.W.grad[:] = 0
self.actual_optimizer = chainer.GradientMethod()
self.actual_optimizer.create_update_rule = mock.MagicMock
def check_update(self, batched_copy):
self.setup(batched_copy)
self.optimizer = chainermn.create_multi_node_optimizer(
self.actual_optimizer, self.comm, double_buffering=True)
opt = self.optimizer.setup(self.target)
assert opt is self.optimizer
self.optimizer.update()
self.assertEqual(self.actual_optimizer.t, 0)
self.optimizer.target.a.W.grad[:] = self.comm.rank
self.optimizer.target.b.W.grad[:] = self.comm.rank + 1
self.optimizer.target.c.W.grad[:] = self.comm.rank + 2
self.optimizer.update()
self.optimizer.wait()
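        # Double buffering overlaps gradient communication with computation,
        # so the parameter update (and the increment of t) lags one call to
        # update() behind, as the surrounding assertions show.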
self.assertEqual(self.actual_optimizer.t, 0)
base = (self.comm.size - 1.0) / 2
chainer.testing.assert_allclose(
self.optimizer.communicated_target.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(
self.optimizer.communicated_target.b.W.grad,
(base + 1) * np.ones((4, 3)))
chainer.testing.assert_allclose(
self.optimizer.communicated_target.c.W.grad,
(base + 2) * np.ones((5, 4)))
self.optimizer.target.a.W.grad[:] = self.comm.rank + 3
self.optimizer.target.b.W.grad[:] = self.comm.rank + 4
self.optimizer.target.c.W.grad[:] = self.comm.rank + 5
self.optimizer.update()
self.optimizer.wait()
self.assertEqual(self.actual_optimizer.t, 1)
self.optimizer.target.a.W.update_rule.update.assert_called_once_with(
self.optimizer.target.a.W)
self.optimizer.target.b.W.update_rule.update.assert_called_once_with(
self.optimizer.target.b.W)
self.optimizer.target.c.W.update_rule.update.assert_called_once_with(
self.optimizer.target.c.W)
chainer.testing.assert_allclose(
self.optimizer.communicated_target.a.W.grad,
(base + 3) * np.ones((3, 2)))
chainer.testing.assert_allclose(
self.optimizer.communicated_target.b.W.grad,
(base + 4) * np.ones((4, 3)))
chainer.testing.assert_allclose(
self.optimizer.communicated_target.c.W.grad,
(base + 5) * np.ones((5, 4)))
self.comm.finalize()
@chainer.testing.attr.gpu
def test_update_without_batched_copy(self):
self.check_update(False)
@chainer.testing.attr.gpu
def test_update_with_batched_copy(self):
self.check_update(True)
class DynamicExampleModel(chainer.Chain):
def __init__(self):
super(DynamicExampleModel, self).__init__()
with self.init_scope():
self.a = chainer.links.Linear(2, 3)
self.b = chainer.links.Linear(3, 4)
class TestDoubleBufferingOptimizerWithDynamicModel(unittest.TestCase):
def setup(self, batched_copy):
if nccl.get_build_version() < 2000:
pytest.skip('This test requires NCCL version >= 2.0')
self.comm = chainermn.create_communicator('pure_nccl',
batched_copy=batched_copy)
device = self.comm.intra_rank
chainer.cuda.get_device_from_id(device).use()
self.target = DynamicExampleModel()
self.target.to_device(cupy.cuda.Device())
self.target.a.W.data[:] = self.comm.rank
self.target.b.W.data[:] = self.comm.rank + 1
self.target.a.W.grad[:] = 0
self.target.b.W.grad[:] = 0
self.actual_optimizer = chainer.GradientMethod()
self.actual_optimizer.create_update_rule = mock.MagicMock
def check_update(self, batched_copy):
self.setup(batched_copy)
self.optimizer = chainermn.create_multi_node_optimizer(
self.actual_optimizer, self.comm, double_buffering=True)
opt = self.optimizer.setup(self.target)
assert opt is self.optimizer
self.optimizer.update()
self.assertEqual(self.actual_optimizer.t, 0)
self.optimizer.target.a.W.grad[:] = self.comm.rank
self.optimizer.target.b.W.grad[:] = self.comm.rank + 1
self.optimizer.update()
self.optimizer.wait()
self.assertEqual(self.actual_optimizer.t, 0)
base = (self.comm.size - 1.0) / 2
chainer.testing.assert_allclose(
self.optimizer.communicated_target.a.W.grad,
(base + 0) * np.ones((3, 2)))
chainer.testing.assert_allclose(
self.optimizer.communicated_target.b.W.grad,
(base + 1) * np.ones((4, 3)))
self.optimizer.target.a.W.grad[:] = self.comm.rank + 3
self.optimizer.target.b.W.grad[:] = self.comm.rank + 4
self.optimizer.update()
self.optimizer.wait()
self.assertEqual(self.actual_optimizer.t, 1)
self.optimizer.target.a.W.update_rule.update.assert_called_once_with(
self.optimizer.target.a.W)
self.optimizer.target.b.W.update_rule.update.assert_called_once_with(
self.optimizer.target.b.W)
chainer.testing.assert_allclose(
self.optimizer.communicated_target.a.W.grad,
(base + 3) * np.ones((3, 2)))
chainer.testing.assert_allclose(
self.optimizer.communicated_target.b.W.grad,
(base + 4) * np.ones((4, 3)))
with self.target.init_scope():
c = chainer.links.Linear(4, 4)
c.to_device(cupy.cuda.Device())
self.target.c = c
if self.comm.rank == 0:
self.target.c.W.data[:] = self.comm.rank + 2
self.optimizer.setup(self.target)
self.optimizer.update()
self.assertEqual(self.actual_optimizer.t, 0)
send_buf = chainer.cuda.to_cpu(self.optimizer.target.c.W.data)
recv_buf = self.comm.mpi_comm.allgather(send_buf)
for i in range(1, self.comm.size):
chainer.testing.assert_allclose(recv_buf[0], recv_buf[i])
self.optimizer.target.a.W.grad[:] = self.comm.rank + 6
self.optimizer.target.b.W.grad[:] = self.comm.rank + 7
self.optimizer.target.c.W.grad[:] = self.comm.rank + 8
self.optimizer.update()
self.optimizer.wait()
self.assertEqual(self.actual_optimizer.t, 0)
base = (self.comm.size - 1.0) / 2
chainer.testing.assert_allclose(
self.optimizer.communicated_target.a.W.grad,
(base + 6) * np.ones((3, 2)))
chainer.testing.assert_allclose(
self.optimizer.communicated_target.b.W.grad,
(base + 7) * np.ones((4, 3)))
chainer.testing.assert_allclose(
self.optimizer.communicated_target.c.W.grad,
(base + 8) * np.ones((4, 4)))
self.optimizer.target.a.W.grad[:] = self.comm.rank + 9
self.optimizer.target.b.W.grad[:] = self.comm.rank + 10
self.optimizer.target.c.W.grad[:] = self.comm.rank + 11
self.optimizer.update()
self.optimizer.wait()
self.assertEqual(self.actual_optimizer.t, 1)
self.optimizer.target.a.W.update_rule.update.assert_called_once_with(
self.optimizer.target.a.W)
self.optimizer.target.b.W.update_rule.update.assert_called_once_with(
self.optimizer.target.b.W)
self.optimizer.target.c.W.update_rule.update.assert_called_once_with(
self.optimizer.target.c.W)
chainer.testing.assert_allclose(
self.optimizer.communicated_target.a.W.grad,
(base + 9) * np.ones((3, 2)))
chainer.testing.assert_allclose(
self.optimizer.communicated_target.b.W.grad,
(base + 10) * np.ones((4, 3)))
chainer.testing.assert_allclose(
self.optimizer.communicated_target.c.W.grad,
(base + 11) * np.ones((4, 4)))
self.comm.finalize()
@chainer.testing.attr.gpu
def test_update_without_batched_copy(self):
self.check_update(False)
@chainer.testing.attr.gpu
def test_update_with_batched_copy(self):
self.check_update(True)
| 9,368
| 39.734783
| 77
|
py
|
chainer
|
chainer-master/tests/chainermn_tests/iterators_tests/test_multi_node_iterator.py
|
import chainer
import chainer.testing
import chainer.testing.attr
import chainermn
from chainermn.iterators.multi_node_iterator import _build_ctrl_msg
from chainermn.iterators.multi_node_iterator import _parse_ctrl_msg
import numpy as np
import platform
import pytest
from six.moves import range
import unittest
class DummySerializer(chainer.serializer.Serializer):
def __init__(self, target):
super(DummySerializer, self).__init__()
self.target = target
def __getitem__(self, key):
raise NotImplementedError
def __call__(self, key, value):
self.target[key] = value
return self.target[key]
class DummyDeserializer(chainer.serializer.Deserializer):
def __init__(self, target):
super(DummyDeserializer, self).__init__()
self.target = target
def __getitem__(self, key):
raise NotImplementedError
def __call__(self, key, value):
if value is None:
value = self.target[key]
elif isinstance(value, np.ndarray):
np.copyto(value, self.target[key])
else:
value = type(value)(np.asarray(self.target[key]))
return value
@chainer.testing.parameterize(*chainer.testing.product({
'paired_dataset': [True, False],
'iterator_class': [
chainer.iterators.SerialIterator,
chainer.iterators.MultiprocessIterator
],
}))
class TestMultiNodeIterator(unittest.TestCase):
def setUp(self):
if self.iterator_class == chainer.iterators.MultiprocessIterator and \
int(platform.python_version_tuple()[0]) < 3:
pytest.skip('This test requires Python version >= 3')
self.communicator = chainermn.create_communicator('naive')
if self.communicator.size < 2:
pytest.skip('This test is for multinode only')
self.N = 100
if self.paired_dataset:
self.dataset = list(zip(
np.arange(self.N).astype(np.float32),
np.arange(self.N).astype(np.float32)))
else:
self.dataset = np.arange(self.N).astype(np.float32)
def test_mn_iterator(self):
# Datasize is a multiple of batchsize.
bs = 4
iterator = chainermn.iterators.create_multi_node_iterator(
self.iterator_class(
self.dataset, batch_size=bs, shuffle=True),
self.communicator)
for e in range(3):
for i in range(100):
batch = iterator.next()
if self.communicator.rank == 0:
for rank_from in range(1, self.communicator.size):
_batch = self.communicator.mpi_comm.recv(
source=rank_from)
self.assertEqual(batch, _batch)
else:
self.communicator.mpi_comm.ssend(batch, dest=0)
def test_mn_iterator_frag(self):
        # Datasize is not a multiple of batchsize.
bs = 7
iterator = chainermn.iterators.create_multi_node_iterator(
self.iterator_class(
self.dataset, batch_size=bs, shuffle=True),
self.communicator)
for e in range(3):
for i in range(100):
batch = iterator.next()
if self.communicator.rank == 0:
for rank_from in range(1, self.communicator.size):
_batch = self.communicator.mpi_comm.recv(
source=rank_from)
self.assertEqual(batch, _batch)
else:
self.communicator.mpi_comm.ssend(batch, dest=0)
def test_mn_iterator_change_master(self):
# Check if it works under rank_master != 0.
rank_master = 1
bs = 4
iterator = chainermn.iterators.create_multi_node_iterator(
self.iterator_class(
self.dataset, batch_size=bs, shuffle=True),
self.communicator, rank_master)
for e in range(3):
for i in range(100):
batch = iterator.next()
if self.communicator.rank == rank_master:
rank_slaves = [i for i in range(self.communicator.size)
if i != rank_master]
for rank_from in rank_slaves:
_batch = self.communicator.mpi_comm.recv(
source=rank_from)
self.assertEqual(batch, _batch)
else:
self.communicator.mpi_comm.ssend(batch, dest=rank_master)
def test_mn_iterator_no_repeat(self):
# Do not repeat iterator to test if we can catch StopIteration.
bs = 4
iterator = chainermn.iterators.create_multi_node_iterator(
self.iterator_class(
self.dataset, batch_size=bs, shuffle=True, repeat=False),
self.communicator)
for e in range(3):
try:
while True:
batch = iterator.next()
if self.communicator.rank == 0:
for rank_from in range(1, self.communicator.size):
_batch = self.communicator.mpi_comm.recv(
source=rank_from)
self.assertEqual(batch, _batch)
else:
self.communicator.mpi_comm.ssend(batch, dest=0)
except StopIteration:
continue
def test_overwrite_order(self):
"""Tests behavior on serialization.
        This test confirms that the master's batch order can be
        overwritten, while the slave's cannot, since the slave must
        always distribute exactly the same batch as the master.
"""
bs = 4
rank_master = 0
iterator = chainermn.iterators.create_multi_node_iterator(
self.iterator_class(
self.dataset, batch_size=bs, shuffle=True, repeat=False),
self.communicator,
rank_master=rank_master)
target = dict()
iterator.serialize(DummySerializer(target))
order = target['order']
new_order = np.roll(order, 1)
target['order'] = new_order
iterator.serialize(DummyDeserializer(target))
if self.communicator.rank == rank_master:
self.assertEqual(iterator._state.order.tolist(),
new_order.tolist())
else:
self.assertEqual(iterator._order.tolist(), order.tolist())
class TestMultiNodeIteratorDataType(unittest.TestCase):
def setUp(self):
self.communicator = chainermn.create_communicator('naive')
if self.communicator.size < 2:
pytest.skip('This test is for multinode only')
def test_invalid_type(self):
self.N = 10
self.dataset = ['test']*self.N
bs = 1
iterator = chainermn.iterators.create_multi_node_iterator(
chainer.iterators.SerialIterator(
self.dataset, batch_size=bs, shuffle=True),
self.communicator)
with self.assertRaises(TypeError):
iterator.next()
class TestCntrlMessageConversion(unittest.TestCase):
def test_conversion(self):
stop = True
is_valid_data_type = True
is_paired_dataset = True
is_new_epoch = True
current_position = 0
msg = _build_ctrl_msg(stop, is_valid_data_type, is_paired_dataset,
is_new_epoch, current_position)
np.testing.assert_array_equal(msg,
_build_ctrl_msg(*_parse_ctrl_msg(msg)))
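# A sketch (an assumption, not the chainermn encoding): the round-trip
# property above holds for any packing of the five fields into a fixed-size
# integer array, e.g.:
def _sketch_build_ctrl_msg(stop, is_valid_data_type, is_paired_dataset,
                           is_new_epoch, current_position):
    return np.array([stop, is_valid_data_type, is_paired_dataset,
                     is_new_epoch, current_position], dtype=np.int64)


def _sketch_parse_ctrl_msg(msg):
    return (bool(msg[0]), bool(msg[1]), bool(msg[2]), bool(msg[3]),
            int(msg[4]))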
| 7,707
| 34.196347
| 78
|
py
|
chainer
|
chainer-master/tests/chainermn_tests/iterators_tests/test_synchronized_iterator.py
|
import chainer
import chainer.testing
import chainermn
import numpy as np
import pytest
import unittest
class TestSynchronizedIterator(unittest.TestCase):
def setUp(self):
self.communicator = chainermn.create_communicator('naive')
if self.communicator.size < 2:
pytest.skip("This test is for multinode only")
N = 100
self.dataset = np.arange(N).astype(np.float32)
def test_sync(self):
# test the case when datasize is a multiple of batchsize
iterator = chainermn.iterators.create_synchronized_iterator(
chainer.iterators.SerialIterator(
self.dataset, batch_size=4, shuffle=True),
self.communicator)
for e in range(3):
self.assertEqual(e, iterator.epoch)
while True:
batch = np.array(iterator.next(), dtype=np.float32)
if self.communicator.rank == 0:
for rank_from in range(1, self.communicator.size):
_batch = self.communicator.recv(rank_from, tag=0)
chainer.testing.assert_allclose(batch, _batch)
else:
self.communicator.send(batch, dest=0, tag=0)
if iterator.is_new_epoch:
break
def test_sync_frag(self):
# test the case when datasize is not a multiple of batchsize
iterator = chainermn.iterators.create_synchronized_iterator(
chainer.iterators.SerialIterator(
self.dataset, batch_size=7, shuffle=True),
self.communicator)
for e in range(3):
self.assertEqual(e, iterator.epoch)
while True:
batch = np.array(iterator.next(), dtype=np.float32)
if self.communicator.rank == 0:
for rank_from in range(1, self.communicator.size):
_batch = self.communicator.recv(rank_from, tag=0)
chainer.testing.assert_allclose(batch, _batch)
else:
self.communicator.send(batch, dest=0, tag=0)
if iterator.is_new_epoch:
break
def test_sync_no_repeat(self):
iterator = chainermn.iterators.create_synchronized_iterator(
chainer.iterators.SerialIterator(
self.dataset, batch_size=4, shuffle=True, repeat=False),
self.communicator)
for e in range(3):
try:
while True:
batch = np.array(iterator.next(), dtype=np.float32)
if self.communicator.rank == 0:
for rank_from in range(1, self.communicator.size):
_batch = self.communicator.recv(rank_from, tag=0)
chainer.testing.assert_allclose(batch, _batch)
else:
self.communicator.send(batch, dest=0, tag=0)
except StopIteration:
iterator.reset()
def test_sync_no_repeat_frag(self):
iterator = chainermn.iterators.create_synchronized_iterator(
chainer.iterators.SerialIterator(
self.dataset, batch_size=7, shuffle=True, repeat=False),
self.communicator)
for e in range(3):
try:
while True:
batch = np.array(iterator.next(), dtype=np.float32)
if self.communicator.rank == 0:
for rank_from in range(1, self.communicator.size):
_batch = self.communicator.recv(rank_from, tag=0)
chainer.testing.assert_allclose(batch, _batch)
else:
self.communicator.send(batch, dest=0, tag=0)
except StopIteration:
iterator.reset()
| 3,885
| 37.475248
| 77
|
py
|
chainer
|
chainer-master/tests/chainermn_tests/iterators_tests/test_iterator_compatibility.py
|
# This test is based on Chainer's iterator compatibility test.
# The major change is that we do not test
# the order SerialIterator -> MultiNodeIterator,
# because the slave iterator must synchronize its batch order with the
# master and thus must not accept overwriting the batch order via
# serialization.
# See: chainer/tests/chainer_tests/
# iterators_tests/test_iterator_compatibility.py (7e8f6cc)
import numpy
import platform
import pytest
import unittest
import chainer
import chainer.testing
import chainermn
class DummySerializer(chainer.serializer.Serializer):
def __init__(self, target):
super(DummySerializer, self).__init__()
self.target = target
def __getitem__(self, key):
raise NotImplementedError
def __call__(self, key, value):
self.target[key] = value
return self.target[key]
class DummyDeserializer(chainer.serializer.Deserializer):
def __init__(self, target):
super(DummyDeserializer, self).__init__()
self.target = target
def __getitem__(self, key):
raise NotImplementedError
def __call__(self, key, value):
if value is None:
value = self.target[key]
elif isinstance(value, numpy.ndarray):
numpy.copyto(value, self.target[key])
else:
value = type(value)(numpy.asarray(self.target[key]))
return value
@chainer.testing.parameterize(*chainer.testing.product({
'iterator_class': [
chainer.iterators.SerialIterator,
chainer.iterators.MultiprocessIterator,
],
}))
class TestIteratorCompatibility(unittest.TestCase):
def setUp(self):
if self.iterator_class == chainer.iterators.MultiprocessIterator and \
int(platform.python_version_tuple()[0]) < 3:
pytest.skip('This test requires Python version >= 3')
self.communicator = chainermn.create_communicator('naive')
if self.communicator.size < 2:
pytest.skip('This test is for multinode only')
self.N = 6
self.dataset = numpy.arange(self.N).astype(numpy.float32)
self.bs = 2
def test_multi_node_iterator_compatibility(self):
iters = (
lambda: chainermn.iterators.create_multi_node_iterator(
self.iterator_class(
self.dataset, batch_size=self.bs),
self.communicator),
lambda: self.iterator_class(
self.dataset, batch_size=self.bs),
)
bs_n_ratio = 1. * self.bs / self.N
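        # epoch_detail is the fraction of the dataset consumed so far, i.e.
        # (#batches taken) * bs / N, hence the multiples of bs_n_ratio below.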
it_before, it_after = iters
it = it_before()
self.assertEqual(it.epoch, 0)
self.assertAlmostEqual(it.epoch_detail, 0 * bs_n_ratio)
batch1 = it.next()
self.assertEqual(len(batch1), self.bs)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 1 * bs_n_ratio)
batch2 = it.next()
self.assertEqual(len(batch2), self.bs)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 2 * bs_n_ratio)
target = dict()
it.serialize(DummySerializer(target))
it = it_after()
it.serialize(DummyDeserializer(target))
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 2 * bs_n_ratio)
batch3 = it.next()
self.assertEqual(len(batch3), self.bs)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertEqual(
sorted(batch1 + batch2 + batch3),
self.dataset.tolist())
self.assertAlmostEqual(it.epoch_detail, 3 * bs_n_ratio)
def test_synchronized_iterator_compatibility(self):
"""
Do not use `chainer.testing.parameterize` to share the code with
`test_multi_node_iterator_compatibility` because pytest cannot
guarantee the execution order of tests produced by `parameterize`,
which causes unexpected behaviors with MPI programs.
"""
iters = (
lambda: chainermn.iterators.create_synchronized_iterator(
self.iterator_class(
self.dataset, batch_size=self.bs),
self.communicator),
lambda: self.iterator_class(
self.dataset, batch_size=self.bs),
)
bs_n_ratio = 1. * self.bs / self.N
it_before, it_after = iters
it = it_before()
self.assertEqual(it.epoch, 0)
self.assertAlmostEqual(it.epoch_detail, 0 * bs_n_ratio)
batch1 = it.next()
self.assertEqual(len(batch1), self.bs)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 1 * bs_n_ratio)
batch2 = it.next()
self.assertEqual(len(batch2), self.bs)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 2 * bs_n_ratio)
target = dict()
it.serialize(DummySerializer(target))
it = it_after()
it.serialize(DummyDeserializer(target))
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 2 * bs_n_ratio)
batch3 = it.next()
self.assertEqual(len(batch3), self.bs)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertEqual(
sorted(batch1 + batch2 + batch3),
self.dataset.tolist())
self.assertAlmostEqual(it.epoch_detail, 3 * bs_n_ratio)
| 5,655
| 32.270588
| 78
|
py
|
chainer
|
chainer-master/tests/chainermn_tests/links_tests/test_n_step_rnn.py
|
import chainer
import chainer.backends
from chainer.backends.cuda import cupy
import chainer.functions as F
import chainer.links as L
import chainer.testing
import chainer.testing.attr
import chainermn
import numpy as np
import pytest
class Model(chainer.Chain):
def __init__(self, n_vocab, n_hid, communicator, rank_next, rank_prev):
n_layers = 1
n_rnn_hid = 10
super(Model, self).__init__()
with self.init_scope():
self.l1 = L.EmbedID(n_vocab, n_rnn_hid, ignore_label=-1)
self.rnn = chainermn.links.create_multi_node_n_step_rnn(
L.NStepLSTM(
n_layers=n_layers, in_size=n_rnn_hid, out_size=n_rnn_hid,
dropout=0.1),
communicator, rank_in=rank_prev, rank_out=rank_next,
)
self.l2 = L.Linear(n_rnn_hid, n_hid)
self.l3 = L.Linear(n_hid, 1)
def __call__(self, xs, ts):
h1 = [self.l1(x) for x in xs]
# MultiNodeNStepRNN returns outputs of actual_rnn + delegate_variable.
cell1, cell2, os, delegate_variable = self.rnn(h1)
os = F.concat(os, axis=0)
h2 = self.l2(os)
h3 = self.l3(h2)
ys = F.sum(h3, axis=0)
err = F.mean_squared_error(ys, ts)
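        # pseudo_connect ties the delegate variable from the multi-node RNN
        # into the loss so that err.backward() also propagates through the
        # inter-process links.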
err, = chainermn.functions.pseudo_connect(delegate_variable, err)
return err
def setup_communicator(gpu):
if gpu:
communicator = chainermn.create_communicator('flat')
chainer.backends.cuda.get_device_from_id(
communicator.intra_rank).use()
else:
communicator = chainermn.create_communicator('naive')
if communicator.size < 2:
pytest.skip('This test is for multinode only')
rank_next = communicator.rank + 1
rank_prev = communicator.rank - 1
if rank_prev < 0:
rank_prev = None
if rank_next >= communicator.size:
rank_next = None
return communicator, rank_prev, rank_next
def check_homogeneous_rnn(gpu, dtype):
communicator, rank_prev, rank_next = setup_communicator(gpu=gpu)
n, n_vocab, l = 100, 8, 10
    # The number of model parameters is the same among processes.
n_hid = 2
with chainer.using_config('dtype', dtype):
X = [np.random.randint(
0, n_vocab, size=np.random.randint(l // 2, l + 1),
dtype=np.int32)
for _ in range(n)]
Y = (np.random.rand(n) * 2).astype(dtype)
model = Model(
n_vocab, n_hid, communicator, rank_next,
rank_prev)
if gpu:
model.to_device(cupy.cuda.Device())
X = [chainer.backends.cuda.to_gpu(x) for x in X]
Y = chainer.backends.cuda.to_gpu(Y)
for i in range(n):
err = model(X[i:i + 1], Y[i:i + 1])
err.backward()
# Check if backprop finishes without deadlock.
assert True
@pytest.mark.parametrize('dtype', [np.float16, np.float32])
def test_homogeneous_rnn_cpu(dtype):
check_homogeneous_rnn(False, dtype)
@chainer.testing.attr.gpu
@pytest.mark.parametrize('dtype', [np.float16, np.float32])
def test_homogeneous_rnn_gpu(dtype):
check_homogeneous_rnn(True, dtype)
def check_heterogeneous_rnn(gpu, dtype):
communicator, rank_prev, rank_next = setup_communicator(gpu)
with chainer.using_config('dtype', dtype):
n, n_vocab, l = 100, 8, 10
        # The number of model parameters differs among processes.
n_hid = (communicator.rank + 1) * 10
X = [np.random.randint(
0, n_vocab, size=np.random.randint(l // 2, l + 1),
dtype=np.int32)
for _ in range(n)]
Y = (np.random.rand(n) * 2).astype(dtype)
model = Model(
n_vocab, n_hid, communicator, rank_next,
rank_prev)
if gpu:
model.to_device(cupy.cuda.Device())
X = [chainer.backends.cuda.to_gpu(x) for x in X]
Y = chainer.backends.cuda.to_gpu(Y)
for i in range(n):
err = model(X[i:i + 1], Y[i:i + 1])
err.backward()
# Check if backprop finishes without deadlock.
assert True
@pytest.mark.parametrize('dtype', [np.float16, np.float32])
def test_heterogeneous_rnn_cpu(dtype):
check_heterogeneous_rnn(False, dtype)
@chainer.testing.attr.gpu
@pytest.mark.parametrize('dtype', [np.float16, np.float32])
def test_heterogeneous_rnn_gpu(dtype):
check_heterogeneous_rnn(True, dtype)
| 4,452
| 29.923611
| 78
|
py
|
chainer
|
chainer-master/tests/chainermn_tests/links_tests/test_create_mnbn_model.py
|
import unittest
import chainer
import chainer.testing
import chainer.testing.attr
import chainermn
from chainermn.testing.device import get_device
class BnChain(chainer.Chain):
def __init__(self, size):
super(BnChain, self).__init__()
with self.init_scope():
self.conv = chainer.links.Convolution2D(
None, size, 1, 1, 1, nobias=True)
self.bn = chainer.links.BatchNormalization(size)
def forward(self, x):
return chainer.functions.relu(self.bn(self.conv(x)))
class BnChainList(chainer.ChainList):
def __init__(self, size):
super(BnChainList, self).__init__(
chainer.links.Convolution2D(
None, size, 1, 1, 1, nobias=True),
chainer.links.BatchNormalization(size),
)
def forward(self, x):
return chainer.functions.relu(self[1](self[0](x)))
class TestCreateMnBnModel(unittest.TestCase):
def setUp(self):
self.communicator = chainermn.create_communicator('naive')
        # Use CuPy's Device class to force a call to cudaSetDevice()
if chainer.backends.cuda.available:
chainer.cuda.get_device_from_id(self.communicator.intra_rank).use()
def check_create_mnbn_model_chain(self, use_gpu, use_chx):
model = BnChain(3)
mnbn_model = chainermn.links.create_mnbn_model(model,
self.communicator)
self.assertTrue(isinstance(mnbn_model.conv,
chainer.links.Convolution2D))
self.assertTrue(
isinstance(mnbn_model.bn,
chainermn.links.MultiNodeBatchNormalization))
device = get_device(self.communicator.intra_rank if use_gpu else None,
use_chx)
mnbn_model.to_device(device)
with chainer.using_device(mnbn_model.device):
x = mnbn_model.xp.zeros((1, 1, 1, 1))
mnbn_model(x)
def check_create_mnbn_model_chain_list(self, use_gpu, use_chx):
model = BnChainList(3)
mnbn_model = chainermn.links.create_mnbn_model(model,
self.communicator)
self.assertTrue(isinstance(mnbn_model[0],
chainer.links.Convolution2D))
self.assertTrue(
isinstance(mnbn_model[1],
chainermn.links.MultiNodeBatchNormalization))
device = get_device(self.communicator.intra_rank if use_gpu else None,
use_chx)
mnbn_model.to_device(device)
with chainer.using_device(mnbn_model.device):
x = mnbn_model.xp.zeros((1, 1, 1, 1))
mnbn_model(x)
def check_create_mnbn_model_sequential(self, use_gpu, use_chx):
size = 3
model = chainer.Sequential(
chainer.links.Convolution2D(
None, size, 1, 1, 1, nobias=True),
chainer.links.BatchNormalization(size),
chainer.functions.relu
)
mnbn_model = chainermn.links.create_mnbn_model(model,
self.communicator)
device = get_device(self.communicator.intra_rank if use_gpu else None,
use_chx)
mnbn_model.to_device(device)
with chainer.using_device(mnbn_model.device):
x = mnbn_model.xp.zeros((1, 1, 1, 1))
mnbn_model(x)
def test_create_mnbn_model_chain_cpu(self):
self.check_create_mnbn_model_chain(use_gpu=False, use_chx=False)
self.check_create_mnbn_model_chain(use_gpu=False, use_chx=True)
def test_create_mnbn_model_chain_list_cpu(self):
self.check_create_mnbn_model_chain_list(use_gpu=False, use_chx=False)
self.check_create_mnbn_model_chain_list(use_gpu=False, use_chx=True)
def test_create_mnbn_model_sequential_cpu(self):
self.check_create_mnbn_model_sequential(use_gpu=False, use_chx=False)
self.check_create_mnbn_model_sequential(use_gpu=False, use_chx=True)
@chainer.testing.attr.gpu
def test_create_mnbn_model_chain_gpu(self):
self.check_create_mnbn_model_chain(use_gpu=True, use_chx=False)
self.check_create_mnbn_model_chain(use_gpu=True, use_chx=True)
@chainer.testing.attr.gpu
def test_create_mnbn_model_chain_list_gpu(self):
self.check_create_mnbn_model_chain_list(use_gpu=True, use_chx=False)
self.check_create_mnbn_model_chain_list(use_gpu=True, use_chx=True)
@chainer.testing.attr.gpu
def test_create_mnbn_model_sequential_gpu(self):
self.check_create_mnbn_model_sequential(use_gpu=True, use_chx=False)
self.check_create_mnbn_model_sequential(use_gpu=True, use_chx=True)
| 4,781
| 37.564516
| 79
|
py
|
chainer
|
chainer-master/tests/chainermn_tests/links_tests/test_batch_normalization.py
|
import chainer
from chainer.backends.cuda import cupy
import chainer.testing
import chainer.utils
import mpi4py.MPI
import numpy
import pytest
from chainermn.communicators.naive_communicator import NaiveCommunicator
from chainermn.communicators.pure_nccl_communicator import PureNcclCommunicator
from chainermn.links import MultiNodeBatchNormalization
from chainermn import nccl
mpi_comm = mpi4py.MPI.COMM_WORLD
class ModelNormalBN(chainer.Chain):
def __init__(self, n_in=3, n_units=3, n_out=2):
super(ModelNormalBN, self).__init__()
with self.init_scope():
self.l1 = chainer.links.Linear(n_in, n_units, nobias=True)
self.bn1 = chainer.links.BatchNormalization(n_units)
self.l2 = chainer.links.Linear(n_in, n_units, nobias=True)
self.bn2 = chainer.links.BatchNormalization(n_units)
self.l3 = chainer.links.Linear(n_in, n_out)
self.train = True
def __call__(self, x):
h = chainer.functions.relu(self.bn1(self.l1(x)))
h = chainer.functions.relu(self.bn2(self.l2(h)))
return self.l3(h)
class ModelDistributedBN(chainer.Chain):
def __init__(self, comm, n_in=3, n_units=3, n_out=2, backend='auto'):
super(ModelDistributedBN, self).__init__()
with self.init_scope():
self.l1 = chainer.links.Linear(n_in, n_units, nobias=True)
self.bn1 = MultiNodeBatchNormalization(
n_units, comm, communication_backend=backend)
self.l2 = chainer.links.Linear(n_in, n_units, nobias=True)
self.bn2 = MultiNodeBatchNormalization(
n_units, comm, communication_backend=backend)
self.l3 = chainer.links.Linear(n_in, n_out)
self.train = True
def __call__(self, x):
h = chainer.functions.relu(self.bn1(self.l1(x)))
h = chainer.functions.relu(self.bn2(self.l2(h)))
return self.l3(h)
def check_multi_node_bn(comm, use_gpu=False, backend='auto',
dtype=numpy.float32):
"""Tests correctness of MultiNodeBatchNormalization.
This test verifies MultiNodeBatchNormalization by comparing
the following four configurations.
(1) Single worker, normal BatchNormalization
(2) Multiple workers, normal BatchNormalization
(3) Single worker, MultiNodeBatchNormalization
(4) Multiple workers, MultiNodeBatchNormalization
    Single worker: only the result of worker 0, which uses the whole
    batch, is checked.
    Multiple workers: each worker uses a 1/n part of the whole batch,
    where n is the number of nodes, and the gradients are aggregated.
    This test runs the forward and backward computation once with
    deterministic model parameters and an input batch, and checks the
    parameter gradients.
    The purpose of MultiNodeBatchNormalization is to make the results of
    (4) exactly the same as (1). Therefore, the essential part is to
    check that the results of (1) and (4) match. The results of (3)
    should also be the same. In contrast, the results of (2) are not
    necessarily the same, and in practice they are almost always
    different. Therefore, we also check that the results of (2) differ
    from the others, to confirm that the test itself works correctly.
"""
local_batchsize = 8
global_batchsize = local_batchsize * comm.size
ndim = 3
numpy.random.seed(71)
x = numpy.random.random(
(global_batchsize, ndim)).astype(numpy.float32)
y = numpy.random.randint(
0, 1, size=global_batchsize, dtype=numpy.int32)
x_local = comm.mpi_comm.scatter(
x.reshape(comm.size, local_batchsize, ndim))
y_local = comm.mpi_comm.scatter(
y.reshape(comm.size, local_batchsize))
io_dtype = dtype
l_dtype = dtype
bn_dtype = dtype
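    # With chainer.mixed16, linear links run in float16 while
    # BatchNormalization keeps float32 parameters; the expected dtypes
    # below are adjusted accordingly.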
if dtype == chainer.mixed16:
io_dtype = numpy.float16
l_dtype = numpy.float16
bn_dtype = numpy.float32
x = x.astype(io_dtype)
x_local = x_local.astype(io_dtype)
if use_gpu:
x = chainer.cuda.to_gpu(x)
y = chainer.cuda.to_gpu(y)
x_local = chainer.cuda.to_gpu(x_local)
y_local = chainer.cuda.to_gpu(y_local)
cls = chainer.links.Classifier
with chainer.using_config('dtype', dtype):
# Single worker
m1 = cls(ModelNormalBN())
# Multi worker, Ghost BN
m2 = cls(ModelNormalBN())
# Single worker, MNBN
m3 = cls(ModelDistributedBN(comm, backend=backend))
# Multi worker, MNBN
m4 = cls(ModelDistributedBN(comm, backend=backend))
# NOTE: m1, m3 and m4 should behave in the same way.
# m2 may be different.
if use_gpu:
device = cupy.cuda.Device()
m1.to_device(device)
m2.to_device(device)
m3.to_device(device)
m4.to_device(device)
m2.copyparams(m1)
m3.copyparams(m1)
m4.copyparams(m1)
l1 = m1(x, y)
m1.cleargrads()
l1.backward()
l2 = m2(x_local, y_local)
m2.cleargrads()
l2.backward()
comm.multi_node_mean_grad(m2)
l3 = m3(x, y)
m3.cleargrads()
l3.backward()
l4 = m4(x_local, y_local)
m4.cleargrads()
l4.backward()
comm.multi_node_mean_grad(m4)
if comm.rank == 0:
for p1, p2, p3, p4 in zip(
sorted(m1.namedparams()),
sorted(m2.namedparams()),
sorted(m3.namedparams()),
sorted(m4.namedparams())):
name = p1[0]
assert (p2[0] == name)
assert (p3[0] == name)
assert (p4[0] == name)
if '/l' in name:
param_dtype = l_dtype
else:
param_dtype = bn_dtype
assert (p1[1].data.dtype == param_dtype)
assert (p2[1].data.dtype == param_dtype)
assert (p3[1].data.dtype == param_dtype)
assert (p4[1].data.dtype == param_dtype)
assert_option = {'atol': 1e-4, 'rtol': 1e-3}
if param_dtype == numpy.float16:
assert_option = {'atol': 1e-2, 'rtol': 1e-2}
chainer.testing.assert_allclose(p1[1].grad, p3[1].grad,
**assert_option)
chainer.testing.assert_allclose(p1[1].grad, p4[1].grad,
**assert_option)
# This is to see that this test is valid.
if comm.size >= 2:
assert_not_allclose(p1[1].grad, p2[1].grad)
def check_link_copyable(comm):
# Regression test for #5854
bn0 = ModelDistributedBN(comm)
bn1 = bn0.copy(mode='copy')
assert bn1 is not None
bn2 = MultiNodeBatchNormalization(10, comm)
bn3 = bn2.copy(mode='copy')
assert bn3 is not None
def assert_not_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):
x = chainer.cuda.to_cpu(chainer.utils.force_array(x))
y = chainer.cuda.to_cpu(chainer.utils.force_array(y))
with pytest.raises(AssertionError):
chainer.testing.assert_allclose(
x, y, atol=atol, rtol=rtol, verbose=verbose)
def create_communicator(communicator_class, mpi_comm, use_gpu):
if PureNcclCommunicator == communicator_class:
use_nccl = True
else:
use_nccl = False
if use_gpu and not use_nccl and nccl.get_build_version() < 2000:
pytest.skip('This test requires NCCL version >= 2.0')
communicator = communicator_class(mpi_comm)
if use_gpu:
chainer.cuda.get_device_from_id(communicator.intra_rank).use()
return communicator
def test_version_check():
comm = create_communicator(NaiveCommunicator, mpi_comm, use_gpu=False)
if chainer.__version__.startswith('1.'):
with pytest.raises(RuntimeError):
MultiNodeBatchNormalization(3, comm)
else:
# Expecting no exceptions
MultiNodeBatchNormalization(3, comm)
@pytest.mark.parametrize(('communicator_class', 'backend', 'dtype'), [
(NaiveCommunicator, 'mpi', numpy.float16),
(NaiveCommunicator, 'mpi', numpy.float32),
(NaiveCommunicator, 'mpi', chainer.mixed16)])
def test_multi_node_bn_cpu(communicator_class, backend, dtype):
comm = create_communicator(communicator_class, mpi_comm,
use_gpu=False)
check_multi_node_bn(comm, backend=backend, dtype=dtype)
check_link_copyable(comm)
comm.mpi_comm.barrier()
@pytest.mark.parametrize(('communicator_class', 'backend', 'dtype'), [
(NaiveCommunicator, 'mpi', numpy.float32),
(PureNcclCommunicator, 'mpi', numpy.float32),
(PureNcclCommunicator, 'mpi', numpy.float16),
(PureNcclCommunicator, 'mpi', chainer.mixed16),
(PureNcclCommunicator, 'nccl', numpy.float32),
(PureNcclCommunicator, 'nccl', numpy.float16),
(PureNcclCommunicator, 'nccl', chainer.mixed16)])
@chainer.testing.attr.gpu
def test_multi_node_bn_gpu(communicator_class, backend, dtype):
comm = create_communicator(communicator_class, mpi_comm,
use_gpu=True)
check_multi_node_bn(comm, use_gpu=True, backend=backend, dtype=dtype)
check_link_copyable(comm)
chainer.cuda.Stream.null.synchronize()
comm.mpi_comm.barrier()
if hasattr(comm, 'nccl_comm'):
comm.nccl_comm.destroy()
@pytest.mark.parametrize(('communicator_class', 'backend'), [
(NaiveCommunicator, 'mpi'),
(NaiveCommunicator, 'auto')])
def test_support_communication_backend_cpu(communicator_class, backend):
n_units = 1
comm = create_communicator(communicator_class,
mpi_comm, use_gpu=False)
MultiNodeBatchNormalization(n_units, comm,
communication_backend=backend)
@pytest.mark.parametrize(('communicator_class', 'backend'), [
(NaiveCommunicator, 'nccl'),
(NaiveCommunicator, 'dummy')])
def test_unsupport_communication_backend_cpu(communicator_class, backend):
n_units = 1
comm = create_communicator(communicator_class,
mpi_comm, use_gpu=False)
with pytest.raises(ValueError):
MultiNodeBatchNormalization(n_units, comm,
communication_backend=backend)
@pytest.mark.parametrize(('communicator_class', 'backend'), [
(NaiveCommunicator, 'mpi'),
(NaiveCommunicator, 'auto'),
(PureNcclCommunicator, 'mpi'),
(PureNcclCommunicator, 'nccl'),
(PureNcclCommunicator, 'auto')])
@chainer.testing.attr.gpu
def test_support_communication_backend_gpu(communicator_class, backend):
n_units = 1
comm = create_communicator(communicator_class,
mpi_comm, use_gpu=True)
MultiNodeBatchNormalization(n_units, comm,
communication_backend=backend)
@pytest.mark.parametrize(('communicator_class', 'backend'), [
(NaiveCommunicator, 'nccl'),
(NaiveCommunicator, 'dummy'),
(PureNcclCommunicator, 'dummy')])
@chainer.testing.attr.gpu
def test_unsupport_communication_backend_gpu(communicator_class, backend):
n_units = 1
comm = create_communicator(communicator_class,
mpi_comm, use_gpu=True)
with pytest.raises(ValueError):
MultiNodeBatchNormalization(n_units, comm,
communication_backend=backend)
| 11,318
| 34.933333
| 79
|
py
|
chainer
|
chainer-master/tests/chainermn_tests/links_tests/test_multi_node_chain_list.py
|
import chainer
import chainer.backends
from chainer.backends.cuda import cupy
import chainer.links as L
import chainer.testing
import chainermn
import numpy as np
import pytest
class Param(object):
def __init__(self, param):
self.dtype = None
self.__dict__.update(param)
params = [Param(p) for p in [
{
'dtype': np.float16,
}, {
'dtype': np.float32,
}, {
'dtype': chainer.mixed16
}]]
class Cycle0SubA(chainer.Chain):
def __init__(self, size):
super(Cycle0SubA, self).__init__()
with self.init_scope():
self.f = L.Linear(size, size)
def __call__(self, x):
return self.f(x)
class Cycle0SubB(chainer.Chain):
def __init__(self, size):
super(Cycle0SubB, self).__init__(
f=L.Linear(size, 2))
def __call__(self, h):
return self.f(h)
class Cycle0(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_prev, rank_next):
super(Cycle0, self).__init__(comm=comm)
self.add_link(Cycle0SubA(size), rank_in=None, rank_out=rank_next)
self.add_link(Cycle0SubB(size), rank_in=rank_prev, rank_out=None)
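# Rank 0 instantiates Cycle0 and every other rank instantiates Cycle1 (see
# check_cycle_model below); the rank_in/rank_out wiring of add_link chains
# the processes into a ring, so one forward/backward pass travels once
# around it.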
class Cycle1Sub(chainer.Chain):
def __init__(self, size):
super(Cycle1Sub, self).__init__(
f=L.Linear(size, size))
def __call__(self, h):
return self.f(h)
class Cycle1(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_prev, rank_next):
super(Cycle1, self).__init__(comm=comm)
self.add_link(Cycle1Sub(size), rank_in=rank_prev, rank_out=rank_next)
class Cross0(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_prev, rank_next):
super(Cross0, self).__init__(comm=comm)
self.add_link(Cycle0SubA(size), rank_in=None, rank_out=rank_next)
self.add_link(Cycle0SubB(size), rank_in=rank_prev, rank_out=None)
class Cross1(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_prev, rank_next):
super(Cross1, self).__init__(comm=comm)
self.add_link(Cycle0SubB(size), rank_in=rank_prev, rank_out=None)
self.add_link(Cycle0SubA(size), rank_in=None, rank_out=rank_next)
class BranchSubA(chainer.Chain):
def __init__(self, size):
super(BranchSubA, self).__init__(
f=L.Linear(size, size))
def __call__(self, x):
return self.f(x)
class BranchSubB(chainer.Chain):
def __init__(self, size):
super(BranchSubB, self).__init__(
f=L.Linear(size, size))
def __call__(self, *xs):
x = xs[0]
for _x in xs[1:]:
x = x + _x
return self.f(x)
class BranchParent1(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_children):
super(BranchParent1, self).__init__(comm=comm)
self.add_link(BranchSubA(size), rank_in=None, rank_out=rank_children)
self.add_link(BranchSubB(size), rank_in=rank_children, rank_out=None)
class BranchParent2(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_children):
super(BranchParent2, self).__init__(comm=comm)
ranks = [comm.rank] + rank_children
self.add_link(BranchSubA(size), rank_in=None, rank_out=ranks)
self.add_link(BranchSubA(size), rank_in=comm.rank, rank_out=comm.rank)
self.add_link(BranchSubB(size), rank_in=ranks, rank_out=None)
class BranchParent3(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_children):
super(BranchParent3, self).__init__(comm=comm)
ranks = rank_children + [comm.rank]
self.add_link(BranchSubA(size), rank_in=None, rank_out=ranks)
self.add_link(BranchSubA(size), rank_in=comm.rank, rank_out=comm.rank)
self.add_link(BranchSubB(size), rank_in=ranks, rank_out=None)
class BranchParent4(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_children):
super(BranchParent4, self).__init__(comm=comm)
ranks = rank_children + [comm.rank]
ranks = ranks[1:] + ranks[0:1]
self.add_link(BranchSubA(size), rank_in=None, rank_out=ranks)
self.add_link(BranchSubA(size), rank_in=comm.rank, rank_out=comm.rank)
self.add_link(BranchSubB(size), rank_in=ranks, rank_out=None)
class BranchChild(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_parent):
super(BranchChild, self).__init__(comm=comm)
self.add_link(
BranchSubA(size),
rank_in=rank_parent,
rank_out=rank_parent)
class TwistFirst(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_next):
super(TwistFirst, self).__init__(comm=comm)
self.add_link(BranchSubA(size), rank_in=None, rank_out=rank_next)
self.add_link(BranchSubA(size), rank_in=rank_next, rank_out=None)
class Twist(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_prev, rank_next):
super(Twist, self).__init__(comm=comm)
self.add_link(BranchSubA(size), rank_in=rank_prev, rank_out=comm.rank)
self.add_link(BranchSubA(size), rank_in=None, rank_out=rank_prev)
self.add_link(BranchSubA(size), rank_in=None, rank_out=rank_next)
self.add_link(BranchSubA(size), rank_in=rank_next, rank_out=comm.rank)
self.add_link(
BranchSubB(size),
rank_in=[comm.rank, comm.rank],
rank_out=None)
class TwistLast(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_prev):
super(TwistLast, self).__init__(comm=comm)
self.add_link(BranchSubA(size), rank_in=rank_prev, rank_out=None)
self.add_link(BranchSubA(size), rank_in=None, rank_out=rank_prev)
class TupleDataSubA(chainer.Chain):
def __init__(self, size):
        super(TupleDataSubA, self).__init__()
        with self.init_scope():
            self.f0 = L.Linear(size, size)
            self.f1 = L.Linear(size, size)
def __call__(self, x):
y0 = self.f0(x)
y1 = self.f1(x)
return y0, y1
class TupleDataSubB(chainer.Chain):
def __init__(self, size):
        super(TupleDataSubB, self).__init__()
        with self.init_scope():
            self.f0 = L.Linear(size, size)
            self.f1 = L.Linear(size, size)
def __call__(self, x):
        # TupleDataSubB receives a two-element tuple from TupleDataSubA.
x0, x1 = x
y0 = self.f0(x0)
y1 = self.f1(x1)
return y0 + y1
class TupleDataSubC(chainer.Chain):
def __init__(self, size):
        super(TupleDataSubC, self).__init__()
        with self.init_scope():
            self.f = L.Linear(size, size)
def __call__(self, x):
return self.f(x)
class TupleDataParent(chainermn.MultiNodeChainList):
def __init__(self, comm, size, rank_child):
super(TupleDataParent, self).__init__(comm=comm)
self.add_link(TupleDataSubA(size), rank_in=None, rank_out=rank_child)
self.add_link(TupleDataSubC(size), rank_in=rank_child, rank_out=None)
class TupleDataChild(chainermn.MultiNodeChainList):
def __init__(self, comm, size, rank_parent):
super(TupleDataChild, self).__init__(comm=comm)
self.add_link(
TupleDataSubB(size), rank_in=rank_parent, rank_out=rank_parent)
def create_communicator(gpu):
if gpu:
communicator = chainermn.create_communicator('flat')
chainer.backends.cuda.get_device_from_id(communicator.intra_rank).use()
else:
communicator = chainermn.create_communicator('naive')
if communicator.size < 2:
pytest.skip('This test is for multinode only')
rank_next = (communicator.rank + 1) % communicator.size
rank_prev = (communicator.rank - 1) % communicator.size
return communicator, rank_next, rank_prev
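# For example, with a communicator of size 4, rank 1 gets rank_next == 2 and
# rank_prev == 0; the modulo arithmetic wraps rank 3's next to 0 and
# rank 0's prev to 3, so the processes form a ring.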
def check_cycle_model(gpu, param):
communicator, rank_next, rank_prev = create_communicator(gpu)
n, d = 100, 10
with chainer.using_config('dtype', param.dtype):
if communicator.rank == 0:
X = np.random.randn(n, d).astype(param.dtype)
Y = (np.random.rand(n) * 2).astype(np.int32)
model = L.Classifier(
Cycle0(d, communicator, rank_next, rank_prev))
if gpu:
model.to_device(cupy.cuda.Device())
X = chainer.backends.cuda.to_gpu(X)
Y = chainer.backends.cuda.to_gpu(Y)
for i in range(n):
err = model(X[i:i + 1], Y[i:i + 1])
err.backward()
else:
model = Cycle1(
d, communicator, rank_next, rank_prev)
if gpu:
model.to_device(cupy.cuda.Device())
for i in range(n):
err = model()
err.backward()
@pytest.mark.parametrize('param', params)
def test_cycle_model_cpu(param):
check_cycle_model(False, param)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_cycle_model_gpu(param):
check_cycle_model(True, param)
def check_crossing_model(gpu, param):
communicator, rank_next, rank_prev = create_communicator(gpu)
n, d = 100, 10
X = np.random.randn(n, d).astype(param.dtype)
Y = (np.random.rand(n) * 2).astype(np.int32)
with chainer.using_config('dtype', param.dtype):
if communicator.rank == 0:
model = L.Classifier(Cross0(
d, communicator, rank_next, rank_prev))
else:
model = L.Classifier(Cross1(
d, communicator, rank_next, rank_prev))
if gpu:
model.to_device(cupy.cuda.Device())
X = chainer.backends.cuda.to_gpu(X)
Y = chainer.backends.cuda.to_gpu(Y)
for i in range(n):
err = model(X[i:i + 1], Y[i:i + 1])
err.backward()
@pytest.mark.parametrize('param', params)
def test_crossing_model_cpu(param):
check_crossing_model(False, param)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_crossing_model_gpu(param):
check_crossing_model(True, param)
def check_branching_model(gpu, communicator, rank_next, rank_prev,
parent_model, param):
n, d = 100, 10
X = np.random.randn(n, d).astype(param.dtype)
Y = (np.random.rand(n) * 2).astype(np.int32)
with chainer.using_config('dtype', param.dtype):
if communicator.rank == 0:
            rank_children = list(range(1, communicator.size))
model = L.Classifier(parent_model(
d, communicator, rank_children))
if gpu:
model.to_device(cupy.cuda.Device())
X = chainer.backends.cuda.to_gpu(X)
Y = chainer.backends.cuda.to_gpu(Y)
for i in range(n):
err = model(X[i:i + 1], Y[i:i + 1])
err.backward()
else:
model = BranchChild(d, communicator, 0)
if gpu:
model.to_device(cupy.cuda.Device())
for i in range(n):
err = model()
err.backward()
def check_branching_models(gpu, param):
communicator, rank_next, rank_prev = create_communicator(gpu)
check_branching_model(gpu, communicator, rank_next, rank_prev,
BranchParent1, param)
check_branching_model(gpu, communicator, rank_next, rank_prev,
BranchParent2, param)
check_branching_model(gpu, communicator, rank_next, rank_prev,
BranchParent3, param)
check_branching_model(gpu, communicator, rank_next, rank_prev,
BranchParent4, param)
@pytest.mark.parametrize('param', params)
def test_branching_models_cpu(param):
check_branching_models(False, param)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_branching_models_gpu(param):
check_branching_models(True, param)
def check_twisting_model(gpu, param):
communicator, rank_next, rank_prev = create_communicator(gpu)
n, d = 100, 10
X = np.random.randn(n, d).astype(param.dtype)
Y = (np.random.rand(n) * 2).astype(np.int32)
with chainer.using_config('dtype', param.dtype):
if communicator.rank == 0:
model = L.Classifier(
TwistFirst(d, communicator, rank_next))
elif communicator.rank == communicator.size - 1:
model = L.Classifier(
TwistLast(d, communicator, rank_prev))
else:
model = L.Classifier(Twist(
d, communicator, rank_prev, rank_next))
if gpu:
model.to_device(cupy.cuda.Device())
X = chainer.backends.cuda.to_gpu(X)
Y = chainer.backends.cuda.to_gpu(Y)
for i in range(n):
err = model(X[i:i + 1], Y[i:i + 1])
err.backward()
@pytest.mark.parametrize('param', params)
def test_twisting_model_cpu(param):
check_twisting_model(False, param)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_twisting_model_gpu(param):
check_twisting_model(True, param)
def check_tuple_data_model(gpu, param):
# This test only uses pairs (0, 1), (2, 3), ... (2m, 2m+1)
communicator, rank_next, rank_prev = create_communicator(gpu)
n, d = 100, 10
X = np.random.randn(n, d).astype(param.dtype)
Y = (np.random.rand(n) * 2).astype(np.int32)
with chainer.using_config('dtype', param.dtype):
if communicator.rank % 2 == 0:
if communicator.rank == communicator.size - 1:
                # in case 2m is the right end with an odd number of nodes
return
model = L.Classifier(
TupleDataParent(communicator, d, rank_next))
elif communicator.rank % 2 == 1:
model = TupleDataChild(communicator, d, rank_prev)
assert model is not None
if gpu:
model.to_device(cupy.cuda.Device())
X = chainer.backends.cuda.to_gpu(X)
Y = chainer.backends.cuda.to_gpu(Y)
for i in range(n):
if communicator.rank % 2 == 0:
err = model(X[i:i + 1], Y[i:i + 1])
elif communicator.rank % 2 == 1:
err = model()
assert err is not None
err.backward()
@pytest.mark.parametrize('param', params)
def test_tuple_data_model_cpu(param):
check_tuple_data_model(False, param)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_tuple_data_model_gpu(param):
check_tuple_data_model(True, param)
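# Note: like other chainermn tests, these are meant to be launched under an
# MPI runner with at least two processes; an illustrative invocation (the
# exact file name and flags may differ) is:
#     mpiexec -n 2 python -m pytest test_multi_node_chain_list.py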
| 14,461
| 30.995575
| 79
|
py
|
chainer
|
chainer-master/docs/source/_docstring_check.py
|
import math
def check(app, what, name, obj, options, lines):
ctx = DocstringCheckContext(app, what, name, obj, options, lines)
if what in ('function', 'method'):
_docstring_check_returns_indent(ctx)
class DocstringCheckContext(object):
def __init__(self, app, what, name, obj, options, lines):
self.app = app
self.what = what
self.name = name
self.obj = obj
self.options = options
self.lines = lines
self.iline = 0
def nextline(self):
if self.iline >= len(self.lines):
raise StopIteration
line = self.lines[self.iline]
self.iline += 1
return line
def error(self, msg, include_line=True, include_source=True):
lines = self.lines
iline = self.iline - 1
msg = ('{}\n\n'
'on {}'.format(msg, self.name))
if include_line and 0 <= iline < len(lines):
line = lines[iline]
msg += '\n' + 'at line {}: "{}"\n'.format(iline, line)
if include_source:
msg += '\n'
msg += 'docstring:\n'
digits = int(math.floor(math.log10(len(lines)))) + 1
linum_fmt = '{{:0{}d}} '.format(digits)
for i, line in enumerate(lines):
msg += linum_fmt.format(i) + line + '\n'
raise InvalidDocstringError(msg, self, iline)
class InvalidDocstringError(Exception):
def __init__(self, msg, ctx, iline):
        super(InvalidDocstringError, self).__init__(msg)
self.msg = msg
self.ctx = ctx
self.iline = iline
def __str__(self):
return self.msg
def _docstring_check_returns_indent(ctx):
# Seek the :returns: header
try:
line = ctx.nextline()
while line != ':returns:':
line = ctx.nextline()
except StopIteration:
return # No `Returns` section
# Skip empty lines and seek the first line of the content
try:
line = ctx.nextline()
while not line:
line = ctx.nextline()
except StopIteration:
ctx.error('`Returns` section has no content')
# Find the indentation of the first line
# (note: line should have at least one non-space character)
nindent = next(i for i, c in enumerate(line) if c != ' ')
# Check the indentation of the following lines
try:
line = ctx.nextline()
while line.startswith(' '):
if (not line.startswith(' ' * nindent) or
line[nindent:].startswith(' ')):
ctx.error('Invalid indentation of `Returns` section')
line = ctx.nextline()
except StopIteration:
pass
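# Illustrative docstrings as seen by this checker (reST after napoleon
# conversion, i.e. with a bare ':returns:' line):
#
# Accepted -- every continuation line shares the first content line's indent:
#     :returns:
#         A tuple of arrays.
#         The first element is the output.
#
# Rejected -- a continuation line is indented deeper than the first one, so
# _docstring_check_returns_indent raises InvalidDocstringError:
#     :returns:
#         A tuple of arrays.
#             The first element is the output.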
| 2,691
| 28.582418
| 69
|
py
|
chainer
|
chainer-master/docs/source/_autosummary_check.py
|
import inspect
import os
import types
import sphinx
import chainer.functions
import chainer.links
logger = sphinx.util.logging.getLogger(__name__)
doc_source_dir = os.path.dirname(__file__)
def _is_rst_exists(entity):
return os.path.exists(os.path.join(
doc_source_dir, 'reference', 'generated', '{}.rst'.format(entity)))
def check(app, exception):
missing_entities = []
missing_entities += [
name for name in _list_chainer_functions()
if not _is_rst_exists(name)]
missing_entities += [
name for name in _list_chainer_links()
if not _is_rst_exists(name)]
if missing_entities:
logger.warning('\n'.join([
'Undocumented entities found.',
'',
] + missing_entities))
def _list_chainer_functions():
# List exported functions under chainer.functions.
return ['chainer.functions.{}'.format(name)
for (name, func) in chainer.functions.__dict__.items()
if isinstance(func, types.FunctionType)]
def _list_chainer_links():
# List exported classes under chainer.links.
return ['chainer.links.{}'.format(name)
for (name, link) in chainer.links.__dict__.items()
if inspect.isclass(link)]
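# For instance (illustrative), 'chainer.functions.relu' counts as documented
# only if reference/generated/chainer.functions.relu.rst exists next to this
# file; any exported function or link class without such a file is reported
# by check() as an undocumented entity.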
| 1,252
| 24.06
| 75
|
py
|
chainer
|
chainer-master/docs/source/_napoleon_patch.py
|
from typing import List
import sphinx
def qualify_name(attr_name, klass):
if klass and '.' not in attr_name:
if attr_name.startswith('~'):
attr_name = attr_name[1:]
try:
q = klass.__qualname__
except AttributeError:
q = klass.__name__
return '~%s.%s' % (q, attr_name)
return attr_name
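# For example (illustrative): qualify_name('W', MyLink) returns '~MyLink.W'
# when MyLink.__qualname__ == 'MyLink'; a leading '~' in the attribute name
# is dropped before qualifying, and dotted names are returned unchanged.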
def setup(app):
app.setup_extension('sphinx.ext.napoleon')
def _parse_attributes_section(self, section: str) -> List[str]:
lines = []
for _name, _type, _desc in self._consume_fields():
if self._config.napoleon_use_ivar:
_name = qualify_name(_name, self._obj) # Added this line
field = ':ivar %s: ' % _name # type: unicode
lines.extend(self._format_block(field, _desc))
if _type:
lines.append(':vartype %s: %s' % (_name, _type))
else:
lines.extend(['.. attribute:: ' + _name, ''])
fields = self._format_field('', '', _desc)
lines.extend(self._indent(fields, 3))
if _type:
lines.append('')
lines.extend(self._indent([':type: %s' % _type], 3))
lines.append('')
if self._config.napoleon_use_ivar:
lines.append('')
return lines
sphinx.ext.napoleon.GoogleDocstring._parse_attributes_section = \
_parse_attributes_section
| 1,477
| 32.590909
| 73
|
py
|
chainer
|
chainer-master/docs/source/conf.py
|
# -*- coding: utf-8 -*-
#
# Chainer documentation build configuration file, created by
# sphinx-quickstart on Sun May 10 12:22:10 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import inspect
import os
import pkg_resources
import sys
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
import _docstring_check
import _autosummary_check
__version__ = pkg_resources.get_distribution('chainer').version
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
rtd_version = os.environ.get('READTHEDOCS_VERSION')
if rtd_version == 'latest':
tag = 'master'
else:
tag = 'v{}'.format(__version__)
extlinks = {
'blob': ('https://github.com/chainer/chainer/blob/{}/%s'.format(tag), ''),
'tree': ('https://github.com/chainer/chainer/tree/{}/%s'.format(tag), ''),
}
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.linkcode',
'_napoleon_patch',
]
try:
import sphinxcontrib.spelling # noqa
extensions.append('sphinxcontrib.spelling')
except ImportError:
pass
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Chainer'
copyright = u'2015, Preferred Networks, inc. and Preferred Infrastructure, inc.'
author = u'Preferred Networks, inc. and Preferred Infrastructure, inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Napoleon settings
napoleon_use_ivar = True
napoleon_include_special_with_doc = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not on_rtd:
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = 'css/modified_theme.css'
if on_rtd:
html_context = {
'css_files': [
'https://media.readthedocs.org/css/sphinx_rtd_theme.css',
'https://media.readthedocs.org/css/readthedocs-doc-embed.css',
'_static/css/modified_theme.css',
],
}
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Chainerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Chainer.tex', u'Chainer Documentation',
u'Preferred Networks, inc. and Preferred Infrastructure, inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'chainer', u'Chainer Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Chainer', u'Chainer Documentation',
author, 'Chainer', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autosummary_generate = True
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'cupy': ('https://docs-cupy.chainer.org/en/latest/', None),
'chainercv': ('https://chainercv.readthedocs.io/en/latest/', None),
}
doctest_global_setup = '''
import os
import numpy as np
import chainer
from chainer.backends import cuda
from chainer.backends.cuda import cupy
from chainer import Function, gradient_check, training, utils, Variable
from chainer import datasets, iterators, optimizers, serializers
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from chainer.testing import doctest_helper
from chainer.training import extensions
import chainerx
import onnx_chainer
np.random.seed(0)
'''
spelling_lang = 'en_US'
spelling_word_list_filename = 'spelling_wordlist.txt'
def setup(app):
app.connect('autodoc-process-docstring', _autodoc_process_docstring)
app.connect('build-finished', _build_finished)
def _autodoc_process_docstring(app, what, name, obj, options, lines):
_docstring_check.check(app, what, name, obj, options, lines)
def _build_finished(app, exception):
if exception is None:
_autosummary_check.check(app, exception)
def _import_object_from_name(module_name, fullname):
obj = sys.modules.get(module_name)
if obj is None:
return None
for comp in fullname.split('.'):
obj = getattr(obj, comp)
return obj
def _is_egg_directory(path):
return (path.endswith('.egg') and
os.path.isdir(os.path.join(path, 'EGG-INFO')))
def _is_git_root(path):
return os.path.isdir(os.path.join(path, '.git'))
_source_root = None
def _find_source_root(source_abs_path):
    # Note that READTHEDOCS* environment variables cannot be used, because
    # they are not set under the docker environment.
global _source_root
if _source_root is None:
dir = os.path.dirname(source_abs_path)
while True:
if _is_egg_directory(dir) or _is_git_root(dir):
# Reached the root directory
_source_root = dir
break
dir_ = os.path.dirname(dir)
if len(dir_) == len(dir):
raise RuntimeError('Couldn\'t parse root directory from '
'source file: {}'.format(source_abs_path))
dir = dir_
return _source_root
def _get_source_relative_path(source_abs_path):
return os.path.relpath(source_abs_path, _find_source_root(source_abs_path))
def _get_sourcefile_and_linenumber(obj):
# Retrieve the original function wrapped by contextlib.contextmanager
if callable(obj):
closure = getattr(obj, '__closure__', None)
if closure is not None:
obj = closure[0].cell_contents
# Get the source file name and line number at which obj is defined.
try:
filename = inspect.getsourcefile(obj)
except TypeError:
        # obj is not a module, class, function, etc.
return None, None
    # inspect can return None for Cython objects
if filename is None:
return None, None
# Get the source line number
_, linenum = inspect.getsourcelines(obj)
return filename, linenum
def linkcode_resolve(domain, info):
if domain != 'py' or not info['module']:
return None
    if int(os.environ.get('CHAINER_DOCS_SKIP_LINKCODE', 0)) == 1:
return None
# Import the object from module path
obj = _import_object_from_name(info['module'], info['fullname'])
# If it's not defined in the internal module, return None.
mod = inspect.getmodule(obj)
if mod is None:
return None
if not (mod.__name__ == 'chainer' or mod.__name__.startswith('chainer.')):
return None
# Retrieve source file name and line number
filename, linenum = _get_sourcefile_and_linenumber(obj)
if filename is None or linenum is None:
return None
filename = os.path.realpath(filename)
relpath = _get_source_relative_path(filename)
return 'https://github.com/chainer/chainer/blob/{}/{}#L{}'.format(
tag, relpath, linenum)
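# Illustrative result: for an object defined at line 120 of
# chainer/variable.py, linkcode_resolve returns a URL of the form
# 'https://github.com/chainer/chainer/blob/<tag>/chainer/variable.py#L120',
# where <tag> is 'master' on the latest RTD build and 'v{version}' otherwise.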
| 14,917
| 30.209205
| 80
|
py
|
chainer
|
chainer-master/chainerx_cc/examples/mnist_py/train_mnist.py
|
#!/usr/bin/env python3
import argparse
import gzip
import pathlib
import time
import numpy as np
import chainerx as chx
class MLP(object):
def __init__(self):
self.W1, self.b1 = new_linear_params(784, 1000)
self.W2, self.b2 = new_linear_params(1000, 1000)
self.W3, self.b3 = new_linear_params(1000, 10)
@property
def params(self):
return self.W1, self.b1, self.W2, self.b2, self.W3, self.b3
def forward(self, x):
h = chx.relu(chx.linear(x, self.W1, self.b1))
h = chx.relu(chx.linear(h, self.W2, self.b2))
return chx.linear(h, self.W3, self.b3)
def update(self, lr):
for param in self.params:
# TODO(beam2d): make it easier
p = param.as_grad_stopped()
# TODO(beam2d): make grad not have graph by default
p -= lr * param.grad.as_grad_stopped()
param.cleargrad()
def require_grad(self):
for param in self.params:
param.require_grad()
def new_linear_params(n_in, n_out):
W = np.random.randn(n_out, n_in).astype(
np.float32) # TODO(beam2d): not supported in chx
W /= np.sqrt(n_in) # TODO(beam2d): not supported in chx
W = chx.array(W)
b = chx.zeros(n_out, dtype=chx.float32)
return W, b
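# The 1/sqrt(n_in) scaling above is LeCun-style initialization: dividing
# unit-variance Gaussian weights by sqrt(n_in) keeps the variance of the
# pre-activations roughly constant from layer to layer.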
def evaluate(model, X_test, Y_test, eval_size, batch_size):
N_test = X_test.shape[0] if eval_size is None else eval_size
if N_test > X_test.shape[0]:
raise ValueError(
'Test size can be no larger than {}'.format(X_test.shape[0]))
with chx.no_backprop_mode():
total_loss = chx.array(0, dtype=chx.float32)
num_correct = chx.array(0, dtype=chx.int64)
for i in range(0, N_test, batch_size):
x = X_test[i:min(i + batch_size, N_test)]
t = Y_test[i:min(i + batch_size, N_test)]
y = model.forward(x)
total_loss += chx.softmax_cross_entropy(y, t).sum()
num_correct += (y.argmax(axis=1).astype(t.dtype)
== t).astype(chx.int32).sum()
mean_loss = float(total_loss) / N_test
accuracy = int(num_correct) / N_test
return mean_loss, accuracy
def main():
    parser = argparse.ArgumentParser(
        description='Train a neural network on the MNIST dataset')
parser.add_argument(
'--batchsize', '-B', type=int, default=100, help='Batch size')
parser.add_argument(
'--epoch', '-E', type=int, default=20,
help='Number of epochs to train')
parser.add_argument(
'--iteration', '-I', type=int, default=None,
help='Number of iterations to train. Epoch is ignored if specified.')
parser.add_argument(
'--data', '-p', default='mnist',
help='Path to the directory that contains MNIST dataset')
parser.add_argument(
'--device', '-d', default='native', help='Device to use')
parser.add_argument(
'--eval-size', default=None, type=int,
help='Number of samples to use from the test set for evaluation. '
'None to use all.')
args = parser.parse_args()
chx.set_default_device(args.device)
# Prepare dataset
X, Y = get_mnist(args.data, 'train')
X_test, Y_test = get_mnist(args.data, 't10k')
# Prepare model
model = MLP()
# Training
N = X.shape[0] # TODO(beam2d): implement len
# TODO(beam2d): support int32 indexing
all_indices_np = np.arange(N, dtype=np.int64)
batch_size = args.batchsize
eval_size = args.eval_size
# Train
model.require_grad()
it = 0
epoch = 0
is_finished = False
start = time.time()
while not is_finished:
        # TODO(beam2d): not supported in chx
np.random.shuffle(all_indices_np)
all_indices = chx.array(all_indices_np)
for i in range(0, N, batch_size):
indices = all_indices[i:i + batch_size]
x = X.take(indices, axis=0)
t = Y.take(indices, axis=0)
y = model.forward(x)
loss = chx.softmax_cross_entropy(y, t).mean()
loss.backward()
model.update(lr=0.01)
it += 1
if args.iteration is not None:
mean_loss, accuracy = evaluate(
model, X_test, Y_test, eval_size, batch_size)
elapsed_time = time.time() - start
print(
'iteration {}... loss={},\taccuracy={},\telapsed_time={}'
.format(it, mean_loss, accuracy, elapsed_time))
if it >= args.iteration:
is_finished = True
break
epoch += 1
if args.iteration is None: # stop based on epoch, instead of iteration
mean_loss, accuracy = evaluate(
model, X_test, Y_test, eval_size, batch_size)
elapsed_time = time.time() - start
print(
'epoch {}... loss={},\taccuracy={},\telapsed_time={}'
.format(epoch, mean_loss, accuracy, elapsed_time))
if epoch >= args.epoch:
is_finished = True
def get_mnist(path, name):
path = pathlib.Path(path)
x_path = str(path / '{}-images-idx3-ubyte.gz'.format(name))
y_path = str(path / '{}-labels-idx1-ubyte.gz'.format(name))
with gzip.open(x_path, 'rb') as fx:
fx.read(16) # skip header
        # read/frombuffer is used instead of fromfile because fromfile does
        # not handle gzip files correctly
x = np.frombuffer(fx.read(), dtype=np.uint8).reshape(-1, 784)
with gzip.open(y_path, 'rb') as fy:
fy.read(8) # skip header
y = np.frombuffer(fy.read(), dtype=np.uint8)
assert x.shape[0] == y.shape[0]
x = x.astype(np.float32)
x /= 255
y = y.astype(np.int32)
return chx.array(x), chx.array(y)
if __name__ == '__main__':
main()
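# Example invocation (assuming the four MNIST .gz files, e.g.
# train-images-idx3-ubyte.gz, are placed under ./mnist):
#     python3 train_mnist.py --data mnist --device native --epoch 20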
| 5,839
| 30.567568
| 79
|
py
|
chainer
|
chainer-master/chainerx_cc/examples/imagenet_py/train_imagenet.py
|
#!/usr/bin/env python
"""Example code of learning a large scale convnet from ILSVRC2012 dataset.
Prerequisite: To run this example, crop the center of ILSVRC2012 training and
validation images, scale them to 256x256 and convert them to RGB, and make
two space-separated list files whose first column is the full path to an
image and whose second column is the zero-origin label (this format is the
same as that used by Caffe's ImageDataLayer).
"""
import argparse
import time
import chainer.iterators
import numpy as np
import chainerx as chx
from image_dataset import PreprocessedDataset
import resnet50
def get_imagenet(dataset_iter):
x, t = zip(*next(dataset_iter))
return chx.array(x), chx.array(t)
def evaluate(model, X_test, Y_test, eval_size, batch_size):
N_test = X_test.shape[0] if eval_size is None else eval_size
if N_test > X_test.shape[0]:
raise ValueError(
'Test size can be no larger than {}'.format(X_test.shape[0]))
with chx.no_backprop_mode():
total_loss = chx.array(0, dtype=chx.float32)
num_correct = chx.array(0, dtype=chx.int64)
for i in range(0, N_test, batch_size):
x = X_test[i:min(i + batch_size, N_test)]
t = Y_test[i:min(i + batch_size, N_test)]
y = model(x)
total_loss += chx.softmax_cross_entropy(y, t).sum()
num_correct += (y.argmax(axis=1).astype(t.dtype)
== t).astype(chx.int32).sum()
mean_loss = float(total_loss) / N_test
accuracy = int(num_correct) / N_test
return mean_loss, accuracy
def main():
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('train', help='Path to training image-label list file')
parser.add_argument('val', help='Path to validation image-label list file')
parser.add_argument(
'--batchsize', '-B', type=int, default=32,
help='Learning minibatch size')
parser.add_argument(
'--epoch', '-E', type=int, default=10,
help='Number of epochs to train')
parser.add_argument(
'--iteration', '-I', type=int, default=None,
help='Number of iterations to train. Epoch is ignored if specified.')
parser.add_argument(
'--loaderjob', '-j', type=int,
help='Number of parallel data loading processes')
parser.add_argument(
'--mean', '-m', default='mean.npy',
help='Mean file (computed by compute_mean.py)')
parser.add_argument(
'--root', '-R', default='.',
help='Root directory path of image files')
parser.add_argument(
'--val_batchsize', '-b', type=int, default=250,
help='Validation minibatch size')
parser.set_defaults(test=False)
parser.add_argument(
'--device', '-d', default='native', help='Device to use')
args = parser.parse_args()
chx.set_default_device(args.device)
batch_size = args.batchsize
eval_size = args.val_batchsize
# Prepare model
model = resnet50.ResNet50()
# Prepare datasets and mean file
mean = np.load(args.mean)
train = PreprocessedDataset(args.train, args.root, mean, model.insize)
test = PreprocessedDataset(args.val, args.root, mean, model.insize, False)
train_iter = chainer.iterators.MultiprocessIterator(
train, batch_size, n_processes=args.loaderjob)
test_iter = chainer.iterators.MultiprocessIterator(
test, eval_size, n_processes=args.loaderjob)
N = len(train)
# Train
model.require_grad()
it = 0
epoch = 0
is_finished = False
start = time.time()
while not is_finished:
for i in range(0, N // batch_size):
x, t = get_imagenet(train_iter)
y = model(x)
loss = chx.softmax_cross_entropy(y, t).mean()
loss.backward()
model.update(lr=0.01)
it += 1
if args.iteration is not None:
x_test, t_test = get_imagenet(test_iter)
mean_loss, accuracy = evaluate(
model, x_test, t_test, eval_size, batch_size)
elapsed_time = time.time() - start
print(
'iteration {}... loss={},\taccuracy={},\telapsed_time={}'
.format(it, mean_loss, accuracy, elapsed_time))
if it >= args.iteration:
is_finished = True
break
epoch += 1
if args.iteration is None:
x_test, t_test = get_imagenet(test_iter)
mean_loss, accuracy = evaluate(
model, x_test, t_test, eval_size, batch_size)
elapsed_time = time.time() - start
print(
'epoch {}... loss={},\taccuracy={},\telapsed_time={}'
.format(epoch, mean_loss, accuracy, elapsed_time))
if epoch >= args.epoch:
is_finished = True
if __name__ == '__main__':
main()
| 4,972
| 32.375839
| 79
|
py
|
chainer
|
chainer-master/chainerx_cc/examples/imagenet_py/image_dataset.py
|
import random
import chainer
import numpy as np
class PreprocessedDataset(chainer.dataset.DatasetMixin):
def __init__(self, path, root, mean, crop_size, random=True):
self.base = chainer.datasets.LabeledImageDataset(path, root)
self.mean = mean.astype(np.float32)
self.crop_size = crop_size
self.random = random
def __len__(self):
return len(self.base)
def get_example(self, i):
        # It reads the i-th image/label pair and returns a preprocessed image.
        # It applies the following preprocessing steps:
        # - Cropping (random or center rectangular)
        # - Random horizontal flip
        # - Scaling to [0, 1] values
crop_size = self.crop_size
image, label = self.base[i]
_, h, w = image.shape
if self.random:
# Randomly crop a region and flip the image
top = random.randint(0, h - crop_size - 1)
left = random.randint(0, w - crop_size - 1)
if random.randint(0, 1):
image = image[:, :, ::-1]
else:
# Crop the center
top = (h - crop_size) // 2
left = (w - crop_size) // 2
bottom = top + crop_size
right = left + crop_size
image = image[:, top:bottom, left:right]
image -= self.mean[:, top:bottom, left:right]
image *= (1.0 / 255.0) # Scale to [0, 1]
return image, label
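# Minimal usage sketch (file names are placeholders):
#     mean = np.load('mean.npy')
#     dataset = PreprocessedDataset('train.txt', '.', mean, 224)
#     image, label = dataset.get_example(0)
#     # image: float32 CHW array, roughly in [-1, 1] after mean subtraction
#     # and 1/255 scaling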
| 1,428
| 30.065217
| 77
|
py
|
chainer
|
chainer-master/chainerx_cc/examples/imagenet_py/resnet50.py
|
#!/usr/bin/env python3
import numpy as np
import chainerx as chx
class Convolution2D(object):
def __init__(self, in_channels, out_channels, ksize, stride, pad,
initialW=None, nobias=False, groups=1):
        W_shape = out_channels, in_channels // groups, ksize, ksize
self.W = chx.array(np.random.normal(size=W_shape).astype(np.float32))
if nobias:
self.b = None
else:
self.b = chx.array(np.random.normal(
size=out_channels).astype(np.float32))
self.stride = stride
self.pad = pad
@property
def params(self):
return self.W, self.b
def __call__(self, x):
if self.b is not None:
return chx.conv(
x, self.W, self.b, stride=self.stride, pad=self.pad)
else:
return chx.conv(x, self.W, stride=self.stride, pad=self.pad)
def require_grad(self):
for param in self.params:
if param is not None:
param.require_grad()
def update(self, lr):
for param in self.params:
if param is not None:
p = param.as_grad_stopped()
p -= lr * param.grad.as_grad_stopped()
param.cleargrad()
class BatchNormalization(object):
def __init__(self, size, dtype=chx.float32):
        shape = (size,)
self.avg_mean = chx.zeros(shape, dtype)
self.avg_var = chx.zeros(shape, dtype)
self.gamma = chx.ones(shape, dtype)
self.beta = chx.zeros(shape, dtype)
def __call__(self, x):
return chx.batch_norm(x, self.gamma, self.beta,
running_mean=self.avg_mean,
running_var=self.avg_var,
axis=(0, 2, 3))
@property
def params(self):
return self.gamma, self.beta
def require_grad(self):
for param in self.params:
param.require_grad()
def update(self, lr):
for param in self.params:
p = param.as_grad_stopped()
p -= lr * param.grad.as_grad_stopped()
param.cleargrad()
class Linear(object):
def __init__(self, n_in, n_out):
W = np.random.randn(n_in, n_out).astype(np.float32)
W /= np.sqrt(n_in)
self.W = chx.array(W)
self.b = chx.zeros((n_out,), dtype=chx.float32)
def __call__(self, x):
x = x.reshape(x.shape[:2])
return x.dot(self.W) + self.b
@property
def params(self):
return self.W, self.b
def require_grad(self):
for param in self.params:
param.require_grad()
def update(self, lr):
for param in self.params:
p = param.as_grad_stopped()
p -= lr * param.grad.as_grad_stopped()
param.cleargrad()
class BottleNeckA(object):
def __init__(self, in_size, ch, out_size, stride=2, groups=1):
initialW = None
self.conv1 = Convolution2D(
in_size, ch, 1, stride, 0, initialW=initialW, nobias=True)
self.bn1 = BatchNormalization(ch)
self.conv2 = Convolution2D(
ch, ch, 3, 1, 1, initialW=initialW, nobias=True, groups=groups)
self.bn2 = BatchNormalization(ch)
self.conv3 = Convolution2D(
ch, out_size, 1, 1, 0, initialW=initialW, nobias=True)
self.bn3 = BatchNormalization(out_size)
self.conv4 = Convolution2D(
in_size, out_size, 1, stride, 0, initialW=initialW, nobias=True)
self.bn4 = BatchNormalization(out_size)
def __call__(self, x):
h1 = chx.maximum(0, self.bn1(self.conv1(x)))
h1 = chx.maximum(0, self.bn2(self.conv2(h1)))
h1 = self.bn3(self.conv3(h1))
h2 = self.bn4(self.conv4(x))
return chx.maximum(0, h1 + h2)
@property
def params(self):
return (self.conv1, self.bn1, self.conv2, self.bn2,
self.conv3, self.bn3, self.conv4, self.bn4)
def require_grad(self):
for param in self.params:
param.require_grad()
def update(self, lr):
for param in self.params:
param.update(lr)
class BottleNeckB(object):
def __init__(self, in_size, ch, groups=1):
initialW = None
self.conv1 = Convolution2D(
in_size, ch, 1, 1, 0, initialW=initialW, nobias=True)
self.bn1 = BatchNormalization(ch)
self.conv2 = Convolution2D(
ch, ch, 3, 1, 1, initialW=initialW, nobias=True, groups=groups)
self.bn2 = BatchNormalization(ch)
self.conv3 = Convolution2D(
ch, in_size, 1, 1, 0, initialW=initialW, nobias=True)
self.bn3 = BatchNormalization(in_size)
def __call__(self, x):
h = chx.maximum(0, self.bn1(self.conv1(x)))
h = chx.maximum(0, self.bn2(self.conv2(h)))
h = self.bn3(self.conv3(h))
return chx.maximum(0, h + x)
@property
def params(self):
return self.conv1, self.bn1, self.conv2, self.bn2, self.conv3, self.bn3
def require_grad(self):
for param in self.params:
param.require_grad()
def update(self, lr):
for param in self.params:
param.update(lr)
class Block(object):
def __init__(self, layer, in_size, ch, out_size, stride=2, groups=1):
self.children = []
self.add_link(BottleNeckA(in_size, ch, out_size, stride, groups))
        for _ in range(layer - 1):
self.add_link(BottleNeckB(out_size, ch, groups))
def __call__(self, x):
for f in self.children:
x = f(x)
return x
def add_link(self, x):
self.children.append(x)
def require_grad(self):
for child in self.children:
child.require_grad()
def update(self, lr):
for child in self.children:
child.update(lr)
class ResNet50(object):
insize = 224
def __init__(self):
self.conv1 = Convolution2D(3, 64, 7, 2, 3)
self.bn1 = BatchNormalization(64)
self.res2 = Block(3, 64, 64, 256, 1)
self.res3 = Block(4, 256, 128, 512)
self.res4 = Block(6, 512, 256, 1024)
self.res5 = Block(3, 1024, 512, 2048)
self.fc = Linear(2048, 1000)
def __call__(self, x):
h = self.bn1(self.conv1(x))
h = chx.max_pool(chx.maximum(0, h), 3, stride=2)
h = self.res2(h)
h = self.res3(h)
h = self.res4(h)
h = self.res5(h)
h = chx.average_pool(h, 7, stride=1)
h = self.fc(h)
return h
@property
def params(self):
return (self.conv1, self.bn1, self.res2, self.res3, self.res4,
self.res5, self.fc)
def require_grad(self):
for param in self.params:
param.require_grad()
def update(self, lr):
for param in self.params:
param.update(lr)
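# Smoke-test sketch (weights are randomly initialized; this only checks
# shapes):
#     model = ResNet50()
#     x = chx.array(np.zeros((1, 3, model.insize, model.insize), np.float32))
#     y = model(x)  # expected logits of shape (1, 1000)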
| 6,878
| 27.782427
| 79
|
py
|
chainer
|
chainer-master/chainer/device_resident.py
|
from __future__ import absolute_import
import abc
import sys
import threading
import typing as tp # NOQA
import warnings
import numpy
import chainer
from chainer import backend
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import types # NOQA
from chainer import utils
import chainerx
_thread_local = threading.local()
# Used in _ToDeviceVisitor to detect GPU-to-GPU (cupy-to-cupy) device transfer.
# It is usually `None`.
# Assign `False` to enable GPU-to-GPU detection. If detected, `True` will be
# assigned. `None` should be assigned again after retrieving the result.
_thread_local.flag_gpu_to_gpu = None
class DeviceResident(utils.enable_final(meta_base=abc.ABCMeta)):
"""A base class of objects with multi-device hierarchy."""
_device = _cpu.CpuDevice()
def __init__(self):
# Store overridden to_device family method names.
self._overridden_to_methods = tuple([
m for m in ('to_cpu', 'to_gpu', 'to_intel64')
if _is_to_device_method_overridden(self, m)])
def device_resident_accept(self, visitor):
"""Applies the visitor to all the device objects in this instance.
Args:
visitor(~chainer.device_resident.DeviceResidentsVisitor): Visitor.
        This method should be overridden if the concrete class has a custom
        sub-hierarchy of device resident objects.
"""
visitor.visit_device_resident(self)
@property
def device(self):
""":class:`~chainer.backend.Device` instance."""
return self._device
@property
def xp(self) -> types.Xp:
"""Array module corresponding to the device.
Depending on the device in which this object resides, this property
returns :mod:`numpy`, :mod:`cupy` or :mod:`chainerx`.
"""
device = self.device
if device is None:
return None
return device.xp
@utils.final(action=DeprecationWarning)
def to_cpu(self) -> 'DeviceResident':
"""Copies parameter variables and persistent values to CPU.
.. deprecated:: v7.0.0
Use :meth:`to_device` instead.
        This method does not handle non-registered attributes. If any such
        attributes must be copied to CPU, the link implementation should
        override :meth:`~DeviceResident.device_resident_accept` to do so.
Returns: self
"""
visitor = _ToDeviceVisitor(
backend.CpuDevice(),
entry_method_info=('to_cpu', {}),
starting_device_resident=self)
self.__to_device(visitor)
return self
@utils.final(action=DeprecationWarning)
def to_gpu(
self,
device: tp.Optional[types.CudaDeviceSpec] = None,
) -> 'DeviceResident':
"""Copies parameter variables and persistent values to GPU.
.. deprecated:: v7.0.0
Use :meth:`to_device` instead.
        This method does not handle non-registered attributes. If any such
        attributes must be copied to GPU, the link implementation must
        override :meth:`~DeviceResident.device_resident_accept` to do so.
.. warning::
This method does not transfer the parameters if they are already on
GPU. Use ``to_device`` to perform inter-GPU transfer.
Args:
device: Target device specifier. If omitted, the current device is
used.
Returns: self
"""
cuda.check_cuda_available()
cuda_device = cuda._get_device_or_current(device)
device = chainer.backends.cuda.GpuDevice(cuda_device)
visitor = _ToDeviceVisitor(
device,
entry_method_info=('to_gpu', {'device': device.device}),
skip_between_cupy_devices=True,
starting_device_resident=self)
self.__to_device(visitor)
return self
@utils.final(action=DeprecationWarning)
def to_intel64(self) -> 'DeviceResident':
"""Copies parameter variables and persistent values to CPU.
.. deprecated:: v7.0.0
Use :meth:`to_device` instead.
"""
intel64.check_ideep_available()
visitor = _ToDeviceVisitor(
chainer.get_device(intel64.Intel64Device()),
entry_method_info=('to_intel64', {}),
starting_device_resident=self)
self.__to_device(visitor)
return self
@utils.final
def to_chx(self):
"""Converts parameter variables and persistent values to ChainerX \
without any copy.
        This method does not handle non-registered attributes. If any such
        attributes must be copied to ChainerX, the link implementation must
        override this method to do so.
Returns: self
"""
if not chainerx.is_available():
raise RuntimeError('ChainerX is not available.')
if self.xp is chainerx:
return self
self.device_resident_accept(_ToChxVisitor())
return self
@utils.final
def from_chx(self):
"""Converts parameter variables and persistent values from ChainerX \
to NumPy/CuPy devices without any copy."""
if self._device.xp is chainerx:
self._device = self._device.fallback_device
self.device_resident_accept(_FromChxVisitor())
return self
def __to_device(self, to_device_visitor):
self.device_resident_accept(to_device_visitor)
@utils.final
def to_device(
self,
device: types.DeviceSpec
) -> 'DeviceResident':
"""Copies parameter variables and persistent values to the specified \
device.
        This method does not handle non-registered attributes. If any such
        attributes must be copied to the device, the link implementation must
        override this method to do so.
Args:
device: Target device specifier. See
:func:`~chainer.get_device` for available values.
Returns: self
"""
device = chainer.get_device(device)
self.__to_device(_ToDeviceVisitor(device))
return self
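# Usage sketch (device specifiers follow chainer.get_device conventions):
#     link = chainer.links.Linear(3, 2)
#     link.to_device('@numpy')      # NumPy (CPU)
#     link.to_device('@cupy:0')     # CuPy, GPU 0 (requires CUDA)
#     link.to_device('native:0')    # ChainerX native backend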
def _is_to_device_method_overridden(device_resident, method_name):
# Returns whether the specified to_device family method is overridden.
to_method = getattr(device_resident, method_name, None).__func__
to_method_orig = getattr(DeviceResident, method_name)
if sys.version_info < (3,):
to_method_orig = to_method_orig.__func__
if to_method is not to_method_orig:
return True # overridden
return False
class DeviceResidentsVisitor(object):
"""Base class of visitors that visits device resident objects recursively.
.. seealso::
:class:`chainer.DeviceResident`
"""
def visit_device_resident(self, device_resident):
"""Processes a :class:`~chainer.DeviceResident` instance."""
raise NotImplementedError()
def visit_array(self, arr):
"""Processes an array and returns a new one.
If the visitor does not create a new array, it can simply return the
original array.
"""
raise NotImplementedError()
def visit_variable(self, param):
"""Processes a :class:`~chainer.Variable` or a \
:class:`~chainer.Parameter`."""
raise NotImplementedError()
class _ToDeviceVisitor(DeviceResidentsVisitor):
# A visitor that implements recursive to_device().
# For backward compatibility, if any of to_cpu/to_gpu/to_intel64 are
# overridden on a device resident, this visitor calls it instead of
# `visit_device_resident`. That's true even if `to_device` was originally
# called.
def __init__(
self, device, entry_method_info=None,
skip_between_cupy_devices=False,
starting_device_resident=None):
assert isinstance(device, chainer.backend.Device)
# `entry_method_info` is for backward compatibility workaround for
# overridden methods.
# It indicates which method originally causes this visitor.
# If it is any of the to_??? method names, descendant resident's
# respective method will be called if it's overridden
# (instead of `device_resident_accept`).
if entry_method_info is not None:
device_names = {
'to_cpu': '@numpy',
'to_gpu': '@cupy:N',
'to_intel64': '@intel64',
}
assert len(entry_method_info) == 2
method = entry_method_info[0]
assert method in device_names
warnings.warn(
'{} is deprecated. '
'Please use to_device(\'{}\') instead.'.format(
method, device_names[method]),
DeprecationWarning)
# starting_device_resident is also for backward compatibility
# workaround for overridden methods.
# It is a DeviceResident if to_xxx methods were initially called
# on this visitor. This is used to avoid infinite accept-visit loop
# that would occur by calling to_xxx methods.
assert (starting_device_resident is None
or isinstance(starting_device_resident, DeviceResident))
self._device = device
self._entry_method_info = entry_method_info
self._skip_between_cupy_devices = skip_between_cupy_devices
self._starting_device_resident = starting_device_resident
def visit_device_resident(self, device_resident):
device_resident._device = self._device
# Backward compatibility workaround for overridden methods
if device_resident._overridden_to_methods:
            # Skip this device resident if the visitor was initially
            # triggered from it.
if device_resident is self._starting_device_resident:
return
if self._entry_method_info is not None:
# Deprecated method is being called: e.g. to_cpu and to_gpu.
method_name, kwargs = self._entry_method_info
else:
# to_device is being called
method_name, kwargs = (
self._device_to_method_name_and_kwargs(self._device))
if method_name in device_resident._overridden_to_methods:
to_method = getattr(device_resident, method_name)
to_method(**kwargs)
return
def _device_to_method_name_and_kwargs(self, device):
# Converts a device instance to the corresponding combination of
# to_??? method name and kwargs.
# chainerx
if device.xp is chainerx:
return None, {}
# cupy
if device.xp is cuda.cupy:
return 'to_gpu', {'device': device.device.id}
# numpy
assert device.xp is numpy
if isinstance(device, _cpu.CpuDevice):
return 'to_cpu', {}
# intel64
assert isinstance(device, intel64.Intel64Device)
return 'to_intel64', {}
def visit_array(self, arr):
assert isinstance(arr, chainer.get_array_types())
device = backend.get_device_from_array(arr)
if self._skip_visiting(device):
self._warn_to_gpu(device, self._device)
return arr
return self._device.send(arr)
def visit_variable(self, param):
assert isinstance(param, chainer.Variable)
device = param.device
if self._skip_visiting(device):
self._warn_to_gpu(device, self._device)
return
param.to_device(self._device)
def _skip_visiting(self, obj_device):
return (
self._skip_between_cupy_devices
and isinstance(self._device, backend.GpuDevice)
and isinstance(obj_device, backend.GpuDevice))
@staticmethod
def _warn_to_gpu(src_device, dst_device):
src_id = src_device.device.id
dst_id = dst_device.device.id
if src_id != dst_id:
if _thread_local.flag_gpu_to_gpu is None:
warnings.warn('''\
You are trying to transfer a DeviceResident to GPU-{dst} which is already on \
GPU-{src}.
`DeviceResident.to_gpu` does nothing if the DeviceResident is already on GPU.
You can use `DeviceResident.to_device()` method to perform inter-GPU transfer.
'''.format(dst=dst_id, src=src_id), RuntimeWarning)
else:
assert isinstance(_thread_local.flag_gpu_to_gpu, bool)
_thread_local.flag_gpu_to_gpu = True
class _ToChxVisitor(DeviceResidentsVisitor):
# A visitor that recursively calls to_chx().
def visit_device_resident(self, device_resident):
device_resident._device = backend.ChainerxDevice.from_fallback_device(
device_resident._device)
def visit_array(self, arr):
assert isinstance(arr, chainer.get_array_types())
return backend.to_chx(arr)
def visit_variable(self, param):
assert isinstance(param, chainer.Variable)
param.to_chx()
class _FromChxVisitor(DeviceResidentsVisitor):
# A visitor that recursively calls from_chx().
def visit_device_resident(self, device_resident):
if isinstance(device_resident._device, backend.ChainerxDevice):
device_resident._device = device_resident._device.fallback_device
def visit_array(self, arr):
assert isinstance(arr, chainer.get_array_types())
return backend.from_chx(arr)
def visit_variable(self, param):
assert isinstance(param, chainer.Variable)
param.from_chx()
| 13,627
| 33.414141
| 79
|
py
|
chainer
|
chainer-master/chainer/warnings.py
|
class PerformanceWarning(RuntimeWarning):
"""Warning that indicates possible performance issues."""
| 104
| 34
| 61
|
py
|
chainer
|
chainer-master/chainer/link.py
|
from __future__ import absolute_import
import collections
import contextlib
import copy
import typing as tp # NOQA
import warnings
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import device_resident
from chainer import initializers
from chainer import link_hook
from chainer import types # NOQA
from chainer.utils import collections_abc
from chainer import variable
def _is_shape(value: tp.Optional[tp.Any]) -> bool:
if value is None:
return True
elif isinstance(value, collections_abc.Sequence):
try:
return all(int(x) for x in value)
except TypeError:
return False
try:
int(value) # try to cast
return True
except TypeError:
return False
def _ensure_shape_dtype(
value: tp.Optional[tp.Any]
) -> tp.Tuple[tp.Optional[types.ShapeSpec], types.DTypeSpec]:
    # If it is a shape, pair it with the default dtype (FP32).
if _is_shape(value):
return value, numpy.float32
    # Otherwise, return it as-is, assuming it is a (shape, dtype) pair.
else:
return value # type: ignore
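# For example, _ensure_shape_dtype((3, 2)) returns ((3, 2), numpy.float32),
# while _ensure_shape_dtype(((3, 2), numpy.float16)) returns the given
# (shape, dtype) pair unchanged.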
class Link(device_resident.DeviceResident):
"""Building block of model definitions.
Link is a building block of neural network models that support various
features like handling parameters, defining network fragments,
serialization, etc.
Link is the primitive structure for the model definitions. It supports
management of parameter variables and *persistent values* that should be
incorporated to serialization.
Parameter is an instance of :class:`~chainer.Parameter` registered to a
link. A :class:`~chainer.Parameter` object can be registered as a
parameter of the link by assigning it to an attribute within *an
    initialization scope*, which is a code block surrounded by a
:meth:`init_scope` context manager using the ``with`` statement.
Persistent values are arrays, scalars, or any other serializable values
registered via :meth:`register_persistent` or :meth:`add_persistent`.
.. note::
Whereas arbitrary serializable objects can be registered as persistent
values, it is strongly recommended that you just register values that
        should be treated as results of learning. A typical example of a
        persistent value is one computed during training and required for
        testing, e.g. running statistics for batch normalization.
Parameters and persistent values are referred by their names. They can be
accessed as attributes of the links. Link class itself manages the lists
of names of parameters and persistent values to distinguish parameters and
persistent values from other attributes.
Link can be composed into more complex models. This composition feature is
supported by child classes like :class:`Chain` and :class:`ChainList`. One
    can create a chain by combining one or more links. See the documentation
    of these classes for details.
As noted above, Link supports the serialization protocol of the
:class:`~chainer.Serializer` class. **Note that only parameters and
persistent values are saved and loaded.** Other attributes are considered
    as a part of the user program (i.e. a part of the network definition). In
    order to construct a link from a saved file, other attributes must be
    identically reconstructed by user code.
.. admonition:: Example
This is a simple example of custom link definition. Chainer itself also
provides many links defined under the :mod:`~chainer.links` module. They
might serve as examples, too.
        Suppose we want to define a simple primitive link that implements a
        fully-connected layer based on the :func:`~functions.linear` function.
        Note that this function takes an input variable, a weight variable,
        and a bias variable as arguments. Then, the fully-connected layer can
        be defined as follows::
import chainer
import chainer.functions as F
from chainer import initializers
import numpy as np
class LinearLayer(chainer.Link):
def __init__(self, n_in, n_out):
super(LinearLayer, self).__init__()
with self.init_scope():
self.W = chainer.Parameter(
initializers.Normal(), (n_out, n_in))
self.b = chainer.Parameter(
initializers.Zero(), (n_out,))
def forward(self, x):
return F.linear(x, self.W, self.b)
This example shows that a user can define arbitrary parameters and use
them in any methods. Links typically implement the ``forward``
operator, although they can also provide other methods to implement the
forward propagation.
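        As a minimal usage sketch (the shapes here are arbitrary), the link
        defined above can then be called like a function::
            layer = LinearLayer(5, 10)
            x = np.zeros((1, 5), dtype=np.float32)
            y = layer(x)  # invokes LinearLayer.forward via Link.__call__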
Args:
params:
Names, shapes, and optional dtypes of initial parameters.
The keywords are used as the parameter names and the corresponding
values consist either of the shape or a tuple of shape and a dtype
``(shape, dtype)``.
If only the shape is supplied, the default dtype will be used.
Attributes:
name (str): Name of this link, given by the parent chain (if exists).
"""
_local_link_hooks = None # type: tp.Optional[collections.OrderedDict[str, chainer.LinkHook]] # NOQA
__init_done = False
def __init__(self, **params: tp.Any) -> None:
super(Link, self).__init__()
self._params = set() # type: tp.Set[str]
self._persistent = set() # type: tp.Set[str]
self._within_init_scope = False # type: bool
self.name = None # type: tp.Optional[str]
# This flag has to be set before calling add_param().
self.__init_done = True
for name, value in six.iteritems(params):
shape, dtype = _ensure_shape_dtype(value)
self.add_param(name, shape, dtype=dtype)
def __check_init_done(self):
if not self.__init_done:
raise RuntimeError('Link.__init__() has not been called.')
def __str__(self):
specs = ', '.join(
'{}={}'.format(k, v) for k, v in self.printable_specs
)
return '{cls}({specs})'.format(
cls=self.__class__.__name__, specs=specs,
)
@property
def local_link_hooks(
self
) -> 'collections.OrderedDict[str, chainer.LinkHook]':
"""Ordered dictionary of registered link hooks.
Contrary to ``chainer.thread_local.link_hooks``,
        which registers its elements to all links,
link hooks in this property are specific to this link.
"""
if self._local_link_hooks is None:
self._local_link_hooks = collections.OrderedDict()
return self._local_link_hooks
@property
def _n_local_link_hooks(self) -> int:
return (0 if self._local_link_hooks is None
else len(self._local_link_hooks))
@property
def _device_id(self):
warnings.warn(
'Link._device_id is left only for backward compatibility and '
'likely to be removed. Use Link.device instead.',
DeprecationWarning)
device = self.device
if device.xp is cuda.cupy:
return device.device.id
return None
@property
def printable_specs(self):
"""Generator of printable specs of this link.
Yields:
specs (tuple of str and object):
                The arguments (keyword-value pairs) that were passed to
                :meth:`__init__`. These pairs are used to represent this
                class or its subclass in :meth:`__str__`.
"""
if 0:
yield
@property
def within_init_scope(self) -> bool:
"""True if the current code is inside of an initialization scope.
See :meth:`init_scope` for the details of the initialization scope.
"""
return getattr(self, '_within_init_scope', False)
@contextlib.contextmanager
def init_scope(self) -> tp.Iterator[None]:
"""Creates an initialization scope.
This method returns a context manager object that enables registration
of parameters (and links for :class:`~chainer.Chain`) by an assignment.
A :class:`~chainer.Parameter` object can be automatically registered
by assigning it to an attribute under this context manager.
.. admonition:: Example
In most cases, the parameter registration is done in the
initializer method. Using the ``init_scope`` method, we can
simply assign a :class:`~chainer.Parameter` object to register
it to the link.
.. code-block:: python
class MyLink(chainer.Link):
def __init__(self):
super().__init__()
with self.init_scope():
self.W = chainer.Parameter(0, (10, 5))
self.b = chainer.Parameter(0, (5,))
"""
# super().__init__ must be called before init_scope().
self.__check_init_done()
old_flag = self.within_init_scope
self._within_init_scope = True
try:
yield
finally:
self._within_init_scope = old_flag
def __call__(self, *args: tp.Any, **kwargs: tp.Any) -> tp.Any:
self.__check_init_done()
# TODO(niboshi): Support link hooks for other forward methods.
hooks = chainer._get_link_hooks()
if self._n_local_link_hooks > 0:
hooks = collections.OrderedDict(hooks)
hooks.update(self.local_link_hooks)
hooks = hooks.values() # avoid six for performance
# Call forward_preprocess hook
if hooks:
pre_cb_args = link_hook._ForwardPreprocessCallbackArgs(
self, 'forward', args, kwargs)
for hook in hooks:
hook.forward_preprocess(pre_cb_args)
# Call the forward function
# (See #5078) super().__call__ is used when the method is injected by a
# mixin class. To keep backward compatibility, the injected one is
# prioritized over forward().
forward = getattr(super(Link, self), '__call__', None)
if forward is None:
# forward is implemented in the child classes
forward = self.forward # type: ignore
out = forward(*args, **kwargs)
# Call forward_postprocess hook
if hooks:
post_cb_args = link_hook._ForwardPostprocessCallbackArgs(
self, 'forward', args, kwargs, out)
for hook in hooks:
hook.forward_postprocess(post_cb_args)
return out
def __setattr__(self, name: str, value: tp.Any) -> None:
if self.within_init_scope and isinstance(value, variable.Parameter):
value.name = name
self._params.add(name)
self._persistent.discard(name)
super(Link, self).__setattr__(name, value)
def __delattr__(self, name: str) -> None:
self._params.discard(name)
self._persistent.discard(name)
super(Link, self).__delattr__(name)
def add_param(
self,
name: str,
shape: tp.Optional[types.ShapeSpec] = None,
dtype: types.DTypeSpec = numpy.float32,
initializer: tp.Optional[types.InitializerSpec] = None
) -> None:
"""Registers a parameter to the link.
Args:
name (str): Name of the parameter. This name is also used as the
attribute name.
shape (int or tuple of ints): Shape of the parameter array. If it
is omitted, the parameter variable is left uninitialized.
dtype: Data type of the parameter array.
initializer (:ref:`initializer <initializer>`): If it is not
``None``, the data is initialized with the given initializer.
If it is an array, the data is directly initialized by it. If
it is callable, it is used as a weight initializer. Note that
in these cases, ``dtype`` argument is ignored. It can also be
a scalar, in which case the data array will be filled by this
scalar. Note that float32 is used in this case.
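        .. admonition:: Example
            A minimal sketch (the parameter name and shape are illustrative)::
                link = chainer.Link()
                link.add_param('W', (3, 2))
                assert link.W.shape == (3, 2)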
"""
if name in self.__dict__:
raise AttributeError(
'cannot register a new parameter %s: attribute exists'
% name)
if initializer is None:
initializer = initializers.NaN(dtype)
param = variable.Parameter(initializer, shape)
with self.init_scope():
setattr(self, name, param)
def add_persistent(self, name: str, value: tp.Any) -> None:
"""Registers a persistent value to the link.
The registered value is saved and loaded on serialization and
deserialization. The value is set to an attribute of the link.
Args:
name (str): Name of the persistent value. This name is also used
for the attribute name.
value: Value to be registered.
"""
d = self.__dict__
if name in d:
raise AttributeError(
'cannot register a new persistent value %s: attribute exists'
% name)
self._persistent.add(name)
self._params.discard(name)
d[name] = value
def register_persistent(self, name: str) -> None:
"""Registers an attribute of a given name as a persistent value.
This is a convenient method to register an existing attribute as a
persistent value. If ``name`` has been already registered as a
parameter, this method removes it from the list of parameter names
and re-registers it as a persistent value.
Args:
name (str): Name of the attribute to be registered.
"""
if not hasattr(self, name):
raise AttributeError(
'cannot register non-existent attribute %s as a persistent '
'value' % name)
self._persistent.add(name)
self._params.discard(name)
@classmethod
def from_params(cls, *args, **kwargs):
"""Initialize link with given parameters.
This method initializes the link with given :ref:`ndarray`\\s.
        The arguments include
        * parameters specific to the link, and
        * constants such as the stride width of a convolutional layer.
"""
raise NotImplementedError(
'This link does not implement `from_params`.')
def copy(self, mode: str = 'share') -> 'Link':
"""Copies the link hierarchy to new one.
The whole hierarchy rooted by this link is copied. There are three
modes to perform copy. Please see the documentation for the argument
``mode`` below.
The name of the link is reset on the copy, since the copied instance
        does not belong to the original parent chain (even if one exists).
Args:
mode (str): It should be either ``init``, ``copy``, or ``share``.
                ``init`` means parameter variables under the returned link
                object are re-initialized by calling their
                :meth:`~chainer.Parameter.initialize` method, so that all the
                parameters may have different initial values from the original
                link.
                ``copy`` means that the link object is deeply copied, so that
                its parameters are not re-initialized but are also deeply
                copied. Thus, all parameters have the same initial values but
                can be changed independently.
``share`` means that the link is shallowly copied, so that its
parameters' arrays are shared with the original one. Thus,
their values are changed synchronously. The default ``mode``
is ``share``.
Returns:
Link: Copied link object.
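        .. admonition:: Example
            A minimal sketch of the ``share`` mode, assuming the
            ``LinearLayer`` link from the class docstring::
                a = LinearLayer(3, 2)
                b = a.copy(mode='share')
                assert a.W.array is b.W.array  # data arrays are shared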
"""
if mode == 'share':
ret = copy.copy(self)
ret._params = set(self._params)
ret._persistent = set(self._persistent)
ret.name = None
d = ret.__dict__ # type: tp.Dict[str, chainer.Parameter]
for name in ret._params:
d[name] = copy.copy(d[name])
d[name].grad = None
return ret
elif mode == 'copy':
return copy.deepcopy(self)
elif mode == 'init':
ret = copy.deepcopy(self)
for param in ret.params(include_uninit=False):
param.initialize(param.shape)
return ret
else:
raise ValueError(
'The \'mode\' argument should be either \'init\','
'\'copy\', or \'share\'. But {} was given.'.format(mode))
def device_resident_accept(self, visitor):
super(Link, self).device_resident_accept(visitor)
d = self.__dict__
for name in self._params:
x = d[name]
visitor.visit_variable(x)
for name in self._persistent:
x = d[name]
if isinstance(x, chainer.get_array_types()):
d[name] = visitor.visit_array(x)
def params(
self,
include_uninit: bool = True
) -> tp.Iterator['chainer.Parameter']:
"""Returns a generator of all parameters under the link hierarchy.
Args:
include_uninit (bool): If ``True``, it also generates uninitialized
parameters.
Returns:
A generator object that generates all parameters.
"""
d = self.__dict__ # type: tp.Dict[str, chainer.Parameter]
for name in sorted(self._params):
if include_uninit or d[name].data is not None:
yield d[name]
def namedparams(
self,
include_uninit: bool = True
) -> tp.Iterator[tp.Tuple[str, 'chainer.Parameter']]:
"""Returns a generator of all (path, param) pairs under the hierarchy.
Args:
include_uninit (bool): If ``True``, it also generates uninitialized
parameters.
Returns:
A generator object that generates all (path, parameter) pairs. The
paths are relative from this link.
"""
d = self.__dict__ # type: tp.Dict[str, chainer.Parameter]
for name in sorted(self._params):
if include_uninit or d[name].is_initialized:
yield '/' + name, d[name]
def links(self, skipself: bool = False) -> tp.Iterator['Link']:
"""Returns a generator of all links under the hierarchy.
Args:
skipself (bool): If ``True``, then the generator skips this link
and starts with the first child link.
Returns:
A generator object that generates all links.
"""
if not skipself:
yield self
def namedlinks(
self,
skipself: bool = False
) -> tp.Iterator[tp.Tuple[str, 'Link']]:
"""Returns a generator of all (path, link) pairs under the hierarchy.
Args:
skipself (bool): If ``True``, then the generator skips this link
and starts with the first child link.
Returns:
A generator object that generates all (path, link) pairs.
"""
if not skipself:
yield '/', self
def children(self) -> tp.Iterator['Link']:
"""Returns a generator of all child links.
Returns:
A generator object that generates all child links.
"""
if 0:
yield
def copyparams(self, link: 'Link', copy_persistent: bool = True) -> None:
"""Copies all parameters from given link.
        This method copies the data arrays of all parameters in the hierarchy.
        The copy is performed even across the host and different devices. Note
        that this method does not copy the gradient arrays.
*From v5.0.0:* this method also copies the persistent values (e.g. the
moving statistics of :class:`~chainer.links.BatchNormalization`). If
the persistent value is an ndarray, the elements are copied. Otherwise,
it is copied using :func:`copy.deepcopy`. The old behavior (not copying
persistent values) can be reproduced with ``copy_persistent=False``.
Args:
link (Link): Source link object.
copy_persistent (bool): If ``True``, persistent values are also
copied. ``True`` by default.
"""
src = link.__dict__
dst = self.__dict__
for name in self._params:
dst[name].copydata(src[name])
if copy_persistent:
array_types = chainer.get_array_types()
for name in self._persistent:
d = dst[name]
s = src[name]
if isinstance(d, array_types) and isinstance(s, array_types):
backend.copyto(d, s)
else:
dst[name] = copy.deepcopy(s)
def cleargrads(self) -> None:
"""Clears all gradient arrays.
This method should be called before the backward computation at every
iteration of the optimization.
"""
for param in self.params():
param.cleargrad()
def zerograds(self) -> None:
"""Initializes all gradient arrays by zero.
.. deprecated:: v1.15
Use the more efficient :meth:`cleargrads` instead.
"""
warnings.warn(
'Link.zerograds is deprecated. Use Link.cleargrads instead.',
DeprecationWarning)
for param in self.params():
param.zerograd()
def addgrads(self, link: 'Link') -> None:
"""Accumulates gradient values from given link.
        This method adds each gradient array of the given link to the
        corresponding gradient array of this link. The accumulation is
        performed even across the host and different devices.
Args:
link (Link): Source link object.
"""
src = link.__dict__
dst = self.__dict__
for name in self._params:
dst[name].addgrad(src[name])
def enable_update(self) -> None:
"""Enables update rules of all parameters under the link hierarchy.
This method sets the :attr:`~chainer.UpdateRule.enabled` flag of the
update rule of each parameter variable to ``True``.
"""
for param in self.params():
rule = param.update_rule
if rule is not None:
rule.enabled = True
def disable_update(self) -> None:
"""Disables update rules of all parameters under the link hierarchy.
This method sets the :attr:`~chainer.UpdateRule.enabled` flag of the
update rule of each parameter variable to ``False``.
"""
for param in self.params():
rule = param.update_rule
if rule is not None:
rule.enabled = False
@property
def update_enabled(self) -> bool:
"""``True`` if at least one parameter has an update rule enabled."""
for param in self.params():
rule = param.update_rule
if rule is not None and rule.enabled:
return True
return False
def serialize(self, serializer: 'chainer.AbstractSerializer') -> None:
"""Serializes the link object.
Args:
serializer (~chainer.AbstractSerializer): Serializer object.
"""
d = self.__dict__ # type: tp.Dict[str, chainer.Parameter]
for name in self._params:
param = d[name]
data = serializer(name, param.data) # type: types.NdArray
if param.data is None and data is not None:
# Initialize the parameter here
param.initialize(data.shape)
with chainer.using_device(param.device):
param.data[...] = param.device.send(data)
for name in self._persistent:
d[name] = serializer(name, d[name])
def repeat(
self,
n_repeat: int,
mode: str = 'init'
) -> 'chainer.Sequential':
"""Repeats this link multiple times to make a :class:`~chainer.Sequential`.
        This method returns a :class:`~chainer.Sequential` object which
        contains this :class:`~chainer.Link` repeated multiple times. The
        ``mode`` argument determines how this link is copied for each
        repetition.
.. admonition:: Example
You can repeat the same link multiple times to create a longer
:class:`~chainer.Sequential` block like this:
.. testcode::
class ConvBNReLU(chainer.Chain):
def __init__(self):
super(ConvBNReLU, self).__init__()
with self.init_scope():
self.conv = L.Convolution2D(
None, 64, 3, 1, 1, nobias=True)
self.bn = L.BatchNormalization(64)
def forward(self, x):
return F.relu(self.bn(self.conv(x)))
net = ConvBNReLU().repeat(16, mode='init')
The ``net`` object contains 16 blocks, each of which is
``ConvBNReLU``. And the ``mode`` was ``init``, so each block
is re-initialized with different parameters. If you give
``copy`` to this argument, each block has same values for its
parameters but its object ID is different from others. If it is
``share``, each block is same to others in terms of not only
parameters but also the object IDs because they are shallow-copied,
so that when the parameter of one block is changed, all the
parameters in the others also change.
Args:
n_repeat (int): Number of times to repeat.
mode (str): It should be either ``init``, ``copy``, or ``share``.
``init`` means parameters of each repeated element in the
returned :class:`~chainer.Sequential` will be re-initialized,
so that all elements have different initial parameters.
``copy`` means that the parameters will not be re-initialized
but object itself will be deep-copied, so that all elements
have same initial parameters but can be changed independently.
                ``share`` means all the elements that constitute the resulting
                :class:`~chainer.Sequential` object are the same object
                because they are shallow-copied, so that all parameters of
                the elements are shared with each other.
"""
ret = chainer.Sequential()
if n_repeat <= 0:
return ret
if mode not in ['init', 'copy', 'share']:
raise ValueError(
'The \'mode\' argument should be either \'init\','
'\'copy\', or \'share\'. But {} was given.'.format(mode))
link = self
for _ in range(n_repeat):
ret.append(link.copy(mode))
return ret
def count_params(self) -> int:
"""Counts the total number of parameters.
This method counts the total number of scalar values included in all
the :class:`~chainer.Parameter`\\ s held by this link and its
descendants.
        If the link contains uninitialized parameters, this method raises a
        warning.
Returns:
The total size of parameters (int)
"""
size = 0
for name, param in self.namedparams():
if param.array is None:
warnings.warn(
'Parameter \'{}\' has not been initialized, so the '
'resulting count will not include the number of parameters'
' in it.'.format(name))
continue
size += param.size
return size
def add_hook(
self,
hook: 'chainer.LinkHook',
name: tp.Optional[str] = None
) -> 'Link':
"""Registers a link hook.
Args:
hook (~chainer.LinkHook): Link hook to be registered.
name (str): Name of the link hook. The name must be unique
among link hooks registered to this link. If ``None``,
the default name of the link hook is used.
Returns:
self
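        .. admonition:: Example
            A minimal sketch using a built-in hook (the hook name is
            arbitrary)::
                layer = chainer.links.Linear(3, 2)
                layer.add_hook(chainer.link_hooks.TimerHook(), name='timer')
                layer.delete_hook('timer')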
"""
if not isinstance(hook, link_hook.LinkHook):
raise TypeError('Hook must be of type LinkHook')
if name is None:
name = hook.name
hooks = self.local_link_hooks
if name in hooks:
raise KeyError('Hook %s already exists' % name)
hooks[name] = hook
hook.added(self)
return self
def delete_hook(self, name: str) -> None:
"""Unregisters the link hook.
Args:
name (str): The name of the link hook to be unregistered.
"""
if name in self.local_link_hooks:
self.local_link_hooks[name].deleted(self)
del self.local_link_hooks[name]
else:
raise KeyError('Hook %s does not exist' % name)
class Chain(Link):
"""Composable link with object-like interface.
Composability is one of the most important features of neural nets. Neural
net models consist of many reusable fragments, and each model itself might
be embedded into a larger learnable system. Chain enables us to write a
neural net based on composition, without bothering about routine works like
collecting parameters, serialization, copying the structure with parameters
shared, etc.
This class actually provides a way to compose one or more links into one
    structure. A chain can contain one or more *child links*. A child link is
    a link registered to the chain with its own name. The child link is stored
    as an attribute of the chain under that name. A user can write a whole
    model or a fragment of a neural net as a child class of Chain.
Each chain itself is also a link. Therefore, one can combine chains into
higher-level chains. In this way, links and chains construct a *link
hierarchy*. Link hierarchy forms a tree structure, where each node is
identified by the path from the root. The path is represented by a string
like a file path in UNIX, consisting of names of nodes on the path, joined
by slashes ``/``.
A child link can be added just by assigning it to an attribute of the
chain within :meth:`~chainer.Chain.init_scope`.
The registered child link is saved and loaded on serialization and
    deserialization, and is involved in the optimization. The registered link
    is called a child. A child link is accessible via the :meth:`children`
    method, which returns a generator running through the children in
    lexicographic order.
On registration of a child link, its :attr:`~Link.name` attribute is also
set (or overwritten if the link has already been registered to another
chain).
.. admonition:: Example
This is a simple example of custom chain definition. Chainer itself also
provides some chains defined under the :mod:`~chainer.links` module.
They might serve as examples, too.
        Suppose we want to define a multi-layer perceptron consisting of two
hidden layers with rectifiers as activation functions. We can use the
:class:`~chainer.links.Linear` link as a building block::
import chainer
import chainer.functions as F
import chainer.links as L
class MultiLayerPerceptron(chainer.Chain):
def __init__(self, n_in, n_hidden, n_out):
super(MultiLayerPerceptron, self).__init__()
with self.init_scope():
self.layer1 = L.Linear(n_in, n_hidden)
self.layer2 = L.Linear(n_hidden, n_hidden)
self.layer3 = L.Linear(n_hidden, n_out)
def forward(self, x):
# Forward propagation
h1 = F.relu(self.layer1(x))
h2 = F.relu(self.layer2(h1))
return self.layer3(h2)
Child links are registered via the assignment within a
``with self.init_scope():`` block. The forward propagation is often
implemented as the ``forward`` operator as the above example, though
it is not mandatory.
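        The chain defined above can then be used like a single link
        (a minimal sketch; the layer sizes are arbitrary)::
            import numpy as np
            mlp = MultiLayerPerceptron(10, 20, 2)
            x = np.zeros((1, 10), dtype=np.float32)
            y = mlp(x)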
Args:
links: Child links. The keywords are used as their names. The names are
also set to the links.
"""
def __init__(self, **links: 'Link') -> None:
super(Chain, self).__init__()
self._children = set() # type: tp.Set[str]
for name, link in six.iteritems(links):
self.add_link(name, link)
def __str__(self):
reps = []
for child in self.children():
rep = '({name}): {rep},'.format(
name=child.name, rep=str(child),
)
# Add indentation to each line.
for line in rep.splitlines():
reps.append(' {line}\n'.format(line=line))
reps = ''.join(reps)
if reps: # No newline with no children.
reps = '\n' + reps
return '{cls}({children})'.format(
cls=self.__class__.__name__, children=reps,
)
def __getitem__(self, name: str) -> tp.Any:
"""Equivalent to getattr."""
return getattr(self, name)
def __setattr__(self, name: str, value: tp.Any) -> None:
if self.within_init_scope and isinstance(value, Link):
if hasattr(self, name):
raise AttributeError(
'cannot register a new link %s: attribute exists' % name)
value.name = name
self._children.add(name)
super(Chain, self).__setattr__(name, value)
def __delattr__(self, name: str) -> None:
self._children.discard(name)
super(Chain, self).__delattr__(name)
def add_link(self, name: str, link: 'Link') -> None:
"""Registers a child link to this chain.
Args:
name (str): Name of the child link. This name is also used as the
attribute name.
link (Link): The link object to be registered.
"""
if name in self.__dict__:
raise AttributeError(
'cannot register a new link %s: attribute exists' % name)
if not isinstance(link, Link):
raise TypeError('cannot register a non-link object as a child')
with self.init_scope():
setattr(self, name, link)
def copy(self, mode: str = 'share') -> 'Chain':
ret = super(Chain, self).copy(mode) # type: ignore # should be Chain
ret._children = set(ret._children) # type: ignore
d = ret.__dict__ # type: tp.Dict[str, Link]
for name in ret._children: # type: ignore
# copy child links recursively
copied = d[name].copy(mode)
copied.name = name
d[name] = copied
return ret # type: ignore
def device_resident_accept(self, visitor):
super(Chain, self).device_resident_accept(visitor)
d = self.__dict__
for name in self._children:
d[name].device_resident_accept(visitor)
def params(
self,
include_uninit: bool = True
) -> tp.Iterator['chainer.Parameter']:
for param in super(Chain, self).params(include_uninit):
yield param
d = self.__dict__ # type: tp.Dict[str, Link]
for name in sorted(self._children):
for param in d[name].params(include_uninit):
yield param
def namedparams(
self,
include_uninit: bool = True
) -> tp.Iterator[tp.Tuple[str, 'chainer.Parameter']]:
for ret in super(Chain, self).namedparams(include_uninit):
yield ret
d = self.__dict__ # type: tp.Dict[str, Link]
for name in sorted(self._children):
prefix = '/' + name
for path, param in d[name].namedparams(include_uninit):
yield prefix + path, param
def links(self, skipself: bool = False) -> tp.Iterator[Link]:
if not skipself:
yield self
d = self.__dict__ # type: tp.Dict[str, Link]
for name in sorted(self._children):
for link in d[name].links():
yield link
def namedlinks(
self,
skipself: bool = False
) -> tp.Iterator[tp.Tuple[str, Link]]:
if not skipself:
yield '/', self
d = self.__dict__ # type: tp.Dict[str, Link]
for name in sorted(self._children):
child = d[name]
prefix = '/' + name
yield prefix, child
for path, link in d[name].namedlinks(True):
yield prefix + path, link
def children(self) -> tp.Iterator[Link]:
d = self.__dict__ # type: tp.Dict[str, Link]
for name in sorted(self._children):
yield d[name]
def copyparams(self, link: Link, copy_persistent: bool = True) -> None:
super(Chain, self).copyparams(link, copy_persistent)
src = link.__dict__
dst = self.__dict__
for name in self._children:
dst[name].copyparams(src[name], copy_persistent)
def addgrads(self, link: Link) -> None:
super(Chain, self).addgrads(link)
src = link.__dict__
dst = self.__dict__
for name in self._children:
dst[name].addgrads(src[name])
def serialize(self, serializer: 'chainer.AbstractSerializer') -> None:
super(Chain, self).serialize(serializer)
d = self.__dict__ # type: tp.Dict[str, Link]
for name in self._children:
d[name].serialize(serializer[name])
class ChainList(Link, collections_abc.MutableSequence):
"""Composable link with list-like interface.
This is another example of compositional link. Unlike :class:`Chain`, this
class can be used like a list of child links. Each child link is indexed by
a non-negative integer, and it maintains the current number of registered
child links. The :meth:`add_link` method inserts a new link at the end of
the list. It is useful to write a chain with arbitrary number of child
links, e.g. an arbitrarily deep multi-layer perceptron.
This class inherits the methods `index`, `count`, `append`, `reverse`,
    `extend`, `pop`, and `remove` from `collections.abc.MutableSequence`, and
    its elements can be accessed and assigned by index or slice.
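    .. admonition:: Example
        A minimal sketch of the list-like interface (the layer sizes are
        arbitrary)::
            import chainer.links as L
            layers = chainer.ChainList(L.Linear(10, 10), L.Linear(10, 10))
            layers.append(L.Linear(10, 2))
            assert len(layers) == 3
            assert layers[0].name == '0'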
Args:
links: Initial child links.
"""
def __init__(self, *links: Link) -> None:
super(ChainList, self).__init__()
self._children = [] # type: tp.List[Link]
for link in links:
self.add_link(link)
def __str__(self):
reps = []
for index, child in enumerate(self._children):
rep = '({index}): {rep},'.format(
index=index, rep=str(child),
)
# Add indentation to each line.
for line in rep.splitlines():
reps.append(' {line}\n'.format(line=line))
reps = ''.join(reps)
if reps: # No newline with no children.
reps = '\n' + reps
return '{cls}({children})'.format(
cls=self.__class__.__name__, children=reps,
)
def __setattr__(self, name: str, value: tp.Any) -> None:
if self.within_init_scope and isinstance(value, Link):
raise TypeError(
'cannot register a new link'
' within a "with chainlist.init_scope():" block.')
super(ChainList, self).__setattr__(name, value)
def __setitem__(
self,
index: tp.Union[int, slice],
value: tp.Union[Link, tp.Iterable[Link]]
) -> None:
if isinstance(index, int):
link = value # type: ignore # should be Link
link.name = str(index) # type: ignore
self._children[index] = link # type: ignore
elif isinstance(index, slice):
self._children[index] = value # type: ignore # should be Iterable[Link] # NOQA
for i, c in enumerate(self._children): # type: ignore
c.name = str(i)
else:
raise TypeError(
'ChainList indices must be integers or slices, not %s' %
type(index).__name__)
def __getitem__(self, index):
"""Returns the child at given index.
Args:
index (int): Index of the child in the list.
Returns:
Link: The ``index``-th child link.
"""
return self._children[index]
def __delitem__(self, index: tp.Union[int, slice]) -> None:
del self._children[index]
for i, c in enumerate(self._children):
c.name = str(i)
def insert(self, index: int, link: Link) -> None:
"""Insert a child link at the given index.
Args:
index (int): The position of the list where the new
link is inserted.
link (Link): The link to be inserted.
"""
if index == len(self._children):
self._children.append(link)
link.name = str(index)
else:
self._children.insert(index, link)
for i, c in enumerate(self._children):
c.name = str(i)
def __iter__(self) -> tp.Iterator[Link]:
return iter(self._children)
def __len__(self) -> int:
"""Returns the number of children."""
return len(self._children)
def add_link(self, link: Link) -> None:
"""Registers a child link and adds it to the tail of the list.
Args:
link (Link): The link object to be registered.
"""
self.append(link)
def copy(self, mode: str = 'share') -> 'ChainList':
"""Returns a deep copy of the chainlist."""
ret = super(ChainList, self).copy() # type: ignore # should be ChainList # NOQA
ret._children = list(ret._children) # type: ignore # copy
children = ret._children # type: ignore
for i, child in enumerate(children):
child = child.copy(mode)
child.name = str(i)
children[i] = child
return ret # type: ignore
def device_resident_accept(self, visitor):
super(ChainList, self).device_resident_accept(visitor)
for link in self._children:
link.device_resident_accept(visitor)
def params(
self,
include_uninit: bool = True
) -> tp.Iterator['chainer.Parameter']:
for param in super(ChainList, self).params(include_uninit):
yield param
for link in self._children:
for param in link.params(include_uninit):
yield param
def namedparams(
self,
include_uninit: bool = True
) -> tp.Iterator[tp.Tuple[str, 'chainer.Parameter']]:
for ret in super(ChainList, self).namedparams(include_uninit):
yield ret
for idx, link in enumerate(self._children):
prefix = '/%d' % idx
for path, param in link.namedparams(include_uninit):
yield prefix + path, param
def links(self, skipself: bool = False) -> tp.Iterator[Link]:
if not skipself:
yield self
for child in self._children:
for link in child.links():
yield link
def namedlinks(
self,
skipself: bool = False
) -> tp.Iterator[tp.Tuple[str, Link]]:
if not skipself:
yield '/', self
for idx, child in enumerate(self._children):
prefix = '/%d' % idx
yield prefix, child
for path, link in child.namedlinks(True):
yield prefix + path, link
def children(self) -> tp.Iterator[Link]:
for child in self._children:
yield child
def copyparams(self, link: Link, copy_persistent: bool = True) -> None:
# link is actually a ChainList
super(ChainList, self).copyparams(link, copy_persistent)
for idx, child in enumerate(self._children):
child.copyparams(link[idx], copy_persistent) # type: ignore
def addgrads(self, link: Link) -> None:
# link is actually a ChainList
super(ChainList, self).addgrads(link)
for idx, child in enumerate(self._children):
child.addgrads(link[idx]) # type: ignore
def serialize(self, serializer: 'chainer.AbstractSerializer') -> None:
super(ChainList, self).serialize(serializer)
for idx, child in enumerate(self._children):
child.serialize(serializer['%d' % idx])
| 45,260
| 35.917618
| 104
|
py
|
chainer
|
chainer-master/chainer/link_hook.py
|
import typing as tp # NOQA
import chainer
from chainer import utils
class _ForwardPreprocessCallbackArgs(object):
"""Callback data for LinkHook.forward_preprocess"""
def __init__(
self,
link: 'chainer.link.Link',
forward_name: str,
args: tp.Tuple[tp.Any, ...],
kwargs: tp.Dict[str, tp.Any]
) -> None:
self.link = link
self.forward_name = forward_name
self.args = args
self.kwargs = kwargs
def __repr__(self):
return utils._repr_with_named_data(
self, link=self.link, forward_name=self.forward_name,
args=self.args, kwargs=self.kwargs)
class _ForwardPostprocessCallbackArgs(object):
"""Callback data for LinkHook.forward_postprocess"""
def __init__(
self,
link: 'chainer.link.Link',
forward_name: str,
args: tp.Tuple[tp.Any, ...],
kwargs: tp.Dict[str, tp.Any],
out: tp.Any
) -> None:
self.link = link
self.forward_name = forward_name
self.args = args
self.kwargs = kwargs
self.out = out
def __repr__(self):
return utils._repr_with_named_data(
self, link=self.link, forward_name=self.forward_name,
args=self.args, kwargs=self.kwargs, out=self.out)
class LinkHook(object):
"""Base class of hooks for links.
:class:`~chainer.LinkHook` is a callback object
that is registered to a :class:`~chainer.Link`.
Registered link hooks are invoked before and after calling
:meth:`Link.forward() <chainer.Link.forward>` method of each link.
Link hooks that derive from :class:`LinkHook` may override the following
method:
* :meth:`~chainer.LinkHook.added`
* :meth:`~chainer.LinkHook.deleted`
* :meth:`~chainer.LinkHook.forward_preprocess`
* :meth:`~chainer.LinkHook.forward_postprocess`
By default, these methods do nothing.
Specifically, when the :meth:`~chainer.Link.__call__`
method of some link is invoked,
:meth:`~chainer.LinkHook.forward_preprocess`
(resp. :meth:`~chainer.LinkHook.forward_postprocess`)
of all link hooks registered to this link are called before (resp. after)
:meth:`Link.forward() <chainer.Link.forward>` method of the link.
There are two ways to register :class:`~chainer.LinkHook`
objects to :class:`~chainer.Link` objects.
    The first one is to use the ``with`` statement. Link hooks hooked
    in this way are registered to all links within the ``with`` statement
    and are unregistered at the end of the ``with`` statement.
.. admonition:: Example
The following code is a simple example in which
we measure the elapsed time of a part of forward propagation procedure
with :class:`~chainer.link_hooks.TimerHook`, which is a subclass of
:class:`~chainer.LinkHook`.
>>> class Model(chainer.Chain):
... def __init__(self):
... super(Model, self).__init__()
... with self.init_scope():
... self.l = L.Linear(10, 10)
... def forward(self, x1):
... return F.exp(self.l(x1))
>>> model1 = Model()
>>> model2 = Model()
>>> x = chainer.Variable(np.zeros((1, 10), np.float32))
>>> with chainer.link_hooks.TimerHook() as m:
... _ = model1(x)
... y = model2(x)
>>> model3 = Model()
>>> z = model3(y)
>>> print('Total time : {}'.format(m.total_time()))
... # doctest:+ELLIPSIS
Total time : ...
In this example, we measure the elapsed times for each forward
propagation of all functions in ``model1`` and ``model2``.
        Note that ``model3`` is not a target of the measurement,
as :class:`~chainer.link_hooks.TimerHook` is unregistered
before forward propagation of ``model3``.
.. note::
Chainer stores the dictionary of registered link hooks
as a thread local object. So, link hooks registered
are different depending on threads.
The other one is to register directly to
a :class:`~chainer.Link` object by calling its
:meth:`~chainer.Link.add_hook` method.
Link hooks registered in this way can be removed by
:meth:`~chainer.Link.delete_hook` method.
    Contrary to the former registration method, link hooks are registered
    only to the link on which :meth:`~chainer.Link.add_hook`
    is called.
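    .. admonition:: Example
        A minimal sketch of a custom hook (the class name is illustrative)::
            class ShapeLogger(chainer.LinkHook):
                name = 'ShapeLogger'
                def forward_postprocess(self, args):
                    shapes = [getattr(a, 'shape', None) for a in args.args]
                    print(args.link.__class__.__name__, shapes)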
Args:
name(str): Name of this link hook.
"""
name = 'LinkHook'
def __enter__(self) -> 'LinkHook':
link_hooks = chainer._get_link_hooks()
if self.name in link_hooks:
raise KeyError('hook %s already exists' % self.name)
link_hooks[self.name] = self
self.added(None)
return self
def __exit__(self, *_):
link_hooks = chainer._get_link_hooks()
link_hooks[self.name].deleted(None)
del link_hooks[self.name]
def added(self, link: 'tp.Optional[chainer.link.Link]') -> None:
"""Callback function invoked when the link hook is registered
Args:
link(~chainer.Link): Link object to which
the link hook is registered. ``None`` if the link hook is
registered globally.
"""
pass
def deleted(self, link: 'tp.Optional[chainer.link.Link]') -> None:
"""Callback function invoked when the link hook is unregistered
Args:
link(~chainer.Link): Link object to which
the link hook is unregistered. ``None`` if the link hook had
been registered globally.
"""
pass
# forward
def forward_preprocess(self, args: _ForwardPreprocessCallbackArgs) -> None:
"""Callback function invoked before a forward call of a link.
Args:
args: Callback data. It has the following attributes:
* link (:class:`~chainer.Link`)
Link object.
* forward_name (:class:`str`)
Name of the forward method.
* args (:class:`tuple`)
Non-keyword arguments given to the forward method.
* kwargs (:class:`dict`)
Keyword arguments given to the forward method.
"""
pass
def forward_postprocess(
self,
args: _ForwardPostprocessCallbackArgs
) -> None:
"""Callback function invoked after a forward call of a link.
Args:
args: Callback data. It has the following attributes:
* link (:class:`~chainer.Link`)
Link object.
* forward_name (:class:`str`)
Name of the forward method.
* args (:class:`tuple`)
Non-keyword arguments given to the forward method.
* kwargs (:class:`dict`)
Keyword arguments given to the forward method.
* out
Return value of the forward method.
"""
pass
| 7,090
| 32.92823
| 79
|
py
|
chainer
|
chainer-master/chainer/memory_layouts.py
|
import chainer
from chainer import function_node
from chainer.utils import type_check
# TODO(niboshi): memory layouts and conversions should better be implemented
# with polymorphism instead of ad-hoc conditions.
CUDNN_CHANNEL_FIRST_X = None
CUDNN_CHANNEL_LAST_X = 'CUDNN_CHANNEL_LAST_X'
CUDNN_CHANNEL_FIRST_W = None
CUDNN_CHANNEL_LAST_W = 'CUDNN_CHANNEL_LAST_W'
class _Unspecified:
def __repr__(self):
return '<unspecified>'
_unspecified = _Unspecified()
def get_raw_shape(arr_or_var):
if isinstance(arr_or_var, chainer.Variable):
arr = arr_or_var._data[0]
else:
arr = arr_or_var
return arr.shape
def get_semantic_shape(arr_or_var, *, assumed_layout=_unspecified):
if not isinstance(arr_or_var, chainer.Variable):
# array
shape = arr_or_var.shape
if assumed_layout is not _unspecified:
shape = _transpose_shape(shape, assumed_layout, None)
return shape
# variable
if assumed_layout is not _unspecified:
# TODO(niboshi): Raise exception
assert arr_or_var.layout == assumed_layout
return arr_or_var.shape
def _transpose_array(arr, src_layout, dst_layout):
trans = _get_layout_transpose_axes(arr.ndim, src_layout, dst_layout)
if trans is None:
return arr
return arr.transpose(*trans)
def _transpose_shape(shape, src_layout, dst_layout):
trans = _get_layout_transpose_axes(len(shape), src_layout, dst_layout)
if trans is None:
return shape
return tuple([shape[i] for i in trans])
def _get_layout_transpose_axes(ndim, src_layout, dst_layout, inverse=False):
# None: no transposition is required.
if src_layout == dst_layout:
return None
if dst_layout == CUDNN_CHANNEL_LAST_X:
assert ndim >= 3
assert src_layout == CUDNN_CHANNEL_FIRST_X
trans = (0,) + tuple(range(2, ndim)) + (1,)
elif dst_layout == CUDNN_CHANNEL_LAST_W:
assert ndim >= 3
assert src_layout == CUDNN_CHANNEL_FIRST_W
trans = (0,) + tuple(range(2, ndim)) + (1,)
elif src_layout == CUDNN_CHANNEL_LAST_X:
assert ndim >= 3
assert dst_layout == CUDNN_CHANNEL_FIRST_X
trans = (0, ndim-1) + tuple(range(1, ndim-1))
elif src_layout == CUDNN_CHANNEL_LAST_W:
assert ndim >= 3
assert dst_layout == CUDNN_CHANNEL_FIRST_W
trans = (0, ndim-1) + tuple(range(1, ndim-1))
else:
raise ValueError(
'Unknown layout conversion: from \'{}\' to \'{}\''.format(
src_layout, dst_layout))
if inverse:
t = [None] * ndim
for i, n in enumerate(trans):
t[n] = i
trans = tuple(t)
# Postconditions:
# - assert isinstance(trans, tuple)
# - assert len(trans) == ndim
# - assert all([i in trans for i in range(ndim)])
return trans
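# Illustrative behavior (a sketch, not part of the original module): for a
# 4-dimensional NCHW array converted to the channels-last layout, trans is
# (0, 2, 3, 1); with inverse=True the returned permutation is (0, 3, 1, 2).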
class AsLayout(function_node.FunctionNode):
"""Permute the dimensions of an array."""
axes = None
in_layout = None
def __init__(self, layout):
self.out_layout = layout
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1,)
def check_layout_forward(self, inputs):
x, = inputs
self.axes = _get_layout_transpose_axes(
x.ndim, x.layout, self.out_layout)
self.in_layout = x.layout
@property
def label(self):
return 'AsLayout'
def forward_chainerx(self, inputs):
# TODO(niboshi): Add support for this
raise RuntimeError(
'Non-standard memory layouts are not supported for chainerx.')
def forward(self, inputs):
x, = inputs
axes = self.axes
self.output_layouts = (self.out_layout,)
if axes is None:
return x
return x.transpose(axes),
def backward(self, indexes, grad_outputs):
return AsLayout(self.in_layout).apply(grad_outputs)
| 3,916
| 26.780142
| 76
|
py
|
chainer
|
chainer-master/chainer/gradient_check.py
|
from __future__ import absolute_import
import warnings
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer import configuration
from chainer import testing
from chainer import utils
from chainer import variable
import chainerx
class NondifferentiableError(Exception):
pass
def _copy_arrays(xs):
xp = backend.get_array_module(*xs)
if xp is chainerx:
return [
None if x is None
else xp.array(x, dtype=numpy.float64, copy=True, device=x.device)
for x in xs]
else:
return [xp.array(x, dtype=numpy.float64, copy=True) for x in xs]
def _ones_like(arr):
device = backend.get_device_from_array(arr)
with chainer.using_device(device):
return device.xp.ones_like(arr)
def _make_outputs_props_in_error_message(outputs, grad_outputs):
return (
'Output shapes and dtypes : {}\n'
'Output gradient shapes and dtypes: {}'.format(
utils._format_array_props(outputs),
utils._format_array_props(grad_outputs)))
def _check_outputs_and_grad_outputs(outputs, grad_outputs):
if len(outputs) != len(grad_outputs):
raise ValueError(
            'Output gradients must contain as many elements as '
            'the outputs.\n'
'{}'.format(
_make_outputs_props_in_error_message(outputs, grad_outputs)))
shapes_match = True
dtypes_match = True
for y, gy in zip(outputs, grad_outputs):
if gy is None:
continue
if y is None and (gy == 0).all():
continue
if y.shape != gy.shape:
shapes_match = False
if y.dtype != gy.dtype:
dtypes_match = False
if not (shapes_match and dtypes_match):
raise ValueError(
'Shapes and/or dtypes of outputs and output gradients do not '
'match.\n'
'{}'.format(
_make_outputs_props_in_error_message(outputs, grad_outputs)))
def numerical_grad(
f, inputs, grad_outputs, eps=1e-3,
detect_nondifferentiable=False, diff_atol=0, diff_rtol=1e-2,
center_outputs=None):
"""Computes numerical gradient by finite differences.
This function is used to implement gradient check. For usage example, see
unit tests of :mod:`chainer.functions`.
By default, ``numerical_grad`` computes the gradient to the first order of
``eps``.
Args:
f (callable): Python function with no arguments that runs forward
computation and returns the result.
inputs (tuple of arrays): Tuple of arrays that should be treated as
inputs. Each element of them is slightly modified to realize
numerical gradient by finite differences.
grad_outputs (tuple of arrays or scalars): Tuple of arrays or scalars
that are treated as output gradients.
eps (float): Epsilon value of finite differences.
detect_nondifferentiable (bool):
``False`` by default.
If ``True``, ``numerical_grad`` checks whether ``f`` is
differentiable at ``inputs``.
It requires evaluation of ``f`` at 5 points instead of 2.
As a side effect, the accuracy of numerical gradient will be
increased to the third order of ``eps``.
            If it turns out that ``f`` is non-differentiable at ``inputs``,
``numerical_grad`` raises
:class:`~chainer.gradient_check.NondifferentiableError`.
diff_atol (float):
Absolute tolerance of fitting error of non-differentiable point
detection.
diff_rtol (float):
Tolerance of fitting error of non-differentiable point detection
relative to the output values of ``f``.
center_outputs (tuple of arrays or None):
Only used if ``detect_nondifferentiable`` is ``True``.
If specified, these arrays are used as the outputs of ``f`` at
``inputs``.
Otherwise, it is calculated.
It can be used to reduce the computation if these arrays are
already calculated before calling ``numerical_grad``.
Returns:
tuple: Numerical gradient arrays corresponding to ``inputs``.
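    .. admonition:: Example
        A minimal sketch for a scalar square function (the values are
        illustrative)::
            x = numpy.array([2.0])
            gy = (numpy.array([1.0]),)
            gx, = numerical_grad(lambda: (x ** 2,), (x,), gy)
            # gx is approximately [4.], i.e. d(x**2)/dx at x == 2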
"""
# TODO(niboshi): Deprecate `center_outputs` argument.
# If dtype of this argument is not float64, often the resolution is
# insufficient for numerical gradient calculation. We might use it only
# when its dtype is float64, but it would be better to simply remove it.
center_outputs = None
assert eps > 0
assert isinstance(inputs, (tuple, list))
for x in inputs:
if x.dtype.kind != 'f':
raise RuntimeError(
'The dtype of input arrays must be kind of float')
inputs = tuple(inputs)
# Cast grad_outputs to float64
grad_outputs = tuple([
None if g is None
else numpy.float64(g) if numpy.isscalar(g)
else g.astype(numpy.float64)
for g in grad_outputs])
if not chainer.is_arrays_compatible(
[a for a in inputs + grad_outputs if not numpy.isscalar(a)]):
raise RuntimeError('Do not mix GPU and CPU arrays in `numerical_grad`')
device = backend.get_device_from_array(*(inputs + grad_outputs))
xp = device.xp
if xp is cuda.cupy:
numerical_grad_kernel_1 = cuda.reduce(
'T y1, T y2, U gy, T eps', 'V gxi',
'(y1 - y2) * gy', 'a + b', 'gxi += a / (eps * 2)', '0',
'numerical_grad_kernel_1'
)
numerical_grad_kernel_3 = cuda.reduce(
'T y1, T y2, T y3, T y4, U gy, T eps', 'V gxi',
'(-y1 + 8 * y2 - 8 * y3 + y4) * gy',
'a + b', 'gxi += a / (eps * 6)', '0',
'numerical_grad_kernel_3'
)
if xp is chainerx:
grads = [
xp.zeros(x.shape, numpy.float64, device=x.device) for x in inputs]
else:
grads = [xp.zeros(x.shape, numpy.float64) for x in inputs]
if detect_nondifferentiable:
if center_outputs is None:
ys0 = _copy_arrays(f())
else:
ys0 = center_outputs
nout = len(ys0)
shapes = [y0.shape for y0 in ys0]
sizes = numpy.array([y0.size for y0 in ys0])
cumsizes = numpy.cumsum(sizes)
# Evaluate func at a single input
def eval_func(x, x_ind, delta, orig):
x[x_ind] = orig + delta
ys = _copy_arrays(f())
assert len(ys) == len(grad_outputs)
assert all([
gy is None
for y, gy in zip(ys, grad_outputs)
if y is None])
assert all([
gy is None or numpy.isscalar(gy) or y.shape == gy.shape
for y, gy in zip(ys, grad_outputs)])
x[x_ind] = orig
return ys
# An iteration on a single input displacement
def iterate_single_input(i_in, x, orig_x, x_ind):
orig = orig_x[x_ind]
# `yss` holds a list of output arrays for each of 2 or 5 sampling
# points.
if detect_nondifferentiable:
yss = [
eval_func(x, x_ind, -eps * 1., orig),
eval_func(x, x_ind, -eps * .5, orig),
ys0,
eval_func(x, x_ind, +eps * .5, orig),
eval_func(x, x_ind, +eps * 1., orig),
]
else:
yss = [
eval_func(x, x_ind, -eps * 1, orig),
eval_func(x, x_ind, +eps * 1, orig),
]
assert all([
y is None
or (y.shape == yss[0][i].shape and y.dtype == yss[0][i].dtype)
for ys in yss
for i, y in enumerate(ys)])
# If all the outputs are 0-size, skip non-differentiable check.
if all([y is None or y.size == 0 for y in yss[0]]):
detect_nondifferentiable_ = False
else:
detect_nondifferentiable_ = detect_nondifferentiable
if detect_nondifferentiable_:
# Detect non-differentiable point by quadratic fitting
# Check for non-finite output.
# If any single element in the output arrays has different
# finiteness among sampled points, that means this is a
# non-differentiable point.
# If the function consistently generates non-finite values
# around the point, we do not treat the point as
# non-differentiable.
# (Example: x<0 region for the logarithm function)
any_nonfinite = False
for i_out in range(nout):
isfinites = [xp.isfinite(ys[i_out]) for ys in yss]
if any((isfinites[0] != isfinites[i]).any()
for i in range(1, len(yss))):
s = six.StringIO()
s.write(
'Tried to compute the numeric gradient on a '
'non-differentiable point.\n\n')
s.write('i_in: {}\n'.format(i_in))
s.write('i_out: {}\n'.format(i_out))
s.write('x: {}\n'.format(inputs[i_in]))
s.write('index on x: {}\n'.format(x_ind))
s.write('eps: {}\n'.format(eps))
s.write('y[x-eps ]: {}\n'.format(yss[0][i_out]))
s.write('y[x-eps/2]: {}\n'.format(yss[1][i_out]))
s.write('y[x ]: {}\n'.format(yss[2][i_out]))
s.write('y[x+eps/2]: {}\n'.format(yss[3][i_out]))
s.write('y[x+eps ]: {}\n'.format(yss[4][i_out]))
raise NondifferentiableError(s.getvalue())
                any_nonfinite |= not all(isf.all() for isf in isfinites)
if not any_nonfinite:
# Stack flattened outputs to make (5, *)-shaped 2D array
ystack = xp.vstack(
[xp.hstack([y.ravel() for y in ys]) for ys in yss])
assert ystack.ndim == 2 and ystack.shape[0] == len(yss)
# Fit to quadratic
if xp is not numpy:
ystack = _cpu._to_cpu(ystack)
polyfit = numpy.polynomial.polynomial.polyfit
_, (residuals, _, _, _) = polyfit(
range(len(yss)), ystack, deg=2, full=True)
if xp is not numpy:
residuals = device.send(residuals)
residuals = xp.sqrt(residuals / len(yss))
# Check for error for each output array
for i_out in range(nout):
size = sizes[i_out]
cumsize = cumsizes[i_out]
shape = shapes[i_out]
# TODO(niboshi): The following two lines could be
# rewritten using xp.stack, which is supported in
# NumPy>=1.10
ymax = xp.concatenate(
[ys[i_out][None] for ys in yss]).max(axis=0)
ymin = xp.concatenate(
[ys[i_out][None] for ys in yss]).min(axis=0)
# Restore the shape of flattened residual
res = residuals[cumsize - size:cumsize]
res = res.reshape(shape)
det = utils.force_array(
diff_atol + diff_rtol * (ymax - ymin) < res)
# Constant output = not nondifferentiable
det[ymax == ymin] = False
if det.any():
s = six.StringIO()
s.write(
'Tried to compute the numeric gradient on a '
'non-differentiable point.\n\n')
s.write('i_in: {}\n'.format(i_in))
s.write('i_out: {}\n'.format(i_out))
s.write('x: {}\n'.format(inputs[i_in]))
s.write('index on x: {}\n'.format(x_ind))
s.write('eps: {}\n'.format(eps))
s.write('diff_rtol: {}\n'.format(diff_rtol))
s.write('diff_atol: {}\n'.format(diff_atol))
s.write('ymax: {}\n'.format(ymax))
s.write('ymin: {}\n'.format(ymin))
s.write(
'diff_atol + diff_rtol * (ymax-ymin): {}\n'.format(
diff_atol + diff_rtol * (ymax - ymin)))
s.write('fitting errors: {}\n'.format(res))
s.write('y[x-eps ]: {}\n'.format(yss[0][i_out]))
s.write('y[x-eps/2]: {}\n'.format(yss[1][i_out]))
s.write('y[x ]: {}\n'.format(yss[2][i_out]))
s.write('y[x+eps/2]: {}\n'.format(yss[3][i_out]))
s.write('y[x+eps ]: {}\n'.format(yss[4][i_out]))
raise NondifferentiableError(s.getvalue())
# Calculate numerical gradient
for i_out, gy in enumerate(grad_outputs):
if gy is None:
continue
if not numpy.isscalar(gy):
gy = gy.astype(numpy.float64, copy=False)
gpu_ = (xp is cuda.cupy and
all(isinstance(ys[i_out], cuda.ndarray)
for ys in yss))
# If any output sample is None, all others must be.
assert all([
(yss[0][i_out] is None) == (yss[j][i_out] is None)
for j in range(len(yss))])
            # If the output samples are None, the part of the numeric
            # gradient for this output is considered zero: skip the
            # accumulation.
if yss[0][i_out] is None:
continue
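            # Two sampling points give the standard central difference
            # (f(x+eps) - f(x-eps)) / (2 * eps); five points give the
            # higher-order five-point stencil with step eps/2:
            # (f(x-eps) - 8*f(x-eps/2) + 8*f(x+eps/2) - f(x+eps)) / (6 * eps).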
if len(yss) == 2: # 1st order
y0 = yss[0][i_out]
y1 = yss[1][i_out]
if gpu_:
numerical_grad_kernel_1(
y1, y0, xp.asarray(gy), eps, gx[x_ind])
else:
dot = ((y1 - y0) * gy).sum()
gx[x_ind] = gx[x_ind] + dot / (2 * eps)
elif len(yss) == 5: # 3rd order
y0 = yss[0][i_out]
y1 = yss[1][i_out]
y2 = yss[3][i_out]
y3 = yss[4][i_out]
if gpu_:
numerical_grad_kernel_3(
y3, y2, y1, y0, gy, eps, gx[x_ind])
else:
num = -y3 + 8 * y2 - 8 * y1 + y0
dot = (num * gy).sum()
gx[x_ind] = gx[x_ind] + dot / (6 * eps)
else:
assert False
# Calculate numeric gradient
with configuration.using_config('type_check', False):
for i_in, (x, gx) in enumerate(six.moves.zip(inputs, grads)):
orig_x = x.copy() # hold original value
for x_ind in numpy.ndindex(x.shape):
iterate_single_input(i_in, x, orig_x, x_ind)
return [g.astype(x.dtype, copy=False)
for g, x in six.moves.zip(grads, inputs)]
def assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):
"""Asserts if some corresponding element of x and y differs too much.
This function can handle both CPU and GPU arrays simultaneously.
Args:
x: Left-hand-side array.
y: Right-hand-side array.
atol (float): Absolute tolerance.
rtol (float): Relative tolerance.
verbose (bool): If ``True``, it outputs verbose messages on error.
"""
warnings.warn(
'chainer.gradient_check.assert_allclose is deprecated. '
'Use chainer.testing.assert_allclose instead.',
DeprecationWarning)
testing.assert_allclose(x, y, atol, rtol, verbose)
def _as_tuple(xs):
if isinstance(xs, tuple):
return xs
elif isinstance(xs, list):
return tuple(xs)
else:
return xs,
class _CheckBackward(object):
def __init__(
self, func, xs, gys, params, eps, atol, rtol, no_gxs,
dtype, detect_nondifferentiable, is_immutable_params):
# If `is_immutable_params` is `False`, `params` are expected to be of
# type `chainer.Parameter` and are updated in-place.
# To run `_CheckBackward` with ChainerX ndarrays however which cannot
# be updated in-place when wrapped in `chainer.Parameter`s, this flag
# should be `True` and parameters should be given as ndarrays.
# `func` in the former case must take inputs as arguments only. In the
# latter, it must take the parameters in addition.
if dtype is not None and numpy.dtype(dtype).kind != 'f':
            raise ValueError('`dtype` must be of a float kind')
if is_immutable_params:
if not all(
isinstance(p, chainer.get_array_types()) for p in params):
raise ValueError(
'All parameters in `params` must be ndarrays if '
'`is_immutable_params` is `True`. Actual: {}.'.format(
', '.join(str(type(p)) for p in params)))
xs = _as_tuple(xs)
if gys is not None:
gys = _as_tuple(gys)
params = _as_tuple(params)
if no_gxs is None:
no_gxs = [None if x is None else x.dtype.kind != 'f' for x in xs]
else:
if len(no_gxs) != len(xs):
raise ValueError(
                    'Lengths of no_grads and xs should be the same.\n'
'Actual: {0} != {1}'.format(len(no_gxs), len(xs)))
device = backend.get_device_from_array(*xs)
if device.xp is chainerx:
if params and not is_immutable_params:
raise NotImplementedError(
'gradient_check does not support params argument for '
'ChainerX arrays')
self.device = device
self.func = func
self.xs = xs
self.gys = gys
self.params = params
self.no_gxs = no_gxs
self.atol = atol
self.rtol = rtol
self.is_immutable_params = is_immutable_params
# options for numeric gradients
self.eps = eps
self.dtype = dtype
self.detect_nondifferentiable = detect_nondifferentiable
def run(self):
with chainer.using_device(self.device):
self._run()
def _run(self):
# Run a forward pass for backward gradients.
# Uninitialized parameters may be initialized.
# If self.gys is None, it is also updated with 1s.
# This must be done before sampling a direction vector, because
# otherwise the shapes of uninitialized parameters wouldn't be
# determined.
xs_backward, ys, params_backward = (
self._forward_for_backward_gradients())
# Keep output arrays to save computation in numerical gradients
ys0 = tuple([None if y is None else y.array for y in ys])
# If gys is not given, generate the all-1 gradients.
if self.gys is None:
if not (len(ys) == 1 and ys[0].shape == ()):
raise ValueError(
'y_grad argument cannot be omitted if the target function '
'is not a loss function, which has a single output with '
'shape ().\n'
'Actual output shapes: {}'.format(
', '.join([str(y.shape) for y in ys])))
self.gys = tuple([_ones_like(y.array) for y in ys])
else:
_check_outputs_and_grad_outputs(ys, self.gys)
# Strike out gys corresponding to None y
self.gys = tuple([
None if y is None else gy for gy, y in zip(self.gys, ys0)])
# Sample a direction vector.
directions = self._sample_directions()
# Compute backward gradients by running a backward pass.
gx_backward = self._directional_backward_gradients(
xs_backward, ys, params_backward, directions)
# Compute numeric gradients
gx_numeric = self._directional_numeric_gradients(directions, ys0)
        # Compare the resulting gradients
self._compare_gradients(gx_numeric, gx_backward, directions)
def _compare_gradients(self, gx_numeric, gx_backward, directions):
atol = self.atol
rtol = self.rtol
# Compare the gradients
try:
testing.assert_allclose(
gx_numeric, gx_backward, atol=atol, rtol=rtol)
except AssertionError as e:
eps = self.eps
xs = self.xs
gys = self.gys
f = six.StringIO()
f.write('check_backward failed (eps={} atol={} rtol={})\n'.format(
eps, atol, rtol))
for i, x in enumerate(xs):
f.write('inputs[{}]:\n'.format(i))
f.write('{}\n'.format(x))
for i, gy in enumerate(gys):
f.write('grad_outputs[{}]:\n'.format(i))
f.write('{}\n'.format(gy))
for i, d in enumerate(directions):
f.write('directions[{}]:\n'.format(i))
f.write('{}\n'.format(d))
f.write('gradients (numeric): {}\n'.format(gx_numeric))
f.write('gradients (backward): {}\n'.format(gx_backward))
f.write('\n')
f.write('x: numeric gradient, y: backward gradient')
f.write(str(e))
raise AssertionError(f.getvalue())
def _sample_directions(self):
# Samples a direction vector (list of arrays with the same shapes as
# input arrays and parameters)
device = self.device
xs = self.xs
params = self.params
no_gxs = self.no_gxs
xp = device.xp
direction_xs_shapes = [
None if x is None
else x.shape for x, no_gx in six.moves.zip(xs, no_gxs)
if not no_gx]
direction_param_shapes = [p.shape for p in params]
direction_shapes = direction_xs_shapes + direction_param_shapes
total_size = sum([
int(numpy.prod(shape)) for shape in direction_shapes
if shape is not None])
# Sample the concatenated vector at random
directions = self._sample_unit_vector(total_size, xp)
        # Unpack the concatenated vector and return it as a tuple of arrays.
return self._unpack_arrays(xp, directions, direction_shapes)
@staticmethod
def _sample_unit_vector(size, xp):
directions = xp.random.normal(size=(size,))
if size == 0:
return directions
# The direction vector is normalized in order to keep the scale of
# differentiation error invariant with respect to the number of
# input dimensions. Ideally, the scale of the curvature with
# respect to each input dimension should be taken into account,
# but we ignore the differences and assume that the curvature is
# uniform with respect to all the input dimensions.
#
        # Small elements in the direction vector lead to instability in the
        # gradient comparison. In order to avoid that, the absolute values
        # are made at least 0.1 / sqrt(size).
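        #
        # Algebraically, the squared elements of the returned vector sum to
        # (0.99 / sq_norm) * sq_norm + size * (0.01 / size) = 1, so the
        # result is still a unit vector, while every element has magnitude
        # at least sqrt(0.01 / size) = 0.1 / sqrt(size).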
sq_directions = xp.square(directions)
sq_norm = sq_directions.sum()
return xp.copysign(
# Weighted quadratic mean of
# abs(directions / norm) and xp.full(size, 1 / xp.sqrt(size)),
# where norm = xp.sqrt(sq_norm)
xp.sqrt(
(0.99 / sq_norm) * sq_directions
+ 0.01 / size),
directions)
def _unpack_arrays(self, xp, packed_array, shapes):
# Unpacks a flattened-and-concatenated array into original shapes.
# Shapes may include None.
assert packed_array.ndim == 1
n = len(shapes)
# For simplicity omit None from the shapes. They're recovered later.
none_indices = [
i for i, shape in enumerate(shapes) if shape is None]
shapes = [shape for shape in shapes if shape is not None]
# Unpack the array into given shapes
sizes = [int(numpy.prod(shape)) for shape in shapes]
cumsizes = numpy.cumsum(sizes)
unpacked_arrays = [
packed_array[cumsize - size:cumsize].reshape(shape)
for cumsize, size, shape
in zip(cumsizes, sizes, shapes)]
# Recover Nones
for i in none_indices:
unpacked_arrays.insert(i, None)
assert len(unpacked_arrays) == n
return tuple(unpacked_arrays)
def _clear_grads(self, xs):
for x in xs:
if x is None:
continue
x.grad_var = None
def _forward_for_backward_gradients(self):
func = self.func
xs = self.xs
params = self.params
xs = [
None if x is None
else variable.Variable(x, requires_grad=x.dtype.kind == 'f')
for x in xs]
if self.is_immutable_params:
params = tuple([chainer.Parameter(p) for p in params])
ys = func(xs, params)
else:
ys = func(*xs)
ys = _as_tuple(ys)
# Clear gradients which may exist if func calls backward inside of
# itself.
self._clear_grads(xs)
self._clear_grads(params)
return xs, ys, params
def _directional_backward_gradients(self, xs, ys, params, directions):
no_gxs = self.no_gxs
gys = (
[None if gy is None
# Copy is needed to avoid being updated during backprop, which
# would affect the numerical gradient.
# TODO(niboshi): Preserve strides, for testing purpose.
else chainer.Variable(gy.copy(), requires_grad=False)
for gy in self.gys])
# Backward
chainer.backward(ys, gys)
for no_gx, x in six.moves.zip(no_gxs, xs):
if no_gx and x.grad is not None:
raise RuntimeError(
'gradient of int variable must be None')
grads = (
[None if x is None
else x.grad for x, no_gx in six.moves.zip(xs, no_gxs)
if not no_gx]
+ [p.grad for p in params])
gx_accum = 0
assert len(grads) == len(directions)
for g, direction in six.moves.zip(grads, directions):
if g is not None:
assert direction is not None
gx_accum += (g.astype(numpy.float64) * direction).sum()
return gx_accum
def _directional_numeric_gradients(self, directions, y0_data):
device = self.device
func = self.func
xs = self.xs
gys = self.gys
params = self.params
eps = self.eps
no_gxs = self.no_gxs
dtype = self.dtype
detect_nondifferentiable = self.detect_nondifferentiable
params_data = [
p if self.is_immutable_params else p.array for p in params]
xp = device.xp
x_vars = [variable.Variable(x, requires_grad=False) for x in xs]
xs_filtered = [
x.array for x, no_gx in six.moves.zip(x_vars, no_gxs) if not no_gx]
if dtype is None:
casted_data = [x for x in xs_filtered + params_data]
else:
if numpy.dtype(dtype).kind != 'f':
                raise ValueError('`dtype` must be a float type')
# Even skipped variable must have the same dtype.
for x, no_gx in six.moves.zip(x_vars, no_gxs):
if no_gx and x.array.dtype.kind == 'f':
x.array = x.array.astype(dtype, copy=False)
casted_data = [
None if x is None else x.astype(dtype, copy=False)
for x in xs_filtered + params_data]
delta = xp.array(0., numpy.float64)
def g():
            # This function is called twice in `numerical_grad`.
# `delta` is `epsilon` or `-epsilon` in these calls.
# See the document of `numerical_grad`.
def perturb(data, direction):
if data is None:
assert direction is None
return data
data = (data.astype(numpy.float64)
+ delta * direction).astype(data.dtype)
if numpy.isscalar(data):
data = xp.array(data)
return data
# Input arrays
g_x_vars = []
j = 0
for x_var, no_gx in six.moves.zip(x_vars, no_gxs):
if no_gx:
g_x_vars.append(x_var)
else:
data = perturb(casted_data[j], directions[j])
g_x_vars.append(
None if data is None else variable.Variable(data))
j += 1
# Parameters
for i in range(len(params)):
data = perturb(casted_data[j + i], directions[j + i])
if self.is_immutable_params:
# Update the parameter array since it is converted into
# a Parameter just before calling the func.
params_data[i] = data
else:
# Update the given Parameter in-place since the object is
# held by the caller.
params[i].array = data
# Clear gradients to support func that calls backward inside of
# itself.
self._clear_grads(g_x_vars)
if not self.is_immutable_params:
self._clear_grads(params)
if self.is_immutable_params:
ps = tuple([chainer.Parameter(p) for p in params_data])
ys = func(g_x_vars, ps)
else:
ys = func(*g_x_vars)
ys = _as_tuple(ys)
ys_data = tuple([None if y is None else y.array for y in ys])
if xp is chainerx:
ys_data = tuple([
None if y is None else y.as_grad_stopped()
for y in ys_data])
if not self.is_immutable_params:
for i, param in enumerate(params):
param.array = casted_data[j + i]
return ys_data
gx, = numerical_grad(
g, (delta,), gys, eps=eps,
detect_nondifferentiable=detect_nondifferentiable,
center_outputs=y0_data, diff_atol=0, diff_rtol=self.rtol)
return gx
def check_backward(
func, x_data, y_grad, params=(),
eps=1e-3, atol=1e-5, rtol=1e-4, no_grads=None, dtype=None,
detect_nondifferentiable=False):
"""Test backward procedure of a given function.
    This function automatically checks the backward process of a given
    function to ensure that the computed gradients are approximately correct.
    For example, assuming you've defined a :class:`~chainer.FunctionNode`
    class ``MyFunc`` that takes two arguments and returns one value, you can
    wrap it in an ordinary function and check its gradient computations as
    follows:
.. code-block:: python
def func(xs):
y, = MyFunc().apply(xs)
return y
x1_data = xp.array(...)
x2_data = xp.array(...)
gy_data = xp.array(...)
check_backward(func, (x1_data, x2_data), gy_data)
This function creates :class:`~chainer.Variable` objects with ``x_data``
and calls ``func`` with the :class:`~chainer.Variable`\\ s to get its
result as :class:`~chainer.Variable`.
    Then, it sets the ``y_grad`` array to the ``grad`` attribute of the result
    and calls the ``backward`` method to get gradients of the inputs.
    To check the correctness of these gradients, the function calls
    :func:`numerical_grad` to calculate the gradients numerically and compares
    the two with :func:`chainer.testing.assert_allclose`.
    To reduce computational time, it uses a directional derivative along a
    random vector. A function
:math:`g: \\mathbb{R} \\rightarrow \\mathbb{R}^n` is defined as
:math:`g(\\delta) = f(x + \\delta r)`, where
:math:`\\delta \\in \\mathbb{R}`, :math:`r \\in \\mathbb{R}^n`
is a random vector
and :math:`f` is a function which you want to test.
Its gradient is
.. math::
g'(\\delta) = f'(x + \\delta r) \\cdot r.
Therefore, :math:`g'(0) = f'(x) \\cdot r`.
So we can check the correctness of back propagation of :math:`f` indirectly
by comparing this equation with the gradient of :math:`g` numerically
calculated and that of :math:`f` computed by backprop.
    If :math:`r` is chosen from a uniform distribution, we can conclude with
high probability that the gradient of :math:`f` itself is correct.
If the function is non-differentiable with respect to some input objects,
we can check its backprop to such objects by ``no_grads`` argument.
    ``gradient_check`` computes numerical gradients only for the inputs that
    correspond to ``False`` in ``no_grads``. It also asserts that the backprop
    leaves gradients ``None`` for inputs that correspond to ``True`` in
    ``no_grads``.
    The default of the ``no_grads`` argument is the tuple of truth values of
    whether each input object (``x1_data`` and/or ``x2_data`` in this example)
    represents an integer variable.
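    For instance, a sketch of checking a function whose second input is an
    integer array (the array contents are illustrative, as above):
    .. code-block:: python
        x_data = xp.array(...)                    # float input
        i_data = xp.array(..., dtype=xp.int32)    # integer input
        gy_data = xp.array(...)
        # The integer input is skipped in the numerical check, and its
        # gradient is asserted to be left None by the backprop.
        check_backward(func, (x_data, i_data), gy_data,
                       no_grads=[False, True])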
You can simplify a test when ``MyFunc`` gets only one argument:
.. code-block:: python
check_backward(func, x1_data, gy_data)
If ``MyFunc`` is a loss function which returns a zero-dimensional
array, pass ``None`` to ``gy_data``. In this case, it sets ``1`` to
``grad`` attribute of the result:
.. code-block:: python
check_backward(my_loss_func,
(x1_data, x2_data), None)
If ``MyFunc`` returns multiple outputs, pass all gradients for outputs
as a tuple:
.. code-block:: python
gy1_data = xp.array(...)
gy2_data = xp.array(...)
check_backward(func, x1_data, (gy1_data, gy2_data))
You can also test a :class:`~chainer.Link`.
To check gradients of parameters of the link, set a tuple of the parameters
to ``params`` arguments:
.. code-block:: python
check_backward(my_link, (x1_data, x2_data), gy_data,
(my_link.W, my_link.b))
Note that ``params`` are not ``ndarray``\\ s,
    but :class:`~chainer.Variable`\\ s.
Function objects are acceptable as ``func`` argument:
.. code-block:: python
check_backward(lambda x1, x2: f(x1, x2),
(x1_data, x2_data), gy_data)
.. note::
        ``func`` is called many times to get numerical gradients for all
        inputs. This function does not work correctly when ``func`` behaves
        stochastically, as the resulting gradients would differ between calls.
Args:
func (callable): A function which gets :class:`~chainer.Variable`\\ s
            and returns :class:`~chainer.Variable`\\ s. ``func`` must return
a tuple of :class:`~chainer.Variable`\\ s or one
:class:`~chainer.Variable`. You can use a
:class:`~chainer.Function`, :class:`~chainer.FunctionNode` or a
:class:`~chainer.Link` object or any other function satisfying the
condition.
x_data (ndarray or tuple of ndarrays): A set of ``ndarray``\\ s to be
passed to ``func``. If ``x_data`` is one ``ndarray`` object, it is
treated as ``(x_data,)``.
y_grad (ndarray or tuple of ndarrays or None):
A set of ``ndarray``\\ s representing gradients of return-values of
``func``. If ``y_grad`` is one ``ndarray`` object, it is
treated as ``(y_grad,)``. If ``func`` is a loss-function,
``y_grad`` should be set to ``None``.
        params (~chainer.Variable or tuple of ~chainer.Variable):
A set of :class:`~chainer.Variable`\\ s whose gradients are
checked. When ``func`` is a :class:`~chainer.Link` object,
set its parameters as ``params``.
If ``params`` is one :class:`~chainer.Variable` object,
it is treated as ``(params,)``.
eps (float): Epsilon value to be passed to :func:`numerical_grad`.
atol (float): Absolute tolerance to be passed to
:func:`chainer.testing.assert_allclose`.
rtol (float): Relative tolerance to be passed to
:func:`chainer.testing.assert_allclose`.
        no_grads (list of bool): Flags to skip variables for gradient
            assertion. It should have the same length as ``x_data``.
dtype (~numpy.dtype): ``x_data``, ``y_grad`` and ``params`` are casted
to this dtype when calculating numerical gradients. Only float
types and ``None`` are allowed.
detect_nondifferentiable (bool):
If ``True``, check for non-differentiable inputs is enabled.
If ``func`` is non-differentiable at ``x_data``, ``check_backward``
raises :class:`~chainer.gradient_check.NondifferentiableError`.
.. seealso::
:func:`numerical_grad`
"""
_CheckBackward(
func, x_data, y_grad, params, eps, atol, rtol, no_grads, dtype,
detect_nondifferentiable, is_immutable_params=False
).run()
def _check_backward_with_params(
# This function was introduced along with the `is_immutable_params`
# argument to `_CheckBackward`.
# It allows passing `params` as ndarrays instead of `Parameter`s and thus
# depends less on the state of the parameter held by the caller.
# It is required by the `LinkTestCase` to check ChainerX parameter
    # gradients, since those parameters cannot be perturbed in-place for the
    # numerical gradients if passed as `Parameter`s, as those requiring
# gradients cannot be updated in-place.
func, x_data, y_grad, params=(),
eps=1e-3, atol=1e-5, rtol=1e-4, no_grads=None, dtype=None,
detect_nondifferentiable=False):
assert all(isinstance(p, chainer.get_array_types()) for p in params)
_CheckBackward(
func, x_data, y_grad, params, eps, atol, rtol, no_grads, dtype,
detect_nondifferentiable, is_immutable_params=True
).run()
def check_double_backward(func, x_data, y_grad, x_grad_grad, params=(),
params_grad_grad=(), eps=1e-3, atol=1e-4, rtol=1e-3,
no_grads=None, dtype=None,
detect_nondifferentiable=False):
"""Test twice differentiation of a given procedure.
This function automatically checks if the backward procedure of ``func``
is correctly implemented for further differentiation. It first computes the
gradient of ``func`` w.r.t. its inputs in the same way as
:func:`~chainer.gradient_check.check_backward`. This function then further
invokes the backward procedure against the gradient variables, starting
from the initial gradient given by ``x_grad_grad``. It also computes the
second gradient using :func:`~chainer.gradient_check.numerical_grad`. The
resulting gradients are compared to confirm if the second-order gradients
are approximately correct.
Note that this function **DOES NOT** check if the first-order
differentiation is correct; the numerical gradient assumes that the
first-order gradient given by the usual :meth:`chainer.Variable.backward`
is correct. The implementation of each differentiable function should be
tested by :func:`~chainer.gradient_check.check_backward` first, and then
should be tested by this function if necessary.
For the details of the arguments, see
:func:`~chainer.gradient_check.check_backward`. The additional arguments
``x_grad_grad`` and ``params_grad_grad`` are (tuples of)
:class:`~chainer.Variable` (s) that include the initial gradient
corresponding to the first-order gradient of each input and parameter. Note
that the default error tolerance ``atol`` and ``rtol`` are slightly larger
than those of :func:`~chainer.gradient_check.check_backward` because the
numerical gradients of the second order differentiation are less accurate
than those of the first order gradients.
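    A minimal usage sketch (array contents are illustrative):
    .. code-block:: python
        x_data = xp.array(...)
        gy_data = xp.array(...)     # gradient w.r.t. the output
        ggx_data = xp.array(...)    # initial gradient for the second backward
        check_double_backward(func, x_data, gy_data, ggx_data)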
"""
# Rename variables
xs = x_data
gys = y_grad
ggxs = x_grad_grad
ggparams = params_grad_grad
no_gxs = no_grads
del x_data
del y_grad
del x_grad_grad
del params_grad_grad
del no_grads
xs = _as_tuple(xs)
params = _as_tuple(params)
gys = _as_tuple(gys)
ggxs = _as_tuple(ggxs)
ggparams = _as_tuple(ggparams)
n_x = len(xs)
first_order_no_gxs = [x.dtype.kind != 'f' for x in xs]
def first_order_grad(*inputs):
xs = inputs[:n_x]
gys = inputs[n_x:]
ys = _as_tuple(func(*xs))
# `gys` (inputs to `first_order_grad` forward function) may have been
# casted to float64 by `numerical_grad`. For certain functions demoting
# the dtypes (e.g. `F.cast` that casts to float16), the dtypes of `ys`
# (e.g. outputs of `F.cast`) and `gys` (e.g. given by `numerical_grad`)
# may mismatch and we need to align those dtypes here.
gys = [
None if gy is None
else chainer.functions.cast(gy, y.dtype) for y, gy in zip(ys, gys)]
_check_outputs_and_grad_outputs(ys, gys)
chainer.backward(ys, gys, enable_double_backprop=True)
gxs = []
errors = []
for i, (no_gx, x) in enumerate(six.moves.zip(first_order_no_gxs, xs)):
if no_gx:
if x.grad is not None:
errors.append(
                        '[{}]: Gradient was computed while it was expected '
                        'not to be.'
.format(i))
else:
if x.grad is None:
gxs.append(None)
else:
gxs.append(x.grad_var)
if len(errors) > 0:
f = six.StringIO()
f.write('There are errors retrieving first-order gradients:\n')
f.write('Inputs: {}\n'.format(utils._format_array_props(xs)))
f.write('Skip: {}\n'.format(
', '.join(str(no_gx) for no_gx in first_order_no_gxs)))
f.write('Errors:\n')
for error in errors:
f.write('{}\n'.format(error))
raise RuntimeError(f.getvalue())
return tuple(gxs + [p.grad_var for p in params])
inputs = xs + gys
grad_grad = ggxs + ggparams
try:
check_backward(first_order_grad, inputs, grad_grad, params=params,
eps=eps, atol=atol, rtol=rtol, no_grads=no_gxs,
dtype=dtype,
detect_nondifferentiable=detect_nondifferentiable)
except AssertionError as e:
f = six.StringIO()
f.write('check_double_backward failed '
'(eps={} atol={} rtol={})\n'.format(eps, atol, rtol))
for i, x in enumerate(xs):
f.write('input[{}]:\n'.format(i))
f.write('{}\n'.format(x))
for i, gy in enumerate(gys):
f.write('grad_output[{}]:\n'.format(i))
f.write('{}\n'.format(gy))
for i, ggx in enumerate(ggxs):
f.write('grad_grad_input[{}]:\n'.format(i))
f.write('{}\n'.format(ggx))
for i, ggp in enumerate(ggparams):
f.write('grad_grad_param[{}]:\n'.format(i))
f.write('{}\n'.format(ggp))
f.write('\n')
f.write(str(e))
utils._raise_from(AssertionError, f.getvalue(), e)
| 43,693
| 38.939671
| 79
|
py
|
chainer
|
chainer-master/chainer/cuda.py
|
import sys
from chainer.backends import cuda
sys.modules[__name__] = cuda
| 77
| 10.142857
| 33
|
py
|
chainer
|
chainer-master/chainer/reporter.py
|
from __future__ import absolute_import
import collections
import contextlib
import copy
import json
import threading
import typing as tp # NOQA
import warnings
import numpy
import six
import chainer
from chainer import backend
from chainer import configuration
from chainer import serializer as serializer_module
from chainer import variable
import chainerx
_thread_local = threading.local()
def _copy_variable(value):
if isinstance(value, variable.Variable):
return copy.copy(value)
return value
class Reporter(object):
"""Object to which observed values are reported.
Reporter is used to collect values that users want to watch. The reporter
object holds a mapping from value names to the actually observed values.
We call this mapping `observations`.
When a value is passed to the reporter, an object called `observer` can be
optionally attached. In this case, the name of the observer is added as the
prefix of the value name. The observer name should be registered
beforehand.
See the following example:
>>> from chainer import Reporter, report, report_scope
>>>
>>> reporter = Reporter()
>>> observer = object() # it can be an arbitrary (reference) object
>>> reporter.add_observer('my_observer', observer)
>>> observation = {}
>>> with reporter.scope(observation):
... reporter.report({'x': 1}, observer)
...
>>> observation
{'my_observer/x': 1}
    There is also a global API to add values:
>>> reporter = Reporter()
>>> observation = {}
>>> with reporter:
... with report_scope(observation):
... report({'x': 1})
...
>>> observation
{'x': 1}
The most important application of Reporter is to report observed values
from each link or chain in the training and validation procedures.
:class:`~chainer.training.Trainer` and some extensions prepare their own
Reporter object with the hierarchy of the target link registered as
observers. We can use :func:`report` function inside any links and chains
to report the observed values (e.g., training loss, accuracy, activation
statistics, etc.).
Attributes:
observation: Dictionary of observed values.
"""
def __init__(self):
self._observer_names = {}
self.observation = {}
def __enter__(self):
"""Makes this reporter object current."""
_get_reporters().append(self)
def __exit__(self, exc_type, exc_value, traceback):
"""Recovers the previous reporter object to the current."""
_get_reporters().pop()
@contextlib.contextmanager
def scope(self, observation):
"""Creates a scope to report observed values to ``observation``.
This is a context manager to be passed to ``with`` statements. In this
scope, the observation dictionary is changed to the given one.
It also makes this reporter object current.
Args:
observation (dict): Observation dictionary. All observations
reported inside of the ``with`` statement are written to this
dictionary.
"""
old = self.observation
self.observation = observation
self.__enter__()
try:
yield
finally:
self.__exit__(None, None, None)
self.observation = old
def add_observer(self, name, observer):
"""Registers an observer of values.
Observer defines a scope of names for observed values. Values observed
with the observer are registered with names prefixed by the observer
name.
Args:
name (str): Name of the observer.
observer: The observer object. Note that the reporter distinguishes
                the observers by their object ids (i.e., ``id(observer)``),
                rather than by object equality.
"""
self._observer_names[id(observer)] = name
def add_observers(self, prefix, observers):
"""Registers multiple observers at once.
This is a convenient method to register multiple objects at once.
Args:
prefix (str): Prefix of each name of observers.
observers: Iterator of name and observer pairs.
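        For example, a sketch that registers all links under a chain
        (assuming ``chain`` is a :class:`~chainer.Chain`)::
            reporter.add_observers('main', chain.namedlinks(skipself=True))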
"""
for name, observer in observers:
self._observer_names[id(observer)] = prefix + name
def report(self, values, observer=None):
"""Reports observed values.
The values are written with the key, prefixed by the name of the
observer object if given.
.. note::
If a value is of type :class:`~chainer.Variable`, the
variable is copied without preserving the computational graph and
the new variable object purged from the graph is stored to the
observer. This behavior can be changed by setting
``chainer.config.keep_graph_on_report`` to ``True``.
Args:
values (dict): Dictionary of observed values.
observer: Observer object. Its object ID is used to retrieve the
observer name, which is used as the prefix of the registration
name of the observed value.
"""
if not configuration.config.keep_graph_on_report:
values = {k: _copy_variable(v) for k, v in six.iteritems(values)}
if observer is not None:
observer_id = id(observer)
if observer_id not in self._observer_names:
raise KeyError(
'Given observer is not registered to the reporter.')
observer_name = self._observer_names[observer_id]
for key, value in six.iteritems(values):
name = '%s/%s' % (observer_name, key)
self.observation[name] = value
else:
self.observation.update(values)
def _get_reporters():
try:
reporters = _thread_local.reporters
except AttributeError:
reporters = _thread_local.reporters = []
return reporters
def get_current_reporter():
"""Returns the current reporter object."""
return _get_reporters()[-1]
def report(values, observer=None):
"""Reports observed values with the current reporter object.
Any reporter object can be set current by the ``with`` statement. This
function calls the :meth:`Reporter.report` method of the current reporter.
If no reporter object is current, this function does nothing.
.. admonition:: Example
The most typical example is a use within links and chains. Suppose that
a link is registered to the current reporter as an observer (for
example, the target link of the optimizer is automatically registered to
the reporter of the :class:`~chainer.training.Trainer`). We can report
some values from the link as follows::
class MyRegressor(chainer.Chain):
def __init__(self, predictor):
super(MyRegressor, self).__init__(predictor=predictor)
def __call__(self, x, y):
# This chain just computes the mean absolute and squared
# errors between the prediction and y.
pred = self.predictor(x)
abs_error = F.sum(abs(pred - y)) / len(x)
loss = F.mean_squared_error(pred, y)
# Report the mean absolute and squared errors.
chainer.report({
'abs_error': abs_error,
'squared_error': loss,
}, self)
return loss
If the link is named ``'main'`` in the hierarchy (which is the default
name of the target link in the
:class:`~chainer.training.updaters.StandardUpdater`),
these reported values are
named ``'main/abs_error'`` and ``'main/squared_error'``. If these values
are reported inside the :class:`~chainer.training.extensions.Evaluator`
extension, ``'validation/'`` is added at the head of the link name, thus
the item names are changed to ``'validation/main/abs_error'`` and
``'validation/main/squared_error'`` (``'validation'`` is the default
name of the Evaluator extension).
Args:
values (dict): Dictionary of observed values.
observer: Observer object. Its object ID is used to retrieve the
observer name, which is used as the prefix of the registration name
of the observed value.
"""
reporters = _get_reporters()
if reporters:
current = reporters[-1]
current.report(values, observer)
@contextlib.contextmanager
def report_scope(observation):
"""Returns a report scope with the current reporter.
This is equivalent to ``get_current_reporter().scope(observation)``,
except that it does not make the reporter current redundantly.
"""
current = _get_reporters()[-1]
old = current.observation
current.observation = observation
yield
current.observation = old
class Summary(object):
"""Online summarization of a sequence of scalars.
Summary computes the statistics of given scalars online.
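    .. admonition:: Example
        A minimal usage sketch (the values are illustrative)::
            s = Summary()
            for v in [1.0, 2.0, 3.0]:
                s.add(v)
            mean = s.compute_mean()          # 2.0
            mean, std = s.make_statistics()  # (2.0, stddev of the values)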
"""
def __init__(self):
self._x = 0.0
self._x2 = 0.0
self._n = 0
def add(self, value, weight=1):
"""Adds a scalar value.
Args:
value: Scalar value to accumulate. It is either a NumPy scalar or
a zero-dimensional array (on CPU or GPU).
weight: An optional weight for the value. It is a NumPy scalar or
a zero-dimensional array (on CPU or GPU).
Default is 1 (integer).
"""
if isinstance(value, chainerx.ndarray):
            # ChainerX arrays do not support in-place assignment if they
            # are connected to the backprop graph.
value = value.as_grad_stopped()
with chainer.using_device(backend.get_device_from_array(value)):
self._x += weight * value
self._x2 += weight * value * value
self._n += weight
def compute_mean(self):
"""Computes the mean."""
x, n = self._x, self._n
with chainer.using_device(backend.get_device_from_array(x)):
return x / n
def make_statistics(self):
"""Computes and returns the mean and standard deviation values.
Returns:
tuple: Mean and standard deviation values.
"""
x, n = self._x, self._n
xp = backend.get_array_module(x)
with chainer.using_device(backend.get_device_from_array(x)):
mean = x / n
var = self._x2 / n - mean * mean
std = xp.sqrt(var)
return mean, std
def serialize(self, serializer):
try:
self._x = serializer('_x', self._x)
self._x2 = serializer('_x2', self._x2)
self._n = serializer('_n', self._n)
except KeyError:
warnings.warn('The previous statistics are not saved.')
class DictSummary(object):
"""Online summarization of a sequence of dictionaries.
``DictSummary`` computes the statistics of a given set of scalars online.
It only computes the statistics for scalar values and variables of scalar
values in the dictionaries.
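    .. admonition:: Example
        A minimal usage sketch (keys and values are illustrative)::
            ds = DictSummary()
            ds.add({'loss': 0.5, 'acc': (0.8, 2)})  # 'acc' has weight 2
            ds.add({'loss': 0.3, 'acc': 0.9})
            means = ds.compute_mean()     # {'loss': 0.4, 'acc': ...}
            stats = ds.make_statistics()  # adds 'loss.std' and 'acc.std'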
"""
def __init__(self):
self._summaries = collections.defaultdict(Summary)
def add(self, d):
"""Adds a dictionary of scalars.
Args:
d (dict): Dictionary of scalars to accumulate. Only elements of
scalars, zero-dimensional arrays, and variables of
zero-dimensional arrays are accumulated. When the value
is a tuple, the second element is interpreted as a weight.
"""
summaries = self._summaries
for k, v in six.iteritems(d):
w = 1
if isinstance(v, tuple):
w = v[1]
v = v[0]
if isinstance(w, variable.Variable):
w = w.array
if not numpy.isscalar(w) and not getattr(w, 'ndim', -1) == 0:
raise ValueError(
'Given weight to {} was not scalar.'.format(k))
if isinstance(v, variable.Variable):
v = v.array
if numpy.isscalar(v) or getattr(v, 'ndim', -1) == 0:
summaries[k].add(v, weight=w)
def compute_mean(self):
"""Creates a dictionary of mean values.
It returns a single dictionary that holds a mean value for each entry
added to the summary.
Returns:
dict: Dictionary of mean values.
"""
return {name: summary.compute_mean()
for name, summary in six.iteritems(self._summaries)}
def make_statistics(self):
"""Creates a dictionary of statistics.
It returns a single dictionary that holds mean and standard deviation
values for every entry added to the summary. For an entry of name
``'key'``, these values are added to the dictionary by names ``'key'``
and ``'key.std'``, respectively.
Returns:
dict: Dictionary of statistics of all entries.
"""
stats = {}
for name, summary in six.iteritems(self._summaries):
mean, std = summary.make_statistics()
stats[name] = mean
stats[name + '.std'] = std
return stats
def serialize(self, serializer):
if isinstance(serializer, serializer_module.Serializer):
names = list(self._summaries.keys())
serializer('_names', json.dumps(names))
for index, name in enumerate(names):
self._summaries[name].serialize(
serializer['_summaries'][str(index)])
else:
self._summaries.clear()
try:
names = json.loads(serializer('_names', ''))
except KeyError:
warnings.warn('The names of statistics are not saved.')
return
for index, name in enumerate(names):
self._summaries[name].serialize(
serializer['_summaries'][str(index)])
| 14,250
| 32.850356
| 79
|
py
|
chainer
|
chainer-master/chainer/function_hook.py
|
import chainer
class FunctionHook(object):
"""Base class of hooks for Functions.
:class:`~chainer.FunctionHook` is a callback object
that is registered to :class:`~chainer.FunctionNode`.
Registered function hooks are invoked before and after
forward and backward operations of each function.
Function hooks that derive from :class:`FunctionHook` may override the
following methods:
* :meth:`~chainer.FunctionHook.added`
* :meth:`~chainer.FunctionHook.deleted`
* :meth:`~chainer.FunctionHook.forward_preprocess`
* :meth:`~chainer.FunctionHook.forward_postprocess`
* :meth:`~chainer.FunctionHook.backward_preprocess`
* :meth:`~chainer.FunctionHook.backward_postprocess`
By default, these methods do nothing.
Specifically, when the :meth:`~chainer.FunctionNode.__call__`
method of some function is invoked,
:meth:`~chainer.FunctionHook.forward_preprocess`
(resp. :meth:`~chainer.FunctionHook.forward_postprocess`)
of all function hooks registered to this function are called before
(resp. after) forward propagation.
Likewise, when :meth:`~chainer.Variable.backward` of some
:class:`~chainer.Variable` is invoked,
:meth:`~chainer.FunctionHook.backward_preprocess`
(resp. :meth:`~chainer.FunctionHook.backward_postprocess`)
of all function hooks registered to the function which holds this variable
as a gradient are called before (resp. after) backward propagation.
:meth:`~chainer.FunctionHook.added` and
:meth:`~chainer.FunctionHook.deleted` are called when the hook is
registered or unregistered, respectively.
There are two ways to register :class:`~chainer.FunctionHook`
objects to :class:`~chainer.FunctionNode` objects.
The first one is to use ``with`` statement. Function hooks hooked
in this way are registered to all functions within ``with`` statement
and are unregistered at the end of ``with`` statement.
.. admonition:: Example
The following code is a simple example in which
we measure the elapsed time of a part of forward propagation procedure
with :class:`~chainer.function_hooks.TimerHook`, which is a subclass of
:class:`~chainer.FunctionHook`.
>>> class Model(chainer.Chain):
... def __init__(self):
... super(Model, self).__init__()
... with self.init_scope():
... self.l = L.Linear(10, 10)
... def __call__(self, x1):
... return F.exp(self.l(x1))
>>> model1 = Model()
>>> model2 = Model()
>>> x = chainer.Variable(np.zeros((1, 10), np.float32))
>>> with chainer.function_hooks.TimerHook() as m:
... _ = model1(x)
... y = model2(x)
>>> model3 = Model()
>>> z = model3(y)
>>> print('Total time : {}'.format(m.total_time()))
... # doctest:+ELLIPSIS
Total time : ...
In this example, we measure the elapsed times for each forward
propagation of all functions in ``model1`` and ``model2``.
Note that ``model3`` is not a target of measurement
as :class:`~chainer.function_hooks.TimerHook` is unregistered
before forward propagation of ``model3``.
.. note::
Chainer stores the dictionary of registered function hooks
as a thread local object. So, function hooks registered
are different depending on threads.
The other one is to register it directly to
a :class:`~chainer.FunctionNode` object by calling its
:meth:`~chainer.FunctionNode.add_hook` method.
Function hooks registered in this way can be removed by
:meth:`~chainer.FunctionNode.delete_hook` method.
Contrary to the former registration method, function hooks are registered
only to the function whose :meth:`~chainer.FunctionNode.add_hook`
method is called.
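    .. admonition:: Example
        A sketch of per-function registration (assuming ``y`` is a
        :class:`~chainer.Variable` produced by the function of interest)::
            hook = chainer.function_hooks.TimerHook()
            fnode = y.creator_node        # the FunctionNode to observe
            fnode.add_hook(hook)
            # ... run backward; only this function is observed ...
            fnode.delete_hook(hook.name)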
If the hook is registered globally using ``with`` statement, ``None`` is
passed as the ``function`` argument of :meth:`~chainer.FunctionHook.added`
and :meth:`~chainer.FunctionHook.deleted`.
If the hook is registered in a specific function using
:meth:`~chainer.FunctionNode.add_hook`, the :class:`~chainer.FunctionNode`
instance is passed as the ``function`` argument of
:meth:`~chainer.FunctionHook.added` and
:meth:`~chainer.FunctionHook.deleted`.
Args:
name(str): Name of this function hook.
"""
name = 'FunctionHook'
def __enter__(self):
function_hooks = chainer.get_function_hooks()
if self.name in function_hooks:
raise KeyError('hook %s already exists' % self.name)
function_hooks[self.name] = self
self.added(None)
return self
def __exit__(self, *_):
chainer.get_function_hooks()[self.name].deleted(None)
del chainer.get_function_hooks()[self.name]
def added(self, function):
"""Callback function invoked when the function hook is registered
Args:
function(~chainer.FunctionNode): Function object to which
the function hook is added. ``None`` if the function hook is
registered globally.
"""
pass
def deleted(self, function):
"""Callback function invoked when the function hook is unregistered
Args:
function(~chainer.FunctionNode): Function object from which
the function hook is deleted. ``None`` if the function hook
was registered globally.
"""
pass
# forward
def forward_preprocess(self, function, in_data):
"""Callback function invoked before forward propagation.
Args:
function(~chainer.FunctionNode): Function object to which
the function hook is registered.
in_data(tuple of :ref:`ndarray`):
Input data of forward propagation.
"""
pass
def forward_postprocess(self, function, in_data):
"""Callback function invoked after forward propagation.
Args:
function(~chainer.FunctionNode): Function object to which
the function hook is registered.
in_data(tuple of :ref:`ndarray`):
Input data of forward propagation.
"""
pass
# backward
def backward_preprocess(self, function, in_data, out_grad):
"""Callback function invoked before backward propagation.
Args:
function(~chainer.FunctionNode): Function object to which
the function hook is registered.
in_data(tuple of :ref:`ndarray`):
Input data of forward propagation.
out_grad(tuple of :ref:`ndarray`):
Gradient data of backward propagation.
"""
pass
def backward_postprocess(self, function, in_data, out_grad):
"""Callback function invoked after backward propagation.
Args:
function(~chainer.FunctionNode): Function object to which
the function hook is registered.
in_data(tuple of :ref:`ndarray`):
Input of forward propagation.
out_grad(tuple of :ref:`ndarray`):
Gradient data of backward propagation.
"""
pass
| 7,303
| 36.649485
| 79
|
py
|
chainer
|
chainer-master/chainer/computational_graph.py
|
import heapq
from chainer import function_node
from chainer import variable
# Note: docstrings must be updated when changing these default values.
_var_style = {'shape': 'octagon', 'fillcolor': '#E0E0E0', 'style': 'filled'}
_func_style = {'shape': 'record', 'fillcolor': '#6495ED', 'style': 'filled'}
class DotNode(object):
"""Node of the computational graph, with utilities for dot language.
This class represents a node of computational graph,
with some utilities for dot language.
Args:
        node: :class:`VariableNode` object or :class:`FunctionNode` object.
attribute (dict): Attributes for the node.
show_name (bool): If `True`, the `name` attribute of the node is added
to the label. Default is `True`.
"""
def __init__(self, node, attribute=None, show_name=True):
assert isinstance(node, (variable.VariableNode,
function_node.FunctionNode))
self.node = node
self.id_ = id(node)
self.attribute = {'label': node.label}
if isinstance(node, variable.VariableNode):
if show_name and node.name is not None:
self.attribute['label'] = '{}: {}'.format(
node.name, self.attribute['label'])
self.attribute.update({'shape': 'oval'})
else:
self.attribute.update({'shape': 'box'})
if attribute is not None:
self.attribute.update(attribute)
@property
def label(self):
"""The text that represents properties of the node.
Returns:
string: The text that represents the id and attributes of this
node.
"""
attributes = ['%s="%s"' % (k, v) for (k, v)
in self.attribute.items()]
return '%s [%s];' % (self.id_, ','.join(attributes))
class ComputationalGraph(object):
"""Class that represents computational graph.
.. note::
We assume that the computational graph is directed and acyclic.
Args:
nodes (list): List of nodes. Each node is either
:class:`VariableNode` object or :class:`FunctionNode` object.
edges (list): List of edges. Each edge consists of pair of nodes.
variable_style (dict or `'default'`): Dot node style for variable.
If the special value ``'default'`` is specified, the default
configuration will be used.
function_style (dict or `default`): Dot node style for function.
If the special value ``'default'`` is specified, the default
configuration will be used.
rankdir (str): Direction of the graph that must be
TB (top to bottom), BT (bottom to top), LR (left to right)
or RL (right to left).
remove_variable (bool): If ``True``, :class:`VariableNode`\\ s are
removed from the resulting computational graph. Only
:class:`FunctionNode`\\ s are shown in the output.
show_name (bool): If ``True``, the ``name`` attribute of each node is
added to the label of the node. Default is ``True``.
.. note::
The default configuration for ``variable_style`` is
``{'shape': 'octagon', 'fillcolor': '#E0E0E0', 'style': 'filled'}`` and
the default configuration for ``function_style`` is
``{'shape': 'record', 'fillcolor': '#6495ED', 'style': 'filled'}``.
.. note::
The default behavior of :class:`~chainer.ComputationalGraph` has been
        changed from v1.23.0, so that it outputs the richest representation
        of a graph as default, namely, styles are set and names of functions
        and variables are shown. To reproduce the same result as previous
        versions (<= v1.22.0), please specify ``variable_style=None``,
        ``function_style=None``, and ``show_name=False`` explicitly.
"""
def __init__(self, nodes, edges, variable_style='default',
function_style='default', rankdir='TB',
remove_variable=False, show_name=True):
# If `variable_style` and `function_style` is explicitly set to None,
# use legacy (Chainer v1.22.0) style for backward compatibility.
if variable_style is None:
variable_style = {}
elif variable_style == 'default':
variable_style = dict(_var_style)
if function_style is None:
function_style = {}
elif function_style == 'default':
function_style = dict(_func_style)
self.nodes = nodes
self.edges = edges
self.variable_style = variable_style
self.function_style = function_style
if rankdir not in ('TB', 'BT', 'LR', 'RL'):
raise ValueError('rankdir must be in TB, BT, LR or RL.')
self.rankdir = rankdir
self.remove_variable = remove_variable
self.show_name = show_name
def _to_dot(self):
"""Converts graph in dot format.
`label` property of is used as short description of each node.
Returns:
str: The graph in dot format.
"""
ret = 'digraph graphname{rankdir=%s;' % self.rankdir
if self.remove_variable:
self.nodes, self.edges = _skip_variable(self.nodes, self.edges)
for node in self.nodes:
assert isinstance(node, (variable.VariableNode,
function_node.FunctionNode))
if isinstance(node, variable.VariableNode):
if not self.remove_variable:
ret += DotNode(
node, self.variable_style, self.show_name).label
else:
ret += DotNode(node, self.function_style, self.show_name).label
drawn_edges = []
for edge in self.edges:
head, tail = edge
if (isinstance(head, variable.VariableNode) and
isinstance(tail, function_node.FunctionNode)):
head_attr = self.variable_style
tail_attr = self.function_style
elif (isinstance(head, function_node.FunctionNode) and
isinstance(tail, variable.VariableNode)):
head_attr = self.function_style
tail_attr = self.variable_style
else:
if not self.remove_variable:
                    raise TypeError('head and tail should be a pair of '
                                    'VariableNode and FunctionNode')
else:
head_attr = self.function_style
tail_attr = self.function_style
head_node = DotNode(head, head_attr, self.show_name)
tail_node = DotNode(tail, tail_attr, self.show_name)
edge = (head_node.id_, tail_node.id_)
if edge in drawn_edges:
continue
ret += '%s -> %s;' % edge
drawn_edges.append(edge)
ret += '}'
return ret
def dump(self, format='dot'):
"""Dumps graph as a text.
Args:
format(str): The graph language name of the output.
Currently, it must be 'dot'.
Returns:
str: The graph in specified format.
"""
if format == 'dot':
return self._to_dot()
raise NotImplementedError('Currently, only dot format is supported.')
def _skip_variable(nodes, edges):
func_edges = []
for edge_i, edge in enumerate(edges):
head, tail = edge
if isinstance(head, variable.VariableNode):
if head.creator_node is not None:
head = head.creator_node
else:
continue
if isinstance(tail, variable.VariableNode):
for node in nodes:
if isinstance(node, function_node.FunctionNode):
for input_var in node.inputs:
if input_var is tail:
tail = node
break
if isinstance(tail, function_node.FunctionNode):
break
else:
continue
func_edges.append((head, tail))
return nodes, func_edges
def build_computational_graph(
outputs, remove_split=True, variable_style='default',
function_style='default', rankdir='TB', remove_variable=False,
show_name=True):
"""Builds a graph of functions and variables backward-reachable from outputs.
Args:
outputs (:class:`~chainer.Variable`, \
:class:`~chainer.variable.VariableNode`, \
:class:`~chainer.FunctionNode`, or :class:`list`): node(s) from which
the graph is constructed.
Each element of outputs must be either :class:`~chainer.Variable`
object, :class:`~chainer.variable.VariableNode` object, or
:class:`~chainer.FunctionNode` object.
remove_split(bool): It must be ``True``. This argument is left for
backward compatibility.
variable_style(dict or 'default'): Dot node style for variable.
Possible keys are 'shape', 'color', 'fillcolor', 'style' etc.
If the special value ``'default'`` is specified, the default
configuration will be used.
function_style(dict or 'default'): Dot node style for function.
Possible keys are 'shape', 'color', 'fillcolor', 'style' etc.
If the special value ``'default'`` is specified, the default
configuration will be used.
rankdir (str): Direction of the graph that must be
TB (top to bottom), BT (bottom to top), LR (left to right)
or RL (right to left).
remove_variable (bool): If ``True``, :class:`VariableNode`\\ s are
removed from the resulting computational graph. Only
:class:`FunctionNode`\\ s are shown in the output.
show_name (bool): If ``True``, the ``name`` attribute of each node is
added to the label of the node. Default is ``True``.
Returns:
ComputationalGraph: A graph consisting of nodes and edges that
are backward-reachable from at least one of ``outputs``.
If ``unchain_backward`` was called in some variable in the
computational graph before this function, backward step is
stopped at this variable.
For example, suppose that computational graph is as follows::
|--> f ---> y
x --+
|--> g ---> z
Let ``outputs = [y, z]``.
Then the full graph is emitted.
Next, let ``outputs = [y]``. Note that ``z`` and ``g``
are not backward-reachable from ``y``.
The resulting graph would be following::
x ---> f ---> y
See :class:`TestGraphBuilder` for details.
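    .. admonition:: Example
        A minimal sketch of building and dumping a graph::
            import numpy as np
            import chainer
            import chainer.functions as F
            from chainer import computational_graph
            x = chainer.Variable(np.zeros((1, 3), np.float32))
            y = F.relu(x)
            g = computational_graph.build_computational_graph([y])
            print(g.dump())  # the graph in dot format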
.. note::
The default configuration for ``variable_style`` is
``{'shape': 'octagon', 'fillcolor': '#E0E0E0', 'style': 'filled'}`` and
the default configuration for ``function_style`` is
``{'shape': 'record', 'fillcolor': '#6495ED', 'style': 'filled'}``.
.. note::
The default behavior of :class:`~chainer.ComputationalGraph` has been
        changed from v1.23.0, so that it outputs the richest representation
        of a graph as default, namely, styles are set and names of functions
        and variables are shown. To reproduce the same result as previous
        versions (<= v1.22.0), please specify ``variable_style=None``,
        ``function_style=None``, and ``show_name=False`` explicitly.
"""
if not remove_split:
raise ValueError('remove_split=False is not supported anymore')
output_types = (
variable.Variable, variable.VariableNode,
function_node.FunctionNode)
if isinstance(outputs, output_types):
outputs = [outputs]
else:
if not all(isinstance(o, output_types) for o in outputs):
raise TypeError(
                'element of outputs must be either Variable, VariableNode, '
                'or FunctionNode.')
cands = []
seen_edges = set()
nodes = set()
push_count = [0]
def add_cand(cand):
heapq.heappush(cands, (-cand.rank, push_count[0], cand))
push_count[0] += 1
for o in outputs:
if isinstance(o, variable.Variable):
o = o.node
add_cand(o)
nodes.add(o)
while cands:
_, _, cand = heapq.heappop(cands)
if isinstance(cand, variable.VariableNode):
creator = cand.creator_node
if creator is not None and (creator, cand) not in seen_edges:
add_cand(creator)
seen_edges.add((creator, cand))
nodes.add(creator)
nodes.add(cand)
elif isinstance(cand, function_node.FunctionNode):
for input_ in cand.inputs:
if input_ is not cand and (input_, cand) not in seen_edges:
add_cand(input_)
seen_edges.add((input_, cand))
nodes.add(input_)
nodes.add(cand)
return ComputationalGraph(
list(nodes), list(seen_edges), variable_style,
function_style, rankdir, remove_variable, show_name)
| 13,267
| 37.909091
| 81
|
py
|
chainer
|
chainer-master/chainer/distribution.py
|
import copy
from chainer import backend
class Distribution(object):
"""Interface of Distribution
    :class:`Distribution` is a base class for dealing with probability
distributions.
This class provides the following capabilities.
1. Sampling random points.
2. Evaluating a probability-related function at a given realization \
value. (e.g., probability density function, probability mass function)
3. Obtaining properties of distributions. (e.g., mean, variance)
Note that every method and property that computes them from
:class:`chainer.Variable` can basically be differentiated.
    In this class, sampled random points and realization values given to
    probability-related functions are called *samples*. A sample consists of
    *batches*, and each batch consists of independent *events*. Each event
    consists of values, and the values in an event cannot, in general, be
    sampled independently. Events in a batch are independent of each other
    but are not necessarily drawn from an identical distribution, whereas
    batches in a sample are drawn from an identical distribution.
Each part of the sample-batch-event hierarchy has its own shape, which is
called ``sample_shape``, ``batch_shape``, and ``event_shape``,
respectively.
On initialization, it takes distribution-specific parameters as inputs.
    :attr:`batch_shape` and :attr:`event_shape` are determined by the shapes
    of the parameters when an instance of the class is created.
.. admonition:: Example
        The following code is an example of the sample-batch-event hierarchy
        using the :class:`~distributions.MultivariateNormal` distribution.
        It creates 2-d normal distributions: ``dist`` consists of 12 (= 4 * 3)
        independent 2-d normal distributions, and :attr:`batch_shape` and
        :attr:`event_shape` are determined on initialization.
>>> import chainer
>>> import chainer.distributions as D
>>> import numpy as np
>>> d = 2
>>> shape = (4, 3)
>>> loc = np.random.normal(
... size=shape + (d,)).astype(np.float32)
>>> cov = np.random.normal(size=shape + (d, d)).astype(np.float32)
>>> cov = np.matmul(cov, np.rollaxis(cov, -1, -2))
>>> l = np.linalg.cholesky(cov)
>>> dist = D.MultivariateNormal(loc, scale_tril=l)
>>> dist.event_shape
(2,)
>>> dist.batch_shape
(4, 3)
>>> sample = dist.sample(sample_shape=(6, 5))
>>> sample.shape
(6, 5, 4, 3, 2)
    Every probability-related function takes a realization value whose shape
    is the concatenation of ``sample_shape``, ``batch_shape``, and
    ``event_shape``, and returns an evaluated value whose shape is the
    concatenation of ``sample_shape`` and ``batch_shape``.
"""
def _copy_to(self, target):
target.__dict__ = copy.copy(self.__dict__)
return target
@property
def batch_shape(self):
"""Returns the shape of a batch.
Returns:
            tuple: The shape of a batch, i.e. the part of the sample shape
                over which entries are independent but not necessarily
                identically distributed.
"""
raise NotImplementedError
def cdf(self, x):
"""Evaluates the cumulative distribution function at the given points.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Data points in
the domain of the distribution
Returns:
~chainer.Variable: Cumulative distribution function value evaluated
at `x`.
"""
raise NotImplementedError
@property
def covariance(self):
"""Returns the covariance of the distribution.
Returns:
~chainer.Variable: The covariance of the distribution.
"""
raise NotImplementedError
@property
def entropy(self):
"""Returns the entropy of the distribution.
Returns:
~chainer.Variable: The entropy of the distribution.
"""
raise NotImplementedError
@property
def event_shape(self):
"""Returns the shape of an event.
Returns:
            tuple: The shape of an event, i.e. the part of the sample shape
                whose values cannot, in general, be sampled independently.
"""
raise NotImplementedError
def icdf(self, x):
"""Evaluates the inverse cumulative distribution function at the given points.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Data points in
the domain of the distribution
Returns:
~chainer.Variable: Inverse cumulative distribution function value
evaluated at `x`.
"""
raise NotImplementedError
def log_cdf(self, x):
"""Evaluates the log of cumulative distribution function at the given points.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Data points in
the domain of the distribution
Returns:
~chainer.Variable: Logarithm of cumulative distribution function
value evaluated at `x`.
"""
raise NotImplementedError
def log_prob(self, x):
"""Evaluates the logarithm of probability at the given points.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Data points in
the domain of the distribution
Returns:
~chainer.Variable: Logarithm of probability evaluated at `x`.
"""
raise NotImplementedError
def log_survival_function(self, x):
"""Evaluates the logarithm of survival function at the given points.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Data points in
the domain of the distribution
Returns:
~chainer.Variable: Logarithm of survival function value evaluated
at `x`.
"""
raise NotImplementedError
@property
def mean(self):
"""Returns the mean of the distribution.
Returns:
~chainer.Variable: The mean of the distribution.
"""
raise NotImplementedError
@property
def mode(self):
"""Returns the mode of the distribution.
Returns:
~chainer.Variable: The mode of the distribution.
"""
raise NotImplementedError
@property
def params(self):
"""Returns the parameters of the distribution.
Returns:
dict: The parameters of the distribution.
"""
raise NotImplementedError
def perplexity(self, x):
"""Evaluates the perplexity function at the given points.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Data points in
the domain of the distribution
Returns:
~chainer.Variable: Perplexity function value evaluated at `x`.
"""
raise NotImplementedError
def prob(self, x):
"""Evaluates probability at the given points.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Data points in
the domain of the distribution
Returns:
~chainer.Variable: Probability evaluated at `x`.
"""
raise NotImplementedError
def sample(self, sample_shape=()):
"""Samples random points from the distribution.
        This function calls `sample_n` and reshapes its result to
        `sample_shape + batch_shape + event_shape`. When implementing
        sampling code in a subclass, it is not recommended that you override
        this function; override `sample_n` instead.
Args:
sample_shape(:class:`tuple` of :class:`int`): Sampling shape.
Returns:
~chainer.Variable: Sampled random points.
"""
final_shape = self.batch_shape + self.event_shape
if sample_shape == ():
n = 1
elif isinstance(sample_shape, int):
n = sample_shape
final_shape = (n,) + final_shape
else:
n = 1
for shape_ in sample_shape:
n *= shape_
final_shape = sample_shape + final_shape
samples = self.sample_n(n)
return samples.reshape(final_shape)
def sample_n(self, n):
"""Samples n random points from the distribution.
This function returns sampled points whose shape is
`(n,) + batch_shape + event_shape`. When implementing sampling code in
a subclass, it is recommended that you override this method.
Args:
n(int): Sampling size.
Returns:
~chainer.Variable: sampled random points.
"""
raise NotImplementedError
@property
def stddev(self):
"""Returns the standard deviation of the distribution.
Returns:
~chainer.Variable: The standard deviation of the distribution.
"""
raise NotImplementedError
@property
def support(self):
"""Returns the support of the distribution.
Returns:
str: String that means support of this distribution.
"""
raise NotImplementedError
def survival_function(self, x):
"""Evaluates the survival function at the given points.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Data points in
the domain of the distribution
Returns:
~chainer.Variable: Survival function value evaluated at `x`.
"""
raise NotImplementedError
@property
def variance(self):
"""Returns the variance of the distribution.
Returns:
~chainer.Variable: The variance of the distribution.
"""
raise NotImplementedError
@property
def xp(self):
"""Array module for the distribution.
Depending on which of CPU/GPU this distribution is on, this property
returns :mod:`numpy` or :mod:`cupy`.
"""
return backend.get_array_module(*self.params.values())
_KLDIVERGENCE = {}
def register_kl(Dist1, Dist2):
"""Decorator to register KL divergence function.
This decorator registers a function which computes Kullback-Leibler
divergence. This function will be called by :func:`~chainer.kl_divergence`
based on the argument types.
Args:
        Dist1(`type`): type of a class inheriting from
            :class:`~chainer.Distribution` for which the KL divergence is
            calculated.
        Dist2(`type`): type of a class inheriting from
            :class:`~chainer.Distribution` for which the KL divergence is
            calculated.
    The decorated function takes an instance of ``Dist1`` and an instance of
    ``Dist2`` and returns the KL divergence value.
.. admonition:: Example
This is a simple example to register KL divergence. A function to
calculate a KL divergence value between an instance of ``Dist1`` and
an instance of ``Dist2`` is registered.
.. code-block:: python
from chainer import distributions
@distributions.register_kl(Dist1, Dist2)
def _kl_dist1_dist2(dist1, dist2):
return KL
"""
def f(kl):
_KLDIVERGENCE[Dist1, Dist2] = kl
return f
def kl_divergence(dist1, dist2):
"""Computes Kullback-Leibler divergence.
For two continuous distributions :math:`p(x), q(x)`, it is expressed as
.. math::
D_{KL}(p||q) = \\int p(x) \\log \\frac{p(x)}{q(x)} dx
For two discrete distributions :math:`p(x), q(x)`, it is expressed as
.. math::
D_{KL}(p||q) = \\sum_x p(x) \\log \\frac{p(x)}{q(x)}
Args:
dist1(:class:`~chainer.Distribution`): Distribution to calculate KL
divergence :math:`p`. This is the first (left) operand of the KL
divergence.
dist2(:class:`~chainer.Distribution`): Distribution to calculate KL
divergence :math:`q`. This is the second (right) operand of the KL
divergence.
Returns:
        ~chainer.Variable: Output variable representing the KL divergence
:math:`D_{KL}(p||q)`.
Using :func:`~chainer.register_kl`, we can define behavior of
:func:`~chainer.kl_divergence` for any two distributions.
"""
return _KLDIVERGENCE[type(dist1), type(dist2)](dist1, dist2)
def cross_entropy(dist1, dist2):
"""Computes Cross entropy.
For two continuous distributions :math:`p(x), q(x)`, it is expressed as
.. math::
H(p,q) = - \\int p(x) \\log q(x) dx
For two discrete distributions :math:`p(x), q(x)`, it is expressed as
.. math::
H(p,q) = - \\sum_x p(x) \\log q(x)
    This function calls :func:`~chainer.kl_divergence` and
    :meth:`~chainer.Distribution.entropy` of ``dist1``. Therefore, it is
    necessary to register a KL divergence function with the
    :func:`~chainer.register_kl` decorator and to define
    :meth:`~chainer.Distribution.entropy` in ``dist1``.
Args:
dist1(:class:`~chainer.Distribution`): Distribution to calculate cross
entropy :math:`p`. This is the first (left) operand of the cross
entropy.
dist2(:class:`~chainer.Distribution`): Distribution to calculate cross
entropy :math:`q`. This is the second (right) operand of the cross
entropy.
Returns:
~chainer.Variable: Output variable representing cross entropy
:math:`H(p,q)`.
"""
return dist1.entropy() + kl_divergence(dist1, dist2)
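# --- Illustrative sketch (added for exposition; not part of the original
# module). It assumes ``chainer.distributions.Normal``, for which a KL
# divergence function is registered, so that ``kl_divergence`` and
# ``cross_entropy`` defined above can be evaluated directly.
def _example_cross_entropy():
    import numpy
    from chainer import distributions
    p = distributions.Normal(
        loc=numpy.zeros(3, dtype=numpy.float32),
        scale=numpy.ones(3, dtype=numpy.float32))
    q = distributions.Normal(
        loc=numpy.ones(3, dtype=numpy.float32),
        scale=numpy.ones(3, dtype=numpy.float32))
    # H(p, q) = H(p) + D_KL(p || q); both terms are chainer.Variable.
    return cross_entropy(p, q)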
| 13,514
| 29.507901
| 86
|
py
|
chainer
|
chainer-master/chainer/sequential.py
|
import copy
import inspect
from chainer import link as _link
class Sequential(_link.ChainList):
"""Sequential model which has a single-stream forward pass.
.. warning::
This feature is experimental. The interface can change in the future.
    This class makes it easy to construct a network with a sequential
    structure. While :class:`~chainer.Chain` and :class:`~chainer.ChainList`
    can only take :class:`~chainer.Link` objects as inputs to their
    constructors, :class:`Sequential` can take an arbitrary number of any
    callable objects for the forward pass computation. A :class:`Sequential`
    calls the given callable objects sequentially inside its
    :meth:`~Sequential.forward` method, in the same order as the given
    arguments.
Therefore, you do not need to write the forward pass computation
explicitly.
.. admonition:: Example
The below example code shows how to use this class to construct a
simple sequential network::
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import Sequential
# Model definition without writing forward function
model = Sequential(
L.Linear(n_in, n_hidden),
F.relu,
L.Linear(n_hidden, n_hidden),
F.relu,
L.Linear(n_hidden, n_out)
)
# Compute the forward pass
y = model(x)
where ``x`` denotes a mini-batch of ``n_in``-dimensional input vectors.
Furthermore, :class:`Sequential` supports built-in list APIs, so you
can concatenate :class:`Sequential` objects to create a longer
    :class:`Sequential` model easily, in the same way as Python lists:
>>> from chainer import Sequential
>>> model_A = Sequential(L.Linear(10, 10), F.relu)
>>> model_B = Sequential(L.Linear(10, 10), F.sigmoid)
>>> model_C = model_A + model_B
To repeat a :class:`Sequential` object multiple times, you can use
:meth:`~chainer.Link.repeat` method.
>>> model_D = model_A.repeat(3)
You can also add your own functions or any callable objects to a
:class:`Sequential` object::
from chainer.links.model.vision.vgg import VGG16Layers
model = Sequential()
model.append(L.Linear(n_out, n_hidden))
model.append(F.relu)
model.append(lambda x: F.reshape(x, (1, 3, 224, 224)))
model.append(VGG16Layers())
model.append(lambda x: x['prob'])
y = model(x)
The above code example shows how to add some layers to the ``model``
using :meth:`~Sequential.append` method and then add a large network
(``VGG16Layers``) and finally add a lambda function to extract the
``prob`` output.
You can check the structure of your model briefly using ``print``
    as follows:
>>> print(model_C) # doctest: +ELLIPSIS
Sequential(
(0): Linear(in_size=10, out_size=10, nobias=False),
(1): <function relu at 0x...>,
(2): Linear(in_size=10, out_size=10, nobias=False),
(3): <function sigmoid at 0x...>,
)
.. note::
Note that a :class:`Sequential` link which has at least one
        ``lambda`` function as its member cannot be pickled. Please
        use :func:`functools.partial` instead::
from functools import partial
        # This is not picklable
model = Sequential(
L.Convolution2D(None, 64, 3, 1, 1),
lambda x: F.max_pooling_2d(x, 2)
)
        # This is picklable
model = Sequential(
L.Convolution2D(None, 64, 3, 1, 1),
partial(F.max_pooling_2d, ksize=2)
)
Args:
        layers: The layers which are called in the given order. Each
            component should be a callable object, such as a
            :class:`~chainer.Link` or a function defined under
            :mod:`chainer.functions`, e.g., :func:`~chainer.functions.relu`.
"""
def __init__(self, *layers):
super(Sequential, self).__init__()
self._layers = []
for layer in layers:
self.append(layer)
def __len__(self):
return len(self._layers)
def __getitem__(self, i):
return self._layers[i]
def __setitem__(self, i, layer):
if i >= len(self):
raise ValueError(
'{} should be less than {}'.format(i, len(self)))
if not callable(layer):
raise ValueError(
'All elements of a Sequential class should be callable. But '
'given {} is not callable.'.format(layer))
if self._layers[i] is not layer:
del self[i]
self.insert(i, layer)
def __delitem__(self, i):
layer = self._layers.pop(i)
if isinstance(layer, _link.Link):
for i, link in enumerate(self._children):
if link.name == layer.name:
del self._children[i]
break
for j, layer in enumerate(self._children[i:]):
layer.name = str(i + j)
def __iter__(self):
return iter(self._layers)
def __reversed__(self):
return reversed(self._layers)
def __contains__(self, item):
return item in self._layers
def __add__(self, other):
if isinstance(other, Sequential):
ret = Sequential()
for layer in self:
ret.append(layer)
for layer in other:
ret.append(layer)
return ret
else:
raise ValueError('add (+) operator supports only objects of '
'Sequential class, but {} is given.'.format(
str(type(other))))
def __iadd__(self, other):
if isinstance(other, Sequential):
for layer in other:
self.append(layer)
else:
raise ValueError('add (+) operator supports only objects of '
'Sequential class, but {} is given.'.format(
str(type(other))))
return self
def forward(self, *x):
"""Forward pass computation.
        This method performs the forward pass computation by giving the
        input variable ``x`` to the layers registered in the constructor,
        in the same order as they were given to the constructor.
It should be noted that the input variable is given directly to the
first layer and all intermediate outputs generated during the forward
pass are also directly fed to the next layer. Therefore, the number
of outputs at a layer should be the same as the number of inputs at
the next layer.
Args:
x: Input variables.
Returns:
The output of the final layer in the given layers.
"""
if not self._layers:
raise RuntimeError('Sequential does not have any layer.')
for layer in self._layers:
if isinstance(x, tuple):
x = layer(*x)
else:
x = layer(x)
return x
def __reduce__(self):
n_lambda = 0
for layer in self._layers:
if callable(layer) and hasattr(layer, '__name__') \
and layer.__name__ == '<lambda>':
n_lambda += 1
if n_lambda > 0:
raise ValueError(
                'This Sequential object has at least one lambda function as '
                'its component. Lambda functions cannot be pickled, so '
                'please consider using functools.partial instead of the '
                'lambda function, or use "dill", an external package that '
                'can pickle objects including lambda functions, instead of '
                'the built-in pickle.')
return super(Sequential, self).__reduce__()
def __str__(self):
reps = []
for index, layer in enumerate(self):
# Explore better representation by if-block.
if getattr(layer, '__name__', None) == '<lambda>':
rep = inspect.getsource(layer).strip().rstrip(',')
else:
rep = str(layer)
# Add indentation to each line.
rep = '({index}): {rep},'.format(index=index, rep=rep)
for line in rep.splitlines():
reps.append(' {line}\n'.format(line=line))
reps = ''.join(reps)
if reps: # No newline with no layers.
reps = '\n' + reps
return '{cls}({layers})'.format(
cls=self.__class__.__name__, layers=reps,
)
def append(self, layer):
self.insert(len(self), layer)
def extend(self, sequential):
for layer in sequential:
self.append(layer)
def insert(self, i, layer):
n = len(self._layers)
if not (-n <= i < (n + 1)):
raise IndexError(
'Index out of range: {}'.format(i))
if i < 0:
i += n
if not callable(layer):
raise ValueError(
'All elements of the argument should be callable. But '
'given {} is not callable.'.format(layer))
self._layers.insert(i, layer)
if isinstance(layer, _link.Link):
if i == 0:
self._children.insert(0, layer)
else:
if i < 0:
i = len(self._layers) + i
last_link_pos = 0
for j in range(i - 1, -1, -1):
# The last link before the given position
if isinstance(self._layers[j], _link.Link):
last_link_pos = j
self._children.insert(last_link_pos + 1, layer)
for i, layer in enumerate(self._children):
layer.name = str(i)
def remove(self, layer):
if layer in self:
del self[self.index(layer)]
else:
raise ValueError(
                'There is no layer object that is the same as {}'.format(
                    layer))
def remove_by_layer_type(self, type_name):
"""Remove layers by layer type.
This method removes layers from the Sequential object by the
layer's class name or function name. If you want to remove a
:class:`~Link`, the argument ``type_name`` should be its class name,
e.g., :class:`~links.Linear` or :class:`~links.Convolution2D`, etc.
If you want to remove a :class:`~Function` class or any other callable
objects, ``type_name`` should be the function name, e.g., ``relu`` or
``reshape``, etc.
Args:
type_name (str): The name of a layer you want to remove.
"""
names = []
for layer in self:
if isinstance(layer, _link.Link):
name = layer.__class__.__name__
else:
name = layer.__name__
names.append((name, layer))
for _name, _layer in names:
if type_name == _name:
self.remove(_layer)
def pop(self, i=-1):
layer = self._layers[i]
del self[i]
return layer
def clear(self):
# TODO(mitmul): Reduce the computational cost here
for i, _ in enumerate(self._children):
del self._children[i]
self._layers = []
def index(self, layer, start=None, end=None):
return self._layers[start:end].index(layer)
def count(self, layer):
return self._layers.count(layer)
def count_by_layer_type(self, type_name):
"""Count the number of layers by layer type.
This method counts the number of layers which have the name given by
the argument ``type_name``. For example, if you want to know the number
of :class:`~links.Linear` layers included in this model, ``type_name``
should be ``Linear``. If you want to know the number of
:class:`~Function` classes or user-defined functions which have a
specific name, ``type_name`` should be the function name, e.g.,
``relu`` or ``reshape``, etc.
Args:
type_name (str): The class or function name of a layer you want to
enumerate.
"""
num = 0
for layer in self._layers:
if isinstance(layer, _link.Link):
if layer.__class__.__name__ == type_name:
num += 1
else:
if layer.__name__ == type_name:
num += 1
return num
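    # Usage sketch (added for exposition): with the model from the class
    # docstring, ``model.count_by_layer_type('Linear')`` counts the Linear
    # links, while ``model.count_by_layer_type('relu')`` counts the plain
    # ``F.relu`` functions.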
def copy(self, mode='share'):
ret = Sequential()
for layer in self:
if isinstance(layer, _link.Link):
ret.append(layer.copy(mode))
else:
ret.append(copy.copy(layer))
return ret
def copyparams(self, link, copy_persistent=True):
if not isinstance(link, Sequential):
raise ValueError('Objects other than Sequential object cannot be '
'copied to a Sequential object.')
for idx, child in enumerate(self):
if isinstance(child, _link.Link):
child.copyparams(link[idx], copy_persistent)
def flatten(self):
"""Flatten nested :class:`~chainer.Sequential` links.
This method flattens all the nested :class:`~chainer.Sequential` links
inside this :class:`~chainer.Sequential` link.
Returns:
A flattened :class:`~chainer.Sequential` object.
.. admonition:: Example
.. code-block:: python
>>> import chainer
>>> import chainer.functions as F
>>> import chainer.links as L
>>> a = chainer.Sequential(L.Linear(None, 10), F.relu)
>>> b = chainer.Sequential(L.Linear(None, 10), F.relu)
>>> a.append(b)
>>> print(a) # Without flatten
0 Linear W(None) b(10,)
1 relu
2 Sequential which has 2 layers
>>> print(a.flatten()) # With flatten
0 Linear W(None) b(10,)
1 relu
2 Linear W(None) b(10,)
3 relu
"""
ret = Sequential()
for layer in self:
if isinstance(layer, Sequential):
ret.extend(layer.flatten())
else:
ret.append(layer)
return ret
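# --- Illustrative sketch (added for exposition; not part of the original
# module). It builds a tiny Sequential from plain callables only, so it runs
# without chainer.links, and shows that a tuple output is unpacked into the
# next layer's arguments inside forward().
def _example_sequential_forward():
    import numpy
    model = Sequential(
        lambda x: (x, 2 * x),  # produces two outputs
        lambda a, b: a + b,    # consumes two inputs
    )
    return model(numpy.ones(3, dtype=numpy.float32))  # array of 3.0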
| 14,816
| 34.028369
| 79
|
py
|
chainer
|
chainer-master/chainer/serializer.py
|
class AbstractSerializer(object):
"""Abstract base class of all serializers and deserializers."""
def __getitem__(self, key):
"""Gets a child serializer.
This operator creates a _child_ serializer represented by the given
key.
Args:
key (str): Name of the child serializer.
"""
raise NotImplementedError
def __call__(self, key, value):
"""Serializes or deserializes a value by given name.
This operator saves or loads a value by given name.
If this is a serializer, then the value is simply saved at the key.
Note that some type information might be missed depending on the
implementation (and the target file format).
        If this is a deserializer, then the value is loaded by the key.
        Deserialization works differently on scalars and arrays. For scalars,
the ``value`` argument is used just for determining the type of
restored value to be converted, and the converted value is returned.
For arrays, the restored elements are directly copied into the
``value`` argument. String values are treated like scalars.
.. note::
Serializers and deserializers are required to
correctly handle the ``None`` value. When ``value`` is ``None``,
serializers save it in format-dependent ways, and deserializers
just return the loaded value. When the saved ``None`` value is
loaded by a deserializer, it should quietly return the ``None``
value without modifying the ``value`` object.
Args:
key (str): Name of the serialization entry.
value (scalar, numpy.ndarray, cupy.ndarray, None, or str):
Object to be (de)serialized.
``None`` is only supported by deserializers.
Returns:
Serialized or deserialized value.
"""
raise NotImplementedError
class Serializer(AbstractSerializer):
"""Base class of all serializers."""
def save(self, obj):
"""Saves an object by this serializer.
This is equivalent to ``obj.serialize(self)``.
Args:
obj: Target object to be serialized.
"""
obj.serialize(self)
class Deserializer(AbstractSerializer):
"""Base class of all deserializers."""
def load(self, obj):
"""Loads an object from this deserializer.
This is equivalent to ``obj.serialize(self)``.
Args:
            obj: Target object to be deserialized.
"""
obj.serialize(self)
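# --- Illustrative sketch (added for exposition; not part of the original
# module). A minimal in-memory serializer that stores values in a flat dict
# keyed by slash-separated paths. It is a simplified stand-in for
# chainer.serializers.DictionarySerializer, meant only to show how
# __getitem__ builds child serializers and how __call__ records values.
class _InMemorySerializer(Serializer):
    def __init__(self, target=None, path='/'):
        self.target = {} if target is None else target
        self.path = path
    def __getitem__(self, key):
        # A child serializer shares the dict but extends the key prefix.
        return _InMemorySerializer(self.target, self.path + key + '/')
    def __call__(self, key, value):
        self.target[self.path + key] = value
        return value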
| 2,614
| 30.130952
| 77
|
py
|
chainer
|
chainer-master/chainer/_version.py
|
__version__ = '7.8.1'
_optional_dependencies = [
{
'name': 'CuPy',
'packages': [
'cupy-cuda120',
'cupy-cuda116',
'cupy-cuda115',
'cupy-cuda114',
'cupy-cuda113',
'cupy-cuda112',
'cupy-cuda111',
'cupy-cuda110',
'cupy-cuda102',
'cupy-cuda101',
'cupy-cuda100',
'cupy-cuda92',
'cupy-cuda91',
'cupy-cuda90',
'cupy-cuda80',
'cupy',
],
'specifier': '>=7.7.0,<8.0.0',
'help': 'https://docs.cupy.dev/en/latest/install.html',
},
{
'name': 'iDeep',
'packages': [
'ideep4py',
],
'specifier': '>=2.0.0.post3, <2.1',
'help': 'https://docs.chainer.org/en/latest/tips.html',
},
]
| 865
| 22.405405
| 63
|
py
|
chainer
|
chainer-master/chainer/configuration.py
|
import sys
import threading
import typing as tp # NOQA
from chainer import types # NOQA
if types.TYPE_CHECKING:
import numpy # NOQA
from chainer.graph_optimizations import static_graph # NOQA
class GlobalConfig(object):
debug = None # type: bool
cudnn_deterministic = None # type: bool
warn_nondeterministic = None # type: bool
enable_backprop = None # type: bool
keep_graph_on_report = None # type: bool
train = None # type: bool
type_check = None # type: bool
use_cudnn = None # type: str
use_cudnn_tensor_core = None # type: str
autotune = None # type: bool
schedule_func = None # type: tp.Optional[static_graph.StaticScheduleFunction] # NOQA
use_ideep = None # type: str
lazy_grad_sum = None # type: bool
cudnn_fast_batch_normalization = None # type: bool
dtype = None # type: numpy.dtype
in_recomputing = None # type: bool
use_static_graph = None # type: bool
_will_recompute = None # type: bool
compute_mode = None # type: str
"""The plain object that represents the global configuration of Chainer."""
def show(self, file=sys.stdout):
"""show(file=sys.stdout)
Prints the global config entries.
        The entries are sorted in the lexicographical order of the entry
        names.
Args:
file: Output file-like object.
"""
keys = sorted(self.__dict__)
_print_attrs(self, keys, file)
class LocalConfig(object):
"""Thread-local configuration of Chainer.
This class implements the local configuration. When a value is set to this
object, the configuration is only updated in the current thread. When a
user tries to access an attribute and there is no local value, it
automatically retrieves a value from the global configuration.
"""
def __init__(self, global_config):
super(LocalConfig, self).__setattr__('_global', global_config)
super(LocalConfig, self).__setattr__('_local', threading.local())
def __delattr__(self, name):
delattr(self._local, name)
def __getattr__(self, name):
dic = self._local.__dict__
if name in dic:
return dic[name]
return getattr(self._global, name)
def __setattr__(self, name, value):
setattr(self._local, name, value)
def show(self, file=sys.stdout):
"""show(file=sys.stdout)
Prints the config entries.
The entries are sorted in the lexicographical order of the entry names.
Args:
file: Output file-like object.
.. admonition:: Example
You can easily print the list of configurations used in
the current thread.
>>> chainer.config.show() # doctest: +SKIP
debug False
enable_backprop True
train True
type_check True
"""
keys = sorted(set(self._global.__dict__) | set(self._local.__dict__))
_print_attrs(self, keys, file)
def _print_attrs(obj, keys, file):
max_len = max(len(key) for key in keys)
for key in keys:
spacer = ' ' * (max_len - len(key))
file.write(u'{} {}{}\n'.format(key, spacer, getattr(obj, key)))
global_config = GlobalConfig()
'''Global configuration of Chainer.
It is an instance of :class:`chainer.configuration.GlobalConfig`.
See :ref:`configuration` for details.
'''
config = LocalConfig(global_config)
'''Thread-local configuration of Chainer.
It is an instance of :class:`chainer.configuration.LocalConfig`, and is
referring to :data:`~chainer.global_config` as its default configuration.
See :ref:`configuration` for details.
'''
class _ConfigContext(object):
is_local = False
old_value = None
def __init__(self, config, name, value):
self.config = config
self.name = name
self.value = value
def __enter__(self):
name = self.name
value = self.value
config = self.config
is_local = hasattr(config._local, name)
if is_local:
self.old_value = getattr(config, name)
self.is_local = is_local
setattr(config, name, value)
def __exit__(self, typ, value, traceback):
if self.is_local:
setattr(self.config, self.name, self.old_value)
else:
delattr(self.config, self.name)
def using_config(name, value, config=config):
"""using_config(name, value, config=chainer.config)
Context manager to temporarily change the thread-local configuration.
Args:
name (str): Name of the configuration to change.
value: Temporary value of the configuration entry.
config (~chainer.configuration.LocalConfig): Configuration object.
Chainer's thread-local configuration is used by default.
.. seealso::
:ref:`configuration`
"""
return _ConfigContext(config, name, value)
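# --- Illustrative sketch (added for exposition; not part of the original
# module). using_config overrides a thread-local entry for the duration of
# the with block and restores the previous lookup behavior on exit.
def _example_using_config():
    with using_config('enable_backprop', False):
        assert config.enable_backprop is False
    # Outside the block, lookup falls back to the value visible before
    # (the global default set in chainer/__init__.py).
    return config.enable_backprop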
| 4,971
| 27.574713
| 89
|
py
|
chainer
|
chainer-master/chainer/types.py
|
import numbers
import typing as tp # NOQA
import typing_extensions as tpe # NOQA
try:
from typing import TYPE_CHECKING # NOQA
except ImportError:
# typing.TYPE_CHECKING doesn't exist before Python 3.5.2
TYPE_CHECKING = False
# import chainer modules only for type checkers to avoid circular import
if TYPE_CHECKING:
from types import ModuleType # NOQA
import numpy # NOQA
from chainer import backend # NOQA
from chainer.backends import cuda, intel64 # NOQA
from chainer import initializer # NOQA
import chainerx # NOQA
Shape = tp.Tuple[int, ...]
ShapeSpec = tp.Union[int, tp.Sequence[int]] # Sequence includes Tuple[int, ...] # NOQA
DTypeSpec = tp.Union[tp.Any] # TODO(okapies): encode numpy.dtype
NdArray = tp.Union[
'numpy.ndarray',
'cuda.ndarray',
# 'intel64.mdarray',
# TODO(okapies): mdarray is partially incompatible with other ndarrays
'chainerx.ndarray',
]
"""The ndarray types supported in :func:`chainer.get_array_types`
"""
Xp = tp.Union[tp.Any] # TODO(okapies): encode numpy/cupy/ideep/chainerx
class AbstractInitializer(tpe.Protocol):
"""Protocol class for Initializer.
It can be either an :class:`chainer.Initializer` or a callable object
that takes an ndarray.
This is only for PEP 544 compliant static type checkers.
"""
dtype = None # type: tp.Optional[DTypeSpec]
def __call__(self, array: NdArray) -> None:
pass
ScalarValue = tp.Union[
'numpy.generic',
bytes,
str,
memoryview,
numbers.Number,
]
"""The scalar types supported in :func:`numpy.isscalar`.
"""
InitializerSpec = tp.Union[AbstractInitializer, ScalarValue, 'numpy.ndarray']
DeviceSpec = tp.Union[
'backend.Device',
'chainerx.Device',
'cuda.Device',
str,
tp.Tuple[str, int],
'ModuleType', # numpy and intel64 module
tp.Tuple['ModuleType', int], # cupy module and device ID
]
"""The device specifier types supported in :func:`chainer.get_device`
"""
# TODO(okapies): Use Xp instead of ModuleType
CudaDeviceSpec = tp.Union['cuda.Device', int, 'numpy.integer'] # NOQA
"""
This type is only for the deprecated :func:`chainer.cuda.get_device` API.
Use :class:`~chainer.types.DeviceSpec` instead.
"""
| 2,253
| 22.978723
| 87
|
py
|
chainer
|
chainer-master/chainer/_environment_check.py
|
from __future__ import absolute_import
import os
import sys
import warnings
import numpy.distutils.system_info
import pkg_resources
import chainer
def _check_python_350():
if sys.version_info[:3] == (3, 5, 0):
if not int(os.getenv('CHAINER_PYTHON_350_FORCE', '0')):
msg = """
Chainer does not work with Python 3.5.0.
We strongly recommend to use another version of Python.
If you want to use Chainer with Python 3.5.0 at your own risk,
set the CHAINER_PYTHON_350_FORCE environment variable to 1."""
raise Exception(msg)
def _check_osx_numpy_backend():
if sys.platform != 'darwin':
return
blas_opt_info = numpy.distutils.system_info.get_info('blas_opt')
if blas_opt_info:
extra_link_args = blas_opt_info.get('extra_link_args')
if extra_link_args and '-Wl,Accelerate' in extra_link_args:
warnings.warn('''\
Accelerate has been detected as a NumPy backend library.
vecLib, which is a part of Accelerate, is known not to work correctly with Chainer.
We recommend using other BLAS libraries such as OpenBLAS.
For details of the issue, please see
https://docs.chainer.org/en/stable/tips.html#mnist-example-does-not-converge-in-cpu-mode-on-mac-os-x.
Please be aware that Mac OS X is not an officially supported OS.
''') # NOQA
def _check_optional_dependencies():
for dep in chainer._version._optional_dependencies:
name = dep['name']
pkgs = dep['packages']
spec = dep['specifier']
help = dep['help']
installed = False
for pkg in pkgs:
found = False
requirement = pkg
if os.environ.get('CHAINER_WARN_VERSION_MISMATCH', '1') == '1':
requirement = '{}{}'.format(pkg, spec)
try:
pkg_resources.require(requirement)
found = True
except pkg_resources.DistributionNotFound:
continue
except pkg_resources.VersionConflict:
msg = '''
--------------------------------------------------------------------------------
{name} ({pkg}) version {version} may not be compatible with this version of Chainer.
Please consider installing the supported version by running:
$ pip install '{requirement}'
See the following page for more details:
{help}
--------------------------------------------------------------------------------
''' # NOQA
warnings.warn(msg.format(
name=name, pkg=pkg,
version=pkg_resources.get_distribution(pkg).version,
requirement=requirement, help=help))
found = True
except Exception:
warnings.warn(
'Failed to check requirement: {}'.format(requirement))
break
if found:
if installed:
warnings.warn('''
--------------------------------------------------------------------------------
Multiple installations of the {name} package have been detected.
You should select only one package from {pkgs}.
Follow these steps to resolve this issue:
1. `pip list` to list {name} packages installed
2. `pip uninstall <package name>` to uninstall all {name} packages
3. `pip install <package name>` to install the proper one
--------------------------------------------------------------------------------
'''.format(name=name, pkgs=pkgs))
installed = True
def check():
_check_python_350()
_check_osx_numpy_backend()
_check_optional_dependencies()
| 3,590
| 34.554455
| 101
|
py
|
chainer
|
chainer-master/chainer/variable.py
|
from __future__ import absolute_import
import copy
import threading
import traceback
import typing as tp # NOQA
import warnings
import weakref
import numpy
import chainer
from chainer import _backprop
from chainer import backend
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import initializers
from chainer.initializers import constant
from chainer import types # NOQA
import chainer.utils._collections
from chainer.utils import argument
import chainerx
_thread_local = threading.local()
def _raise_grad_error(exc_type, func, msg):
detail = ''
if func:
detail = 'Function `{0}` ({1}) has a bug.\n'.format(
type(func)._impl_name, func.label)
stack = func.stack
if stack:
detail += 'Stacktrace of the function is below:\n'
for line in traceback.format_list(func.stack):
detail += line
detail += '''
Please report this error to the issue tracker with the stack trace,
the information of your environment, and your script:
https://github.com/chainer/chainer/issues/new.
'''
raise exc_type(detail + msg)
def _check_grad_type(func, x, is_node_x, gx):
# is_node_x: equivalent to isinstance(x, VariableNode)
assert gx is not None
# x_shape is the raw shape
# TODO(kataoka): avoid `isinstance`
if isinstance(x, _ChainerxVariableNodeProps):
x_data = None
x_layout = None
x_shape = x.shape
elif is_node_x:
x_data = x._data
x_layout = x._layout
x_shape = x.shape
if x_layout is not None:
# to raw shape
x_shape = chainer.memory_layouts._transpose_shape(
x_shape, None, x_layout)
else:
# assert isinstance(x, Variable)
x_data = x._data[0]
x_layout = x._layout
x_shape = None if x_data is None else x_data.shape
# TODO(kataoka): Make _update_data_info store the array module.
# ``is_node_x and x_data is None`` implies that the data array is not
# retained.
# ``not is_node_x and x_data is None`` implies that grad of uninitialized
# variable is checked here.
if x_data is None and not is_node_x:
# TODO(kataoka): This should be an error.
return
if x_layout is None:
if x.dtype is None or x.shape is None:
# unretained Variable(None)
# TODO(kataoka): This should be an error.
return
if not isinstance(gx, chainer.get_array_types()):
_raise_grad_error(
TypeError,
func,
('Type of grad is invalid:\n'
+ 'Expected: Any of {}\n'.format(chainer.get_array_types())
+ 'Actual: {}'.format(type(gx))))
elif x_data is not None and not chainer.is_arrays_compatible((gx, x_data)):
_raise_grad_error(
TypeError,
func,
('Type of data and grad mismatch\ngrad: %s != data: %s' %
(type(gx), type(x_data))))
elif gx.dtype != x.dtype:
_raise_grad_error(
TypeError,
func,
('Dtype of data and grad mismatch\ngrad: %s != data: %s' %
(gx.dtype, x.dtype)))
    elif gx.shape != x_shape:  # comparing semantic shapes (not raw shapes)
_raise_grad_error(
ValueError,
func,
('Shape of data and grad mismatch\ngrad: %s != data: %s' %
(gx.shape, x_shape)))
def variable_repr(var):
"""Return the string representation of a variable.
Args:
var (~chainer.Variable): Input Variable.
.. seealso:: numpy.array_repr
"""
arr = _cpu._to_cpu(var.array)
if var.name:
prefix = 'variable ' + var.name
else:
prefix = 'variable'
if arr is None:
lst = 'None'
elif arr.size > 0 or arr.shape == (0,):
lst = numpy.array2string(arr, None, None, None, ', ', prefix + '(')
else: # show zero-length shape unless it is (0,)
lst = '[], shape=%s' % (repr(arr.shape),)
return '%s(%s)' % (prefix, lst)
def variable_str(var):
"""Return the string representation of a variable.
Args:
var (~chainer.Variable): Input Variable.
.. seealso:: numpy.array_str
"""
arr = _cpu._to_cpu(var.array)
if var.name:
prefix = 'variable ' + var.name
else:
prefix = 'variable'
if arr is None:
lst = 'None'
else:
lst = numpy.array2string(arr, None, None, None, ' ', prefix + '(')
return '%s(%s)' % (prefix, lst)
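# --- Illustrative sketch (added for exposition; not part of the original
# module; Variable is defined later in this module and is available at call
# time). repr uses comma-separated elements while str uses spaces, mirroring
# numpy.array_repr and numpy.array_str.
def _example_variable_repr():
    import numpy
    v = Variable(numpy.array([1.0, 2.0], dtype=numpy.float32), name='x')
    return variable_repr(v), variable_str(v)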
class VariableNode(object):
"""Node in the backward computational graph representing a variable.
This object represents a variable node in a computational graph. The node
is used in error backpropagation (a.k.a. backprop) to determine which
gradient to be passed to each function.
A variable node is held by the corresponding :class:`~chainer.Variable`
object, which is managed by users. :class:`~chainer.FunctionNode` objects
that take the variable as an input also hold references to the variable
node.
Note that the node does not hold a reference to the corresponding data
array in general. The data array is actually accessible by the node in the
following cases.
1. If there exists a :class:`~chainer.Variable` object that holds a
reference to the variable node, the variable node holds a weak reference
to the variable object, and thus the data array is accessible via the
weak reference.
2. If :meth:`retain_data` is called, the node holds a reference to the data
array. It is mainly called by a function that needs the input or output
data array in its backprop procedure.
See :meth:`FunctionNode.retain_inputs()
<chainer.FunctionNode.retain_inputs>`
and :meth:`FunctionNode.retain_outputs()
<chainer.FunctionNode.retain_outputs>` for more details.
Users usually do not need to touch this variable node object. The
computational graph is automatically managed by Chainer, and any interface
that is beneficial for users is also provided by
:class:`~chainer.Variable`.
Args:
variable (~chainer.Variable): The corresponding variable object.
name (str): Name of the variable node.
Attributes:
dtype: Data type of the data array.
shape: Shape of the data array.
name (str): Name of the variable node.
"""
dtype = None
shape = None # semantic shape
_creator_node = None
_data = None # type: types.NdArray
_rank = 0 # type: int
# Name of the Function is assigned if this variable is a gradient generated
# by an old-style Function
_old_style_grad_generator = None # type: str
_layout = None
def __init__(
self,
variable: 'Variable',
name: tp.Optional[str],
**kwargs: tp.Any
) -> None:
if kwargs:
argument.check_unexpected_kwargs(
kwargs,
grad='unexpected keyword argument "grad": '
'pass the gradient to Variable instead'
)
self._variable = weakref.ref(variable)
self.name = name
self._requires_grad = variable.requires_grad
self._layout = variable.layout
vdata = variable.raw_array
self._update_data_info(vdata)
@property
def creator(self):
"""Function object that created this variable node.
When the function is implemented with the old-style API (i.e., it uses
:class:`~chainer.Function` class),
this property returns the :class:`~chainer.Function` object.
The object is extracted from the :class:`~chainer.FunctionAdapter`
object, so the returned object is not the function node, but instead
the actual implementation of forward and backward procedures.
When the function is implemented with the new-style API (i.e., it uses
:class:`~chainer.FunctionNode` class),
this property returns the function node
object. In this case, the returned object is same as
:attr:`creator_node`.
.. warning::
As of v3.0.0, when the creator is an old-style function, the
following code is invalid:
.. code-block:: python
creator = v.creator
v.creator = None
...
v.creator = creator
The point is that :class:`~chainer.FunctionNode` objects are used
as nodes in the computational graph instead of
:class:`~chainer.Function`, and each :class:`~chainer.Function`
object only holds a *weak reference* to the corresponding
:class:`~chainer.FunctionNode`.
Since ``creator`` returns the :class:`~chainer.Function` object,
the :class:`~chainer.FunctionNode` object is not kept by preserving
``creator``.
The above code should be fixed as follows.
.. code-block:: python
creator_node = v.creator_node
v.creator_node = None
...
v.creator_node = creator_node
"""
node = self._creator_node
if node is None:
return None
if isinstance(node, chainer.function.FunctionAdapter):
return node.function
return node
@creator.setter
def creator(self, func):
self.creator_node = func
@property
def creator_node(self):
"""Function node that has this variable as an output.
See :class:`~chainer.FunctionNode` for the definition of a function
node.
"""
return self._creator_node
@creator_node.setter
def creator_node(self, func):
if isinstance(func, chainer.Function):
func = func.node
self._creator_node = func
if func is not None:
self._rank = func.rank + 1
@property
def data(self):
"""Data array of the corresponding variable.
If the data is not available, it returns ``None``.
"""
return self._data
@data.setter
def data(self, d):
self._data = d
self._update_data_info(d)
@property
def grad(self):
"""Gradient array of the corresponding variable.
If the variable is not available, it returns ``None``.
"""
var = self._variable()
return None if var is None else var.grad
@property
def grad_var(self):
"""Gradient variable of the corresponding variable.
        If the corresponding variable is not available, it returns ``None``.
"""
var = self._variable()
return None if var is None else var.grad_var
def _set_grad_var_if_available(self, g):
var = self._variable()
if var is not None:
var._set_grad_var_without_check(g)
@property
def label(self):
"""Short text that represents the variable node."""
if self.shape == ():
return str(self.dtype)
return '(%s), %s' % (', '.join(map(str, self.shape)),
str(self.dtype))
@property
def rank(self):
return self._rank
@property
def requires_grad(self):
"""It indicates that ``grad`` will be set in backward calculation."""
return self._requires_grad
def get_variable(self):
"""Returns the corresponding :class:`~chainer.Variable` object.
        VariableNode object holds a weak reference to the variable object.
        If the reference is alive, it is returned by this method. Otherwise,
        this method creates a new :class:`~chainer.Variable` object from
        this node object and returns it.
Returns:
~chainer.Variable: The variable object that refers this node.
"""
var = self._variable()
if var is not None:
return var
var = Variable._init_unchecked(
self.data,
name=self.name,
requires_grad=self.requires_grad,
node=self,
layout=self._layout)
return var
def get_variable_or_none(self):
"""Returns the holding :class:`~chainer.Variable` object or ``None``.
        VariableNode object holds a weak reference to the variable object.
        If the reference is alive, it is returned by this method. Otherwise,
        this method returns ``None``.
Returns:
~chainer.Variable: The variable object that refers this node.
"""
return self._variable()
def set_creator(self, creator):
"""Sets a :class:`~chainer.Function` object that created this node.
This method is equivalent to ``self.creator = creator``. A
:class:`~chainer.FunctionNode` object can also be passed.
Args:
creator (Function or FunctionNode): Function that has created this
variable.
"""
self.creator = creator
def set_creator_node(self, creator_node):
"""Sets a :class:`~chainer.FunctionNode` object that created this node.
This method is equivalent to ``self.creator_node = creator_node``. A
:class:`~chainer.Function` object can also be passed, in which case the
:attr:`Function.node <chainer.Function.node>` attribute is used.
Args:
creator_node (FunctionNode or Function): Function node that has
this variable as an output.
"""
self.creator_node = creator_node
def unchain(self):
"""Deletes the reference to the creator of this variable node.
This method is equivalent to ``self.creator_node = None``.
"""
self.creator_node = None
def retain_data(self):
"""Lets the node hold a reference to the underlying data array.
This method gets the data array of the corresponding variable and keeps
it. If the weak reference to the corresponding variable is dead, it
raises an error.
"""
variable = self._variable()
if variable is not None:
self.data = variable.data
else:
            raise RuntimeError('cannot retain variable data: the variable '
                               'has already been released')
def _update_data_info(self, d):
# d is a raw array (with raw shape)
if d is None:
self.dtype = None
self.shape = None
else:
self.dtype = d.dtype
if self._layout is None:
self.shape = d.shape
else:
self.shape = chainer.memory_layouts._transpose_shape(
d.shape, self._layout, None)
# If the node has a reference to data, update it as well.
if self._data is not None:
self._data = d
def _check_old_style_gradient(self):
if self._old_style_grad_generator is not None:
raise RuntimeError(
'cannot twice-differentiate an old style Function "%s"' %
self._old_style_grad_generator)
def _create_variable(data, name, grad, requires_grad, device):
var = Variable(
data, name=name, grad=grad, requires_grad=requires_grad)
var.to_device(device)
return var
class Variable(object):
"""__init__(data=None, *, name=None, grad=None, requires_grad=True)
Array with a structure to keep track of computation.
Every variable holds a data array of type either :class:`numpy.ndarray` or
:class:`cupy.ndarray`.
A variable object holds a data array and a
:class:`~chainer.variable.VariableNode` object of
a computational graph. If the variable is constructed by the user, the node
    is a *root* and does not hold any parent. If the variable is constructed
    by a
:class:`~chainer.FunctionNode` object (i.e., by calling functions under
``chainer.functions`` or user-defined functions), or by using operators
(see the list below), the node holds a reference to its parent called
:attr:`creator_node`.
This reference is used in backpropagation to backtrack the graph.
Users can disable (resp. enable) this chaining behavior by calling
:func:`~chainer.no_backprop_mode` (resp.
:func:`~chainer.force_backprop_mode`).
In the former context, a variable never creates a computational graph,
    whereas in the latter context, it is forced to create one.
.. note::
The following operators are defined for variable(s).
* Indexing: ``a[slices]`` (:meth:`__getitem__`)
* Addition: ``a + b`` (:meth:`__add__`, :meth:`__radd__`)
* Subtraction: ``a - b`` (:meth:`__sub__`, :meth:`__rsub__`)
* Multiplication: ``a * b`` (:meth:`__mul__`, :meth:`__rmul__`)
* Division: ``a / b`` (:meth:`__div__`, :meth:`__rdiv__`, \
:meth:`__truediv__`, :meth:`__rtruediv__`)
* Floor Division: ``a // b`` (:meth:`__floordiv__`, \
:meth:`__rfloordiv__`)
* Exponentiation: ``a ** b`` (:meth:`__pow__`, :meth:`__rpow__`)
* Matrix Multiplication: ``a @ b`` (:meth:`__matmul__`, \
:meth:`__rmatmul__`)
* Negation (Arithmetic): ``- a`` (:meth:`__neg__`)
* Absolute value: ``abs(a)`` (:meth:`__abs__`)
Args:
data (:ref:`ndarray`): Initial data array.
name (str): Name of the variable.
grad (:ref:`ndarray`): Initial gradient array.
requires_grad (bool): Boolean indicating whether ``grad`` will be set
in backward calculation.
"""
    # Cached value of `self.xp is chainerx`. It avoids initializing
    # self._device as much as possible because that is really costly.
_has_chainerx_array = False
# Cached grad-stopped view of chainerx array. This is the return value
# of `array` and `data` properties.
_chainerx_nobp_array_cache = None
# Cached grad-stopped view of the array returned by `grad` property.
# It's a 2-element tuple, where the first is the original grad array and
# the second is a grad-stopped view of the first. `grad` property returns
# the second element.
_chainerx_grad_cache = None
_chainerx_name = None # type: tp.Optional[str]
# A NumPy, CuPy array cache to avoid redundant conversions between
# NumPy/CuPy and ChainerX.
# TODO(hvy): Avoid modifying this variable from outside this class.
_chainerx_fallback_array = None
# Used in non-ChainerX variables. The gradient array is stored in
# this attribute on Variable.grad setter to delay creation of grad_var
# instance.
_grad = None
_layout = None
def as_layout(self, layout):
src_layout = self._layout
if src_layout == layout:
return self
y, = chainer.memory_layouts.AsLayout(layout).apply((self,))
return y
def __init__(
self,
data: tp.Optional[types.NdArray] = None,
**kwargs: tp.Any
) -> None:
name, grad, requires_grad, grad_valid, layout = argument.parse_kwargs(
kwargs, ('name', None), ('grad', None), ('requires_grad', True),
('_grad_valid', True), ('layout', None),
volatile='volatile argument is not supported anymore. '
'Use chainer.using_config')
# _grad_valid is for internal use, hence the prefix _.
assert isinstance(requires_grad, bool)
if data is not None:
array_types = chainer.get_array_types()
if not isinstance(data, array_types):
msg = '{} or {} are expected. Actual: {}'.format(
', '.join([str(at) for at in array_types[:-1]]),
array_types[-1], type(data))
raise TypeError(msg)
self._init_impl(
data, None, name, grad, grad_valid, requires_grad, None, None,
layout)
@staticmethod
def _init_unchecked(
data=None, device=None, name=None, grad=None, grad_valid=True,
requires_grad=True, is_chainerx_array=None, node=None,
layout=None):
"""Creates a new :class:`Variable` without the validations for
optimizing performance.
"""
# Create a Variable without invoking __init__
var = Variable.__new__(Variable)
var._init_impl(
data, device, name, grad, grad_valid, requires_grad,
is_chainerx_array, node, layout)
return var
def _init_impl(self, data, device, name, grad, grad_valid, requires_grad,
is_chainerx_array, node, layout):
# `device` must be of type chainer.backend.Device.
# Check is skipped for performance.
self._requires_grad = requires_grad # type: bool
self._loss_scale = None
self._grad_var = None
self._device = device
# A flag to prevent grad from being used before calling cleargrad().
# It becomes True when either
# - cleargrad() is called, or
# - zerograd() is called, or
# - grad is set.
# Note that it won't be True by merely initializing an uninitialized
# Parameter.
self._grad_valid = grad_valid
self._layout = layout
if is_chainerx_array is None:
is_chainerx_array = isinstance(data, chainerx.ndarray)
if is_chainerx_array:
if not requires_grad and grad is not None:
raise ValueError(
'Cannot initialize a variable with gradients if the '
'require_grad argument is False.')
self._set_chainerx_array(data, grad) # type: ignore
            # ChainerX itself has its own node objects, but they are not
            # exposed to Python.
self._node = None # type: tp.Optional[VariableNode]
self._chainerx_name = name
else:
# Use a list as a data structure to hold the data array indirectly
# to abstract its initialized/uninitialized state.
self._data = [data] # type: tp.List[tp.Optional[types.NdArray]]
if node is None:
self._node = VariableNode(self, name)
else:
self._node = node
self._grad = grad
def __copy__(self):
return self._copy_to(Variable())
def _copy_to(self, target):
target.__dict__ = copy.copy(self.__dict__)
target._node = VariableNode(target, self.name)
return target
def __reduce__(self):
args = (
self.array, self.name, self.grad, self._requires_grad, self.device)
return _create_variable, args
def __repr__(self):
return variable_repr(self)
def __str__(self):
return variable_str(self)
def _clear_chainerx(self):
self._chainerx_nobp_array_cache = None
self._chainerx_grad_cache = None
self._chainerx_fallback_array = None
def _ensure_grad_var_up_to_date(self):
# For non-ChainerX, this method creates _grad_var if it's not yet
# created and _grad is set.
# For ChainerX, this method checks consistency between
# _grad_var._data[0] and self._data[0].grad and recreates _grad_var
# as necessary. (chainerx.ndarray.grad can be altered independently
# from chainer)
if self._has_chainerx_array:
self._grad = None
# Update gradient variable if it has not yet been initialized or
# it happens to be dirty w.r.t. the actual gradient of the
# underlying chainerx.ndarray.
arr = self._data[0]
actual_grad = (
arr.grad
if arr is not None and arr.is_grad_required()
else None)
if actual_grad is None:
self._grad_var = None
else:
grad_var = self._grad_var
old_grad = None if grad_var is None else grad_var._data[0]
if actual_grad is not old_grad:
self._grad_var = Variable(
actual_grad,
requires_grad=actual_grad.is_backprop_required(),
layout=self._layout)
return
if self._grad_var is None:
if self._grad is not None:
self._grad_var = Variable(self._grad, layout=self._layout)
def _set_chainerx_array(
self,
array: tp.Optional['chainerx.ndarray'],
grad: tp.Optional['chainerx.ndarray']
) -> None:
# Sets chainerx array and grad.
assert array is None or isinstance(array, chainerx.ndarray)
requires_grad = self._requires_grad
self._grad = None
if (not requires_grad
and array is not None
and array.is_backprop_required()):
raise ValueError(
'Cannot initialize a variable to not require '
'gradients if the ChainerX array already requires '
'backprop.')
# Create a view of the given data to hold internally and modify.
if array is None:
self._data = [None]
else:
            # If the array `array` is not connected to a graph, a view of it
            # is created and kept, so as not to change its no-graph status.
            # If the array is connected, its graph status is kept track of.
if not array.is_backprop_required():
array = array.view()
if requires_grad:
array.require_grad()
if grad is not None:
array.set_grad(grad)
self._data = [array]
self._has_chainerx_array = True # even if data is None
self._chainerx_nobp_array_cache = None
self._chainerx_grad_cache = None
self._chainerx_fallback_array = None
@property
def device(self):
"""Device on which the data array of this variable reside."""
# lazy initialization for performance
if self._device is None:
if self._data[0] is None:
self._device = backend.CpuDevice()
else:
self._device = backend.get_device_from_array(self._data[0])
return self._device
@property
def xp(self) -> tp.Optional[types.Xp]:
"""Array module for the data array of this variable."""
if self._has_chainerx_array:
return chainerx
else:
device = self.device
return None if device is None else device.xp
@property
def name(self):
if self._has_chainerx_array:
return self._chainerx_name
return self._node.name
@name.setter
def name(self, n):
if self._has_chainerx_array:
self._chainerx_name = n
return
self._node.name = n
def summary(self):
if self.name:
return '<variable %s>' % self.name
else:
return '<variable at 0x%x>' % id(self)
def debug_print(self):
"""Display a summary of the stored data and location of the Variable"""
msg = """{summary}
- device: {device}
- backend: {backend}
- shape: {shape}
- dtype: {dtype}
- statistics: {stats}
- grad: {grad}"""
stats_msg = 'mean={0:.8f}, std={1:.8f}'
array = self.array
device = self.device
with chainer.using_device(device):
xp = device.xp
if array is None:
# `array` can be `None` if constructed without any arguments
device = None
backend = None
stats = None
else:
device = getattr(array, 'device', 'CPU')
backend = type(array)
stats = stats_msg.format(float(xp.mean(array)),
float(xp.std(array)))
shape = getattr(array, 'shape', None)
dtype = getattr(array, 'dtype', None)
if self.grad is None:
grad = None
elif xp.all(self.grad == 0):
grad = 0
else:
grad = stats_msg.format(float(xp.mean(self.grad)),
float(xp.std(self.grad)))
return msg.format(summary=self.summary(), device=device,
backend=backend, shape=shape, dtype=dtype,
stats=stats, grad=grad)
def __pos__(self):
return self
def __len__(self):
"""Returns the first dimension of the data array.
Returns:
int: Number of the first dimension of the data array.
"""
return len(self.array)
@property
def label(self):
"""Short text that represents the variable."""
if self._has_chainerx_array:
raise RuntimeError(
'A variable of ChainerX does not provide a node label.')
return self._node.label
@property
def creator(self):
"""Function implementation that created this variable.
When this variable has been created by an old-style function (i.e., it
is implemented as a subclass of :class:`Function`), this property
returns that :class:`Function` object.
When this variable has been created by a new-style function (i.e., it
is implemented as a subclass of :class:`FunctionNode` class), this
property returns that node object.
"""
if self._has_chainerx_array:
raise RuntimeError(
'A variable of ChainerX does not provide a creator.')
return self._node.creator
@creator.setter
def creator(self, func):
if self._has_chainerx_array:
raise RuntimeError(
'A variable of ChainerX does not provide a creator.')
self._node.creator = func
@property
def creator_node(self):
""":class:`FunctionNode` object that created this variable.
This property has a setter to which ``None`` can be set. Setting
        ``None`` to this property is equivalent to calling :meth:`unchain`;
it purges the variable from the function that created this variable.
The setter also accepts the original :class:`FunctionNode` object that
created this variable. For example, you can once set ``None`` to this
property and then set the original value again.
.. note::
            Setting an irrelevant :class:`FunctionNode` object does not emit
            any error immediately, but the behavior is undefined. Do not set
            a :class:`FunctionNode` object that did not create this variable
object.
"""
if self._has_chainerx_array:
raise RuntimeError(
'A variable of ChainerX does not provide a creator_node.')
return self._node._creator_node
@creator_node.setter
def creator_node(self, func):
if self._has_chainerx_array:
raise RuntimeError(
'A variable of ChainerX does not provide a creator_node.')
self._node.creator_node = func
@property
def array(self) -> tp.Optional[types.NdArray]:
"""The underlying data array.
It is either :class:`numpy.ndarray` or :class:`cupy.ndarray` object,
        or ``None`` if the variable is in an uninitialized state.
"""
return self._get_array()
def _get_array(self):
if (self._layout is not None
and not (
_allow_array_access_with_nonstandard_layout())):
raise RuntimeError(
'Cannot directly retrieve the underlying array from a '
'variable with non-standard layout.')
return self.raw_array
@property
def raw_array(self):
"""The underlying raw data array.
Its shape does not have to be the semantic shape, if the memory layout
is non-standard.
"""
# For ChainerX, this property always returns a grad-stopped view.
# The view is cached to reduce potential overhead.
if self._has_chainerx_array:
if (self._chainerx_nobp_array_cache is None
and self._data[0] is not None):
self._chainerx_nobp_array_cache = (
self._data[0].as_grad_stopped()) # type: ignore
return self._chainerx_nobp_array_cache
return self._data[0]
@array.setter
def array(self, d: tp.Optional[types.NdArray]) -> None:
self._set_array(d)
def _set_array(self, d, *, layout_check=True):
if (layout_check
and self._layout is not None
and not (
_allow_array_access_with_nonstandard_layout())):
raise RuntimeError(
'Cannot directly set the underlying array of a variable with '
'non-standard layout.')
if self._has_chainerx_array:
d_old = self._data[0]
if (d_old is not None
and (d_old.is_backprop_required() # type: ignore
or d.is_backprop_required())): # type: ignore
raise ValueError(
'Cannot update the array of a Variable if either the '
'existing or the new array requires backprop.')
self._set_chainerx_array(d, None) # type: ignore
else:
self._node._update_data_info(d) # type: ignore # _node doesn't have value when xp is chainerx # NOQA
self._data[0] = d
self._has_chainerx_array = False
@property
def chx_array(self):
"""A view of the raw ChainerX array.
        In contrast to :data:`Variable.array`, which is always disconnected,
        the array represented by this attribute may be connected to the
        computational graph.
It is a view, so it has a distinct gradient from the original array.
If this attribute is queried on a :class:`Variable` with a non-ChainerX
array, :class:`ValueError` will be raised.
"""
if not self._has_chainerx_array:
raise ValueError(
'chx_array is not available for Variable with '
'non-ChainerX array.')
return self._data[0].view()
@property
def data(self) -> tp.Optional[types.NdArray]:
"""The underlying data array (equivalent to :attr:`array`).
Note that using this attribute directly is discouraged; use
:attr:`array` instead. Using :attr:`array`, you can find an error
earlier when your code mixes up Variable and ndarray because
ndarray does not have an attribute ``.array`` while it has
``.data``.
"""
return self.array
@data.setter
def data(self, d: types.NdArray) -> None:
self.array = d
@property
def layout(self):
return self._layout
def _set_chainerx_grad(self, g, from_grad_var):
# Assigns chainerx.ndarray.grad.
#
# If the main array is connected to the graph, in order to enable
# double-backprop, the grad will also be backprop-required
# (a view is created not to affect the given grad).
# If the given grad is from a grad_var, this operation is skipped,
# as the status of the given grad reflects the necessity of
# double-backprop.
assert self.xp is chainerx
if not self._requires_grad and g is not None:
raise RuntimeError(
'Cannot set the gradient of a variable that is flagged to not '
'require one.')
arr = self._data[0]
if arr is None:
if g is not None:
raise RuntimeError(
'Cannot set a gradient to an empty variable')
elif arr.is_backprop_required():
# If g is grad-stopped, require grad on it.
# Make a view in order not to affect the input.
if (g is not None
and not from_grad_var
and not g.is_backprop_required()):
g = g.view().require_grad()
arr.set_grad(g)
def _set_grad_without_check(self, g):
if self._has_chainerx_array:
self._set_chainerx_grad(g, False)
self._grad_var = None
self._grad_valid = True
return
self._grad = g
self._grad_var = None
self._grad_valid = True
@property
def grad(self) -> tp.Optional[types.NdArray]:
"""Gradient array of this variable.
Note that this property returns the underlying array of the gradient
variable instead of the gradient variable itself; to get/set
gradient variable, use :attr:`grad_var` instead.
        If the underlying array is a :class:`chainerx.ndarray` and
        requires_grad is false, trying to access the gradient will result
        in an error.
"""
return self._get_grad()
def _get_grad(self):
if (self._layout is not None
and not (
_thread_local.allow_array_access_with_nonstandard_layout)):
raise RuntimeError(
'Cannot directly retrieve the gradient array of a '
'variable with non-standard layout.')
if not self._grad_valid:
raise RuntimeError(
'Cannot retrieve Variable.grad. '
'Either it must be set manually or Variable.cleargrad() '
'must be called beforehand.')
if self._has_chainerx_array:
arr = self._data[0]
if arr is None or not arr.is_backprop_required():
self._chainerx_grad_cache = None
return None
actual_grad = arr.grad
if actual_grad is None:
self._chainerx_grad_cache = None
return None
# If grad is cached and the actual grad has not changed, return
# the cache.
if self._chainerx_grad_cache is not None:
orig_grad, grad_stopped_grad = self._chainerx_grad_cache
if orig_grad is actual_grad:
return grad_stopped_grad
# Update the cache
grad_stopped_grad = actual_grad.as_grad_stopped()
self._chainerx_grad_cache = (actual_grad, grad_stopped_grad)
return grad_stopped_grad
if self._grad_var is not None:
return self._grad_var.array
return self._grad
@grad.setter
def grad(self, g: tp.Optional[types.NdArray]) -> None:
self._set_grad(g)
def _set_grad(self, g, *, layout_check=True):
if (layout_check
and self._layout is not None
and not (
_allow_array_access_with_nonstandard_layout())):
raise RuntimeError(
'Cannot directly set the gradient array of a '
'variable with non-standard layout.')
if g is not None:
_check_grad_type(None, self, False, g)
self._set_grad_without_check(g)
def _set_grad_var_without_check(self, gv):
if self._has_chainerx_array:
self._set_chainerx_grad(
None if gv is None else gv._data[0],
True)
self._grad_var = gv
return
self._grad_var = gv
self._grad = None if gv is None else gv.array
@property
def grad_var(self) -> tp.Optional['Variable']:
"""Gradient variable."""
self._ensure_grad_var_up_to_date()
return self._grad_var
@grad_var.setter
def grad_var(self, g: tp.Optional['Variable']) -> None:
if g is not None:
_check_grad_type(None, self, False, g.array)
self._set_grad_var_without_check(g)
@property
def shape(self):
raw_shape = self._data[0].shape
if self._layout is not None:
# Convert to semantic shape
return chainer.memory_layouts._transpose_shape(
raw_shape, self._layout, None)
return raw_shape
@property
def ndim(self):
return self._data[0].ndim
@property
def size(self):
return self._data[0].size
@property
def dtype(self):
return self._data[0].dtype
@property
def rank(self):
if self._has_chainerx_array:
raise RuntimeError(
'A variable of ChainerX does not provide a node rank.')
return self._node.rank
@property
def node(self):
if self._has_chainerx_array:
raise RuntimeError(
'A variable of ChainerX does not provide a node.')
return self._node
@property
def requires_grad(self):
"""It indicates that ``grad`` will be set in backward calculation."""
return self._requires_grad
@property
def T(self):
"""Transposition of this variable."""
return chainer.functions.transpose(self)
def to_cpu(self):
"""Copies the data and gradient arrays to CPU."""
self.to_device(backend.CpuDevice())
def to_gpu(self, device=None):
"""Copies the data and gradient arrays to specified GPU.
Args:
device: Target device specifier. If omitted, the current device is
used.
"""
cuda.check_cuda_available()
self.to_device(cuda._get_device_or_current(device))
def to_intel64(self):
"""Copies the data and gradient arrays to intel64 specific mdarray.
If the array is not suited for intel64, it will be converted to
:class:`numpy.ndarray`.
"""
intel64.check_ideep_available()
self.to_device(intel64.Intel64Device())
def to_chx(self):
"""Converts the array and gradient to ChainerX arrays without copy.
This method converts the underlying array and gradient to
:class:`chainerx.ndarray` on the same physical device. It does nothing
if the array held by the Variable object is already a ChainerX array.
The new array is a view of the original one.
"""
self._to_chx(allow_unchaining=False)
def _to_chx(self, allow_unchaining):
if not chainerx.is_available():
raise RuntimeError('ChainerX is not available.')
if self._has_chainerx_array:
return
if not allow_unchaining and self.creator is not None:
raise RuntimeError(
'A variable with a creator cannot be converted into ChainerX '
'array')
self._to_device(
backend.ChainerxDevice.from_fallback_device(self.device),
allow_unchaining)
def from_chx(self):
"""Converts the array and gradient to non-ChainerX arrays without copy.
This method converts the underlying ChainerX array and gradient
residing in either a ``native`` or ``cuda`` device to NumPy or CuPy
arrays respectively, on the same physical device. It does nothing
if the array held by the Variable object is not a ChainerX array. The
new array is a view of the original one.
Raises an error if such a conversion is not supported for the device.
"""
self._from_chx(allow_unchaining=False)
def _from_chx(self, allow_unchaining):
if not self._has_chainerx_array:
return
if not allow_unchaining and self._data[0].is_backprop_required():
raise RuntimeError(
'Cannot convert from a Variable with a ChainerX array that is '
'connected to a graph.')
self.to_device(self.device.fallback_device)
def to_device(self, device):
"""Copies the data and gradient arrays to specified device.
Args:
device: Target device specifier. See
:func:`~chainer.get_device` for available values.
"""
self._to_device(device, allow_unchaining=False)
def _to_device(self, device, allow_unchaining):
device = chainer.get_device(device)
was_chainerx = self._has_chainerx_array
is_chainerx = device.xp is chainerx
if not allow_unchaining:
if was_chainerx and not is_chainerx:
chx_arr = self._data[0]
if chx_arr is not None and chx_arr.is_backprop_required():
raise RuntimeError(
'A variable of a ChainerX array which requires '
'gradients cannot be copied into non-chainerx device '
'({}).'.format(device))
elif not was_chainerx and is_chainerx:
arr = self._data[0]
if arr is not None and self.creator is not None:
raise RuntimeError(
'A variable of a non-ChainerX array which is '
'connected to a graph cannot be copied to a ChainerX '
'device ({}).'.format(device))
arr = self._data[0]
grad_var = self.grad_var
if was_chainerx and not is_chainerx:
self._clear_chainerx()
self._node = VariableNode(self, self._chainerx_name)
elif not was_chainerx and is_chainerx:
self._chainerx_name = self._node.name
self._device = device
self._has_chainerx_array = is_chainerx
if arr is None:
return
if backend.get_device_from_array(arr) == device:
return
new_arr = device.send(arr)
if is_chainerx:
if grad_var is None:
new_grad = None
else:
new_grad = device.send(grad_var._data[0])
self._set_chainerx_array(new_arr, new_grad)
else:
self._data = [new_arr]
if grad_var is not None:
grad_var._to_device(device, allow_unchaining=allow_unchaining)
# _grad has been invalidated by the line above.
self._grad = grad_var.raw_array
# ensure that the node tracks the device migration
node = self._node
if is_chainerx:
# ChainerX itself has its own node objects,
# ensure that the node is disconnected with this variable.
if node is not None:
# Disconnect by replacing with an alternative of dead weakref
node._variable = lambda: None
self._node = None
else:
if node._data is not None:
node.retain_data()
def cleargrad(self):
"""Clears the gradient array."""
self.grad_var = None
self._grad_valid = True
def zerograd(self):
"""Initializes the gradient array by zeros.
Note that the gradient variable is unchained from the computational
graph by this method, because this operation breaks the backprop
validity.
.. deprecated:: v1.15
Use the more efficient :meth:`cleargrad` instead.
"""
warnings.warn(
'Variable.zerograd is deprecated. Use Variable.cleargrad instead.',
DeprecationWarning)
arr = self.array
if arr is None:
self._grad_valid = True
return
if self._has_chainerx_array:
gv = self.grad_var
if gv is None:
self.grad = chainerx.zeros_like(
arr, device=self.device.device)
else:
gv._data[0].fill(0)
else:
with chainer.using_device(self.device):
xp = self.device.xp
if self._grad is None:
self._grad = xp.zeros_like(arr)
self._grad_var = None
else:
gv = self._grad_var
if gv is not None:
gv.unchain()
self._grad.fill(0)
self._grad_valid = True
def copydata(self, var):
"""Copies the data array from given source variable.
This method copies the data array from given variable to this variable.
The copy is done even if the arrays reside on different devices,
including across the host and a GPU device. If this variable has an
uninitialized data array, this method initializes it by the data array
of the given variable. Similarly, if the given variable has an
uninitialized data array, this method initializes it by the data array
of this variable (``self``). If both are uninitialized, this method
does nothing.
Args:
var (~chainer.Variable): Source variable.
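A minimal illustrative sketch (``np`` is assumed to be NumPy):
>>> a = chainer.Variable(np.zeros((3,), np.float32))
>>> b = chainer.Variable(np.ones((3,), np.float32))
>>> a.copydata(b)
>>> a.array
array([1., 1., 1.], dtype=float32)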
"""
src = var.array
dst = self.array
if src is None:
if dst is None:
return
var.initialize(self.shape)
src = var.array
elif dst is None:
self.initialize(src.shape)
dst = self.array
backend.copyto(dst, src)
def addgrad(self, var):
"""Accumulates the gradient array from given source variable.
This method adds the gradient of a given variable to the gradient of
this variable. The accumulation is even done across the host and
different devices. If this variable has uninitialized data/grad arrays,
this method initializes it with the shape of the given variable and
then accumulates the gradient.
Args:
var (~chainer.Variable): Source variable.
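A minimal illustrative sketch (``np`` is assumed to be NumPy):
>>> a = chainer.Variable(np.zeros((3,), np.float32))
>>> a.grad = np.ones((3,), np.float32)
>>> b = chainer.Variable(np.zeros((3,), np.float32))
>>> b.grad = np.full((3,), 2.0, dtype=np.float32)
>>> a.addgrad(b)
>>> a.grad
array([3., 3., 3.], dtype=float32)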
"""
dst_device = self.device
is_chainerx = dst_device.xp is chainerx
if is_chainerx != (var.device.xp is chainerx):
raise RuntimeError(
'Variable.addgrad does not support addition between '
'gradients on non-ChainerX and ChainerX devices.\n'
'Adding gradient to: {}\n'
'Adding gradient from: {}'.format(
dst_device, var.device))
if var.grad is None:
return
src = var.grad_var
if self.array is None:
self.initialize(var.shape)
dst = self.grad_var
src_device = src.device
if src_device != dst_device:
src = chainer.functions.copy(src, dst_device)
self.grad_var = src if dst is None else src + dst
def set_creator(self, gen_func):
"""Notifies the variable that the given function is its creator.
Args:
gen_func (Function): Function object that creates this variable as
one of its outputs.
"""
if self._has_chainerx_array:
raise RuntimeError(
'A variable of ChainerX does not provide a creator.')
self._node.set_creator(gen_func)
def set_creator_node(self, fnode):
"""Notifies the variable that the given node is its creator.
Args:
fnode (FunctionNode): Function node that has this variable as an
output.
"""
if self._has_chainerx_array:
raise RuntimeError(
'A variable of ChainerX does not provide a creator node.')
self._node.set_creator_node(fnode)
def backward(self, retain_grad=False, enable_double_backprop=False,
loss_scale=None):
"""Runs error backpropagation (a.k.a.\\ backprop) from this variable.
On backprop,
:meth:`FunctionNode.backward() <chainer.FunctionNode.backward>`
is called on each :class:`~chainer.FunctionNode` object appearing in
the backward graph starting from this variable.
The backward graph is represented by backward
references from variable nodes to their creators, and from function
nodes to their input variable nodes. The backprop stops at all root
nodes. Some function nodes set ``None`` as gradients of some inputs,
where further backprop does not take place at such inputs.
This method uses :data:`grad` as the initial error array. User can
manually set a gradient array before calling this method.
If the shape of :data:`data` is ``()`` (i.e., it is scalar) and
:data:`grad` is ``None``, then this method automatically complements
1.0 as the initial error. This is useful on starting backprop from
some scalar loss value.
From v3, this method supports *differentiable backprop* (a.k.a. double
backprop, grad of grads). To enable it, pass
``enable_double_backprop=True``.
Args:
retain_grad (bool): If ``True``, the gradient arrays of all
intermediate variables are kept.
Otherwise, :data:`~chainer.Variable.grad` of the
intermediate variables are set to ``None`` on appropriate
timing, which may reduce the maximum memory consumption.
In most cases of training some models, the purpose of backprop
is to compute gradients of parameters, not of all variables,
and therefore it is recommended that this flag be set to
``False``.
enable_double_backprop (bool): *(Added in v3.0)* If ``True``,
computational trace of the whole backpropagation procedure is
recorded to the computational graph so that one can further do
backpropagation from the resulting gradients. Note that
enabling it results in larger memory consumption needed to
store the gradients w.r.t. intermediate variables that are
required for the second gradient computation.
loss_scale (float): Loss scaling factor. Loss scaling is a useful
technique to mitigate the vanishing gradient issue that tends to
happen when a low precision data type like float16 is used during
training. If you set a loss scaling factor, gradients of loss
values are multiplied by the factor before backprop
starts. The factor is propagated to all gradients in the
computational graph along the backprop. The gradients of
parameters are divided by the factor just before the parameters
are to be updated.
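A minimal sketch of a typical use, starting backprop from a scalar
loss (``np`` is assumed to be NumPy):
>>> x = chainer.Variable(np.array(3.0, np.float32))
>>> y = x * x
>>> y.backward()
>>> x.grad
array(6., dtype=float32)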
"""
if self._has_chainerx_array:
if retain_grad:
raise RuntimeError(
'retain_grad is not supported for ChainerX array.')
arr = self._data[0]
assert isinstance(arr, chainerx.ndarray)
# pybind has issues when converting int -> opt<float>
if loss_scale:
loss_scale = float(loss_scale)
chainerx.backward(
arr, enable_double_backprop=enable_double_backprop,
loss_scale=loss_scale)
return
# Initialize error by 1, if this is a loss variable
if self.array.size == 1 and self.grad_var is None:
if self.array.ndim != 0:
warnings.warn(
'Treating a variable with only one element as a scalar'
' in Variable.backward is deprecated. A scalar variable'
' must be a 0-dimensional array. Apply'
' chainer.functions.squeeze to obtain a scalar variable.'
' If the size of this variable accidentally becomes one,'
' set zero to grad.',
DeprecationWarning)
with chainer.using_device(self.device):
self.grad = self.device.xp.ones_like(self.array)
if loss_scale is not None:
self.grad *= loss_scale
node = self.node
grad_var = self.grad_var
self.grad_var = None
with chainer.using_config('enable_backprop', enable_double_backprop):
# TODO(kataoka): The following line should not pass grad_var = None
# to _backprop_to_all, but it is working because grad_var is
# immediately popped away as None = _backprop_utils._reduce([None])
_backprop._backprop_to_all(
[(node, grad_var)], retain_grad, loss_scale)
def item(self):
"""Converts the variable with one element to a Python scalar.
This will incur host-device synchronization.
Returns:
int or float: The element of the array.
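A minimal sketch (``np`` is assumed to be NumPy):
>>> chainer.Variable(np.array(5.0, np.float32)).item()
5.0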
"""
return self.array.item()
def mean(self, axis=None, *, weights=None, keepdims=False):
"""Calculate weighted average of array elements over a given axis.
.. seealso::
:func:`chainer.functions.average` for full documentation.
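A minimal sketch (``np`` is assumed to be NumPy):
>>> v = chainer.Variable(np.arange(4, dtype=np.float32))
>>> v.mean().array
array(1.5, dtype=float32)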
"""
return chainer.functions.average(self, axis, weights, keepdims)
def reshape(self, *shape):
"""Returns a variable of a different shape and the same content.
.. seealso::
:func:`chainer.functions.reshape` for full documentation.
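A minimal sketch (``np`` is assumed to be NumPy):
>>> v = chainer.Variable(np.arange(6, dtype=np.float32))
>>> v.reshape(2, 3).shape
(2, 3)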
"""
if len(shape) == 1 and isinstance(shape[0], (tuple, list)):
shape = shape[0]
return chainer.functions.reshape(self, shape)
def transpose(self, *axes):
"""Permute the dimensions of an input variable without copy.
.. seealso::
:func:`chainer.functions.transpose` for full documentation.
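A minimal sketch (``np`` is assumed to be NumPy):
>>> v = chainer.Variable(np.zeros((2, 3), np.float32))
>>> v.transpose().shape
(3, 2)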
"""
if len(axes) == 0:
axes = None
elif len(axes) == 1 and (isinstance(axes[0], (tuple, list)) or
axes[0] is None):
axes = axes[0]
return chainer.functions.transpose(self, axes)
def unchain(self):
"""Deletes the reference to the creator of this variable.
This method deletes the reference to the creator from the corresponding
variable node. Unlike :meth:`unchain_backward`, it does not backtrack
the graph.
This method is equivalent to ``self.creator_node = None``.
"""
if self._has_chainerx_array:
raise RuntimeError(
'A variable of ChainerX does not provide an unchain method.')
self.creator_node = None
def unchain_backward(self):
"""Deletes references between variable nodes and functions backward.
After this method completes, intermediate variable nodes and functions
that are not referenced from anywhere are deallocated by reference
count GC. Also this variable itself deletes the reference to its
creator function from the node, i.e. the node becomes root in the
computation graph. It indicates that backprop after unchaining stops at
this variable. This behavior is useful to implement truncated BPTT.
"""
if self._has_chainerx_array:
raise RuntimeError(
'A variable of ChainerX does not provide an unchain_backward '
'method.')
cand_funcs = []
seen_set = set()
def add_cand(cand):
if cand is not None and cand not in seen_set:
cand_funcs.append(cand)
seen_set.add(cand)
add_cand(self.creator_node)
while cand_funcs:
func = cand_funcs.pop()
for var in func.inputs:
add_cand(var.creator_node)
func.unchain()
def retain_data(self):
"""Lets the corresponding variable node keep the underlying array."""
if self._has_chainerx_array:
raise RuntimeError(
'A variable of ChainerX does not provide a retain_data '
'method.')
self._node.data = self._data[0]
def _error_nobp_op(self, op):
raise TypeError(
'Variables do not support {} operator. '
'You could use `array` attribute instead.'.format(op))
def __lt__(self, other):
"""This operator is not supported in Variables."""
self._error_nobp_op('<')
def __le__(self, other):
"""This operator is not supported in Variables."""
self._error_nobp_op('<=')
def __eq__(self, other):
"""This operator is not supported in Variables."""
self._error_nobp_op('==')
def __ne__(self, other):
"""This operator is not supported in Variables."""
self._error_nobp_op('!=')
def __gt__(self, other):
"""This operator is not supported in Variables."""
self._error_nobp_op('>')
def __ge__(self, other):
"""This operator is not supported in Variables."""
self._error_nobp_op('>=')
def __nonzero__(self):
"""This operator is not supported in Variables."""
# Python 2.x
raise TypeError(
'Variables cannot be evaluated as Python bool.')
def __bool__(self):
"""This operator is not supported in Variables."""
# Python 3.x
raise TypeError(
'Variables cannot be evaluated as Python bool.')
__array_priority__ = 200 # type: int
__hash__ = None # type: tp.Callable[[object], int]
class Parameter(Variable):
"""Parameter variable that can be registered to a link.
Parameter is a subclass of :class:`Variable`. It behaves almost the same
as a usual variable except that a parameter can be registered to a
:class:`~chainer.Link` object just by assigning it to an attribute of
the link within an :meth:`~chainer.Link.init_scope` context.
Parameter also supports an initialization by an initializer. It can have
two initializers: one for the data array, and the other for the gradient
array. The initializer only specifies the way of filling the elements of
these arrays, and the shape information is specified at the initialization
point.
When a link that the parameter has been registered to is passed to an
:class:`~chainer.GradientMethod`, an update rule is set to the parameter.
This update rule specifies how to update the data array of the parameter
using its gradient array.
Args:
initializer (~chainer.Initializer or :ref:`ndarray`):
Initializer of the data array. If ``shape`` is given, this
initializer is immediately used to initialize the data array.
Otherwise, if it is an array, it is immediately used as the data
array, and otherwise the data array is left uninitialized and will
be initialized by this initializer in :meth:`initialize`. It can
also be a scalar, in which case the data array will be filled by
this scalar. Note that float32 is used in this case.
shape (int or tuple of int or None): Shape of the parameter. If it is
``None``, the initialization is deferred to the call of
:meth:`initialize`.
name (str): Name of the parameter.
Attributes:
initializer: Initializer of the data array. It is used for
initializing the data array of an uninitialized variable.
update_rule: :class:`~chainer.optimizer.UpdateRule` instance that
updates this variable as a parameter. This argument is set to
:attr:`update_rule`.
"""
initializer = None # type: tp.Optional[tp.Union[tp.Optional[types.AbstractInitializer], types.NdArray]] # NOQA
# TODO(okapies): fix the behavior when shape is None and remove NdArray
_grad_initializer = None # type: tp.Optional[types.AbstractInitializer]
def __init__(
self,
initializer: tp.Optional[types.InitializerSpec] = None,
shape: tp.Optional[types.ShapeSpec] = None,
name: tp.Optional[str] = None,
*,
layout=None
) -> None:
if initializer is None:
initializer = constant.NaN()
elif numpy.isscalar(initializer):
initializer = constant.Constant(initializer)
if shape is None:
if isinstance(initializer, chainer.get_array_types()):
# parameter initialized by the initial array
super(Parameter, self).__init__(
initializer, name=name, layout=layout)
else:
# uninitialized parameter
super(Parameter, self).__init__(
name=name, _grad_valid=False, layout=layout)
dtype = getattr(initializer, 'dtype', None)
self._grad_initializer = constant.NaN(dtype)
else:
# parameter initialized with a given shape
if isinstance(initializer, chainer.get_array_types()):
xp = backend.get_array_module(initializer)
initializer = constant.Constant(initializer)
else:
xp = numpy
data = initializers.generate_array(initializer, shape, xp) # type: ignore # NOQA
grad = xp.full_like(data, numpy.nan)
super(Parameter, self).__init__(
data, name=name, grad=grad, layout=layout)
self._initial_device = backend.CpuDevice()
self.update_rule = None
self.initializer = initializer
def __copy__(self):
return self._copy_to(Parameter())
def __reduce__(self):
args = (
self.array, self.name, self._grad, self._grad_valid,
self.initializer, self.update_rule, self.device)
return _recover_parameter, args
@property
def is_initialized(self):
return self._data[0] is not None
@property
def dtype(self):
array = self._data[0]
if array is not None:
return array.dtype
# uninitialized
initializer = self.initializer
if hasattr(initializer, 'dtype'):
return numpy.dtype(initializer.dtype)
raise RuntimeError(
'Dtype of the parameter is not determined yet because it\'s '
'uninitialized and dtype was not explicitly given.')
def to_cpu(self):
return self.to_device(backend.CpuDevice())
def to_gpu(self, device=None):
device = chainer.get_device(cuda._get_device_or_current(device))
assert device.xp is cuda.cupy
self.to_device(device)
def to_intel64(self):
self.to_device(intel64.Intel64Device())
def to_chx(self):
if not chainerx.is_available():
raise RuntimeError('ChainerX is not available.')
# Derive the target ChainerX device from the array if it is
# initialized. Otherwise, from the current initial device.
if self.array is not None:
device = backend.get_device_from_array(self.array)
else:
device = self._initial_device
if device.xp is numpy:
self._initial_device = backend.ChainerxDevice(
chainerx.get_device('native:0'))
elif device.xp is cuda.cupy:
self._initial_device = backend.ChainerxDevice(
chainerx.get_device('cuda:{}'.format(device.device.id)))
super(Parameter, self)._to_chx(allow_unchaining=True)
def from_chx(self):
if self.array is not None:
device = backend.get_device_from_array(self.array)
else:
device = self._initial_device
if device.xp is chainerx:
backend_name = device.device.backend.name
if backend_name == 'native':
self._initial_device = backend.CpuDevice()
elif backend_name == 'cuda':
self._initial_device = backend.GpuDevice.from_device_id(
device.device.index)
super(Parameter, self)._from_chx(allow_unchaining=True)
def to_device(self, device):
device = chainer.get_device(device)
if self._data[0] is None and self._initial_device != device:
self._data = [None] # Renew placeholder to break sharing
self._has_chainerx_array = False
self._initial_device = device
super(Parameter, self)._to_device(device, allow_unchaining=True)
def cleargrad(self):
super(Parameter, self).cleargrad()
if not self.is_initialized:
self._grad_initializer = None
def zerograd(self):
super(Parameter, self).zerograd()
if not self.is_initialized:
dtype = getattr(self.initializer, 'dtype', None)
self._grad_initializer = initializers.Zero(dtype)
def initialize(self, shape):
"""Initializes the uninitialized variable.
Uninitialized variable is a variable created with the data array set to
None. This method creates and initializes the data array. The shape of
the variable can be left unknown until this method is called.
Args:
shape (tuple of int): Shape of the data array.
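A minimal illustrative sketch of deferred initialization (the
constant initializer is just an example choice):
>>> p = chainer.Parameter(chainer.initializers.Constant(1.0))
>>> p.is_initialized
False
>>> p.initialize((2, 3))
>>> p.shape
(2, 3)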
"""
device = self._initial_device
assert device is not None
xp = device.xp
data = initializers.generate_array(
self.initializer, shape, xp, device=device)
data = chainer.memory_layouts._transpose_array(data, None, self.layout)
if self._grad_initializer is None:
grad = None
else:
grad = initializers.generate_array(
self._grad_initializer, shape, xp, device=device)
grad = chainer.memory_layouts._transpose_array(
grad, None, self.layout)
self._set_array(data, layout_check=False)
self._set_grad(grad, layout_check=False)
# Convert the array for iDeep.
# TODO(niboshi): This could be done in generate_array().
if isinstance(self._initial_device, intel64.Intel64Device):
self.to_intel64()
def update(self):
"""Updates the data array using the gradient and the update rule.
This method updates the parameter using the attached update rule.
"""
if self.update_rule is not None:
if not self.update_rule.is_elementwise:
if self.layout is not None:
raise RuntimeError(
'Parameter with a non-standard layout cannot be '
'updated with a non-elementwise update rule '
'({}).'.format(self.update_rule))
self.update_rule.update(self)
def as_variable(obj):
"""Converts an array or a variable into :class:`~chainer.Variable`.
This is a convenient function to get a :class:`~chainer.Variable` object
transparently from a raw array or a variable.
Note that this function should only be used for type consistency (i.e., to
enforce the return value of an API having type :class:`~chainer.Variable`).
The :class:`~chainer.Variable.requires_grad` flag is kept as is; if ``obj``
is a raw array, the newly created variable has ``requires_grad = False``.
In order to make a variable w.r.t. which you want to compute the gradient,
you should use :class:`~chainer.Variable` directly.
Args:
obj (:ref:`ndarray` or ~chainer.Variable): An array or
a variable that you want to convert to :class:`~chainer.Variable`.
Returns:
~chainer.Variable:
A variable converted from ``obj``. If ``obj`` is a raw array, this is a
new :class:`~chainer.Variable` object that wraps the array. If ``obj``
is already a :class:`~chainer.Variable` object, this function returns
``obj`` as is.
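A minimal sketch (``np`` is assumed to be NumPy):
>>> x = np.zeros((3,), np.float32)
>>> v = chainer.as_variable(x)
>>> isinstance(v, chainer.Variable), v.requires_grad
(True, False)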
"""
if isinstance(obj, Variable):
return obj
if isinstance(obj, chainerx.ndarray):
requires_grad = obj.is_backprop_required()
else:
requires_grad = False
return Variable(obj, requires_grad=requires_grad)
def as_array(obj):
"""Returns the underlying array from a variable or an array.
This is a convenient function to get the underlying array object
transparently from an object that could be either a variable or an array.
Args:
obj (:ref:`ndarray` or ~chainer.Variable): An array or a variable.
Returns:
:ref:`ndarray` or ~chainer.Variable:
The underlying array object of the argument.
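A minimal sketch (``np`` is assumed to be NumPy):
>>> v = chainer.Variable(np.zeros((3,), np.float32))
>>> chainer.as_array(v) is v.array
True
>>> x = np.ones((3,), np.float32)
>>> chainer.as_array(x) is x
True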
"""
if isinstance(obj, Variable):
return obj.array
return obj
def _recover_parameter(*args):
if len(args) == 7:
# latest
data, name, grad, grad_valid, initializer, update_rule, device = args
elif len(args) == 6:
data, name, grad, initializer, update_rule, device = args
grad_valid = True
else:
assert False, len(args)
p = Parameter(initializer=initializer, name=name)
p.array = data
p._grad = grad
p._grad_valid = grad_valid
p.update_rule = update_rule
p.to_device(device)
return p
class _ChainerxVariableNodeProps(object):
def __init__(self, x):
self.shape = x.shape
self.dtype = x.dtype
class _AllowArrayAccessWithNonstandardLayout:
"""Context manager within which access to Variable.array is allowed for \
variables with a non-standard layout."""
def __enter__(self):
self._old = _allow_array_access_with_nonstandard_layout()
_thread_local.allow_array_access_with_nonstandard_layout = True
def __exit__(self, typ, value, traceback):
_thread_local.allow_array_access_with_nonstandard_layout = self._old
def _allow_array_access_with_nonstandard_layout():
# Returns whether a thread-local variable
# `allow_array_access_with_nonstandard_layout` is set to True.
try:
return _thread_local.allow_array_access_with_nonstandard_layout
except AttributeError:
return False
file_length: 73,696 | avg_line_length: 34.844844 | max_line_length: 115 | extension_type: py
repo: chainer | file: chainer-master/chainer/function.py
from __future__ import absolute_import
import warnings
import weakref
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import configuration
# for backward compatibility
from chainer.function_hook import FunctionHook # NOQA
from chainer import function_node
from chainer import variable
import chainerx
class _BackpropModeContext(object):
# Combines multiple contexts.
# A single context object cannot be nested.
def __init__(self, contexts):
self.contexts = contexts
def __enter__(self):
for c in self.contexts:
c.__enter__()
def __exit__(self, typ, value, traceback):
for c in reversed(self.contexts):
c.__exit__(typ, value, traceback)
def no_backprop_mode():
"""Make a context manager which disables back-propagation.
In this context, Chainer does not make a computational graph. It has the
benefit of reducing memory consumption. However, a
:class:`~chainer.Variable` created in this context does not hold a
reference to the :class:`~chainer.FunctionNode` that created itself so no
gradients are accumulated by :func:`~chainer.Variable.backward`.
In the following example, ``y`` is created in this context, which means
that calling :func:`~chainer.Variable.backward` on ``y`` has no effect on
the gradients of ``x``.
>>> x = chainer.Variable(np.array([1,], np.float32))
>>> with chainer.no_backprop_mode():
... y = x + 1
>>> y.backward()
>>> x.grad is None
True
.. note::
``chainer.no_backprop_mode()`` implicitly applies ChainerX's
counterpart :func:`chainerx.no_backprop_mode()`, but not vice versa.
Also, setting ``enable_backprop`` :ref:`configuration <configuration>`
does not affect ChainerX.
.. seealso::
See :func:`chainer.force_backprop_mode` for details on how to override
this context.
"""
c = configuration.using_config('enable_backprop', False)
if chainerx.is_available():
return _BackpropModeContext((c, chainerx.no_backprop_mode()))
return _BackpropModeContext((c,))
def force_backprop_mode():
"""Make a context manager which enables back-propagation.
When you want to enable back-propagation in :func:`no_backprop_mode`, call
this method. A :class:`~chainer.Variable` created in this context always
has a computational graph unless overridden by deeper contexts. If you call
this method outside of :func:`no_backprop_mode` context, it changes
nothing.
In the following example, ``y`` has a computational graph and calling
:func:`~chainer.Variable.backward` on ``y`` will compute and accumulate the
gradients of the variables in the graph, in this case only ``x``.
>>> x = chainer.Variable(np.array([1,], np.float32))
>>> with chainer.no_backprop_mode():
... with chainer.force_backprop_mode():
... y = x + 1
>>> y.backward()
>>> x.grad
array([1.], dtype=float32)
.. note::
``chainer.force_backprop_mode()`` implicitly applies ChainerX's
counterpart :func:`chainerx.force_backprop_mode()`, but not vice versa.
Also, setting ``enable_backprop`` :ref:`configuration <configuration>`
does not affect ChainerX.
.. seealso::
See :func:`chainer.no_backprop_mode` for details on disabled
back-propagation mode.
"""
c = configuration.using_config('enable_backprop', True)
if chainerx.is_available():
return _BackpropModeContext((c, chainerx.force_backprop_mode()))
return _BackpropModeContext((c,))
class FunctionAdapter(function_node.FunctionNode):
"""Adapter class to wrap Function with FunctionNode.
While :class:`~chainer.FunctionNode` provides the interface
of new-style differentiable functions, the old-style
:class:`~chainer.Function` can still be used for the backward
compatibility.
This class provides an adapter between these interfaces; it adds the
:class:`~chainer.FunctionNode` interface to any
:class:`~chainer.Function` object by delegation.
.. note::
The ownership of :class:`FunctionAdapter` and :class:`~chainer.Function`
is a bit tricky.
At the initialization, :class:`FunctionAdapter` is owned by the
:class:`~chainer.Function` object.
Once the function is applied to variables, the ownership is reversed;
the adapter becomes the owner of the
:class:`~chainer.Function` object and the :class:`~chainer.Function`
object changes the reference to a weak one.
Args:
function (~chainer.Function): The function object to wrap.
.. versionadded:: 3.0.0
"""
_function = None # type: Function
_weak_function = None # type: weakref.ReferenceType[Function]
def __init__(self, function: 'Function') -> None:
super(FunctionAdapter, self).__init__()
self._weak_function = weakref.ref(function)
function._owned_node = self
@property
def function(self):
"""The :class:`Function` object that this adapter is wrapping."""
func = self._function
if func is not None:
return func
weak_func = self._weak_function
return weak_func and weak_func()
@property
def label(self):
return self._function.label
@property
def _impl_name(self):
return self._function.__class__.__name__
def check_type_forward(self, in_types):
self._function.check_type_forward(in_types)
def forward(self, inputs):
# Retain all inputs by default in old-style functions.
self.retain_inputs(six.moves.range(len(inputs)))
if self._is_chainerx_fallback_mode:
with function_node._chainerx_attribute_fallback(
self._function, self.chainerx_device):
return self._function.forward(inputs)
else:
return self._function.forward(inputs)
def backward(self, target_input_indexes, grad_outputs):
retained_inputs = self.get_retained_inputs()
inputs = [None] * len(self.inputs)
in_data = [None] * len(self.inputs)
for retained, i_in in six.moves.zip(
retained_inputs, self._input_indexes_to_retain):
inputs[i_in] = retained
in_data[i_in] = None if retained is None else retained.array
in_data = tuple(in_data)
grad_out_data = tuple([None if grad is None else grad.array
for grad in grad_outputs])
is_chainerx_fallback_mode = self._is_chainerx_fallback_mode
if is_chainerx_fallback_mode:
# Convert input and output gradients to numpy/cupy
in_data = backend.from_chx(in_data)
grad_out_data = backend.from_chx(grad_out_data)
# Call Function.backward
with chainer.using_device(
backend.get_device_from_array(*(in_data + grad_out_data))):
if is_chainerx_fallback_mode:
# Enable attribute fallback
with function_node._chainerx_attribute_fallback(
self._function, self.chainerx_device):
gxs = self._function.backward(in_data, grad_out_data)
else:
gxs = self._function.backward(in_data, grad_out_data)
# Check gradients
for x, gx in six.moves.zip(self.inputs, gxs):
if gx is not None:
variable._check_grad_type(self, x, True, gx)
# Convert input gradients back to ChainerX
if is_chainerx_fallback_mode:
gxs = backend.to_chx(gxs)
ret = []
for i in target_input_indexes:
if gxs[i] is None:
g = None
else:
# Intentionally not passing requires_grad=False so that
# backprop routines can raise an error when a further backprop
# is attempted against this gradient variable.
g = variable.Variable(gxs[i])
if g.xp is not chainerx:
g.node._old_style_grad_generator = self._function.label
ret.append(g)
return tuple(ret)
class Function(object):
"""Old-style interface of a differentiable function.
This class provides an interface to implement an old-style differentiable
function (i.e., the function application is recorded to the computational
graph). The subclass of :class:`Function` that implement :meth:`forward`
and :meth:`backward` can be used to run the forward computation and
automatically induce the backpropagation procedure.
There is another way to implement such a function: subclassing
:class:`~chainer.FunctionNode`. There are mainly two
differences between them.
1. The *differentiable backprop* is available for
:class:`~chainer.FunctionNode`,
while it is not for :class:`Function` because the :meth:`backward`
of the latter directly operates on the arrays instead of
:class:`Variable` objects so that it cannot record the history of
the computation.
2. The information passed to :meth:`backward` is different. In
:class:`~chainer.FunctionNode`,
which inputs the function node has to compute
the gradients w.r.t. is passed so that it can omit unnecessary
computations, while :class:`Function` always has to compute gradients
w.r.t. all the input nodes.
The :class:`~chainer.FunctionNode` also accepts the
current gradient values of the input nodes so that the accumulation
work can be merged with the gradient computation if an efficient kernel
is available.
This class uses :class:`~chainer.FunctionAdapter` to convert
the interface to that of :class:`~chainer.FunctionNode` and
adds the :class:`~chainer.FunctionNode` object to the
computational graph.
See :class:`~chainer.FunctionNode` for the details of
building the computational graph in Chainer.
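As an illustrative sketch (not part of the library; ``np`` is assumed
to be NumPy), a squaring function could be written in the old style
as follows:
>>> class Square(chainer.Function):
...     def forward_cpu(self, inputs):
...         x, = inputs
...         return x * x,
...     def backward_cpu(self, inputs, grad_outputs):
...         x, = inputs
...         gy, = grad_outputs
...         return 2 * x * gy,
>>> x = chainer.Variable(np.array(3.0, np.float32))
>>> y = Square()(x)
>>> y.backward()
>>> x.grad
array(6., dtype=float32)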
"""
_node = None
_owned_node = None
def __call__(self, *inputs):
"""Applies forward propagation with chaining backward references.
This method creates a new :class:`~chainer.FunctionAdapter`
object and runs the forward propagation using it.
See :class:`~chainer.FunctionNode` for the detailed
behavior of building the computational graph.
Args:
inputs: Tuple of input :class:`Variable` or :ref:`ndarray` objects.
If the input is :ref:`ndarray`, it is automatically wrapped
with :class:`Variable`.
Returns:
One :class:`Variable` object or a tuple of multiple
:class:`Variable` objects.
"""
node = self.node
# Swap the ownership
node._function = self
node._weak_function = None
self._node = weakref.ref(node)
self._owned_node = None
ret = node.apply(inputs)
if len(ret) == 1:
return ret[0]
else:
return tuple(ret)
@property
def inputs(self):
"""The input nodes of the function."""
return self.node.inputs
@property
def outputs(self):
"""Weak references to the output nodes of the function."""
return self.node.outputs
@property
def node(self):
"""The :class:`FunctionAdapter` object that wraps this Function.
If the Function does not have a node object, this property
automatically creates a new one.
"""
noderef = self._node
nd = (noderef and noderef()) or self._owned_node
if nd is not None:
return nd
nd = FunctionAdapter(self)
self._owned_node = nd
return nd
@property
def local_function_hooks(self):
"""Ordered Dictionary of registered function hooks.
See :attr:`FunctionNode.local_function_hooks` for the detail.
"""
return self.node.local_function_hooks
@property
def label(self):
"""Short text that represents the function.
The default implementation returns its type name.
Each function should override it to give more information.
"""
return self.__class__.__name__
@property
def output_data(self):
"""A tuple of the retained output arrays.
It has the same length as the :attr:`outputs`. Elements that are not
retained are set to ``None``.
"""
if self.node._is_chainerx_fallback_mode:
return backend.from_chx(self.node.output_data)
return self.node.output_data
@property
def rank(self):
"""The topological ordinal of the corresponding function node."""
return self.node.rank
@property
def stack(self):
return self.node.stack
def check_type_forward(self, in_types):
"""Checks types of input data before forward propagation.
Before :meth:`forward` is called, this function is called.
You need to validate types of input data in this function
using :ref:`the type checking utilities <type-check-utils>`.
Args:
in_types (~chainer.utils.type_check.TypeInfoTuple): The type
information of input data for :meth:`forward`.
"""
pass
def forward(self, inputs):
"""Applies forward propagation to input arrays.
It delegates the procedure to :meth:`forward_cpu` or
:meth:`forward_gpu` by default. Which it selects is determined by the
type of input arrays.
Implementations of :class:`Function` must implement either CPU/GPU
methods or this method.
Args:
inputs: Tuple of input array(s).
Returns:
Tuple of output array(s).
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
if any(isinstance(x, cuda.ndarray) for x in inputs):
return self.forward_gpu(inputs)
else:
return self.forward_cpu(inputs)
def forward_cpu(self, inputs):
"""Applies forward propagation to input arrays on CPU.
Args:
inputs: Tuple of :class:`numpy.ndarray` object(s).
Returns:
tuple: Tuple of :class:`numpy.ndarray` object(s).
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
raise NotImplementedError()
def forward_gpu(self, inputs):
"""Applies forward propagation to input arrays on GPU.
Args:
inputs: Tuple of :class:`cupy.ndarray` object(s).
Returns:
tuple: Tuple of :class:`cupy.ndarray` object(s).
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
raise NotImplementedError()
def backward(self, inputs, grad_outputs):
"""Applies backprop to output gradient arrays.
It delegates the procedure to :meth:`backward_cpu` or
:meth:`backward_gpu` by default. Which it selects is determined by the
type of input arrays and output gradient arrays. Implementations of
:class:`Function` must implement either CPU/GPU methods or this method,
if the function is intended to be backprop-ed.
Args:
inputs: Tuple of input arrays.
grad_outputs: Tuple of output gradient arrays.
Returns:
tuple: Tuple of input gradient arrays. Some or all of them can be
``None``, if the function is not differentiable on
inputs.
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
if any(isinstance(x, cuda.ndarray) for x in inputs + grad_outputs):
return self.backward_gpu(inputs, grad_outputs)
else:
return self.backward_cpu(inputs, grad_outputs)
def backward_cpu(self, inputs, grad_outputs):
"""Applies backprop to output gradient arrays on CPU.
Args:
inputs: Tuple of input :class:`numpy.ndarray` object(s).
grad_outputs: Tuple of output gradient :class:`numpy.ndarray`
object(s).
Returns:
tuple: Tuple of input gradient :class:`numpy.ndarray` object(s).
Some or all of them can be ``None``, if the function is not
differentiable on corresponding inputs.
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
return tuple(None for _ in inputs)
def backward_gpu(self, inputs, grad_outputs):
"""Applies backprop to output gradient arrays on GPU.
Args:
inputs: Tuple of input :class:`cupy.ndarray`
object(s).
grad_outputs: Tuple of output gradient
:class:`cupy.ndarray` object(s).
Returns:
tuple: Tuple of input gradient :class:`cupy.ndarray`
object(s). Some or all of them can be ``None``, if the function is
not differentiable on corresponding inputs.
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
return tuple(None for _ in inputs)
def unchain(self):
"""Purges in/out nodes and this function itself from the graph.
See :meth:`FunctionNode.unchain() <chainer.FunctionNode.unchain>`
for the detail.
"""
self.node.unchain()
def add_hook(self, hook, name=None):
"""Registers a function hook.
See :meth:`FunctionNode.add_hook` for the detail.
Args:
hook(~chainer.FunctionHook):
Function hook to be registered.
name(str): Name of the function hook.
name must be unique among function hooks
registered to the function. If ``None``,
default name of the function hook is used.
"""
self.node.add_hook(hook, name)
def delete_hook(self, name):
"""Unregisters the specified function hook.
Args:
name(str): the name of the function hook
to be unregistered.
"""
self.node.delete_hook(name)
def retain_inputs(self, indexes):
"""Lets specified input variable nodes keep data arrays.
By calling this method from :meth:`forward`, the function can specify
which inputs are required for backprop.
If this method is not called, the function keeps all input arrays. If
you want to release all input arrays, call this method by passing an
empty sequence. *Note that this behavior is different from that of*
:meth:`FunctionNode.retain_inputs() \
<chainer.FunctionNode.retain_inputs>`.
Note that **this method must not be called from the outside of**
:meth:`forward`.
Args:
indexes (iterable of int): Indexes of input variables that the
function will require for backprop.
"""
self.node.retain_inputs(indexes)
def retain_outputs(self, indexes, retain_after_backward=False):
"""Lets specified output variable nodes keep data arrays.
By calling this method from :meth:`forward`, the function can specify
which outputs are required for backprop. If this method is not called,
no output variables are marked to keep the data array at the point
of returning from :meth:`__call__`. The retained arrays are stored to
:attr:`output_data`.
.. note::
It is STRONGLY RECOMMENDED that you use this method if the function
requires some or all output arrays in backprop. The function can
also use output arrays just by keeping references to them directly,
whereas it might influence the performance of later function
applications to the output variables.
Note that **this method must not be called from the outside of**
:meth:`forward`.
Args:
indexes (iterable of int): Indexes of output variables that the
function will require for backprop.
retain_after_backward (bool): This option has no effect. It is
left only for the backward compatibility.
"""
if retain_after_backward:
warnings.warn('retain_after_backward option has no effect',
DeprecationWarning)
self.node.retain_outputs(indexes)
file_length: 21,115 | avg_line_length: 33.390879 | max_line_length: 79 | extension_type: py
repo: chainer | file: chainer-master/chainer/backend.py
import numpy
import six
import chainer
from chainer.backends import _chainerx
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer.backends import intel64
import chainerx
# Aliases
from chainer._backend import Device
from chainer.backends._chainerx import ChainerxDevice
from chainer.backends._chainerx import from_chx # NOQA
from chainer.backends._chainerx import to_chx # NOQA
from chainer.backends._cpu import CpuDevice
from chainer.backends.cuda import GpuDevice
from chainer.backends.intel64 import Intel64Device
from chainer import types # NOQA
def _contains_nan(x):
"""Returns whether the input array has NaN values.
Args:
x (numpy.ndarray or cupy.ndarray): Array to be checked.
Returns:
bool: True if the input has NaN values.
"""
if x.dtype.kind in ('f', 'c'):
device = get_device_from_array(x)
with chainer.using_device(device):
return device.xp.isnan(x).any()
else:
return False
def copyto(dst, src):
"""Copies the elements of an ndarray to those of another one.
This function can copy the CPU/GPU arrays to the destination arrays on
another device.
Args:
dst (:class:`numpy.ndarray`, :class:`cupy.ndarray`, \
:class:`ideep4py.mdarray` or :class:`chainerx.ndarray`):
Destination array.
src (:class:`numpy.ndarray`, :class:`cupy.ndarray`, \
:class:`ideep4py.mdarray` or :class:`chainerx.ndarray`):
Source array.
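A minimal sketch (``np`` is assumed to be NumPy):
>>> dst = np.zeros((3,), np.float32)
>>> chainer.backend.copyto(dst, np.ones((3,), np.float32))
>>> dst
array([1., 1., 1.], dtype=float32)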
"""
if isinstance(dst, chainerx.ndarray):
dst[...] = _chainerx._array_to_chainerx(src, dst.device)
return
if isinstance(src, chainerx.ndarray):
src = from_chx(src)
if isinstance(dst, numpy.ndarray):
numpy.copyto(dst, _cpu._to_cpu(src))
elif isinstance(dst, intel64.mdarray):
intel64.ideep.basic_copyto(
dst, _cpu._to_cpu(src))
elif isinstance(dst, cuda.ndarray):
if isinstance(src, chainer.get_cpu_array_types()):
src = numpy.asarray(src)
if (src.dtype == dst.dtype
and (dst.flags.c_contiguous or dst.flags.f_contiguous)):
dst.set(src)
else:
cuda.cupy.copyto(dst, cuda.to_gpu(src, device=dst.device))
elif isinstance(src, cuda.ndarray):
cuda.cupy.copyto(dst, src)
else:
raise TypeError('cannot copy from non-array object of type {}'
.format(type(src)))
else:
raise TypeError('cannot copy to non-array object of type {}'.format(
type(dst)))
def _guess_device_from_array_module(xp):
"""Returns a plausible device from array module
.. warning::
There can be multiple devices for a module
"""
if xp is cuda.cupy:
return cuda.GpuDevice(cuda.Device())
elif xp is chainerx:
return _chainerx.ChainerxDevice(chainerx.get_default_device())
else:
# Cannot detect intel64, because xp of intel64 is numpy.
return _cpu.CpuDevice()
def get_device(device_spec: types.DeviceSpec) -> Device:
"""Returns a device object.
Args:
device_spec (object): Device specifier.
If a :class:`chainer.backend.Device` instance is given, it is
returned intact. Otherwise the following values are supported:
* ChainerX devices
* A string representing a device.
(ex. ``'native:0'``, ``'native'``)
* A :class:`chainerx.Device` object.
* CuPy
* A string starts with ``'@cupy:'``.
(ex. ``'@cupy:0'``)
* A :class:`cupy.cuda.Device` object.
* NumPy
* The string ``'@numpy'``.
* NumPy with Intel Architecture
* The string ``'@intel64'``.
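A minimal sketch (``np`` is assumed to be NumPy):
>>> device = chainer.get_device('@numpy')
>>> device.xp is np
True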
"""
if isinstance(device_spec, Device):
return device_spec
if isinstance(device_spec, cuda._integer_types):
return _get_device_cupy_or_numpy(device_spec)
if chainerx.is_available() and isinstance(device_spec, chainerx.Device):
return _chainerx.ChainerxDevice(device_spec)
if cuda.available and isinstance(device_spec, cuda.Device):
return cuda.GpuDevice(device_spec)
if isinstance(device_spec, six.string_types):
# '-1', '0', '1', ...
try:
int_device_spec = int(device_spec)
except ValueError:
pass
else:
return _get_device_cupy_or_numpy(int_device_spec)
if device_spec.startswith('@'):
# '@module:...'
mod_name, colon, precise_spec = device_spec[1:].partition(':')
if mod_name == 'numpy':
if not colon:
return _cpu.CpuDevice()
elif mod_name == 'cupy':
if colon:
return cuda.GpuDevice.from_device_id(int(precise_spec))
elif mod_name == 'intel64':
if not colon:
return intel64.Intel64Device()
raise ValueError(
'Device specifiers starting with \'@\' must be followed by'
' a module name and depending on the module, module specific'
' precise device specifiers. Actual: {}'.format(device_spec))
else:
# String device specifier without '@' prefix is assumed to be a
# ChainerX device.
if not chainerx.is_available():
raise RuntimeError(
'Tried to parse ChainerX device specifier \'{}\', '
'but ChainerX is not available. '
'Note that device specifiers without \'@\' prefix are '
'assumed to be ChainerX device '
'specifiers.'.format(device_spec))
return _chainerx.ChainerxDevice(chainerx.get_device(device_spec))
raise TypeError(
'Device specifier must be a backend.Device, cuda.Device,'
' chainerx.Device, integer or a string. Actual: {}'.format(
type(device_spec)))
def _get_device_cupy_or_numpy(device_spec):
# legacy spec of (gpu) device
if device_spec >= 0:
return cuda.GpuDevice.from_device_id(device_spec)
else:
return _cpu.CpuDevice()
def using_device(device_spec):
"""Context manager to apply the thread-local device state.
Args:
device_spec (object): Device specifier. See :func:`chainer.get_device`
for details.
.. admonition:: Example
.. testcode::
:skipif: doctest_helper.skipif_not_enough_cuda_devices(2)
with chainer.using_device('@cupy:1'):
a = cupy.empty((3, 2))
assert a.device.id == 1
"""
# TODO(niboshi): Set default device (once this concept is introduced in
# Chainer).
device = get_device(device_spec)
return device.create_context()
def get_array_module(*args):
"""Gets an appropriate NumPy-compatible module to process arguments
This function will return their data arrays' array module for
:class:`~chainer.Variable` arguments.
Args:
args: Values to determine whether NumPy, CuPy, or ChainerX should be
used.
Returns:
module: :mod:`numpy`, :mod:`cupy`, or :mod:`chainerx` is returned based
on the types of the arguments.
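A minimal sketch (``np`` is assumed to be NumPy):
>>> chainer.backend.get_array_module(np.zeros(3)) is np
True
>>> v = chainer.Variable(np.zeros(3, np.float32))
>>> chainer.backend.get_array_module(v) is np
True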
"""
is_chainerx_available = chainerx.is_available()
if is_chainerx_available or cuda.available:
arrays = []
for arg in args:
# Unwrap arrays
if isinstance(arg, chainer.variable.Variable):
array = arg.data
else:
array = arg
if is_chainerx_available and isinstance(array, chainerx.ndarray):
return chainerx
arrays.append(array)
if cuda.available:
return cuda.cupy.get_array_module(*arrays)
return numpy
def get_device_from_array(*arrays):
"""Gets the device from arrays.
The device on which the given array resides is returned.
.. note::
Unlike :func:`get_array_module`, this method does not recognize
:class:`~chainer.Variable` objects.
If you need to get the device from the :class:`~chainer.Variable` instance
``v``, you need to use ``get_device_from_array(v.array)``.
Args:
arrays (array or list of arrays):
Arrays to determine the device. If multiple arrays are given, the
device correspoinding to the first array which is not NumPy array
is returned.
Returns:
chainer.backend.Device: Device instance.
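A minimal sketch (``np`` is assumed to be NumPy):
>>> device = chainer.backend.get_device_from_array(np.zeros(3))
>>> device.xp is np
True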
"""
for array in arrays:
device = GpuDevice.from_array(array)
if device is not None:
return device
if isinstance(array, chainerx.ndarray):
return ChainerxDevice(array.device)
device = Intel64Device.from_array(array)
if device is not None:
return device
return CpuDevice()
file_length: 9,016 | avg_line_length: 30.75 | max_line_length: 79 | extension_type: py
repo: chainer | file: chainer-master/chainer/_runtime_info.py
import platform
import sys
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer.backends import intel64
import chainerx
class _RuntimeInfo(object):
chainer_version = None
numpy_version = None
cuda_info = None
ideep_version = None
def __init__(self):
self.chainer_version = chainer.__version__
self.chainerx_available = chainerx.is_available()
self.numpy_version = numpy.__version__
self.platform_version = platform.platform()
if cuda.available:
self.cuda_info = cuda.cupyx.get_runtime_info()
else:
self.cuda_info = None
if intel64.is_ideep_available():
self.ideep_version = intel64.ideep.__version__
else:
self.ideep_version = None
def __str__(self):
s = six.StringIO()
s.write('''Platform: {}\n'''.format(self.platform_version))
s.write('''Chainer: {}\n'''.format(self.chainer_version))
s.write('''ChainerX: {}\n'''.format(
'Available' if self.chainerx_available else 'Not Available'))
s.write('''NumPy: {}\n'''.format(self.numpy_version))
if self.cuda_info is None:
s.write('''CuPy: Not Available\n''')
else:
s.write('''CuPy:\n''')
for line in str(self.cuda_info).splitlines():
s.write(''' {}\n'''.format(line))
if self.ideep_version is None:
s.write('''iDeep: Not Available\n''')
else:
s.write('''iDeep: {}\n'''.format(self.ideep_version))
return s.getvalue()
def _get_runtime_info():
return _RuntimeInfo()
def print_runtime_info(out=None):
"""Shows Chainer runtime information.
Runtime information includes:
- OS platform
- Chainer version
- ChainerX version
- NumPy version
- CuPy version
- CUDA information
- cuDNN information
- NCCL information
- iDeep version
Args:
out: Output destination.
If it is ``None``, runtime information
will be shown in ``sys.stdout``.
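A minimal sketch (the exact output depends on the environment):
>>> import io
>>> buf = io.StringIO()
>>> chainer.print_runtime_info(buf)
>>> buf.getvalue().startswith('Platform:')
True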
"""
if out is None:
out = sys.stdout
out.write(str(_get_runtime_info()))
if hasattr(out, 'flush'):
out.flush()
file_length: 2,263 | avg_line_length: 24.155556 | max_line_length: 73 | extension_type: py
repo: chainer | file: chainer-master/chainer/function_node.py
import collections
import contextlib
import heapq
import inspect
import traceback
import weakref
import six
import chainer
from chainer import _backprop_utils
from chainer import backend
from chainer.backends import cuda
from chainer import configuration
from chainer import function_hook
from chainer.graph_optimizations.static_graph_utilities \
import static_forward_optimizations
from chainer import utils
from chainer.utils import type_check
from chainer import variable
import chainerx
def _to_variable_with_chainerx_fallback_array(
chainerx_device, chainerx_array, fallback_array):
# chainerx_array can be None.
assert (
chainerx_array is None
or chainerx_array.device == chainerx_device.device)
var = variable.Variable._init_unchecked(
chainerx_array,
device=chainerx_device,
requires_grad=(
False if chainerx_array is None
else chainerx_array.is_backprop_required()))
var._chainerx_fallback_array = fallback_array
return var
class FunctionNode(object):
"""Function node of the computational graph.
FunctionNode is a class representing a node in a computational graph. The
node corresponds to an application of a differentiable function to input
variables.
When a differentiable function is applied to :class:`~chainer.Variable`
objects,
it creates an instance of FunctionNode implementation and calls its
:meth:`apply` method. The :meth:`apply` method basically does the following
three things.
1. Adding an edge from the function node to the variable node corresponding
to each input. The node of each input is extracted by
:attr:`Variable.node <chainer.Variable.node>`.
2. Computing the output arrays of the function.
3. Creating a :class:`~chainer.Variable` object for each output array and
adding an edge from the node of the variable to the function node.
The output variables are then returned.
.. admonition:: Example
Let ``x`` be an instance of :class:`~chainer.Variable` and ``f`` be an
instance of :class:`FunctionNode` taking only one argument.
Then the following code
>>> import numpy, chainer
>>> x = chainer.Variable(numpy.zeros(10))
>>> f = chainer.functions.math.identity.Identity()
>>> y = f.apply((x,))[0]
computes a new variable ``y`` and creates backward references. The
backward references are actually set as per the following diagram::
x.node <--- f <--- y.node
If an application of another function ``g`` occurs as
>>> g = chainer.functions.math.identity.Identity()
>>> z = g.apply((x,))[0]
then the graph grows with a branch::
|--- f <--- y.node
x.node <-+
|--- g <--- z.node
Note that the branching is correctly managed on backward computation,
i.e. the gradients from ``f`` and ``g`` are accumulated to the gradient
of ``x``.
Every function-node implementation should provide :meth:`forward` and
:meth:`backward`. Instead of overriding :meth:`forward`, one can also
implement :meth:`forward_cpu` and :meth:`forward_gpu` when the
implementations for CPU and GPU arrays are totally different.
Note that the input and output variables are inaccessible from
:meth:`backward` by default. If it needs access to these variables, the
:meth:`forward` method (or its CPU/GPU variants) has to call
:meth:`retain_inputs` and :meth:`retain_outputs` appropriately. The
retained input/output variables can be accessed from :meth:`backward` by
calling :meth:`get_retained_inputs` and :meth:`get_retained_outputs`.
.. note::
There are two types of differentiable functions in Chainer (since v3).
The first type is of a function using a subclass of
:class:`~chainer.Function`,
which is called *old-style differentiable function*. The second type is
of a function using a subclass of :class:`FunctionNode`, which is called
**new-style differentiable function**. There are several advantages on
using the new-style differentiable function.
- The new-style differentiable function supports *differentiable
backpropagation*. The backpropagated gradients computed through the
new-style differentiable functions themselves support further
backpropagations so that the automatic higher-order differentiation is
available.
- The backpropagation of the new-style differentiable function can be
more computationally efficient because the interface allows an
implementation to omit the computation of unneeded input gradients.
        Note that the new-style differentiable function is the standard way
        of defining a function node of the computational graph in Chainer;
        old-style differentiable functions are implemented as wrappers of
        the new-style differentiable functions.
Attributes:
~FunctionNode.inputs: A tuple of the input
:class:`~chainer.variable.VariableNode` objects.
~FunctionNode.outputs: A tuple of weak references to the output
:class:`~chainer.variable.VariableNode` objects.
~FunctionNode.rank (int): An ordinal following the topological order
of the computational graph.
~FunctionNode.stack: Stack trace retrieved at the forward computation.
The stack trace is available only in the debug mode.
.. versionadded:: 3.0.0
"""
inputs = None
outputs = None
_input_layouts = None
_output_layouts = None
_output_count = None
rank = 0
stack = None
_input_indexes_to_retain = None
_output_indexes_to_retain = None
_retained_output_data = None
_local_function_hooks = None
_supports_static_optimizations = False
# True if the function node is operating on ChainerX arrays and it falls
# back to NumPy/CuPy implementation.
_is_chainerx_fallback_mode = False
# chainerx.Device instance if _is_chainerx_fallback_mode == True
chainerx_device = None
_chainerx_retained_inputs = None
_chainerx_retained_outputs = None
lazy_grad_sum = False
is_elementwise = False
@property
def local_function_hooks(self):
"""Ordered dictionary of registered function hooks.
        Contrary to ``chainer.thread_local.function_hooks``,
        which registers its elements to all functions,
        the function hooks in this property are specific to this function.
"""
if self._local_function_hooks is None:
self._local_function_hooks = collections.OrderedDict()
return self._local_function_hooks
@property
def _n_local_function_hooks(self):
return (0 if self._local_function_hooks is None
else len(self._local_function_hooks))
@property
def label(self):
"""Short text that represents the function.
The default implementation returns its type name.
Each function should override it to give more information.
"""
return self.__class__.__name__
@property
def output_data(self):
"""A tuple of the retained output arrays.
This property is mainly used by :class:`Function`. Users basically do
not have to use this property; use :meth:`get_retained_outputs`
instead.
"""
if self._is_chainerx_fallback_mode:
retained_output_data = [
None if var is None
else var.array
for var in self._chainerx_retained_outputs]
else:
if self._retained_output_data is None:
raise RuntimeError('retained output data is gone')
retained_output_data = self._retained_output_data
out_data = [None] * self._output_count
for index, data in six.moves.zip(self._output_indexes_to_retain,
retained_output_data):
out_data[index] = data
return tuple(out_data)
@property
def _impl_name(self):
return self.__class__.__name__
def __call__(self, *args, **kwargs):
if self.__class__.__module__.startswith('chainer.'):
msg = '''\
Chainer's built-in function class object ({}) which is derived from \
chainer.FunctionNode has been called as if it were a callable. \
Use FunctionNode.apply() method instead.
Furthermore, it's not recommended that you use built-in function classes \
directly; use corresponding function aliases (those with snake_case name, \
such as F.convolution_nd) instead.\
'''.format(self.__class__.__name__)
else:
msg = '''\
A function class object ({}) which is derived from \
chainer.FunctionNode has been called as if it were a callable. \
Use apply() method instead.\
'''.format(self.__class__.__name__)
raise RuntimeError(msg)
def apply(self, inputs):
"""Computes output variables and grows the computational graph.
Basic behavior is expressed in the documentation of
:class:`FunctionNode`.
.. note::
If the :data:`~Variable.data` attributes of the input variables
exist on a GPU device, that device is made current before calling
:meth:`forward`, so implementers do not need to take care of device
selection in most cases.
Args:
inputs: Tuple of input variables. Each element can be either
:class:`~chainer.Variable` or :ref:`ndarray`. If the element
is an ndarray, it is automatically wrapped with
:class:`~chainer.Variable`.
Returns:
A tuple of output :class:`~chainer.Variable` objects.
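        .. admonition:: Example
            Bare arrays passed to :meth:`apply` are wrapped into variables
            automatically (``Identity`` is used here only for
            illustration):
            >>> import numpy, chainer
            >>> f = chainer.functions.math.identity.Identity()
            >>> x = numpy.ones((2, 3), numpy.float32)
            >>> y = f.apply((x,))[0]
            >>> isinstance(y, chainer.Variable)
            True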
"""
chainerx_in_data = None
chainerx_device = None
is_chainerx, in_data = _extract_apply_in_data(inputs)
utils._check_arrays_forward_compatible(in_data, self.label)
if is_chainerx:
# Try ChainerX C++ implementation.
# If it's supported, the output arrays are wrapped with Variables
# and returned.
# If not supported, FunctionNode.forward_chainerx should return
# Fallback.
# In that case the input arrays are converted to numpy.ndarray
# or cupy.ndarray (depending on the ChainerX backend) and
# forward computation falls back to the conventional
            # FunctionNode.forward() implementation.
outputs = self.forward_chainerx(in_data)
if outputs is not chainer.Fallback:
# Supported. Wrap with variables and return
assert isinstance(outputs, tuple)
return tuple([
variable.Variable._init_unchecked(
y, requires_grad=y.is_backprop_required(),
is_chainerx_array=True)
for y in outputs])
# Fall back to FunctionNode.forward()
chainerx_in_data, in_data, chainerx_device = (
self._chainerx_apply_fallback_preprocess(in_data, inputs))
self._is_chainerx_fallback_mode = True
self.chainerx_device = chainerx_device
is_debug = chainer.is_debug()
if is_debug:
# Keep stack trace for debug
self.stack = traceback.extract_stack()
input_vars = [chainer.as_variable(x) for x in inputs]
self._input_layouts = tuple([x.layout for x in input_vars])
if configuration.config.type_check:
self._check_data_type_forward(in_data)
self.check_layout_forward(input_vars)
# Call preprocess hooks
hooks = chainer.get_function_hooks()
if self._n_local_function_hooks > 0:
hooks = collections.OrderedDict(hooks)
hooks.update(self.local_function_hooks)
hooks = hooks.values() # avoid six for performance
for hook in hooks:
hook.forward_preprocess(self, in_data)
# Forward propagation
with chainer.using_device(backend.get_device_from_array(*in_data)):
self._input_indexes_to_retain = None
self._output_indexes_to_retain = None
if chainer.config.schedule_func is not None:
outputs = static_forward_optimizations(self, in_data)
elif self._is_chainerx_fallback_mode:
# In ChainerX fallback, __class__ is temporarily replaced with
                # the fabricated one with automatic attribute fallback.
with _chainerx_attribute_fallback(self, chainerx_device):
outputs = self.forward(in_data)
else:
# In normal case, simply run the forward method.
outputs = self.forward(in_data)
# Check for output array types
if not isinstance(outputs, tuple):
raise TypeError(
'forward output must be a tuple ({})\n'
'Actual: {}'.format(self.label, type(outputs)))
if self.is_elementwise:
if not all([y.shape == outputs[0].shape for y in outputs]):
raise RuntimeError(
'An elementwise function returned outputs with '
'different shapes.\n'
'Function: {}\n'
'Input shapes: {}\n'
'Output shapes: {}'.format(
self.label,
', '.join(repr(x.shape) for x in outputs),
', '.join(repr(y.shape) for y in outputs),
))
if not chainer.is_arrays_compatible(outputs):
if not all(
isinstance(y, chainer.get_array_types())
for y in outputs):
raise TypeError(
'forward output must be a tuple of ndarrays.\n'
'Function: {}\n'
'Actual output types: {}'
.format(
self.label,
tuple(type(y) for y in outputs)))
raise TypeError(
'incompatible array types are mixed in the forward output '
'({}).\n'
'Actual: {}'.format(
self.label,
', '.join(str(type(x)) for x in outputs)))
        # If output layouts are not specified, assign the default layouts.
if self.is_elementwise:
assert self._output_layouts is None
layout = self._input_layouts[0]
self._output_layouts = (layout,) * len(outputs)
elif self._output_layouts is None:
self._output_layouts = (None,) * len(outputs)
# Call postprocess hooks
for hook in hooks:
hook.forward_postprocess(self, in_data)
# NaN check of output values
if is_debug:
for out in outputs:
if out is not None and chainer.backend._contains_nan(out):
msg = ('NaN is detected on forward computation of '
'{}'.format(self.label))
raise RuntimeError(msg)
self._output_count = len(outputs)
if self._is_chainerx_fallback_mode:
ret = self._chainerx_apply_fallback_postprocess(
chainerx_device,
chainerx_in_data, inputs, outputs)
else:
requires_grad = any([x.requires_grad for x in input_vars])
ret = tuple(
[variable.Variable(
y, requires_grad=requires_grad, layout=layout)
for y, layout in zip(outputs, self.output_layouts)])
if configuration.config.enable_backprop:
# Topological ordering
self.rank = max(
[x.rank for x in input_vars]) if input_vars else 0
# Add backward edges
for y in ret:
y.creator_node = self
self.inputs = tuple([x.node for x in input_vars])
# Add forward edges (must be weak references)
self.outputs = tuple([weakref.ref(y.node) for y in ret])
if self._input_indexes_to_retain is not None:
for index in self._input_indexes_to_retain:
input_vars[index].retain_data()
if self._output_indexes_to_retain is not None:
retained_data = []
for index in self._output_indexes_to_retain:
ret[index].retain_data()
retained_data.append(outputs[index])
self._retained_output_data = tuple(retained_data)
self.lazy_grad_sum = configuration.config.lazy_grad_sum
return ret
def _check_data_type_forward(self, in_data):
in_layouts = self.input_layouts
in_shapes = None
if any([layout is not None for layout in in_layouts]):
in_shapes = tuple([
chainer.memory_layouts._transpose_shape(x.shape, layout, None)
for x, layout in zip(in_data, in_layouts)])
in_type = type_check.get_light_types(in_data, shapes=in_shapes)
try:
with type_check.light_mode:
self.check_type_forward(in_type)
return
except type_check.InvalidType:
# Ignore errors on first run
pass
in_type = type_check.get_types(
in_data, 'in_types', False, shapes=in_shapes)
with type_check.get_function_check_context(self):
self.check_type_forward(in_type)
def check_type_forward(self, in_types):
"""Checks types of input data before forward propagation.
This method is called before :meth:`forward` and validates the types of
input variables using
:ref:`the type checking utilities <type-check-utils>`.
Args:
in_types (~chainer.utils.type_check.TypeInfoTuple): The type
information of input variables for :meth:`forward`.
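        .. admonition:: Example
            A sketch (``MyFunc`` is an illustrative name) that checks for a
            single floating-point input of rank at least two:
            >>> from chainer.utils import type_check
            >>> class MyFunc(chainer.FunctionNode):
            ...     def check_type_forward(self, in_types):
            ...         type_check.expect(in_types.size() == 1)
            ...         x_type = in_types[0]
            ...         type_check.expect(
            ...             x_type.dtype.kind == 'f',
            ...             x_type.ndim >= 2,
            ...         )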
"""
pass
def check_layout_forward(self, inputs):
if self.is_elementwise:
if not all([x.layout == inputs[0].layout for x in inputs]):
raise RuntimeError(
'Inputs with mixed memory layouts were given to '
'an elementwise function.\n'
'Function: {}\n'
'Input layouts: {}\n'.format(
self.label,
', '.join(str(x.layout) for x in inputs),
))
else:
if not all([x.layout is None for x in inputs]):
raise RuntimeError(
'Inputs with non-standard layouts were given to '
'a function without explicit `check_layout_forward` '
'implementation.\n'
'Function: {}\n'
'Input layouts: {}\n'.format(
self.label,
', '.join(str(x.layout) for x in inputs),
))
def _chainerx_apply_fallback_preprocess(self, in_data, inputs):
chainerx_in_data = in_data
in_data = []
device = None
for data, x in six.moves.zip(chainerx_in_data, inputs):
if data is None:
fallback_data = None
else:
# Use the cached fallback arrays as inputs if they exist.
x_is_variable = isinstance(x, variable.Variable)
if x_is_variable and x._chainerx_fallback_array is not None:
fallback_data = x._chainerx_fallback_array
if device is None:
device = x.device
else:
fallback_data = backend.from_chx(data)
if device is None:
device = backend.ChainerxDevice(data.device)
# Update the fallback cache if possible.
if x_is_variable:
x._chainerx_fallback_array = fallback_data
in_data.append(fallback_data)
in_data = tuple(in_data)
return chainerx_in_data, in_data, device
def _chainerx_apply_fallback_postprocess(
self, chainerx_device, chainerx_in_data, inputs, outputs):
# TODO(hvy): Take configuration.config.enable_backprop into
# account?
chainerx_out_data = chainerx_device.send(outputs)
# Insert a ChainerX op-node that calls FunctionNode.backward in
# backprop. Note that chainerx_out_data may not require gradients.
chainerx._core._function_node_forward(
self, chainerx_in_data, chainerx_out_data,
[] if self._input_indexes_to_retain is None
else self._input_indexes_to_retain,
[] if self._output_indexes_to_retain is None
else self._output_indexes_to_retain)
self.inputs = tuple([
None if x is None
else variable._ChainerxVariableNodeProps(x) for x in inputs])
ret = tuple([
_to_variable_with_chainerx_fallback_array(
chainerx_device,
chainerx_out_array, out_array)
for chainerx_out_array, out_array
in six.moves.zip(chainerx_out_data, outputs)])
return ret
def forward_chainerx(self, inputs):
"""Computes the output arrays from the input ChainerX arrays.
        This method may check the input arrays and other attributes to see
        if the computation can be done using the ChainerX implementation.
        If it's not supported, :data:`chainer.Fallback` should be returned
        instead of output arrays. In that case, the computation falls back
        to the conventional Python implementation.
Args:
inputs: Tuple of input array(s).
Returns:
Tuple of output array(s) or :data:`chainer.Fallback`\\ .
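        .. admonition:: Example
            A sketch (``MyReLU`` is an illustrative name) that uses the
            ChainerX implementation only for floating-point inputs and
            otherwise falls back:
            >>> import chainerx
            >>> class MyReLU(chainer.FunctionNode):
            ...     def forward_chainerx(self, inputs):
            ...         x, = inputs
            ...         if x.dtype.kind != 'f':
            ...             return chainer.Fallback
            ...         return chainerx.maximum(x, 0),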
"""
return chainer.Fallback
def forward(self, inputs):
"""Computes the output arrays from the input arrays.
It delegates the procedure to :meth:`forward_cpu` or
:meth:`forward_gpu` by default. Which of them this method selects is
determined by the type of input arrays. Implementations of
:class:`FunctionNode` must implement either CPU/GPU methods or this
method.
Args:
inputs: Tuple of input array(s).
Returns:
Tuple of output array(s).
.. warning::
Implementations of :class:`FunctionNode` must take care that the
return value must be a tuple even if it returns only one array.
"""
assert len(inputs) > 0
if isinstance(inputs[0], cuda.ndarray):
return self.forward_gpu(inputs)
return self.forward_cpu(inputs)
def forward_cpu(self, inputs):
"""Computes the output arrays from the input NumPy arrays.
Args:
inputs: Tuple of input :class:`numpy.ndarray` objects.
Returns:
Tuple of output arrays. Each element can be NumPy or CuPy arrays.
.. warning::
Implementation of :class:`FunctionNode` must take care that the
return value must be a tuple even if it returns only one array.
"""
raise NotImplementedError
def forward_gpu(self, inputs):
"""Computes the output arrays from the input CuPy arrays.
Args:
inputs: Tuple of input :class:`cupy.ndarray` objects.
Returns:
Tuple of output arrays. Each element can be NumPy or CuPy arrays.
.. warning::
Implementation of :class:`FunctionNode` must take care that the
return value must be a tuple even if it returns only one array.
"""
raise NotImplementedError
@property
def input_layouts(self):
assert self._input_layouts is not None
return self._input_layouts
@property
def output_layouts(self):
assert self._output_layouts is not None
return self._output_layouts
@output_layouts.setter
def output_layouts(self, layouts):
assert isinstance(layouts, tuple)
self._output_layouts = layouts
def retain_inputs(self, indexes):
"""Lets specified input variable nodes keep data arrays.
By calling this method from :meth:`forward`, the function node can
specify which inputs are required for backprop. The input variables
with retained arrays can then be obtained by calling
:meth:`get_retained_inputs` from inside :meth:`backward`.
        Unlike :class:`~chainer.Function`, the function node **DOES NOT**
        keep input arrays by default. If you want to keep some or all input
        arrays, do not forget to call this method.
Note that **this method must not be called from the outside of**
:meth:`forward`.
Args:
indexes (iterable of int): Indexes of input variables that the
function will require for backprop.
"""
self._input_indexes_to_retain = indexes
def retain_outputs(self, indexes):
"""Lets specified output variable nodes keep data arrays.
By calling this method from :meth:`forward`, the function node can
specify which outputs are required for backprop. If this method is not
called, no output variables will be marked to keep their data array at
the point of returning from :meth:`apply`. The output variables with
retained arrays can then be obtained by calling
:meth:`get_retained_outputs` from inside :meth:`backward`.
.. note::
It is recommended to use this method if the function requires some
or all output arrays in backprop. The function can also use output
arrays just by keeping references to them directly, although it
might affect the performance of later function applications on the
output variables.
Note that **this method must not be called from the outside of**
:meth:`forward`.
Args:
indexes (iterable of int): Indexes of output variables that the
function will require for backprop.
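        .. admonition:: Example
            A sketch (``Exp`` is an illustrative name) that reuses its
            retained output in backprop instead of recomputing it:
            >>> import numpy, chainer
            >>> class Exp(chainer.FunctionNode):
            ...     def forward(self, inputs):
            ...         x, = inputs
            ...         self.retain_outputs((0,))
            ...         return numpy.exp(x),
            ...     def backward(self, target_input_indexes, grad_outputs):
            ...         y, = self.get_retained_outputs()
            ...         gy, = grad_outputs
            ...         return y * gy,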
"""
self._output_indexes_to_retain = indexes
def backward(self, target_input_indexes, grad_outputs):
"""Computes gradients w.r.t.\\ specified inputs given output gradients.
This method is used to compute one step of the backpropagation
corresponding to the forward computation of this function node.
Given the gradients w.r.t. output variables, this method computes the
gradients w.r.t. specified input variables. Note that this method does
not need to compute any input gradients not specified by
        ``target_input_indexes``.
Unlike :meth:`Function.backward() <chainer.Function.backward>`,
gradients are given as :class:`~chainer.Variable` objects and this
method itself has to return input gradients as
:class:`~chainer.Variable` objects. It enables the function node to
return the input gradients with the full computational history, in
which case it supports *differentiable backpropagation* or
*higher-order differentiation*.
        The default implementation returns ``None``\\ s, which means the
function is not differentiable.
Args:
target_input_indexes (tuple of int): Sorted indices of the input
variables w.r.t. which the gradients are required. It is
guaranteed that this tuple contains at least one element.
grad_outputs (tuple of :class:`~chainer.Variable`\\ s): Gradients
w.r.t. the output variables.
If the gradient w.r.t. an output variable is not
given, the corresponding element is ``None``.
Returns:
Tuple of variables that represent the gradients w.r.t. specified
input variables. The length of the tuple can be same as either
``len(target_input_indexes)`` or the number of inputs. In the
latter case, the elements not specified by ``target_input_indexes``
will be discarded.
.. seealso::
:meth:`backward_accumulate` provides an alternative interface that
allows you to implement the backward computation fused with the
gradient accumulation.
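        .. admonition:: Example
            A sketch (``Mul`` is an illustrative name) of a two-input
            multiply whose ``backward`` skips gradients that were not
            requested:
            >>> class Mul(chainer.FunctionNode):
            ...     def forward(self, inputs):
            ...         self.retain_inputs((0, 1))
            ...         x, y = inputs
            ...         return x * y,
            ...     def backward(self, target_input_indexes, grad_outputs):
            ...         x, y = self.get_retained_inputs()
            ...         gz, = grad_outputs
            ...         gx = gz * y if 0 in target_input_indexes else None
            ...         gy = gz * x if 1 in target_input_indexes else None
            ...         return gx, gy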
"""
return (None,) * len(target_input_indexes)
def backward_accumulate(self, target_input_indexes, grad_outputs,
grad_inputs):
"""Computes gradients w.r.t.\\ specified inputs and accumulates them.
This method provides a way to fuse the backward computation and the
gradient accumulations in the case that the multiple functions are
applied to the same variable.
Users have to override either of this method or :meth:`backward`.
It is often simpler to implement :meth:`backward` and is recommended
if you do not need to provide efficient gradient accumulation.
Args:
target_input_indexes (tuple of int): Sorted indices of the input
variables w.r.t. which the gradients are required. It is
guaranteed that this tuple contains at least one element.
grad_outputs (tuple of Variable): Gradients w.r.t. the output
variables. If the gradient w.r.t. an output variable is not
given, the corresponding element is ``None``.
grad_inputs (tuple of Variable): Gradients w.r.t. the input
variables specified by ``target_input_indexes``. These values
are computed by other computation paths. If there is no
gradient value existing for the variable, the corresponding
element is ``None``. See also the note below.
Returns:
Tuple of variables that represent the gradients w.r.t. specified
input variables. Unlike :meth:`backward`, the length of the tuple
            **must** be the same as that of ``target_input_indexes``.
.. note::
Gradient variables in ``grad_outputs`` are distinct, even if a
variable is passed to multiple input arguments of the function.
This is an implementation-detail convention to avoid the
complication of correctly accumulating gradients in such a case.
Usually, only the first position of ``grad_inputs`` corresponding to
these input arguments may contain the gradient variable
corresponding to that input variable, and other entries are set to
``None``. This is not the case with the ``lazy_grad_sum`` feature.
This behavior might be changed in a future version.
"""
# If backward_accumulate is implemented, it should be equivalent to
# the following code using backward(). This code is provided for the
# convenience, and it's *not* used unless you override it. You don't
# have to use backward().
assert isinstance(target_input_indexes, tuple)
assert isinstance(grad_outputs, tuple)
assert isinstance(grad_inputs, tuple)
gxs = self._backward_target_inputs(target_input_indexes, grad_outputs)
return tuple([gx if g_input is None else
g_input if gx is None else
gx + g_input
for gx, g_input in six.moves.zip(gxs, grad_inputs)])
def _backward_chainerx(self, target_input_indexes, grad_outputs,
retained_inputs, retained_outputs):
# Backward wrapper that is called from C++ via a Python binding in case
# self.apply was called with chainerx.ndarrays.
assert self._is_chainerx_fallback_mode
assert len(target_input_indexes) > 0
assert (
(self._input_indexes_to_retain is None
and len(retained_inputs) == 0)
or (len(self._input_indexes_to_retain) == len(retained_inputs)))
assert (
(self._output_indexes_to_retain is None
and len(retained_outputs) == 0)
or (len(self._output_indexes_to_retain) == len(retained_outputs)))
assert all([
a is None or isinstance(a, chainerx.ndarray)
for a in grad_outputs])
self._chainerx_retained_inputs = tuple([
None if array is None
else variable.Variable(
array, requires_grad=array.is_backprop_required())
for array in retained_inputs])
self._chainerx_retained_outputs = tuple([
None if array is None
else variable.Variable(
array, requires_grad=(
False if array is None else array.is_backprop_required()))
for array in retained_outputs])
device = backend.get_device_from_array(
*(retained_inputs + retained_outputs + grad_outputs))
with chainer.using_device(device):
gxs = self._backward_target_inputs(
tuple(target_input_indexes),
tuple([
None
if gy is None
else chainer.Variable(
gy, requires_grad=gy.is_backprop_required())
for gy in grad_outputs]))
gx_arrs = [gx._data[0] for gx in gxs]
assert all([isinstance(gx, chainerx.ndarray) for gx in gx_arrs])
return gx_arrs
def _backward_target_inputs(self, target_input_indexes, grad_outputs):
# Filters out input gradients that are not required and returns the
# rest.
assert all([
gy is None or yl == gy.layout
for yl, gy in zip(self.output_layouts, grad_outputs)])
gxs = self.backward(target_input_indexes, grad_outputs)
len_gxs = len(gxs)
if len_gxs == len(self.inputs):
gxs = tuple([gxs[i] for i in target_input_indexes])
else:
assert len_gxs == len(target_input_indexes)
return gxs
def _get_error_message(self, message):
lines = [
message,
' function={} ({})'.format(self._impl_name, self.label)
]
if self.inputs:
for i, input in enumerate(self.inputs):
lines.append(
' input {}: shape={} dtype={}'.format(
i, input.shape, input.dtype))
if self.outputs:
for i, output_ref in enumerate(self.outputs):
output = output_ref()
if output is None:
                    lines.append(
                        ' output {}: not available'.format(i))
else:
lines.append(
' output {}: shape={} dtype={}'.format(
i, output.shape, output.dtype))
return '\n'.join(lines)
def get_retained_inputs(self):
"""Returns a tuple of retained input variables.
This method is used to retrieve the input variables retained in
:meth:`forward`.
Returns:
            A tuple of retained input variables, if available. Otherwise
            an empty tuple.
"""
if self._is_chainerx_fallback_mode:
return self._chainerx_retained_inputs
if self._input_indexes_to_retain is None or self.inputs is None:
return ()
retained_inputs = []
for index in self._input_indexes_to_retain:
input = self.inputs[index]
if input.data is None:
retained_inputs.append(None)
else:
retained_inputs.append(input.get_variable())
return tuple(retained_inputs)
def get_retained_outputs(self):
"""Returns a tuple of retained output variables.
This method is used to retrieve the output variables retained in
:meth:`forward`.
Returns:
            A tuple of retained output variables, if available. Otherwise
            an empty tuple.
.. note::
This method does a tricky thing to support the case of an output
node garbage-collected before this method is called; in this case,
this method creates a fresh variable node that acts as an output
node of the function node.
"""
if self._is_chainerx_fallback_mode:
return self._chainerx_retained_outputs
if self._output_indexes_to_retain is None or self.outputs is None:
return ()
# TODO(hvy): It should be safe to remove this check.
if self._retained_output_data is None:
raise ValueError(self._get_error_message(
'retain_outputs is not called in forward.'))
ret = []
outputs = self.outputs
new_outputs = list(outputs)
outputs_modified = False
for index, data in six.moves.zip(self._output_indexes_to_retain,
self._retained_output_data):
output = outputs[index]()
if output is None:
# The output node is garbage collected, so create a fresh
# Variable object.
output_var = variable.Variable(data)
output_var.creator_node = self
new_outputs[index] = weakref.ref(output_var.node)
outputs_modified = True
else:
output_var = output.get_variable()
if output_var.raw_array is None:
ret.append(None)
else:
ret.append(output_var)
if outputs_modified:
self.outputs = tuple(new_outputs)
return tuple(ret)
def unchain(self):
"""Purges in/out nodes and this function node itself from the graph."""
if self._is_chainerx_fallback_mode:
raise NotImplementedError(
'Unchaining is not yet supported in ChainerX fallback mode.')
for y in self.outputs:
y_ref = y()
if y_ref is not None:
y_ref.unchain()
self.inputs = None
self.outputs = None
def add_hook(self, hook, name=None):
"""Registers a function hook.
Args:
hook (~chainer.FunctionHook): Function hook to be
registered.
name (str): Name of the function hook. The name must be unique
among function hooks registered to this function. If ``None``,
the default name of the function hook is used.
"""
if not isinstance(hook, function_hook.FunctionHook):
raise TypeError('Hook must be of type FunctionHook')
if name is None:
name = hook.name
hooks = self.local_function_hooks
if name in hooks:
raise KeyError('Hook %s already exists' % name)
hooks[name] = hook
hook.added(self)
def delete_hook(self, name):
"""Unregisters the function hook.
Args:
name (str): The name of the function hook to be unregistered.
"""
if name in self.local_function_hooks:
self.local_function_hooks[name].deleted(self)
del self.local_function_hooks[name]
else:
raise KeyError('Hook %s does not exist' % name)
def grad(outputs, inputs, grad_outputs=None, grad_inputs=None, set_grad=False,
retain_grad=False, enable_double_backprop=False, loss_scale=None):
"""Computes the gradient of output variables w.r.t.\\ the input variables.
This function implements the backpropagation algorithm. While
:meth:`Variable.backward` also implements backprop, this function selects
the smallest paths in the computational graph needed to compute the
gradients w.r.t. inputs. The error is backpropagated only through these
selected paths, which may reduce the overall computational cost.
This function also differs from :meth:`Variable.backward` in the way to
return the gradients; it directly returns the gradient variables as a list
instead of setting gradients to the :attr:`Variable.grad_var` attribute of
the original variable. It means users do not need to clear the gradient
w.r.t. each variable before computing the gradient using this function.
    If the ``set_grad`` option is set to ``True``, the computed gradient is
    also stored in the :attr:`Variable.grad_var` attribute of each variable,
    in which case any original value of :attr:`Variable.grad_var` is
    overwritten even if it had already been set.
Args:
outputs (tuple or list of :class:`~chainer.Variable`):
A sequence of output variables from which backprop starts.
inputs (tuple or list of :class:`~chainer.Variable`):
A sequence of input variables each of which this function computes
the gradient w.r.t.
grad_outputs (tuple or list of :class:`~chainer.Variable` or None):
A sequence of variables that gives the initial value of each output
gradient.
If an element is set to ``None``, an array filled with 1 is used.
If this argument itself is ``None``, it is treated as a sequence of
``None``\\ s.
grad_inputs (tuple or list of :class:`~chainer.Variable` or None):
A sequence of variables that gives the initial value of each input
gradient. The gradients computed by the backprop
algorithm are accumulated to them (not in-place). If an element
is set to ``None``, the gradient is not accumulated to this value.
If this argument itself is ``None``, it is treated as a sequence of
``None``\\ s.
set_grad (bool): If it is ``True``, the :attr:`Variable.grad_var`
attribute of each input variable is set to the corresponding
computed gradient variable.
retain_grad (bool): If it is ``True``, the gradients w.r.t. all the
intermediate variables are stored in the :attr:`Variable.grad_var`
attribute. In this case, the ``set_grad`` option is ignored.
enable_double_backprop (bool): If it is ``True``, the computed
gradients can be further backpropagated. Enabling it may increase
the memory consumption (and possibly the computational time) to
remember the intermediate gradient values for the second
backpropagation.
        loss_scale (float): Loss scaling factor. Loss scaling is a useful
            technique to mitigate the vanishing gradient issue that tends to
            happen when a low-precision data type like float16 is used
            during training. If you set a loss scaling factor, gradients of
            loss values are multiplied by the factor before backprop starts.
            The factor is propagated to the whole gradients in a
            computational graph along the backprop. The gradients of
            parameters are divided by the factor just before the parameters
            are updated.
Returns:
A list of gradient variables w.r.t. the inputs.
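    .. admonition:: Example
        A minimal usage sketch:
        >>> import numpy, chainer
        >>> x = chainer.Variable(numpy.array([1., 2.]))
        >>> y = x * x
        >>> gx, = chainer.grad([y], [x])
        >>> gx.array
        array([2., 4.])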
"""
if not isinstance(outputs, (tuple, list)):
raise TypeError(
'outputs must be a tuple or a list, not {}.'.format(type(outputs)))
if not isinstance(inputs, (tuple, list)):
raise TypeError(
'inputs must be a tuple or a list, not {}.'.format(type(inputs)))
if grad_outputs is not None:
if not isinstance(grad_outputs, (tuple, list)):
raise TypeError(
'grad_outputs must be a tuple or a list or None, not {}.'
.format(type(grad_outputs)))
if len(outputs) != len(grad_outputs):
raise ValueError(
'grad_outputs must be of the same length as outputs.\n'
'len(outputs) = {}, len(grad_outputs) = {}'
.format(len(outputs), len(grad_outputs)))
if grad_inputs is not None:
if not isinstance(grad_inputs, (tuple, list)):
raise TypeError(
'grad_inputs must be a tuple or a list or None, not {}.'
.format(type(grad_inputs)))
if len(inputs) != len(grad_inputs):
raise ValueError(
'grad_inputs must be of the same length as inputs.\n'
'len(inputs) = {}, len(grad_inputs) = {}'
.format(len(inputs), len(grad_inputs)))
    # Check if all the inputs are ChainerX arrays and, if so, rely on the
    # chainerx.grad function.
n_chx_inputs = sum([False if x is None else x._has_chainerx_array
for x in inputs])
if n_chx_inputs == len(inputs):
        # Need to access the raw arrays to invoke the chainerx.grad function.
if grad_outputs:
grad_outputs_chx = [x._data[0] for x in grad_outputs]
else:
grad_outputs_chx = []
outputs_chx = [x._data[0] for x in outputs]
inputs_chx = [x._data[0] for x in inputs]
# pybind has issues when converting opt<int> -> opt<float>
if loss_scale is not None:
loss_scale = float(loss_scale)
grads = chainerx.grad(outputs_chx, inputs_chx,
backprop_id=None,
enable_double_backprop=enable_double_backprop,
set_grad=set_grad,
retain_grad=retain_grad,
grad_outputs=grad_outputs_chx,
loss_scale=loss_scale)
if grad_inputs:
            grads = [g + gi._data[0] for g, gi in zip(grads, grad_inputs)]
return [variable.Variable(g, requires_grad=g.is_backprop_required())
for g in grads]
elif n_chx_inputs > 0:
raise TypeError(
'Mixing chainerx and non-chainerx variables is not allowed')
for v in outputs:
# Raise error here if v is created by Function.backward.
# In such case, we don't know exact inputs of the creator.
v.node._check_old_style_gradient()
# The implementation consists of three steps.
# 1. Backward enumeration: all the nodes reachable backward from the output
# nodes are enumerated. The forward direction links are collected in
# this step. Note that the variable nodes whose requires_grad is false
# are ignored and their creators are not searched.
candidate_funcs = [v.creator_node for v in outputs
if v.creator_node is not None]
visited_funcs = set()
forward_graph = collections.defaultdict(list)
while candidate_funcs:
func = candidate_funcs.pop()
if func in visited_funcs:
continue
visited_funcs.add(func)
for x in func.inputs:
# Raise error here if x is created by Function.backward.
# In such case, we don't know exact inputs of the creator.
x._check_old_style_gradient()
if not x.requires_grad:
continue
forward_graph[x].append(func)
creator = x.creator_node
if creator is not None and creator not in visited_funcs:
candidate_funcs.append(creator)
# 2. Forward enumeration: all the nodes in the subgraph reachable from the
# input nodes are enumerated. The extracted (sub-)subgraph is the union
# of all paths that backpropagation will visit.
candidate_vars = [x.node for x in inputs]
visited_funcs = set()
grad_required = set()
while candidate_vars:
x = candidate_vars.pop()
grad_required.add(x)
for func in forward_graph[x]:
if func in visited_funcs:
continue
visited_funcs.add(func)
for y_ref in func.outputs:
y = y_ref()
if y is not None and y in forward_graph:
candidate_vars.append(y)
# 3. Backpropagation: the backpropagation is executed along the
# (sub-)subgraph. It uses the topological order of the subgraph which is
# induced by the reversed order of function applications ("rank").
grads = _backprop_utils.GradTable()
# Initialize the gradient mapping.
if grad_outputs is None:
grad_outputs = (None,) * len(outputs)
for y, gy in zip(outputs, grad_outputs):
if gy is None:
with chainer.using_device(y.device):
gy_data = y.device.xp.ones_like(y.array)
gy = variable.Variable(gy_data, requires_grad=False)
if loss_scale is not None:
gy.data *= loss_scale
grads[y.node] = gy
if grad_inputs is not None:
for x, gx in zip(inputs, grad_inputs):
if gx is not None:
grads[x.node] = gx
# Backprop implementation. It edits grads which will only contain the
# gradients w.r.t. the inputs.
with chainer.using_config('enable_backprop', enable_double_backprop):
ret_dict = _backprop(
outputs, inputs, grad_required, retain_grad, grads, loss_scale)
# Extract the gradients w.r.t. the inputs and return them.
ret = [ret_dict[x.node] for x in inputs]
if set_grad:
for x, gx in zip(inputs, ret):
x.grad_var = gx
return ret
def _backprop(outputs, inputs, grad_required, retain_grad, grads, loss_scale):
candidate_funcs, push_candidate, pop_candidate = _get_ordered_func_heap()
for y in outputs:
creator = y.creator_node
if creator is not None:
push_candidate(creator)
input_nodes = set(x.node for x in inputs)
ret_dict = {}
is_debug = chainer.is_debug()
base_hooks = chainer.get_function_hooks().values()
while candidate_funcs:
func = pop_candidate()
# Collect the gradients w.r.t. the outputs
ys = [y() for y in func.outputs] # access via weak ref
gys = tuple([grads.pop(y)
if y is not None and y.creator_node is not None else None
for y in ys])
for node, gy in six.moves.zip(ys, gys):
if node is not None:
if node in input_nodes:
ret_dict[node] = gy
if retain_grad:
y = node.get_variable_or_none()
if y is not None:
y.grad_var = gy
y._loss_scale = loss_scale
# Collect the gradients w.r.t. the inputs
input_indexes = []
x_grads = collections.OrderedDict()
for i, x in enumerate(func.inputs):
if x not in grad_required:
continue
input_indexes.append(i)
if x not in x_grads:
x_grads[x] = grads.get_as_list(x)
if not input_indexes:
continue
input_indexes = tuple(input_indexes)
# Do backward
# Call pre-backward hooks
if func._n_local_function_hooks != 0:
local_hooks = collections.OrderedDict(chainer.get_function_hooks())
local_hooks.update(func.local_function_hooks)
hooks = local_hooks.values() # avoid six for performance
else:
hooks = base_hooks
in_data = [x.data for x in func.inputs]
out_grad_data = [None if g is None else g.data for g in gys]
with chainer.using_device(backend.get_device_from_array(*in_data)):
for hook in hooks:
hook.backward_preprocess(
func, tuple(in_data), tuple(out_grad_data))
_backprop_utils.backprop_step(func, input_indexes, gys, x_grads,
is_debug)
# Call post-backward hooks
for hook in hooks:
hook.backward_postprocess(
func, tuple(in_data), tuple(out_grad_data))
# Update grads
for node, g in x_grads.items():
if not g: # gradient == None
continue
creator = node.creator_node
if creator is not None:
push_candidate(creator)
for x in input_nodes:
if x not in ret_dict:
ret_dict[x] = grads.pop(x)
return ret_dict
def _extract_apply_in_data(inputs):
# Extracts arrays from FunctionNode.apply() inputs.
#
# A flag that indicates whether inputs are chainerx arrays is also
# returned.
#
# Each object in `inputs` may be `Variable` or an array.
# If it's a `Variable` and its underlying array is a chainerx array,
# `Variable._data[0]` (which is backproppable in contrast to
# `Variable.array`) is returned.
#
# If at least one of the arrays is a ChainerX array, all other
# arrays need to be ChainerX arrays.
if not inputs:
return False, ()
if chainerx.is_available():
has_chainerx_array = False
# Unwrap arrays
arrays = []
for x in inputs:
if isinstance(x, variable.Variable):
arrays.append(x._data[0])
if x._has_chainerx_array:
has_chainerx_array = True
else: # x is ndarray
arrays.append(x)
if not has_chainerx_array:
if isinstance(x, chainerx.ndarray):
has_chainerx_array = True
return has_chainerx_array, tuple(arrays)
else:
return False, tuple([
x.raw_array if isinstance(x, variable.Variable) else x
for x in inputs])
def _get_ordered_func_heap():
heap = []
visited_funcs = set()
def push_heap(func):
if func not in visited_funcs:
# Negate since heapq is min-heap
# The second element is used to make each item unique
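            # Illustration: with f1.rank == 0 and f2.rank == 1, f2 is
            # popped first (since -1 < 0), i.e. functions deeper in the
            # graph are processed before the functions that produced
            # their inputs.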
ordered_func = -func.rank, len(visited_funcs), func
visited_funcs.add(func)
heapq.heappush(heap, ordered_func)
def pop_heap():
_, _, func = heapq.heappop(heap)
return func
return heap, push_heap, pop_heap
def _make_chainerx_attribute_fallback_class(obj, device):
    # Creates a fabricated class based on a concrete class
# (either FunctionNode or Function),
# equipped with the automatic attribute fallback. This is enabled
# during FunctionNode.forward(), Function.forward() and
# Function.backward().
#
# In the fallback mechanism, when an array with the fallback ndarray
# type (e.g. numpy.ndarray for ChainerX native devices) is assigned
# as an attribute, it's automatically converted to a ChainerX ndarray
# with the corresponding ChainerX device and stored in that form.
# Conversely, when an attribute with ChainerX ndarray type is queried,
# it's converted to the fallback ndarray before being returned.
# That way, concrete function implementations can use attributes
# as ndarray storage, without converting from/to ChainerX manually.
#
# Note that it works only if the attribute has an ndarray type. If the
# array is wrapped in a tuple, for example, no automatic conversion
    # will take place.
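    #
    # Illustration: inside forward() on a ChainerX native device, an
    # assignment like ``self.mask = numpy.ones(3)`` stores a
    # chainerx.ndarray internally, while a later read of ``self.mask``
    # returns the cached numpy.ndarray (``mask`` is an illustrative name).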
fallback_device = device.fallback_device
sup = super(obj.__class__, obj)
# Cache to avoid converting same arrays multiple times
fallback_array_cache = {}
# self.__getattribute__ for fallback arrays
def getattribute(self, name):
value = sup.__getattribute__(name)
if isinstance(value, chainerx.ndarray):
fallback_arr = fallback_array_cache.get(name)
if fallback_arr is None:
fallback_arr = backend.from_chx(value)
fallback_array_cache[name] = fallback_arr
return fallback_arr
return value
# self.__setattr__ for fallback arrays
def setattr(self, name, value):
if isinstance(value, fallback_device.xp.ndarray):
fallback_array_cache[name] = value
sup.__setattr__(name, backend.to_chx(value))
return
sup.__setattr__(name, value)
# Return a fabricated FunctionNode class
new_class = type(
obj.__class__.__name__,
inspect.getmro(obj.__class__),
{
'__getattribute__': getattribute,
'__setattr__': setattr,
})
return new_class
@contextlib.contextmanager
def _chainerx_attribute_fallback(obj, chainerx_device):
old_class = obj.__class__
obj.__class__ = _make_chainerx_attribute_fallback_class(
obj, chainerx_device)
try:
yield
finally:
obj.__class__ = old_class
| 56,952
| 38.799441
| 80
|
py
|
chainer
|
chainer-master/chainer/__init__.py
|
from __future__ import absolute_import
import collections
import os
import threading
import warnings as builtin_warnings
import numpy
from chainer import _version
from chainer import backends # NOQA
from chainer import dataset # NOQA
from chainer import datasets # NOQA
from chainer import distributions # NOQA
from chainer import function_hooks # NOQA
from chainer import functions # NOQA
from chainer import graph_optimizations # NOQA
from chainer import initializers # NOQA
from chainer import iterators # NOQA
from chainer import links # NOQA
from chainer import optimizers # NOQA
from chainer import serializers # NOQA
from chainer import training # NOQA
from chainer import variable # NOQA
from chainer import warnings # NOQA
# import class and function
# These functions from backends.cuda are kept for backward compatibility
from chainer._backprop import backward # NOQA
from chainer._runtime_info import print_runtime_info # NOQA
from chainer.backend import get_device # NOQA
from chainer.backend import using_device # NOQA
from chainer.backends.cuda import should_use_cudnn # NOQA
from chainer.backends.cuda import should_use_cudnn_tensor_core # NOQA
from chainer.configuration import config # NOQA
from chainer.configuration import global_config # NOQA
from chainer.configuration import using_config # NOQA
from chainer.device_resident import DeviceResident # NOQA
from chainer.distribution import cross_entropy # NOQA
from chainer.distribution import Distribution # NOQA
from chainer.distribution import kl_divergence # NOQA
from chainer.distribution import register_kl # NOQA
from chainer.function import force_backprop_mode # NOQA
from chainer.function import Function # NOQA
from chainer.function import FunctionAdapter # NOQA
from chainer.function import no_backprop_mode # NOQA
from chainer.function_hook import FunctionHook # NOQA
from chainer.function_node import FunctionNode # NOQA
from chainer.function_node import grad # NOQA
from chainer.functions import array # NOQA
from chainer.functions.math import basic_math # NOQA
from chainer.graph_optimizations.static_graph import static_graph # NOQA
from chainer.graph_optimizations.static_graph_utilities import static_code # NOQA
from chainer.initializer import Initializer # NOQA
from chainer.link import Chain # NOQA
from chainer.link import ChainList # NOQA
from chainer.link import Link # NOQA
from chainer.link_hook import LinkHook # NOQA
from chainer.optimizer import GradientMethod # NOQA
from chainer.optimizer import Optimizer # NOQA
from chainer.optimizer import UpdateRule # NOQA
from chainer.reporter import DictSummary # NOQA
from chainer.reporter import get_current_reporter # NOQA
from chainer.reporter import report # NOQA
from chainer.reporter import report_scope # NOQA
from chainer.reporter import Reporter # NOQA
from chainer.reporter import Summary # NOQA
from chainer.sequential import Sequential # NOQA
from chainer.serializer import AbstractSerializer # NOQA
from chainer.serializer import Deserializer # NOQA
from chainer.serializer import Serializer # NOQA
from chainer.variable import as_array # NOQA
from chainer.variable import as_variable # NOQA
from chainer.variable import Parameter # NOQA
from chainer.variable import Variable # NOQA
# Alias for backward compatibility
from chainer import cuda # NOQA
from chainer import _environment_check
import chainerx
# Introduce an alias that cannot be declared at the original place due to
# circular imports.
import chainer.utils.walker_alias
chainer.utils.WalkerAlias = chainer.utils.walker_alias.WalkerAlias
del chainer
# Check environment conditions
_environment_check.check()
__version__ = _version.__version__
_thread_local = threading.local()
_array_types = None
_cpu_array_types = None
# Used in chainer.FunctionNode.forward_chainerx().
# This value is returned to indicate that the function does not support
# forward computation in the ChainerX implementation with the given input
# arrays and other arguments.
class _FallbackType(object):
def __repr__(self):
return 'Fallback'
Fallback = _FallbackType()
def get_function_hooks():
try:
ret = _thread_local.function_hooks
except AttributeError:
ret = collections.OrderedDict()
_thread_local.function_hooks = ret
return ret
def _get_link_hooks():
try:
ret = _thread_local.link_hooks
except AttributeError:
ret = collections.OrderedDict()
_thread_local.link_hooks = ret
return ret
def _load_array_types():
    # Note: this function may not be protected by the GIL because of
    # external calls.
global _array_types
global _cpu_array_types
if _array_types is None:
array_types = [numpy.ndarray]
cpu_array_types = [numpy.ndarray]
if backends.cuda.available:
array_types.append(backends.cuda.ndarray)
if backends.intel64.is_ideep_available():
array_types.append(backends.intel64.mdarray)
cpu_array_types.append(backends.intel64.mdarray)
if chainerx.is_available():
array_types.append(chainerx.ndarray)
cpu_array_types.append(chainerx.ndarray)
array_types = tuple(array_types)
cpu_array_types = tuple(cpu_array_types)
_array_types = array_types
_cpu_array_types = cpu_array_types
def get_array_types():
_load_array_types()
return _array_types
def get_cpu_array_types():
_load_array_types()
return _cpu_array_types
# TODO(hvy): Move this function to backend?
def is_arrays_compatible(arrays):
# Do not use this function to check if a single object is an array or
# not. Use isinstance(obj, chainer.get_array_types()) instead.
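    # Illustration: two numpy.ndarrays are compatible; mixing numpy and
    # cupy arrays is not; a chainerx.ndarray is compatible only with other
    # chainerx.ndarrays.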
arrays = [a for a in arrays if a is not None]
if not arrays:
return True
# If there's at least one chainerx.ndarray, all other arrays
# must be chainerx as well
are_chainerx = [isinstance(arr, chainerx.ndarray) for arr in arrays]
if chainerx.is_available() and any(are_chainerx):
return all(are_chainerx)
if isinstance(arrays[0], backends.cuda.ndarray):
types = backends.cuda.ndarray
else:
types = get_cpu_array_types()
return all([isinstance(a, types) for a in arrays])
class _Mixed16(object):
dtype = numpy.dtype(numpy.float16)
def __repr__(self):
return "dtype('mixed16')"
mixed16 = _Mixed16()
"""Dtype-like object that represents 16/32 bits mixed precision float."""
global_config.debug = bool(int(os.environ.get('CHAINER_DEBUG', '0')))
global_config.cudnn_deterministic = False
global_config.warn_nondeterministic = False
global_config.enable_backprop = True
global_config.keep_graph_on_report = bool(int(
os.environ.get('CHAINER_KEEP_GRAPH_ON_REPORT', '0')))
global_config.train = True
global_config.type_check = bool(int(os.environ.get('CHAINER_TYPE_CHECK', '1')))
global_config.use_cudnn = os.environ.get('CHAINER_USE_CUDNN', 'auto')
global_config.use_cudnn_tensor_core = 'auto'
global_config.autotune = False
global_config.schedule_func = None
global_config.use_static_graph = True
global_config.use_ideep = os.environ.get('CHAINER_USE_IDEEP', 'never')
global_config.lazy_grad_sum = bool(int(
os.environ.get('CHAINER_LAZY_GRAD_SUM', '0')))
global_config.cudnn_fast_batch_normalization = bool(int(
os.environ.get('CHAINER_CUDNN_FAST_BATCH_NORMALIZATION', '0')))
_chainer_dtype = os.environ.get('CHAINER_DTYPE', 'float32')
if _chainer_dtype in ('float16', 'float32', 'float64'):
global_config.dtype = numpy.dtype(_chainer_dtype)
elif _chainer_dtype == 'mixed16':
global_config.dtype = mixed16
else:
    raise TypeError('incorrect dtype name in CHAINER_DTYPE: "{}". '
                    'Only float16/32/64 and mixed16 are allowed.'
                    .format(_chainer_dtype))
global_config.in_recomputing = False
global_config._will_recompute = False
global_config.compute_mode = None
def is_debug():
"""Returns if the debug mode is enabled or not in the current thread.
Returns:
bool: ``True`` if the debug mode is enabled.
"""
return bool(config.__getattr__('debug'))
def set_debug(debug):
"""Enables or disables the debug mode in the current thread.
.. note::
``chainer.set_debug(value)`` is equivalent to
``chainer.config.debug = value``.
Args:
debug (bool): New debug mode.
"""
config.debug = debug
class DebugMode(object):
"""Debug mode context.
This class provides a context manager for debug mode. When entering the
    context, it sets the debug mode to the value of the ``debug`` parameter,
    memorizing its original value. When exiting the context, it sets the
    debug mode back to the original value.
.. deprecated:: v2.0.0
Use :func:`chainer.using_config` instead. See :ref:`debug` for details.
Args:
debug (bool): Debug mode used in the context.
"""
def __init__(self, debug):
builtin_warnings.warn(
'chainer.DebugMode is deprecated. '
'Use chainer.using_config("debug", ...) instead.',
DeprecationWarning)
self._using = using_config('debug', debug)
def __enter__(self):
self._using.__enter__()
def __exit__(self, *args):
self._using.__exit__(*args)
def get_dtype(dtype=None, map_mixed16=None):
"""Resolves Chainer's default dtype.
Args:
dtype: Dtype specifier. If this value is specified (not ``None``),
this function returns the dtype object corresponding to it.
map_mixed16: Dtype specifier. When ``chainer.config.dtype`` is mixed16,
this option is used. If this value is ``None``, float16 is used.
Returns:
If ``dtype`` is not ``None``, it returns the dtype normalized by
``numpy.dtype()``. Otherwise, it returns ``chainer.config.dtype`` (see
:ref:`configuration`) normalized as well. When ``chainer.config.dtype``
is :data:`~chainer.mixed16` and ``map_mixed16`` is specified, it
returns the normalized version of ``map_mixed16``.
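    .. admonition:: Example
        A usage sketch:
        >>> import numpy, chainer
        >>> chainer.get_dtype(numpy.float64)
        dtype('float64')
        >>> with chainer.using_config('dtype', chainer.mixed16):
        ...     chainer.get_dtype(map_mixed16=numpy.float32)
        dtype('float32')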
"""
if dtype is None:
dtype = config.dtype
if dtype is mixed16 and map_mixed16 is not None:
dtype = map_mixed16
return numpy.dtype(dtype)
def get_compute_mode():
return config.compute_mode
basic_math.install_variable_arithmetics()
array.get_item.install_variable_get_item()
disable_experimental_feature_warning = False
| 10,467
| 31.209231
| 82
|
py
|
chainer
|
chainer-master/chainer/_backprop.py
|
from __future__ import absolute_import
import collections
import heapq
import warnings
import six
import chainer
from chainer import _backprop_utils
from chainer import backend
from chainer.utils import argument
import chainerx
def backward(outputs, grad_outputs=None, **kwargs):
"""backward(outputs, grad_outputs=None, *, enable_double_backprop=False)
Runs backpropagation from variables simultaneously.
.. warning::
This feature is experimental. The interface can change in the future.
Args:
outputs (tuple or list of :class:`~chainer.Variable`):
A sequence of output variables from which backprop starts.
grad_outputs (None or tuple or list of :class:`~chainer.Variable`):
A sequence of variables that gives the initial value of each output
gradient.
If this argument is ``None``, backprop uses
:attr:`~chainer.Variable.grad_var` of ``outputs``.
enable_double_backprop (bool): If ``True``,
computational trace of the whole backpropagation procedure is
recorded to the computational graph so that one can further do
backpropagation from the resulting gradients. Note that
enabling it results in larger memory consumption needed to
            store the gradients w.r.t. intermediate variables that are
required for the second gradient computation.
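    .. admonition:: Example
        A minimal usage sketch:
        >>> import numpy, chainer
        >>> x = chainer.Variable(numpy.array([1., 2.]))
        >>> y = x * x
        >>> y.grad = numpy.ones(2)
        >>> chainer.backward([y])
        >>> x.grad
        array([2., 4.])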
.. seealso::
:meth:`chainer.Variable.backward`
:func:`chainer.grad`
"""
enable_double_backprop, = argument.parse_kwargs(
kwargs, ('enable_double_backprop', False),
retain_grad='semantics for retain_grad=True is under discussion',
loss_scale='chainer.backward does not support loss_scale option',
)
if not isinstance(outputs, (tuple, list)):
raise TypeError(
'outputs must be a tuple or a list, not {}.'.format(type(outputs)))
for v in outputs:
if not isinstance(v, chainer.Variable):
raise TypeError(
'each output must be a Variable, not {}'.format(type(v)))
if grad_outputs is not None:
if not isinstance(grad_outputs, (tuple, list)):
raise TypeError(
'grad_outputs must be None, a tuple, or a list, not {}.'
.format(type(grad_outputs)))
if len(outputs) != len(grad_outputs):
raise ValueError(
'grad_outputs must be of the same length as outputs.\n'
'len(outputs) = {}, len(grad_outputs) = {}'
.format(len(outputs), len(grad_outputs)))
is_chainerx = [v._has_chainerx_array for v in outputs]
if any(is_chainerx):
if not all(is_chainerx):
# The restriction is required as soon as the workarounds below
# are removed.
raise ValueError('cannot mix chainerx and other backends')
# Cannot use chainerx.backward directly, because it does not follow
# retain_grad=False
# TODO(kataoka): Fix chainerx.backward and remove this workaround
if grad_outputs is None:
grad_outputs = []
for y in outputs:
grad_outputs.append(y.grad_var)
y.grad_var = None
# The check is required because chainerx.backward sets default grads.
# TODO(kataoka): Fix chainerx.backward and remove this workaround
indices = [i for i, gy in enumerate(grad_outputs) if gy is not None]
outputs = [outputs[i] for i in indices]
grad_outputs = [grad_outputs[i] for i in indices]
# Use new variables to start backprop
# TODO(kataoka): Implement chainerx.backward(output, grad_outputs)
# and remove this workaround.
outputs = chainer.functions.identity(*outputs)
if not isinstance(outputs, tuple):
outputs = outputs,
grad_outputs = chainer.functions.identity(*grad_outputs)
if not isinstance(grad_outputs, tuple):
grad_outputs = grad_outputs,
# TODO(kataoka): Even after F.identity, non-float grad cannot be set.
# Move the check to elsewhere and remove this workaround.
outputs_ = []
for y, gy in zip(outputs, grad_outputs):
if not y.requires_grad and gy is not None:
warnings.warn(
'Some of grads are ignored by chainer.backward.\n'
'backend: ChainerX, '
'output.dtype: {}, grad_output.dtype: {}'.format(
y.dtype, gy.dtype),
RuntimeWarning)
continue
y.grad_var = gy
outputs_.append(y)
outputs = outputs_
del outputs_
# See also the ChainerX case of Variable.backward
arrs = []
for y in outputs:
arr = y._data[0]
assert isinstance(arr, chainerx.ndarray)
arrs.append(arr)
chainerx.backward(
arrs, enable_double_backprop=enable_double_backprop)
return
if grad_outputs is None:
grad_outputs = []
for y in outputs:
grad_var = y.grad_var
if grad_var is None:
warnings.warn(
'outputs contains a Variable without grad, or '
'duplicate outputs. Note that '
'chainer.backward does not set default grad.',
RuntimeWarning)
y.grad_var = None
grad_outputs.append(grad_var)
outputs = [
(y.node, gy) for y, gy in zip(outputs, grad_outputs) if gy is not None]
with chainer.using_config('enable_backprop', enable_double_backprop):
_backprop_to_all(outputs, False, None)
def _backprop_to_all(outputs, retain_grad, loss_scale):
"""Backprop to all input variables
Args:
outputs (list of tuple): each tuple is (y_node, y_grad_var).
y_grad_var should not be None.
retain_grad (bool): see docstring of Variable.backward
loss_scale (float): see docstring of Variable.backward
"""
OrderedDict = chainer.utils._collections.OrderedDict # fix py2 memory leak
cand_funcs = []
seen_set = set()
def add_cand(cand):
if cand not in seen_set:
# Negate since heapq is min-heap
heapq.heappush(cand_funcs, (-cand.rank, len(seen_set), cand))
seen_set.add(cand)
grads = _backprop_utils.GradTable(accumulate_grad_inputs=True)
leaf_nodes = set()
for y, gy in outputs:
grads.accumulate(y, gy)
func = y.creator_node
if func is None: # leaf
leaf_nodes.add(y)
else:
add_cand(func)
# Fix F812 (Python 2)
y = None
del y
is_debug = chainer.is_debug()
base_hooks = chainer.get_function_hooks().values()
while cand_funcs:
_, _, func = heapq.heappop(cand_funcs)
inputs = func.inputs
target_input_indexes = tuple([
i for i, x in enumerate(inputs) if x.requires_grad
])
outputs = [y() for y in func.outputs] # access via weak ref
out_grad = tuple([grads.pop(y)
if y is not None and y.creator_node is not None
else None
for y in outputs])
if not target_input_indexes:
continue
in_data = [x.data for x in inputs]
out_grad_array = [None if g is None else g.raw_array for g in out_grad]
if func._n_local_function_hooks != 0:
local_hooks = collections.OrderedDict(chainer.get_function_hooks())
local_hooks.update(func.local_function_hooks)
hooks = local_hooks.values() # avoid six for performance
else:
hooks = base_hooks
with chainer.using_device(
backend.get_device_from_array(*(in_data + out_grad_array))):
for hook in hooks:
hook.backward_preprocess(
func, tuple(in_data), tuple(out_grad_array))
# Collect the current input gradients.
target_inputs = [inputs[i] for i in target_input_indexes]
# Keep the order for the portability, rather than
# in_grad = {x: grads.get_as_list(x)
# for x in set(target_inputs)}
in_grad = OrderedDict()
for x in target_inputs:
if x not in in_grad:
in_grad[x] = grads.get_as_list(x)
_backprop_utils.backprop_step(
func, target_input_indexes, out_grad, in_grad, is_debug)
for hook in hooks:
hook.backward_postprocess(
func, tuple(in_data), tuple(out_grad_array))
if retain_grad:
# The gradients of the outputs of `func` are final. Store them if
# retain_grad=True.
for y, gy in six.moves.zip(outputs, out_grad):
if y is not None:
y._set_grad_var_if_available(gy)
del gy # to reduce memory usage
del out_grad # to reduce memory usage
for x, gx in in_grad.items():
if not gx: # gradient == None
continue
for gx_elem in gx:
if gx_elem is not None:
chainer.variable._check_grad_type(
func, x, True, gx_elem.raw_array)
del gx_elem # to reduce memory usage
if x.creator_node is None: # leaf
leaf_nodes.add(x)
else:
add_cand(x.creator_node)
del gx, in_grad # to reduce memory usage
for x in leaf_nodes:
x_var = x.get_variable_or_none()
gx = grads.pop(x)
if x_var is not None:
x_var._set_grad_var_without_check(gx)
x_var._loss_scale = loss_scale
grads.assert_no_grads()
# ==== chainer-master/chainer/_backend.py ====
def _convert_arrays(array, func):
# Converts array or arrays
if isinstance(array, (list, tuple)):
# The same object encountered multiple times in the container is
# converted into the same object.
d = {}
ret = []
for arr in array:
if arr is None:
ret.append(None)
else:
arr2 = d.get(id(arr))
if arr2 is None:
arr2 = func(arr)
d[id(arr)] = arr2
ret.append(arr2)
return type(array)(ret)
else:
return func(array)
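# A minimal sketch of the identity-preserving behavior above (hypothetical
# values; the lambda is just an illustrative converter):
#
#     import numpy
#
#     a = numpy.zeros(2)
#     out = _convert_arrays([a, a, None], lambda arr: arr + 1)
#     assert out[0] is out[1]   # the duplicated array is converted only once
#     assert out[2] is None     # None entries pass through unchanged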
class _DummyContext(object):
def __enter__(self):
pass
def __exit__(self, typ, value, traceback):
pass
_dummy_context = _DummyContext()
# TODO(niboshi): Write more detailed description about interface/usage.
class Device(object):
"""A base class of unified devices.
Chainer has the following concrete implementations:
- :class:`chainer.backend.CpuDevice`
- :class:`chainer.backend.GpuDevice`
- :class:`chainer.backend.Intel64Device`
- :class:`chainer.backend.ChainerxDevice`
"""
@property
def xp(self):
"""Array module corresponding to the device."""
raise NotImplementedError(
'Device implementation must override this property.')
@property
def name(self):
"""A unique name of the device."""
raise NotImplementedError(
'Device implementation must override this property.')
@property
def supported_array_types(self):
"""Array types supported by the device.
Returns:
tuple of array types which the device's module functions can
handle.
"""
raise NotImplementedError(
'Device implementation must override this property.')
def __str__(self):
return self.name
def __enter__(self):
"""A dummy definition that simply raises RuntimeError.
:meth:`chainer.using_device` should be used instead.
"""
raise RuntimeError(
'Device class does not support runtime context using `with` '
'statement. Use chainer.using_device instead.')
def __exit__(self, exc_type, exc_value, traceback):
"""A dummy definition that should never be called."""
# Definition of __exit__ is needed to raise a custom error on
# __enter__.
pass
def __eq__(self, other):
raise NotImplementedError(
'Device implementation must override this method.')
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.name)
def create_context(self):
"""Returns a context manager in which the device is made current.
.. seealso::
:meth:`chainer.using_device` calls this method internally.
"""
return _dummy_context
def send(self, arrays):
"""Transfers given arrays to the device.
Args:
arrays: Array or arrays of NumPy, CuPy, or ChainerX.
Returns:
Transferred arrays.
"""
return _convert_arrays(arrays, self.send_array)
def use(self):
"""Makes the device current in the current thread.
"""
pass
def is_array_supported(self, array):
"""Returns if the specified array is compatible with the device.
Args:
array (:ref:`ndarray`): An array to be checked
Returns:
``True`` if the array is compatible with the device. Otherwise
``False`` is returned.
"""
raise NotImplementedError(
'Device implementation must override this method.')
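# A minimal sketch of a concrete subclass (hypothetical, for illustration
# only; real implementations such as chainer.backend.CpuDevice also define
# transfer methods like ``send_array``, which ``Device.send`` relies on):
#
#     import numpy
#
#     class EchoDevice(Device):
#         @property
#         def xp(self):
#             return numpy
#         @property
#         def name(self):
#             return '@echo'
#         @property
#         def supported_array_types(self):
#             return (numpy.ndarray,)
#         def send_array(self, array):   # used by Device.send
#             return numpy.asarray(array)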
# ==== chainer-master/chainer/_backprop_utils.py ====
import os
import shutil
import sys
import traceback
import six
import chainer
def _reduce(grad_list):
if not grad_list:
return None
if len(grad_list) >= 2:
grad_list[:] = [chainer.functions.add(*grad_list)]
return grad_list[0]
def _pure(grad):
return [] if grad is None else [grad]
def _pop_or_none(grad_list):
return grad_list.pop() if grad_list else None
def _grad_var_from_alive_node(node):
# Used by `accumulate_grad_inputs` option of `GradTable`
var = node.get_variable_or_none()
if var is None:
return None
else:
gv = var.grad_var
var.grad_var = None
return gv
class GradTable(object):
"""Dict of nodes to references of gradients
The gradients are stored as references to them in the backprop process. The
current implementation uses lists. Keep the lengths of lists <= 1 for the
strict accumulation of gradients. Leave them to accumulate gradients
lazily.
Args:
accumulate_grad_inputs (bool): Fallback to grad_var of input variables.
However, the current implementation reproduces the legacy behavior,
i.e. to read ``grad_var`` of node when the node has not been added.
"""
def __init__(self, accumulate_grad_inputs=False):
self.grads = {}
self._load_if_new = accumulate_grad_inputs
def __setitem__(self, node, grad):
assert node is not None
self.grads[node] = _pure(grad)
def accumulate(self, node, grad):
self.get_as_list(node).append(grad)
def get_as_list(self, node):
assert node is not None
grads = self.grads
if node not in grads:
if self._load_if_new and node.creator_node is None:
node._check_old_style_gradient()
# accumulate the gradient only if the node is a leaf
grads[node] = _pure(_grad_var_from_alive_node(node))
else:
grads[node] = []
return grads[node]
def pop(self, node):
if node is None:
return None
grads = self.grads
if node in grads:
return _reduce(grads.pop(node))
if self._load_if_new:
return _grad_var_from_alive_node(node)
else:
return None
def assert_no_grads(self):
for gx in self.grads.values():
assert gx == []
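# A minimal usage sketch (hypothetical values) showing lazy accumulation
# followed by the reduction performed in ``pop``:
#
#     import numpy
#     import chainer
#
#     v = chainer.Variable(numpy.zeros(3, dtype=numpy.float32))
#     g1 = chainer.Variable(numpy.ones(3, dtype=numpy.float32))
#     g2 = chainer.Variable(numpy.full(3, 2., dtype=numpy.float32))
#     table = GradTable()
#     table.accumulate(v.node, g1)
#     table.accumulate(v.node, g2)   # the list grows; no addition yet
#     total = table.pop(v.node)      # reduced here via F.add(g1, g2)
#     # total.array -> array([3., 3., 3.], dtype=float32)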
def backprop_step(
func, target_input_indexes, grad_outputs, grad_inputs, is_debug):
"""Accumulates gradients of a FunctionNode
This routine is used by :meth:`chainer.Variable.backward` and
:func:`chainer.grad`.
Args:
func (~chainer.FunctionNode): The function for which gradients are
accumulated.
target_input_indexes (tuple of int): Sorted indices of the inputs
that require gradients. It is guaranteed that this tuple contains
at least one element.
grad_outputs (tuple of Variable): Gradients w.r.t. the output
variables. If the gradient w.r.t. an output variable is not
given, the corresponding element is ``None``.
grad_inputs (dict): References of the gradients w.r.t. the input
variables.
is_debug (bool): ``True`` if the debug mode is enabled.
"""
if is_debug:
assert isinstance(target_input_indexes, tuple)
assert target_input_indexes == tuple(sorted(target_input_indexes))
assert isinstance(grad_outputs, tuple)
if func.backward_accumulate.__code__ \
is not chainer.FunctionNode.backward_accumulate.__code__:
# backward_accumulate is overridden
grad_inputs_tuple = tuple([
_pop_or_none(grad_inputs[func.inputs[i]])
for i in target_input_indexes
])
# Call backward_accumulate()
try:
gxs = func.backward_accumulate(
target_input_indexes, grad_outputs, grad_inputs_tuple)
except Exception as e:
_reraise_with_stack(func, e)
else: # otherwise, backward should be overridden
# Call backward()
try:
gxs = func.backward(
target_input_indexes, grad_outputs)
except Exception as e:
_reraise_with_stack(func, e)
if is_debug:
for gx in gxs:
if not (gx is None or isinstance(gx, chainer.Variable)):
raise ValueError(func._get_error_message(
'type of gradients returned from backward is '
'incorrect: '
'{} != expected {}'.format(
type(gx), chainer.Variable)))
len_gxs = len(gxs)
if len_gxs == len(func.inputs):
gxs = tuple([gxs[i] for i in target_input_indexes])
elif len_gxs != len(target_input_indexes):
msg = 'number of gradients returned from backward is incorrect: '
if len(func.inputs) == len(target_input_indexes):
msg += (
'%s != expected %s' % (len_gxs, len(func.inputs)))
else:
msg += (
'%s != expected %s or %s'
% (len_gxs, len(func.inputs), len(target_input_indexes)))
raise ValueError(func._get_error_message(msg))
for i, gx in six.moves.zip(target_input_indexes, gxs):
if gx is None or gx.raw_array is None:
continue
grad_inputs[func.inputs[i]].append(gx)
if is_debug:
node_x = func.inputs[i]
g_input_list = grad_inputs[node_x]
if gx.shape != node_x.shape:
raise ValueError(func._get_error_message(
'shape of gradients returned from backward is '
'incorrect: '
'input-index={}, actual {} != expected {}'.format(
i, gx.shape, node_x.shape)))
if gx is not None and g_input_list:
g_input = g_input_list[0]
if gx.shape != g_input.shape:
raise ValueError(func._get_error_message(
'shape of gradients returned from backward is '
'incorrect: '
'input-index={}, actual {} != expected {}'.format(
i, gx.shape, g_input.shape)))
if gx.dtype != g_input.dtype:
raise ValueError(func._get_error_message(
'dtype of gradients returned from backward is '
'incorrect: '
'input-index={}, actual {} != expected {}'.format(
i, gx.dtype, g_input.dtype)))
del gxs
if is_debug:
# each grad is a list of variables
# iter_gxs expands it as a sequence of variables.
def iter_gxs(gxs):
for gx in gxs:
for gx_elem in gx:
yield gx_elem
for gx in iter_gxs(grad_inputs.values()):
if chainer.backend._contains_nan(gx.data):
raise RuntimeError(
'NaN is detected on backward computation of {}'
.format(func.label))
if not func.lazy_grad_sum:
for gx in grad_inputs.values():
_reduce(gx)
def _get_columns():
# Returns the terminal column width.
if sys.version_info >= (3, 3):
cols, rows = shutil.get_terminal_size()
return cols
return int(os.getenv('COLUMNS', 80))
def _reraise_with_stack(func, e):
if func.stack is not None:
# Reraise any type of exceptions including the following:
# - Chainer raises RuntimeError for NaN values; and
# - NumPy raises FloatingPointError for invalid values.
# TODO(kataoka): unify variable._check_grad_type and below
additional_message = \
'\n{}\nStacktrace of the function is below:\n{}'.format(
'-' * _get_columns(),
''.join(traceback.format_list(func.stack[:-1])))
if e.args:
e.args = (e.args[0] + additional_message,) + e.args[1:]
else:
e.args = (additional_message,)
raise
# ==== chainer-master/chainer/initializer.py ====
import typing as tp # NOQA
from chainer import types # NOQA
from chainer import utils
class Initializer(object):
"""Initializes array.
It initializes the given array.
Attributes:
dtype: Data type specifier. It is for type check in ``__call__``
function.
"""
def __init__(self, dtype: tp.Optional[types.DTypeSpec] = None) -> None:
self.dtype = dtype # type: types.DTypeSpec
def __call__(self, array: types.NdArray) -> None:
"""Initializes given array.
This method destructively changes the value of array.
The derived class is required to implement this method.
The algorithms used to make the new values depend on the
concrete derived classes.
Args:
array (:ref:`ndarray`):
An array to be initialized by this initializer.
"""
raise NotImplementedError()
# Original code forked from MIT licensed keras project
# https://github.com/fchollet/keras/blob/master/keras/initializations.py
def get_fans(shape):
if not isinstance(shape, tuple):
raise ValueError(
'shape must be tuple. Actual type: {}'.format(type(shape)))
if len(shape) < 2:
raise ValueError(
'shape must be of length >= 2. Actual shape: {}'.format(shape))
receptive_field_size = utils.size_of_shape(shape[2:])
fan_in = shape[1] * receptive_field_size
fan_out = shape[0] * receptive_field_size
return fan_in, fan_out
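# A minimal sketch (hypothetical shape): for a 2D convolution weight of
# shape (out_channels, in_channels, kh, kw), the receptive field size is
# kh * kw, so:
#
#     fan_in, fan_out = get_fans((32, 16, 3, 3))
#     # fan_in  == 16 * 3 * 3 == 144
#     # fan_out == 32 * 3 * 3 == 288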
# ==== chainer-master/chainer/optimizer.py ====
from __future__ import absolute_import
import collections
import copy
import math
import warnings
import numpy
import six
import chainer
from chainer import link as link_module
from chainer import optimizer_hooks
from chainer import serializer as serializer_module
from chainer import variable
import chainerx
class _Hookable(object):
"""A hookable.
Args:
invalid_timing_fallback(bool):
If ``True``, an invalid value of ``timing`` will fall back to
``'pre'``.
"""
def __init__(self, invalid_timing_fallback=False):
self._pre_update_hooks = collections.OrderedDict()
self._post_update_hooks = collections.OrderedDict()
self._invalid_timing_fallback = invalid_timing_fallback
def add_hook(self, hook, name, timing):
"""Adds a hook function."""
if not callable(hook):
raise TypeError('hook function must be callable')
if timing not in ('pre', 'post', 'auto'):
raise ValueError(
'timing must be one of (\'pre\', \'post\', \'auto\')')
if timing == 'auto':
timing = getattr(hook, 'timing', 'pre')
if (timing not in ('pre', 'post')
and self._invalid_timing_fallback):
warnings.warn(
'Hook timing attribute not in (\'pre\', \'post\'), '
'defaulting timing to \'pre\'.')
timing = 'pre'
if name is None:
name = getattr(hook, 'name', getattr(hook, '__name__', None))
if name is None:
raise ValueError(
'the name of the hook function is not specified')
if name in self._pre_update_hooks or name in self._post_update_hooks:
raise KeyError('hook "{}" already exists'.format(name))
if timing == 'pre':
self._pre_update_hooks[name] = hook
else:
self._post_update_hooks[name] = hook
def remove_hook(self, name):
"""Removes the specified hook function.
Args:
name (str): Name of the hook function to be removed. The hook
function registered with this name will be removed.
"""
try:
del self._pre_update_hooks[name]
except KeyError:
del self._post_update_hooks[name]
def call_hooks(self, timing, args):
"""Invokes hook functions in registration order."""
hooks = self.__get_hooks(timing)
for hook in six.itervalues(hooks):
self.call_hook(hook, args)
def call_hook(self, hook, args):
hook(*args)
def __get_hooks(self, timing):
if timing == 'pre':
return self._pre_update_hooks
elif timing == 'post':
return self._post_update_hooks
raise ValueError('timing must be either \'pre\' or \'post\'')
class Hyperparameter(object):
"""Set of hyperparameter entries of an optimizer.
This is a utility class to provide a set of hyperparameter entries for
update rules and an optimizer. Each entry can be set as an attribute of a
hyperparameter object.
A hyperparameter object can hold a reference to its parent hyperparameter
object. When an attribute does not exist in the child hyperparameter, it
automatically refers to the parent. We typically set the hyperparameter of
the gradient method as the parent of the hyperparameter of each update
rule. It enables us to centralize the management of hyperparameters (e.g.
we can change the learning rate of all update rules just by modifying the
hyperparameter of the central optimizer object), while users can freely
customize the hyperparameter of each update rule if needed.
Args:
parent (Hyperparameter): Parent hyperparameter.
"""
def __init__(self, parent=None):
self._parent = parent
def __getattr__(self, name):
if '_parent' not in self.__dict__:
raise AttributeError('_parent is not set up yet')
return getattr(self._parent, name)
def __repr__(self):
d = self.get_dict()
keys = sorted(d.keys())
values_repr = ', '.join('%s=%s' % (k, d[k]) for k in keys)
return 'Hyperparameter(%s)' % values_repr
@property
def parent(self):
"""Parent hyperparameter object."""
return self._parent
def get_dict(self):
"""Converts the hyperparameter into a dictionary.
Returns:
Dictionary containing all entries that can be referred by this
hyperparameter object.
"""
d = {} if self._parent is None else self._parent.get_dict()
for k, v in six.iteritems(self.__dict__):
if k != '_parent':
d[k] = v
return d
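# A minimal sketch (hypothetical values) of the parent-fallback behavior
# described above:
#
#     parent = Hyperparameter()
#     parent.lr = 0.01
#     child = Hyperparameter(parent)
#     child.lr        # -> 0.01, read through to the parent
#     child.lr = 0.1  # overrides only this child
#     parent.lr       # -> 0.01, unchanged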
class UpdateRule(object):
"""Base class of all update rules.
Update rule is an object that implements how to update one parameter
variable using the gradient of a loss function. This class provides the
interface and the common features of any update rules.
An update rule can be set to a :class:`~chainer.Variable` object that
represents a parameter array of a model. An :class:`~chainer.Optimizer`
instance defines which parameters to update, and the update rule instance
of each parameter defines how to update it.
Hook functions can be set to any update rule instance. The hook function is
called just before or after any updates (configurable) in the order of
registrations.
An implementation of update rule should override :meth:`update_core` or
its device-dependent variants (i.e., :meth:`update_core_cpu` and
:meth:`update_core_gpu`).
The state (e.g. a moving average of the gradient) of the update rule is
stored into the state dictionary. An implementation of update rule using
state should also override :meth:`init_state` to initialize the state at
the first update. The values of the state dictionary are automatically
copied to the appropriate device before the update based on the data and
grad arrays.
Args:
parent_hyperparam (Hyperparameter): Hyperparameter that provides the
default values.
Attributes:
enabled (bool): Flag to configure if this update rule is active. If the
update rule is not active (i.e., ``enabled = False``), the
:meth:`update` method does not update the parameter.
hyperparam (Hyperparameter): Hyperparameter of the update rule.
~UpdateRule.t (int): Number of updates made by this update rule.
"""
is_elementwise = False
def __init__(self, parent_hyperparam=None):
self._state = None
self.enabled = True
self.hyperparam = Hyperparameter(parent_hyperparam)
self.t = 0
self._use_fp32_update = False
self._fp32_param = None
self._hookable = _Hookable()
@property
def state(self):
"""State dictionary."""
return self._state
def add_hook(self, hook, name=None, timing='auto'):
"""Adds a hook function.
The hook function is called before or after any updates (see the timing
attribute).
Args:
hook (callable): Hook function to be added. It takes two
arguments: the update rule object and the parameter variable.
name (str): Name of the hook function. The name attribute of the
hook function is used by default.
timing (str): Specifies when the hook is called. If 'auto', the
timing property of the hook will decide the timing.
If 'pre', the hook will be called before any updates.
If 'post', the hook will be called after any updates.
If 'auto' and the timing property of the hook is not
available, timing will default to 'pre'.
"""
self._hookable.add_hook(hook, name, timing)
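# A minimal sketch of registering a per-parameter hook (``param`` is a
# hypothetical parameter variable); the hook receives the update rule and
# the parameter, as described above:
#
#     def log_update(rule, param):
#         print(rule.t, param.name)
#
#     param.update_rule.add_hook(log_update, name='log', timing='post')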
def remove_hook(self, name):
"""Removes the specified hook function.
Args:
name (str): Name of the hook function to be removed. The hook
function registered with this name will be removed.
"""
self._hookable.remove_hook(name)
def update(self, param):
"""Invokes hook functions and updates the parameter.
Args:
param (~chainer.Variable): Variable to be updated.
"""
if not self.enabled:
return
self.t += 1
with chainer.using_device(param.device):
with variable._AllowArrayAccessWithNonstandardLayout():
self.__update(param)
def __update(self, param):
try:
param_dtype = param.dtype
except RuntimeError:
param_dtype = None # uninitialized and dtype is not determined
is_initialized = param.array is not None
loss_scale = param._loss_scale
# Apply use_fp32_update
if self._use_fp32_update and param_dtype == numpy.float16:
# Create fp32 parameter if not created yet.
if self._fp32_param is None:
if is_initialized:
self._fp32_param = variable.Parameter(
param.array.astype(numpy.float32),
name=param.name)
else:
self._fp32_param = self._create_uninitialized_parameter(
numpy.float32, name=param.name)
fp32_param = self._fp32_param
# Convert the gradient
if is_initialized:
fp32_param.grad = param.grad.astype(numpy.float32)
param_ = fp32_param
fp32_converted = True
else:
param_ = param
fp32_converted = False
if is_initialized:
# Init states
self._init_states(param_)
# Apply loss scaling
if (loss_scale is not None
and not isinstance(param_.array, chainerx.ndarray)):
param_.grad /= loss_scale
# Call update_core
self._hookable.call_hooks('pre', args=(self, param_,))
self.update_core(param_)
self._hookable.call_hooks('post', args=(self, param_,))
# Convert back to the original dtype
if fp32_converted:
if is_initialized:
if isinstance(param.array, chainerx.ndarray):
param.array[:] = fp32_param.array.astype(param.dtype)
else:
param.array = fp32_param.array.astype(param.dtype)
fp32_param.grad = None
def _create_uninitialized_parameter(self, dtype, name):
# Creates an uninitialized parameter with given dtype.
# This is somewhat tricky but the parameter is created with a
# dummy initializer with the dtype.
def initializer(array):
assert False # the parameter should never be initialized.
initializer.dtype = dtype
param = variable.Parameter(initializer, name=name)
assert param.dtype == dtype
assert param.array is None
return param
def update_core(self, param):
"""Updates the parameter.
Implementation of UpdateRule should override this method or both of
:meth:`update_core_cpu` and :meth:`update_core_gpu`.
Args:
param (~chainer.Variable): Variable to be updated.
"""
device = param.device
with chainer.using_device(device):
if device.xp is chainerx:
self.update_core_chainerx(param)
elif device.xp is numpy:
self.update_core_cpu(param)
else:
self.update_core_gpu(param)
def update_core_cpu(self, param):
"""Updates the parameter on CPU.
See :meth:`update_core` for details.
Args:
param (~chainer.Variable): Variable to be updated.
"""
raise NotImplementedError
def update_core_gpu(self, param):
"""Updates the parameter on GPU.
See :meth:`update_core` for details.
Args:
param (~chainer.Variable): Variable to be updated.
"""
raise NotImplementedError
def update_core_chainerx(self, param):
"""Updates the ChainerX parameter.
This method can be overridden to implement custom update logic.
The default implementation is to convert the parameter to a
memory-shared NumPy/CuPy parameter and call the corresponding update
method.
See :meth:`update_core` for details.
Args:
param (~chainer.Variable): Variable to be updated.
"""
device = param.device
fallback_device = device.fallback_device
# Convert state arrays to NumPy/CuPy
chainerx_state_arrays = None
state = self.state
if state is not None:
chainerx_state_arrays = {}
for state_name, st in state.items():
if isinstance(st, chainerx.ndarray):
fallback_arr = fallback_device.send(st)
state[state_name] = fallback_arr
chainerx_state_arrays[state_name] = (st, fallback_arr)
# Create a temporary parameter with memory-shared NumPy/CuPy array
# If the ChainerX parameter has a cached NumPy/CuPy copy, use the
# cache and avoid redundant conversion. Else, create the cache here
# and use it.
if param._chainerx_fallback_array is None:
param._chainerx_fallback_array = fallback_device.send(param.array)
temp_param = variable.Variable._init_unchecked(
param._chainerx_fallback_array,
device=fallback_device,
is_chainerx_array=False)
# TODO(niboshi): Avoid accessing private attribute
if param._grad_valid:
temp_param._set_grad_without_check(
fallback_device.send(param.grad))
# Update
self.update_core(temp_param)
# Restore state arrays
if chainerx_state_arrays:
for state_name, (arr, fallback_arr) in (
chainerx_state_arrays.items()):
cur_arr = state[state_name]
if cur_arr is not fallback_arr:
# The optimizer altered the reference of the state, instead
# of updating it in-place. We need to convert the new state
# back to ChainerX.
arr = device.send(cur_arr)
state[state_name] = arr
def init_state(self, param):
"""Initializes the state.
Any implementation that uses the state should override this method.
This method is called at the first update.
Args:
param (~chainer.Variable): Parameter variable. It can be used to
extract the shape and the data type of the parameter.
"""
pass
def serialize(self, serializer):
"""Serializes the update rule state.
Be careful that this method only saves/loads the state of the update
rule. The parameters of the target link are not saved/loaded by this
method, so you need to serialize the target link separately if you
want to fully recover the training state including parameters.
Args:
serializer (~chainer.AbstractSerializer): Serializer object.
"""
self.t = serializer('t', self.t)
if self.state is None:
if isinstance(serializer, serializer_module.Deserializer):
# try to initialize the state to retrieve state entries
self._state = {}
self_copy = copy.copy(self)
arr = numpy.empty(1, dtype=numpy.float32)
self_copy.init_state(variable.Variable(arr, grad=arr))
for key in self._state:
try:
value = serializer(key, None)
except KeyError:
if self.enabled:
raise
value = None
# leave the update rule state as `None` if the keys are not
# contained in the snapshot, so that these states can be
# automatically initialized with the `_init_states` method
if value is None:
self._state = None
break
else:
self._state[key] = value
else:
for key in self._state:
self._state[key] = serializer(key, self._state[key])
def _init_states(self, param):
device = param.device
with chainer.using_device(device):
state = self.state
if state is None:
state = self._state = {}
self.init_state(param)
for name, value in six.iteritems(state):
if not isinstance(value, chainer.get_array_types()):
continue
state[name] = device.send(value)
def use_fp32_update(self, flag=True):
"""Enables use of parameter update in fp32.
When it is enabled and the data type of the original parameter variable
is fp16, an fp32 copy of the parameter variable is automatically created
and retained at ``self._fp32_param``, and the parameter is updated in
fp32 in the following way.
1. copies the grad of original parameter variable to the grad of fp32
parameter variable, converting its data type from fp16 to fp32.
2. updates the parameter in fp32.
3. copies the data of fp32 parameter variable to the data of original
parameter variable, converting its data type from fp32 to fp16.
See :meth:`update` for details.
"""
self._use_fp32_update = flag
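# A minimal sketch (assuming ``model`` is a chainer.Link with fp16
# parameters) of enabling the fp32 master-copy update described above:
#
#     opt = chainer.optimizers.SGD().setup(model)
#     for p in model.params():
#         p.update_rule.use_fp32_update()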
class _OptimizerHookable(_Hookable):
def __init__(self, optimizer):
super(_OptimizerHookable, self).__init__(
invalid_timing_fallback=True)
self.optimizer = optimizer
def call_hook(self, hook, args):
assert args == ()
self.optimizer.call_hook(hook)
class Optimizer(object):
"""Base class of all numerical optimizers.
This class provides basic features for all optimization methods. It
optimizes parameters of a *target link*. The target link is registered via
the :meth:`setup` method, and then the :meth:`update` method updates its
parameters based on a given loss function.
Each optimizer implementation must be defined as a child class of
Optimizer. It must override the :meth:`update` method.
If the optimizer is based on single gradient computation (like
most first-order methods), then it should inherit :class:`GradientMethod`,
which adds some features dedicated for the first order methods, including
the support of :class:`~chainer.UpdateRule`.
An Optimizer instance also supports *hook functions*. A hook function is
registered by the :meth:`add_hook` method. Each hook function is called
in registration order before or after the actual parameter update
(configurable). If the hook function has an attribute
``call_for_each_param`` and its value is ``True``, the hook function is
used as a hook function of all update rules (i.e., it is invoked for every
parameter by passing the corresponding update rule and the parameter).
Attributes:
~Optimizer.target: Target link object.
It is set by the :meth:`setup` method.
~Optimizer.t: Number of update steps. It must be incremented by the
:meth:`update` method.
~Optimizer.epoch: Current epoch. It is incremented by the
:meth:`new_epoch` method.
~Optimizer.use_auto_new_epoch: Boolean flag to indicate if
:meth:`new_epoch` will be called by the updater. Updater should
set this flag to ``True`` if it automatically calls
:meth:`new_epoch`.
"""
target = None
t = 0
epoch = 0
_pre_update_hooks = None
_post_update_hooks = None
_loss_scale = None
_loss_scale_max = 65504 # max representable value with fp16
_loss_scaling_is_dynamic = False
use_auto_new_epoch = False
_hookable = None
def setup(self, link):
"""Sets a target link and initializes the optimizer states.
Given link is set to the :attr:`target` attribute. It also prepares the
optimizer state dictionaries corresponding to all parameters in the
link hierarchy. The existing states are discarded.
Args:
link (~chainer.Link): Target link object.
Returns:
The optimizer instance.
.. note::
As of v4.0.0, this function returns the optimizer instance itself
so that you can instantiate and setup the optimizer in one line,
e.g., ``optimizer = SomeOptimizer().setup(link)``.
"""
if not isinstance(link, link_module.Link):
raise TypeError('optimization target must be a link')
self.target = link
self.t = 0
self.epoch = 0
self._hookable = _OptimizerHookable(self)
return self
def update(self, lossfun=None, *args, **kwds):
"""Updates the parameters.
This method updates the parameters of the target link. The behavior of
this method differs depending on whether ``lossfun`` is given or not.
If ``lossfun`` is given, this method typically clears the gradients,
calls the loss function with given extra arguments, and calls the
:meth:`~chainer.Variable.backward` method of its output to compute the
gradients. The actual implementation might call ``lossfun`` more than
once.
If ``lossfun`` is not given, then this method assumes that the
gradients of all parameters are already computed. An implementation
that requires multiple gradient computations might raise an error on
this case.
In both cases, this method invokes the update procedure for all
parameters.
Args:
lossfun (callable):
Loss function.
You can specify one of loss functions from
:doc:`built-in loss functions </reference/functions>`, or
your own loss function.
It should not be a
:doc:`loss function with parameters </reference/links>`
(i.e., a :class:`~chainer.Link` instance).
The function must accept arbitrary arguments
and return one :class:`~chainer.Variable` object that
represents the loss (or objective) value.
Returned value must be a Variable derived from the input
Variable object.
``lossfun`` can be omitted for single gradient-based methods.
In this case, this method assumes that the gradient arrays have
already been computed.
args, kwds: Arguments for the loss function.
"""
raise NotImplementedError
def new_epoch(self, auto=False):
"""Starts a new epoch.
This method increments the :attr:`epoch` count. Note that if the
optimizer depends on the epoch count, then the user should call this method
appropriately at the beginning of each epoch.
Args:
auto (bool): Should be ``True`` if this method is called by an
updater. In this case, :attr:`use_auto_new_epoch` should be set
to ``True`` by the updater.
"""
if auto:
if not self.use_auto_new_epoch:
raise RuntimeError(
'invalid new_epoch call with auto=True.\n'
'Fix the updater to set '
'optimizer.use_auto_new_epoch = True.')
else:
if self.use_auto_new_epoch:
raise RuntimeError(
'duplicated new_epoch with the updater.\n'
'Pass auto_new_epoch=False to the updater or stop calling '
'new_epoch outside the updater.')
self.epoch += 1
def _check_set_up(self):
if self._hookable is None:
raise RuntimeError('Optimizer is not set up. Call `setup` method.')
def add_hook(self, hook, name=None, timing='auto'):
"""Registers a hook function.
Hook function is typically called right after the gradient computation,
though the timing depends on the optimization method, and the timing
attribute.
Args:
hook (callable): Hook function. If ``hook.call_for_each_param`` is
true, this hook function is called for each parameter by
passing the update rule and the parameter. Otherwise, this hook
function is called only once each iteration by passing the
optimizer.
name (str): Name of the registration. If omitted, ``hook.name`` is
used by default.
timing (str): Specifies when the hook is called. If 'auto', the
timing property of the hook will decide the timing.
If 'pre', the hook will be called before any updates.
If 'post', the hook will be called after any updates.
"""
self._check_set_up()
self._hookable.add_hook(hook, name, timing)
def remove_hook(self, name):
"""Removes a hook function.
Args:
name (str): Registered name of the hook function to remove.
"""
self._check_set_up()
self._hookable.remove_hook(name)
def call_hooks(self, timing='pre'):
"""Invokes hook functions in registration order."""
self._check_set_up()
self._hookable.call_hooks(timing, ())
def call_hook(self, hook):
if getattr(hook, 'call_for_each_param', False):
for param in self.target.params():
hook(param.update_rule, param)
else:
hook(self)
def serialize(self, serializer):
"""Serializes or deserializes the optimizer.
It only saves or loads the following things:
- Optimizer states
- Global states (:attr:`t` and :attr:`epoch`)
**It does not save or load the parameters of the target link.** They
should be separately saved or loaded.
Args:
serializer (~chainer.AbstractSerializer): Serializer or
deserializer object.
"""
self.t = serializer('t', self.t)
self.epoch = serializer('epoch', self.epoch)
for name, param in self.target.namedparams():
rule = getattr(param, 'update_rule', None)
if rule is not None:
rule.serialize(serializer[name])
def loss_scaling(self, interval=1000, scale=None):
"""Configures the loss scaling algorithm.
Args:
interval (int): Number of iterations until scaling factor gets
doubled. This is effective when "dynamic" loss scaling is used.
scale (float): Loss scaling factor. If ``None``, "dynamic" loss
scaling is used, otherwise "static" loss scaling is used.
"""
if scale is None:
self._loss_scaling_is_dynamic = True
if interval < 1:
raise ValueError('interval must be greater than or equal to 1.'
' Actual: {}'.format(interval))
self._loss_scale = 1.0
self._loss_scaling_multiplier = math.pow(2.0, 1.0 / interval)
self._loss_scaling_isnan_ever = False
else:
if scale <= 0:
raise ValueError('loss_scale must be a positive number. '
'Actual: {}'.format(scale))
self._loss_scale = scale
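# A minimal sketch (``opt`` is a hypothetical optimizer instance):
#
#     opt.loss_scaling()             # "dynamic" loss scaling
#     opt.loss_scaling(scale=128.0)  # "static" loss scaling, fixed factor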
def set_loss_scale(self, loss_scale):
"""Sets loss scaling factor."""
self.loss_scaling(scale=loss_scale)
def check_nan_in_grads(self):
"""Checks if there is NaN in grads when dynamic loss scaling used."""
self._loss_scaling_isnan = False
if not self._loss_scaling_is_dynamic:
return
for name, param in self.target.namedparams():
xp = param.device.xp
if not xp.all(xp.isfinite(param.grad)):
self._loss_scaling_isnan = True
self._loss_scaling_isnan_ever = True
warnings.warn(
'Non finite number found in param.grad of {}'
' (iteration: {}, loss_scale: {})'
.format(name, self.t, self._loss_scale))
def is_safe_to_update(self):
return not self._loss_scaling_isnan
def update_loss_scale(self):
if not self._loss_scaling_is_dynamic:
return
if self._loss_scaling_isnan:
multiplier = 0.5
elif self._loss_scaling_isnan_ever:
multiplier = self._loss_scaling_multiplier
else:
multiplier = 2.0
self._loss_scale = max(1, min(self._loss_scale_max,
self._loss_scale * multiplier))
class GradientMethod(Optimizer):
"""Base class of all single gradient-based optimizers.
This is an extension of the :class:`Optimizer` class. Typical gradient
methods that just require the gradient at the current parameter vector on
an update can be implemented as its child class.
This class uses :class:`~chainer.UpdateRule` to manage the update rule of
each parameter. A child class of GradientMethod should override
:meth:`create_update_rule` to create the default update rule of each
parameter.
This class also provides :attr:`hyperparam`, which is the hyperparameter
used as the default configuration of each update rule. All built-in
gradient method implementations also provide proxy properties that act
as aliases to the attributes of :attr:`hyperparam`. It is recommended that
you provide such an alias to each attribute. It can be done by only adding
one line for each attribute using :class:`HyperparameterProxy`.
Attributes:
hyperparam (Hyperparameter): The hyperparameter of the gradient
method. It is used as the default configuration of each update
rule (i.e., the hyperparameter of each update rule refers this
hyperparameter as its parent).
"""
def __init__(self):
super(GradientMethod, self).__init__()
self.hyperparam = Hyperparameter()
self._use_fp32_update = False
def setup(self, link):
super(GradientMethod, self).setup(link)
for param in link.params():
param.update_rule = self.create_update_rule()
if self._use_fp32_update:
param.update_rule.use_fp32_update()
return self
def reallocate_cleared_grads(self):
"""Reallocate gradients cleared by :meth:`~chainer.Variable.cleargrad`.
This method allocates arrays for all gradients which have :obj:`None`.
This method is called before and after every optimizer hook.
If an inheriting optimizer does not require this allocation,
the optimizer can override this method with a blank function.
"""
for name, param in self.target.namedparams(False):
with variable._AllowArrayAccessWithNonstandardLayout():
has_grad = param.grad is not None
if not has_grad:
device = param.device
with chainer.using_device(device):
param._set_grad(
device.xp.zeros_like(param.raw_array),
layout_check=False)
def call_hook(self, hook):
super(GradientMethod, self).call_hook(hook)
self.reallocate_cleared_grads()
def update(self, lossfun=None, *args, **kwds):
"""Updates parameters based on a loss function or computed gradients.
This method runs in two ways.
- If ``lossfun`` is given, then it is used as a loss function to
compute gradients.
- Otherwise, this method assumes that the gradients are already
computed.
In both cases, the computed gradients are used to update parameters.
The actual update routines are defined by the update rule of each
parameter.
"""
if lossfun is not None:
use_cleargrads = getattr(self, '_use_cleargrads', True)
loss = lossfun(*args, **kwds)
if use_cleargrads:
self.target.cleargrads()
else:
self.target.zerograds()
loss.backward(loss_scale=self._loss_scale)
del loss
self.reallocate_cleared_grads()
self.check_nan_in_grads()
self.call_hooks('pre')
self.t += 1
if self.is_safe_to_update():
for param in self.target.params():
param.update()
self.reallocate_cleared_grads()
self.call_hooks('post')
self.update_loss_scale()
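# A minimal training-step sketch (``model`` is a hypothetical chainer.Link
# that returns a loss Variable; ``x`` and ``t`` are input arrays):
#
#     opt = chainer.optimizers.SGD().setup(model)
#     opt.update(model, x, t)   # clears grads, computes the loss,
#                               # backprops, then updates every parameter
#     opt.update()              # alternative: grads already computed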
def use_cleargrads(self, use=True):
"""Enables or disables use of :func:`~chainer.Link.cleargrads` in `update`.
Args:
use (bool): If ``True``, this function enables use of
`cleargrads`. If ``False``, disables use of `cleargrads`
(`zerograds` is used).
.. deprecated:: v2.0
Note that :meth:`update` calls :meth:`~Link.cleargrads` by default.
:meth:`~Link.cleargrads` is more efficient than
:meth:`~Link.zerograds`, so one does not have to call
:meth:`use_cleargrads`. This method remains for backward
compatibility.
"""
warnings.warn(
'GradientMethod.use_cleargrads is deprecated.',
DeprecationWarning)
self._use_cleargrads = use
def create_update_rule(self):
"""Creates a new update rule object.
This method creates an update rule object. It is called by
:meth:`setup` to set up an update rule of each parameter.
Each implementation of the gradient method should override this method
to provide the default update rule implementation.
Return:
UpdateRule: Update rule object.
"""
raise NotImplementedError
def use_fp32_update(self, flag=True):
"""Enables use of parameter update in fp32."""
self._use_fp32_update = flag
link = getattr(self, 'target', None)
if link is not None:
for param in link.params():
param.update_rule.use_fp32_update()
class HyperparameterProxy(object):
"""Property that acts as an alias to an attribute of the hyperparameter.
This class is used to define a property of an implementation of
:class:`GradientMethod` that acts as an alias to an attribute of the
hyperparameter.
Args:
attr_name (str): Name of the attribute of the hyperparameter.
"""
def __init__(self, attr_name):
self._attr_name = attr_name
self.__doc__ = 'Alias to ``self.hyperparam.{}``'.format(attr_name)
def __get__(self, obj, type=None):
if obj is None:
return self
return getattr(obj.hyperparam, self._attr_name)
def __set__(self, obj, value):
setattr(obj.hyperparam, self._attr_name, value)
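# A minimal sketch of how a concrete optimizer exposes such an alias
# (chainer.optimizers.SGD defines its ``lr`` attribute this way):
#
#     class MySGD(GradientMethod):        # hypothetical subclass
#         lr = HyperparameterProxy('lr')
#
#     opt = MySGD()
#     opt.lr = 0.05   # writes through to opt.hyperparam.lr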
def make_deprecation_message(module_name):
return ('chainer.optimizer.{0} is deprecated from v4. '
'Use chainer.optimizer_hooks.{0} instead.'
.format(module_name))
class WeightDecay(optimizer_hooks.WeightDecay):
def __init__(self, *args, **kwargs):
warnings.warn(make_deprecation_message('WeightDecay'),
DeprecationWarning)
return super(WeightDecay, self).__init__(*args, **kwargs)
class Lasso(optimizer_hooks.Lasso):
def __init__(self, *args, **kwargs):
warnings.warn(make_deprecation_message('Lasso'),
DeprecationWarning)
return super(Lasso, self).__init__(*args, **kwargs)
class GradientClipping(optimizer_hooks.GradientClipping):
def __init__(self, *args, **kwargs):
warnings.warn(make_deprecation_message('GradientClipping'),
DeprecationWarning)
return super(GradientClipping, self).__init__(*args, **kwargs)
class GradientNoise(optimizer_hooks.GradientNoise):
def __init__(self, *args, **kwargs):
warnings.warn(make_deprecation_message('GradientNoise'),
DeprecationWarning)
return super(GradientNoise, self).__init__(*args, **kwargs)
class GradientHardClipping(optimizer_hooks.GradientHardClipping):
def __init__(self, *args, **kwargs):
warnings.warn(make_deprecation_message('GradientHardClipping'),
DeprecationWarning)
return super(GradientHardClipping, self).__init__(*args, **kwargs)
# ==== chainer-master/chainer/functions/__init__.py ====
"""Collection of function implementations.
Functions are either implemented as :class:`~chainer.Function`\\ s or
:class:`~chainer.FunctionNode`\\ s.
"""
from chainer.functions.activation.clipped_relu import clipped_relu # NOQA
from chainer.functions.activation.clipped_relu import relu6 # NOQA
from chainer.functions.activation.crelu import crelu # NOQA
from chainer.functions.activation.elu import elu # NOQA
from chainer.functions.activation.hard_sigmoid import hard_sigmoid # NOQA
from chainer.functions.activation.leaky_relu import leaky_relu # NOQA
from chainer.functions.activation.log_softmax import log_softmax # NOQA
from chainer.functions.activation.maxout import maxout # NOQA
from chainer.functions.activation.prelu import prelu # NOQA
from chainer.functions.activation.relu import relu # NOQA
from chainer.functions.activation.rrelu import rrelu # NOQA
from chainer.functions.activation.selu import selu # NOQA
from chainer.functions.activation.sigmoid import sigmoid # NOQA
from chainer.functions.activation.softmax import softmax # NOQA
from chainer.functions.activation.softplus import softplus # NOQA
from chainer.functions.activation.swish import swish # NOQA
from chainer.functions.activation.tanh import tanh # NOQA
from chainer.functions.array.as_strided import as_strided # NOQA
from chainer.functions.array.broadcast import broadcast # NOQA
from chainer.functions.array.broadcast import broadcast_to # NOQA
from chainer.functions.array.cast import cast # NOQA
from chainer.functions.array.concat import concat # NOQA
from chainer.functions.array.copy import copy # NOQA
from chainer.functions.array.depth2space import depth2space # NOQA
from chainer.functions.array.diagonal import diagonal # NOQA
from chainer.functions.array.dstack import dstack # NOQA
from chainer.functions.array.expand_dims import expand_dims # NOQA
from chainer.functions.array.flatten import flatten # NOQA
from chainer.functions.array.flip import flip # NOQA
from chainer.functions.array.fliplr import fliplr # NOQA
from chainer.functions.array.flipud import flipud # NOQA
from chainer.functions.array.get_item import get_item # NOQA
from chainer.functions.array.hstack import hstack # NOQA
from chainer.functions.array.im2col import im2col # NOQA
from chainer.functions.array.moveaxis import moveaxis # NOQA
from chainer.functions.array.pad import pad # NOQA
from chainer.functions.array.pad_sequence import pad_sequence # NOQA
from chainer.functions.array.permutate import permutate # NOQA
from chainer.functions.array.repeat import repeat # NOQA
from chainer.functions.array.reshape import reshape # NOQA
from chainer.functions.array.resize_images import resize_images # NOQA
from chainer.functions.array.rollaxis import rollaxis # NOQA
from chainer.functions.array.scatter_add import scatter_add # NOQA
from chainer.functions.array.select_item import select_item # NOQA
from chainer.functions.array.separate import separate # NOQA
from chainer.functions.array.space2depth import space2depth # NOQA
from chainer.functions.array.spatial_transformer_grid import spatial_transformer_grid # NOQA
from chainer.functions.array.spatial_transformer_sampler import spatial_transformer_sampler # NOQA
from chainer.functions.array.split_axis import split_axis # NOQA
from chainer.functions.array.squeeze import squeeze # NOQA
from chainer.functions.array.stack import stack # NOQA
from chainer.functions.array.swapaxes import swapaxes # NOQA
from chainer.functions.array.tile import tile # NOQA
from chainer.functions.array.transpose import transpose # NOQA
from chainer.functions.array.transpose_sequence import transpose_sequence # NOQA
from chainer.functions.array.vstack import vstack # NOQA
from chainer.functions.array.where import where # NOQA
from chainer.functions.connection.bilinear import bilinear # NOQA
from chainer.functions.connection.convolution_2d import convolution_2d # NOQA
from chainer.functions.connection.convolution_nd import convolution_1d # NOQA
from chainer.functions.connection.convolution_nd import convolution_3d # NOQA
from chainer.functions.connection.convolution_nd import convolution_nd # NOQA
from chainer.functions.connection.deconvolution_2d import deconvolution_2d # NOQA
from chainer.functions.connection.deconvolution_nd import deconvolution_1d # NOQA
from chainer.functions.connection.deconvolution_nd import deconvolution_3d # NOQA
from chainer.functions.connection.deconvolution_nd import deconvolution_nd # NOQA
from chainer.functions.connection.deformable_convolution_2d_sampler import deformable_convolution_2d_sampler # NOQA
from chainer.functions.connection.depthwise_convolution_2d import depthwise_convolution_2d # NOQA
from chainer.functions.connection.dilated_convolution_2d import dilated_convolution_2d # NOQA
from chainer.functions.connection.embed_id import embed_id # NOQA
from chainer.functions.connection.linear import linear # NOQA
from chainer.functions.connection.local_convolution_2d import local_convolution_2d # NOQA
from chainer.functions.connection.shift import shift # NOQA
from chainer.functions.evaluation.accuracy import accuracy # NOQA
from chainer.functions.evaluation.binary_accuracy import binary_accuracy # NOQA
from chainer.functions.evaluation.classification_summary import classification_summary # NOQA
from chainer.functions.evaluation.classification_summary import f1_score # NOQA
from chainer.functions.evaluation.classification_summary import precision # NOQA
from chainer.functions.evaluation.classification_summary import recall # NOQA
from chainer.functions.evaluation.r2_score import r2_score # NOQA
from chainer.functions.loss.absolute_error import absolute_error # NOQA
from chainer.functions.loss.black_out import black_out # NOQA
from chainer.functions.loss.contrastive import contrastive # NOQA
from chainer.functions.loss.crf1d import argmax_crf1d # NOQA
from chainer.functions.loss.crf1d import crf1d # NOQA
from chainer.functions.loss.cross_covariance import cross_covariance # NOQA
from chainer.functions.loss.ctc import connectionist_temporal_classification # NOQA
from chainer.functions.loss.decov import decov # NOQA
from chainer.functions.loss.discriminative_loss import discriminative_margin_based_clustering_loss # NOQA
from chainer.functions.loss.hinge import hinge # NOQA
from chainer.functions.loss.huber_loss import huber_loss # NOQA
from chainer.functions.loss.mean_absolute_error import mean_absolute_error # NOQA
from chainer.functions.loss.mean_squared_error import mean_squared_error # NOQA
from chainer.functions.loss.negative_sampling import negative_sampling # NOQA
from chainer.functions.loss.sigmoid_cross_entropy import sigmoid_cross_entropy # NOQA
from chainer.functions.loss.softmax_cross_entropy import softmax_cross_entropy # NOQA
from chainer.functions.loss.squared_error import squared_difference # NOQA
from chainer.functions.loss.squared_error import squared_error # NOQA
from chainer.functions.loss.triplet import triplet # NOQA
from chainer.functions.loss.vae import bernoulli_nll # NOQA
from chainer.functions.loss.vae import gaussian_kl_divergence # NOQA
from chainer.functions.loss.vae import gaussian_nll # NOQA
from chainer.functions.math.arctanh import arctanh # NOQA
from chainer.functions.math.average import average # NOQA
from chainer.functions.math.basic_math import absolute # NOQA
from chainer.functions.math.basic_math import add # NOQA
from chainer.functions.math.batch_l2_norm_squared import batch_l2_norm_squared # NOQA
from chainer.functions.math.bias import bias # NOQA
from chainer.functions.math.ceil import ceil # NOQA
from chainer.functions.math.cholesky import cholesky # NOQA
from chainer.functions.math.clip import clip # NOQA
from chainer.functions.math.cumprod import cumprod # NOQA
from chainer.functions.math.cumsum import cumsum # NOQA
from chainer.functions.math.det import batch_det # NOQA
from chainer.functions.math.det import det # NOQA
from chainer.functions.math.digamma import digamma # NOQA
from chainer.functions.math.einsum import einsum # NOQA
from chainer.functions.math.erf import erf # NOQA
from chainer.functions.math.erfc import erfc # NOQA
from chainer.functions.math.erfcinv import erfcinv # NOQA
from chainer.functions.math.erfcx import erfcx # NOQA
from chainer.functions.math.erfinv import erfinv # NOQA
from chainer.functions.math.exponential import exp # NOQA
from chainer.functions.math.exponential import log # NOQA
from chainer.functions.math.exponential import log10 # NOQA
from chainer.functions.math.exponential import log2 # NOQA
from chainer.functions.math.exponential_m1 import expm1 # NOQA
from chainer.functions.math.fft import fft # NOQA
from chainer.functions.math.fft import ifft # NOQA
from chainer.functions.math.fix import fix # NOQA
from chainer.functions.math.floor import floor # NOQA
from chainer.functions.math.fmod import fmod # NOQA
from chainer.functions.math.hyperbolic import cosh # NOQA
from chainer.functions.math.hyperbolic import sinh # NOQA
from chainer.functions.math.identity import identity # NOQA
from chainer.functions.math.inv import batch_inv # NOQA
from chainer.functions.math.inv import inv # NOQA
from chainer.functions.math.lgamma import lgamma # NOQA
from chainer.functions.math.linear_interpolate import linear_interpolate # NOQA
from chainer.functions.math.log_ndtr import log_ndtr # NOQA
from chainer.functions.math.logarithm_1p import log1p # NOQA
from chainer.functions.math.logsumexp import logsumexp # NOQA
from chainer.functions.math.matmul import batch_matmul # NOQA
from chainer.functions.math.matmul import matmul # NOQA
from chainer.functions.math.maximum import maximum # NOQA
from chainer.functions.math.minimum import minimum # NOQA
from chainer.functions.math.minmax import argmax # NOQA
from chainer.functions.math.minmax import argmin # NOQA
from chainer.functions.math.minmax import max # NOQA
from chainer.functions.math.minmax import min # NOQA
from chainer.functions.math.ndtr import ndtr # NOQA
from chainer.functions.math.ndtri import ndtri # NOQA
from chainer.functions.math.polygamma import polygamma # NOQA
from chainer.functions.math.prod import prod # NOQA
from chainer.functions.math.scale import scale # NOQA
from chainer.functions.math.sign import sign # NOQA
from chainer.functions.math.sparse_matmul import sparse_matmul # NOQA
from chainer.functions.math.sqrt import rsqrt # NOQA
from chainer.functions.math.sqrt import sqrt # NOQA
from chainer.functions.math.square import square # NOQA
from chainer.functions.math.sum import sum # NOQA
from chainer.functions.math.sum import sum_to # NOQA
from chainer.functions.math.tensordot import tensordot # NOQA
from chainer.functions.math.trigonometric import arccos # NOQA
from chainer.functions.math.trigonometric import arcsin # NOQA
from chainer.functions.math.trigonometric import arctan # NOQA
from chainer.functions.math.trigonometric import arctan2 # NOQA
from chainer.functions.math.trigonometric import cos # NOQA
from chainer.functions.math.trigonometric import sin # NOQA
from chainer.functions.math.trigonometric import tan # NOQA
from chainer.functions.math.zeta import zeta # NOQA
from chainer.functions.noise.dropout import dropout # NOQA
from chainer.functions.noise.gaussian import gaussian # NOQA
from chainer.functions.noise.gumbel_softmax import gumbel_softmax # NOQA
from chainer.functions.noise.simplified_dropconnect import simplified_dropconnect # NOQA
from chainer.functions.noise.zoneout import zoneout # NOQA
from chainer.functions.normalization.batch_normalization import batch_normalization # NOQA
from chainer.functions.normalization.batch_normalization import fixed_batch_normalization # NOQA
from chainer.functions.normalization.batch_renormalization import batch_renormalization # NOQA
from chainer.functions.normalization.batch_renormalization import fixed_batch_renormalization # NOQA
from chainer.functions.normalization.decorrelated_batch_normalization import decorrelated_batch_normalization # NOQA
from chainer.functions.normalization.decorrelated_batch_normalization import fixed_decorrelated_batch_normalization # NOQA
from chainer.functions.normalization.group_normalization import group_normalization # NOQA
from chainer.functions.normalization.l2_normalization import normalize # NOQA
from chainer.functions.normalization.layer_normalization import layer_normalization # NOQA
from chainer.functions.normalization.local_response_normalization import local_response_normalization # NOQA
from chainer.functions.rnn.lstm import lstm # NOQA
from chainer.functions.rnn.n_step_gru import n_step_bigru # NOQA
from chainer.functions.rnn.n_step_gru import n_step_gru # NOQA
from chainer.functions.rnn.n_step_lstm import n_step_bilstm # NOQA
from chainer.functions.rnn.n_step_lstm import n_step_lstm # NOQA
from chainer.functions.rnn.n_step_rnn import n_step_birnn # NOQA
from chainer.functions.rnn.n_step_rnn import n_step_rnn # NOQA
from chainer.functions.rnn.slstm import slstm # NOQA
from chainer.functions.rnn.tree_lstm import tree_lstm # NOQA
from chainer.functions.pooling.average_pooling_2d import average_pooling_2d # NOQA
from chainer.functions.pooling.average_pooling_nd import average_pooling_1d # NOQA
from chainer.functions.pooling.average_pooling_nd import average_pooling_3d # NOQA
from chainer.functions.pooling.average_pooling_nd import average_pooling_nd # NOQA
from chainer.functions.pooling.max_pooling_nd import max_pooling_1d # NOQA
from chainer.functions.pooling.max_pooling_nd import max_pooling_2d # NOQA
from chainer.functions.pooling.max_pooling_nd import max_pooling_3d # NOQA
from chainer.functions.pooling.max_pooling_nd import max_pooling_nd # NOQA
from chainer.functions.pooling.roi_average_align_2d import roi_average_align_2d # NOQA
from chainer.functions.pooling.roi_average_pooling_2d import roi_average_pooling_2d # NOQA
from chainer.functions.pooling.roi_max_align_2d import roi_max_align_2d # NOQA
from chainer.functions.pooling.roi_max_pooling_2d import roi_max_pooling_2d # NOQA
from chainer.functions.pooling.roi_pooling_2d import roi_pooling_2d # NOQA
from chainer.functions.pooling.spatial_pyramid_pooling_2d import spatial_pyramid_pooling_2d # NOQA
from chainer.functions.pooling.unpooling_2d import unpooling_2d # NOQA
from chainer.functions.pooling.unpooling_nd import unpooling_1d # NOQA
from chainer.functions.pooling.unpooling_nd import unpooling_3d # NOQA
from chainer.functions.pooling.unpooling_nd import unpooling_nd # NOQA
from chainer.functions.pooling.upsampling_2d import upsampling_2d # NOQA
from chainer.functions.util.forget import forget # NOQA
# Aliases
from chainer.functions.math.average import average as mean # NOQA
# ==== chainer-master/chainer/functions/evaluation/r2_score.py ====
from chainer import backend
from chainer import function
from chainer.utils import type_check
class R2_score(function.Function):
def __init__(self, sample_weight, multioutput):
if sample_weight is not None:
raise NotImplementedError()
if multioutput in ['uniform_average', 'raw_values']:
self.multioutput = multioutput
else:
raise ValueError('invalid multioutput argument')
def check_type_forward(self, in_types):
type_check._argname(in_types, ('pred', 'true'))
pred_type, true_type = in_types
type_check.expect(
pred_type.dtype.kind == 'f',
true_type.dtype.kind == 'f'
)
type_check.expect(
pred_type.shape == true_type.shape,
)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
pred, true = inputs
SS_res = xp.asarray(
xp.sum((pred - true) ** 2, axis=0))
SS_tot = xp.asarray(
xp.sum((true - xp.mean(true, axis=0)) ** 2, axis=0))
SS_tot_iszero = SS_tot == 0
SS_tot[SS_tot_iszero] = 1 # Assign dummy value to avoid zero-division
ret = xp.where(
SS_tot_iszero, 0.0, 1 - SS_res / SS_tot
).astype(pred.dtype, copy=False)
if self.multioutput == 'uniform_average':
return xp.asarray(ret.mean()),
elif self.multioutput == 'raw_values':
return ret,
def r2_score(pred, true, sample_weight=None, multioutput='uniform_average'):
"""Computes R^2(coefficient of determination) regression score function.
Args:
pred (:class:`~chainer.Variable` or :ref:`ndarray`): Variable holding a
vector, matrix or tensor of estimated target values.
true (:class:`~chainer.Variable` or :ref:`ndarray`): Variable holding a
vector, matrix or tensor of correct target values.
        sample_weight: This argument is for compatibility with scikit-learn's
            implementation of r2_score. The current implementation accepts
            ``None`` only.
        multioutput (string): ['uniform_average', 'raw_values']. If
            'uniform_average', this function returns an average of the R^2
            scores of multiple outputs. If 'raw_values', this function
            returns a set of R^2 scores, one for each output.
Returns:
~chainer.Variable: A Variable holding a scalar array of the R^2 score
if 'multioutput' is 'uniform_average' or a vector of R^2 scores if
'multioutput' is 'raw_values'.
.. note:: This function is non-differentiable.
"""
return R2_score(sample_weight=sample_weight,
multioutput=multioutput)(pred, true)
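# Usage sketch (not part of the original module; the arrays below are
# illustrative assumptions): averaged vs. per-output R^2 on toy data.
if __name__ == '__main__':
    import numpy as np
    pred = np.array([[2.5, 0.0], [2.0, 8.0]], dtype=np.float32)
    true = np.array([[3.0, -0.5], [2.0, 7.0]], dtype=np.float32)
    # Average of the per-column R^2 scores.
    print(r2_score(pred, true).array)
    # One R^2 score per output column.
    print(r2_score(pred, true, multioutput='raw_values').array)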
| 2,745
| 37.138889
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/evaluation/accuracy.py
|
import numpy
import six
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
import chainerx
class Accuracy(function_node.FunctionNode):
def __init__(self, ignore_label=None):
self.ignore_label = ignore_label
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 't'))
x_type, t_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
t_type.dtype.kind == 'i'
)
t_ndim = type_check.eval(t_type.ndim)
type_check.expect(
x_type.ndim >= t_type.ndim,
x_type.shape[0] == t_type.shape[0],
x_type.shape[2: t_ndim + 1] == t_type.shape[1:]
)
for i in six.moves.range(t_ndim + 1, type_check.eval(x_type.ndim)):
type_check.expect(x_type.shape[i] == 1)
def forward_chainerx(self, inputs):
return self._forward(chainerx, inputs)
def forward_cpu(self, inputs):
return self._forward(numpy, inputs)
def forward_gpu(self, inputs):
return self._forward(cuda.cupy, inputs)
def _forward(self, xp, inputs):
y, t = inputs
if self.ignore_label is not None:
mask = (t == self.ignore_label)
ignore_cnt = mask.sum()
            # `pred == t` always holds where the true label is ignore_label,
            # so those matches are subtracted below via ignore_cnt.
# TODO(henry0312)
# If cupy.where returns indexes, we could make the code better.
# Also, we would need Advanced Indexing.
pred = xp.where(mask, self.ignore_label,
y.argmax(axis=1).reshape(t.shape))
count = (pred == t).sum() - ignore_cnt
total = t.size - ignore_cnt
if xp is numpy:
# Avoid warning of `divide by zero`
if total == 0:
acc = xp.asarray(0.0, dtype=y.dtype)
else:
acc = xp.asarray(float(count) / total, dtype=y.dtype)
else:
acc = xp.where(total == 0,
xp.asarray(0.0, dtype=y.dtype),
xp.asarray(count / total, dtype=y.dtype))
else:
pred = y.argmax(axis=1).reshape(t.shape)
if xp is chainerx:
# TODO(niboshi): ChainerX mean() does not support dtype
# argument. Support it.
acc = xp.asarray((pred == t).astype(y.dtype, False).mean())
else:
acc = xp.asarray((pred == t).mean(dtype=y.dtype))
return acc,
def accuracy(y, t, ignore_label=None):
"""Computes multiclass classification accuracy of the minibatch.
Args:
y (:class:`~chainer.Variable` or :ref:`ndarray`):
Array whose (i, j, k, ...)-th element indicates the score of
the class j at the (i, k, ...)-th sample.
The prediction label :math:`\\hat t` is calculated by the formula
:math:`\\hat t(i, k, ...) = \\operatorname{\\mathrm{argmax}}_j \
y(i, j, k, ...)`.
t (:class:`~chainer.Variable` or :ref:`ndarray`):
Array of ground truth labels.
ignore_label (int or None): Skip calculating accuracy
if the true label is ``ignore_label``.
Returns:
~chainer.Variable: A variable holding a scalar array of the accuracy.
.. note:: This function is non-differentiable.
.. admonition:: Example
        We show the most common case, when ``y`` is a two-dimensional array.
>>> y = np.array([[0.1, 0.7, 0.2], # prediction label is 1
... [8.0, 1.0, 2.0], # prediction label is 0
... [-8.0, 1.0, 2.0], # prediction label is 2
... [-8.0, -1.0, -2.0]]) # prediction label is 1
>>> t = np.array([1, 0, 2, 1], np.int32)
>>> F.accuracy(y, t).array \
# 100% accuracy because all samples are correct
array(1.)
>>> t = np.array([1, 0, 0, 0], np.int32)
>>> F.accuracy(y, t).array \
# 50% accuracy because 1st and 2nd samples are correct.
array(0.5)
>>> F.accuracy(y, t, ignore_label=0).array \
# 100% accuracy because of ignoring the 2nd, 3rd and 4th samples.
array(1.)
"""
acc, = Accuracy(ignore_label=ignore_label).apply((y, t))
return acc
| 4,360
| 34.745902
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/evaluation/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainer/functions/evaluation/binary_accuracy.py
|
from __future__ import division
from chainer import backend
from chainer import function
from chainer.utils import type_check
class BinaryAccuracy(function.Function):
ignore_label = -1
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 't'))
x_type, t_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
t_type.dtype.kind == 'i',
t_type.shape == x_type.shape,
)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
y, t = inputs
# flatten
y = y.ravel()
t = t.ravel()
c = (y >= 0)
count = xp.maximum(1, (t != self.ignore_label).sum())
return xp.asarray((c == t).sum() / count, dtype=y.dtype),
def binary_accuracy(y, t):
"""Computes binary classification accuracy of the minibatch.
Args:
y (:class:`~chainer.Variable` or :ref:`ndarray`):
Array whose i-th element indicates the score of
positive at the i-th sample.
The prediction label :math:`\\hat t[i]` is ``1`` if
``y[i] >= 0``, otherwise ``0``.
t (:class:`~chainer.Variable` or :ref:`ndarray`):
Array holding a signed integer vector of ground truth labels.
If ``t[i] == 1``, it indicates that i-th sample is positive.
If ``t[i] == 0``, it indicates that i-th sample is negative.
            If ``t[i] == -1``, the corresponding ``y[i]`` is ignored.
Accuracy is zero if all ground truth labels are ``-1``.
Returns:
~chainer.Variable: A variable holding a scalar array of the accuracy.
.. note:: This function is non-differentiable.
.. admonition:: Example
        We show the most common case, when ``y`` is a two-dimensional array.
>>> y = np.array([[-2.0, 0.0], # prediction labels are [0, 1]
... [3.0, -5.0]]) # prediction labels are [1, 0]
>>> t = np.array([[0, 1],
... [1, 0]], np.int32)
>>> F.binary_accuracy(y, t).array \
# 100% accuracy because all samples are correct.
array(1.)
>>> t = np.array([[0, 0],
... [1, 1]], np.int32)
>>> F.binary_accuracy(y, t).array \
# 50% accuracy because y[0][0] and y[1][0] are correct.
array(0.5)
>>> t = np.array([[0, -1],
... [1, -1]], np.int32)
>>> F.binary_accuracy(y, t).array \
# 100% accuracy because of ignoring y[0][1] and y[1][1].
array(1.)
"""
return BinaryAccuracy()(y, t)
| 2,597
| 32.307692
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/evaluation/classification_summary.py
|
from __future__ import division
import six
import chainer
from chainer import backend
from chainer import function
from chainer.utils import type_check
def _fbeta_score(precision, recall, beta):
beta_square = beta * beta
return (
(1 + beta_square) * precision * recall
/ (beta_square * precision + recall)
).astype(precision.dtype, copy=False)
class ClassificationSummary(function.Function):
def __init__(self, label_num, beta, ignore_label):
self.label_num = label_num
self.beta = beta
self.ignore_label = ignore_label
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 't'))
x_type, t_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
t_type.dtype.kind == 'i'
)
t_ndim = type_check.eval(t_type.ndim)
type_check.expect(
x_type.ndim >= t_type.ndim,
x_type.shape[0] == t_type.shape[0],
x_type.shape[2: t_ndim + 1] == t_type.shape[1:]
)
for i in six.moves.range(t_ndim + 1, type_check.eval(x_type.ndim)):
type_check.expect(x_type.shape[i] == 1)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
y, t = inputs
# numpy.bincount requires int32 on Windows
t = t.astype(xp.int32, copy=False)
if self.label_num is None:
label_num = xp.amax(t) + 1
else:
label_num = self.label_num
if chainer.is_debug():
assert (t < label_num).all()
mask = (t == self.ignore_label).ravel()
pred = xp.where(mask, label_num, y.argmax(axis=1).ravel())
true = xp.where(mask, label_num, t.ravel())
support = xp.bincount(true, minlength=label_num + 1)[:label_num]
relevant = xp.bincount(pred, minlength=label_num + 1)[:label_num]
tp_mask = xp.where(pred == true, true, label_num)
tp = xp.bincount(tp_mask, minlength=label_num + 1)[:label_num]
precision = tp / relevant
recall = tp / support
fbeta = _fbeta_score(precision, recall, self.beta)
return precision, recall, fbeta, support
def classification_summary(y, t, label_num=None, beta=1.0, ignore_label=-1):
"""Calculates Precision, Recall, F beta Score, and support.
This function calculates the following quantities for each class.
- Precision: :math:`\\frac{\\mathrm{tp}}{\\mathrm{tp} + \\mathrm{fp}}`
- Recall: :math:`\\frac{\\mathrm{tp}}{\\mathrm{tp} + \\mathrm{fn}}`
- F beta Score: The weighted harmonic average of Precision and Recall.
- Support: The number of instances of each ground truth label.
Here, ``tp``, ``fp``, ``tn``, and ``fn`` stand for the number of true
positives, false positives, true negatives, and false negatives,
respectively.
``label_num`` specifies the number of classes, that is,
each value in ``t`` must be an integer in the range of
``[0, label_num)``.
    If ``label_num`` is ``None``, this function regards
    ``label_num`` as the maximum value in ``t`` plus one.
``ignore_label`` determines which instances should be ignored.
Specifically, instances with the given label are not taken
into account for calculating the above quantities.
By default, it is set to -1 so that all instances are taken
into consideration, as labels are supposed to be non-negative integers.
    Setting ``ignore_label`` to a non-negative integer less than ``label_num``
    is illegal and yields undefined behavior. In the current implementation,
    it raises a ``RuntimeWarning`` and the ``ignore_label``-th entries in the
    output arrays do not contain correct quantities.
Args:
y (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable holding a vector of scores.
t (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable holding a vector of ground truth labels.
label_num (int): The number of classes.
beta (float): The parameter which determines the weight of
precision in the F-beta score.
ignore_label (int): Instances with this label are ignored.
Returns:
4-tuple of ~chainer.Variable of size ``(label_num,)``.
Each element represents precision, recall, F beta score,
and support of this minibatch.
"""
return ClassificationSummary(label_num, beta, ignore_label)(y, t)
def precision(y, t, label_num=None, ignore_label=-1):
ret = ClassificationSummary(label_num, 1.0, ignore_label)(y, t)
return ret[0], ret[-1]
def recall(y, t, label_num=None, ignore_label=-1):
ret = ClassificationSummary(label_num, 1.0, ignore_label)(y, t)
return ret[1], ret[-1]
def fbeta_score(y, t, label_num=None, beta=1.0, ignore_label=-1):
ret = ClassificationSummary(label_num, beta, ignore_label)(y, t)
return ret[2], ret[-1]
def f1_score(y, t, label_num=None, ignore_label=-1):
ret = ClassificationSummary(label_num, 1.0, ignore_label)(y, t)
return ret[2], ret[-1]
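# Usage sketch (illustrative values, not part of the original module): a
# 3-class toy batch where every sample is classified correctly, so each
# per-class precision, recall and F1 is 1 and each support is 1.
if __name__ == '__main__':
    import numpy as np
    y = np.array([[0.1, 0.7, 0.2],
                  [8.0, 1.0, 2.0],
                  [-8.0, 1.0, 2.0]], dtype=np.float32)
    t = np.array([1, 0, 2], dtype=np.int32)
    prec, rec, fbeta, support = classification_summary(y, t, label_num=3)
    print(prec.array, rec.array, fbeta.array, support.array)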
| 5,064
| 35.438849
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/pooling/roi_pooling_2d.py
|
# Modified work:
# -----------------------------------------------------------------------------
# Copyright (c) 2015 Preferred Infrastructure, Inc.
# Copyright (c) 2015 Preferred Networks, Inc.
# -----------------------------------------------------------------------------
# Original work of _roi_pooling_slice, forward_cpu and backward_cpu:
# -----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# Original work of forward_gpu and backward_gpu:
# -----------------------------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see fast-rcnn/LICENSE for details]
# Written by Ross Girshick
# -----------------------------------------------------------------------------
import numpy
import six
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
def _roi_pooling_slice(size, stride, max_size, roi_offset):
start = int(numpy.floor(size * stride))
end = int(numpy.ceil((size + 1) * stride))
start = min(max(start + roi_offset, 0), max_size)
end = min(max(end + roi_offset, 0), max_size)
return slice(start, end), end - start
class ROIPooling2D(function_node.FunctionNode):
"""RoI pooling over a set of 2d planes."""
def __init__(self, outh, outw, spatial_scale):
self.outh, self.outw = outh, outw
self.spatial_scale = spatial_scale
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
x_type, roi_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 4,
x_type.dtype == roi_type.dtype,
roi_type.ndim == 2,
roi_type.shape[1] == 5,
)
def forward_cpu(self, inputs):
self.retain_inputs((1,))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
# `numpy.zeros` needs to be used because the arrays can be
# returned without having some of its values updated.
top_data = numpy.zeros((n_rois, channels, self.outh, self.outw),
dtype=bottom_data.dtype)
self.argmax_data = numpy.zeros(top_data.shape, numpy.int32)
for i_roi in six.moves.range(n_rois):
idx, xmin, ymin, xmax, ymax = bottom_rois[i_roi]
xmin = int(round(xmin * self.spatial_scale))
xmax = int(round(xmax * self.spatial_scale))
ymin = int(round(ymin * self.spatial_scale))
ymax = int(round(ymax * self.spatial_scale))
roi_width = max(xmax - xmin + 1, 1)
roi_height = max(ymax - ymin + 1, 1)
strideh = 1. * roi_height / self.outh
stridew = 1. * roi_width / self.outw
for outh in six.moves.range(self.outh):
sliceh, lenh = _roi_pooling_slice(
outh, strideh, height, ymin)
if sliceh.stop <= sliceh.start:
continue
for outw in six.moves.range(self.outw):
slicew, lenw = _roi_pooling_slice(
outw, stridew, width, xmin)
if slicew.stop <= slicew.start:
continue
roi_data = bottom_data[int(idx), :, sliceh, slicew]\
.reshape(channels, -1)
top_data[i_roi, :, outh, outw] =\
numpy.max(roi_data, axis=1)
                    # get the max idx with respect to feature map coordinates
max_idx_slice = numpy.unravel_index(
numpy.argmax(roi_data, axis=1), (lenh, lenw))
max_idx_slice_h = max_idx_slice[0] + sliceh.start
max_idx_slice_w = max_idx_slice[1] + slicew.start
max_idx_slice = max_idx_slice_h * width + max_idx_slice_w
self.argmax_data[i_roi, :, outh, outw] = max_idx_slice
return top_data,
def forward_gpu(self, inputs):
self.retain_inputs((1,))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = cuda.cupy.empty((n_rois, channels, self.outh,
self.outw), dtype=bottom_data.dtype)
self.argmax_data = cuda.cupy.empty(top_data.shape, numpy.int32)
cuda.elementwise(
'''
raw T bottom_data, T spatial_scale, int32 channels,
int32 height, int32 width, int32 pooled_height, int32 pooled_width,
raw T bottom_rois
''',
'T top_data, int32 argmax_data',
'''
// pos in output filter
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int num = i / pooled_width / pooled_height / channels;
int roi_batch_ind = bottom_rois[num * 5 + 0];
int roi_start_w = round(bottom_rois[num * 5 + 1] * spatial_scale);
int roi_start_h = round(bottom_rois[num * 5 + 2] * spatial_scale);
int roi_end_w = round(bottom_rois[num * 5 + 3] * spatial_scale);
int roi_end_h = round(bottom_rois[num * 5 + 4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
float bin_size_h = static_cast<float>(roi_height)
/ static_cast<float>(pooled_height);
float bin_size_w = static_cast<float>(roi_width)
/ static_cast<float>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<float>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<float>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<float>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<float>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
float maxval = is_empty ? 0 : -1E+37;
// If nothing is pooled, argmax=-1 causes nothing to be backprop'd
int maxidx = -1;
int data_offset = (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[data_offset + bottom_index] > maxval) {
maxval = bottom_data[data_offset + bottom_index];
maxidx = bottom_index;
}
}
}
top_data = maxval;
argmax_data = maxidx;
''', 'roi_pooling_2d_fwd'
)(bottom_data, self.spatial_scale, channels, height, width,
self.outh, self.outw, bottom_rois, top_data,
self.argmax_data)
return top_data,
def backward(self, indexes, grad_outputs):
bottom_rois, = self.get_retained_inputs()
gtop_data, = grad_outputs
f = ROIPooling2DGrad(self.outh, self.outw, self.spatial_scale,
self._bottom_data_shape, self.argmax_data)
return f.apply((bottom_rois, gtop_data))
class ROIPooling2DGrad(function_node.FunctionNode):
def __init__(self, outh, outw, spatial_scale, bottom_data_shape,
argmax_data):
self.outh, self.outw = outh, outw
self.spatial_scale = spatial_scale
self._bottom_data_shape = bottom_data_shape
self.argmax_data = argmax_data
def forward_cpu(self, inputs):
bottom_rois, gtop_data = inputs
channels, height, width = self._bottom_data_shape[1:]
n_rois = bottom_rois.shape[0]
bottom_delta = numpy.zeros(self._bottom_data_shape, bottom_rois.dtype)
for i_roi in six.moves.range(n_rois):
idx, xmin, ymin, xmax, ymax = bottom_rois[i_roi]
idx = int(idx)
xmin = int(round(xmin * self.spatial_scale))
xmax = int(round(xmax * self.spatial_scale))
ymin = int(round(ymin * self.spatial_scale))
ymax = int(round(ymax * self.spatial_scale))
roi_width = max(xmax - xmin + 1, 1)
roi_height = max(ymax - ymin + 1, 1)
strideh = float(roi_height) / float(self.outh)
stridew = float(roi_width) / float(self.outw)
            # iterate over all (w, h) positions in the feature map that fall
            # into this ROI
for w in six.moves.range(xmin, xmax + 1):
for h in six.moves.range(ymin, ymax + 1):
phstart = int(numpy.floor(float(h - ymin) / strideh))
phend = int(numpy.ceil(float(h - ymin + 1) / strideh))
pwstart = int(numpy.floor(float(w - xmin) / stridew))
pwend = int(numpy.ceil(float(w - xmin + 1) / stridew))
phstart = min(max(phstart, 0), self.outh)
phend = min(max(phend, 0), self.outh)
pwstart = min(max(pwstart, 0), self.outw)
pwend = min(max(pwend, 0), self.outw)
for ph in six.moves.range(phstart, phend):
for pw in six.moves.range(pwstart, pwend):
max_idx_tmp = self.argmax_data[i_roi, :, ph, pw]
for c in six.moves.range(channels):
if max_idx_tmp[c] == (h * width + w):
bottom_delta[idx, c, h, w] += \
gtop_data[i_roi, c, ph, pw]
return bottom_delta, None
def forward_gpu(self, inputs):
bottom_rois, gtop_data = inputs
channels, height, width = self._bottom_data_shape[1:]
bottom_diff = cuda.cupy.zeros(
self._bottom_data_shape, bottom_rois.dtype)
cuda.elementwise(
'''
raw T top_diff, raw int32 argmax_data, int32 num_rois,
T spatial_scale, int32 channels, int32 height, int32 width,
int32 pooled_height, int32 pooled_width, raw T bottom_rois
''',
'T bottom_diff',
'''
int w = i % width;
int h = (i / width) % height;
int c = (i / (width * height)) % channels;
int num = i / (width * height * channels);
float gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
// Skip if ROI's batch index doesn't match num
if (num != static_cast<int>(bottom_rois[roi_n * 5])) {
continue;
}
int roi_start_w = round(bottom_rois[roi_n * 5 + 1]
* spatial_scale);
int roi_start_h = round(bottom_rois[roi_n * 5 + 2]
* spatial_scale);
int roi_end_w = round(bottom_rois[roi_n * 5 + 3]
* spatial_scale);
int roi_end_h = round(bottom_rois[roi_n * 5 + 4]
* spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height
* pooled_width;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
float bin_size_h = static_cast<float>(roi_height)
/ static_cast<float>(pooled_height);
float bin_size_w = static_cast<float>(roi_width)
/ static_cast<float>(pooled_width);
int phstart = floor(static_cast<float>(h - roi_start_h)
/ bin_size_h);
int phend = ceil(static_cast<float>(h - roi_start_h + 1)
/ bin_size_h);
int pwstart = floor(static_cast<float>(w - roi_start_w)
/ bin_size_w);
int pwend = ceil(static_cast<float>(w - roi_start_w + 1)
/ bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int index_ = ph * pooled_width + pw + offset;
if (argmax_data[index_] == (h * width + w)) {
gradient += top_diff[index_];
}
}
}
}
bottom_diff = gradient;
''', 'roi_pooling_2d_bwd'
)(gtop_data, self.argmax_data, bottom_rois.shape[0],
self.spatial_scale, channels, height, width, self.outh, self.outw,
bottom_rois, bottom_diff)
return bottom_diff, None
def backward(self, indexes, grad_outputs):
# No trivial way to implement double-backward for this function.
raise NotImplementedError
def roi_pooling_2d(x, rois, outh, outw, spatial_scale):
"""Spatial Region of Interest (ROI) pooling function.
    This function acts similarly to :func:`~chainer.functions.max_pooling_2d`,
    but it computes the maximum of the input spatial patch for each channel
    within the region of interest.
Args:
        x (~chainer.Variable): Input variable. The shape is expected to be
            4 dimensional: (n: batch, c: channel, h: height, w: width).
rois (~chainer.Variable): Input roi variable. The shape is expected to
be (n: data size, 5), and each datum is set as below:
(batch_index, x_min, y_min, x_max, y_max).
        outh (int): Height of the output image after pooling.
        outw (int): Width of the output image after pooling.
        spatial_scale (float): Scale by which the ROI is resized.
Returns:
~chainer.Variable: Output variable.
See the original paper proposing ROIPooling:
`Fast R-CNN <https://arxiv.org/abs/1504.08083>`_.
"""
return ROIPooling2D(outh, outw, spatial_scale).apply((x, rois))[0]
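# Usage sketch (illustrative shapes, not part of the original module):
# pooling two ROIs from one feature map into fixed 7x7 outputs, as in a
# Fast R-CNN head. spatial_scale=1/16 maps image coordinates onto the
# 16x-downsampled feature map.
if __name__ == '__main__':
    import numpy as np
    x = np.random.randn(1, 3, 38, 50).astype(np.float32)
    # Each ROI row: (batch_index, x_min, y_min, x_max, y_max) in image
    # coordinates.
    rois = np.array([[0, 0, 0, 320, 240],
                     [0, 100, 80, 500, 360]], dtype=np.float32)
    y = roi_pooling_2d(x, rois, outh=7, outw=7, spatial_scale=1. / 16)
    print(y.shape)  # (2, 3, 7, 7)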
| 16,408
| 43.110215
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/pooling/pooling_nd_kernel.py
|
from six import moves
from chainer.backends import cuda
from chainer.utils import conv_nd_kernel
class PoolingNDKernelForward(object):
def name(self):
raise NotImplementedError()
def in_params(self):
return []
def out_params(self):
return []
def before(self):
raise NotImplementedError()
def main(self, offset, xs):
raise NotImplementedError()
def after(self, out_xs):
raise NotImplementedError()
@classmethod
@cuda.memoize()
def generate(klass, ndim):
return klass()._generate(ndim)
def _generate(self, ndim):
self.ndim = ndim
self.ds = conv_nd_kernel.vars('d', ndim)
self.outs = conv_nd_kernel.vars('out', ndim)
self.ks = conv_nd_kernel.vars('k', ndim)
self.ss = conv_nd_kernel.vars('s', ndim)
self.ps = conv_nd_kernel.vars('p', ndim)
in_params = self._in_params()
out_params = self._out_params()
operation = self._operation()
name = '{}_pool_{}d_fwd'.format(self.name(), self.ndim)
return in_params, out_params, operation, name
def _in_params(self):
# 2D: raw T in, int32 d_0, int32 d_1, int32 out_0, int32 out_1,
# int32 k_0, int32 k_1, int32 s_0, int32 s_1, int32 p_0,
# int32 p_1, ...
def aux(x):
return 'int32 {}'.format(x)
in_params = self.in_params()
if type(in_params) is tuple:
raws = in_params[0]
in_params = in_params[1]
else:
raws = []
vars = self.ds + self.outs + self.ks + self.ss + self.ps
return ', '.join(
['raw T in'] + raws + conv_nd_kernel.map_(aux, vars) + in_params)
def _out_params(self):
# T out, ...
out_params = self.out_params()
return ', '.join(['T out'] + out_params)
def _compile_c0(self):
# 2D: int c0 = i / (out_0 * out_1);
return ['int c0 = i / ({});'.format(conv_nd_kernel.mulexp(self.outs))]
def _compile_out_x(self):
# 2D: int out_x_0 = i / (out_1) % out_0;
# int out_x_1 = i % out_1;
def aux(out_x, outs):
head = outs[0]
tail = outs[1:]
if tail:
return 'int {} = i / ({}) % {};'.format(
out_x, conv_nd_kernel.mulexp(tail), head)
else:
return 'int {} = i % {};'.format(out_x, head)
out_xs = conv_nd_kernel.vars('out_x', self.ndim)
out_xs_decls = conv_nd_kernel.map_(
aux, out_xs, conv_nd_kernel.succ_sublists(self.outs))
return out_xs_decls, out_xs
def _compile_loop(self, out_xs):
# 2D: int in_x0_0 = max(0, out_x_0 * s_0 - p_0);
# int in_x1_0 = min(d_0, out_x_0 * s_0 + k_0 - p_0);
# int in_x0_1 = max(0, out_x_1 * s_1 - p_1);
# int in_x1_1 = min(d_1, out_x_1 * s_1 + k_1 - p_1);
# ... Before-part here ...
# for (int x_0 = in_x0_0; x_0 < in_x1_0; ++x_0) {
# int offset_0 = d_1 * (x_0 + d_0 * c0);
# for (int x_1 = in_x0_1; x_1 < in_x1_1; ++x_1) {
# int offset_1 = 1 * (x_1 + offset_0);
# ... Main-part here ...
# }
# }
# ... After-part here ...
def aux(in_x0, in_x1, d, out, k, s, p):
return [
'int {} = max(0, {} * {} - {});'.format(in_x0, out, s, p),
'int {} = min({}, {} * {} + {} - {});'.format(
in_x1, d, out, s, k, p)]
in_x0s = conv_nd_kernel.vars('in_x0', self.ndim)
in_x1s = conv_nd_kernel.vars('in_x1', self.ndim)
bounds = sum(conv_nd_kernel.map_(
aux, in_x0s, in_x1s, self.ds, out_xs, self.ks, self.ss, self.ps
), [])
def _loop_main(main):
w = conv_nd_kernel.Writer()
# Loop openings.
xs = conv_nd_kernel.vars('x', self.ndim)
offsets = conv_nd_kernel.vars('offset', self.ndim)
ds1 = self.ds[1:] + [1]
offsets1 = ['d_0 * c0'] + offsets[:-1]
for x, in_x0, in_x1, offset, offset1, d1 in moves.zip(
xs, in_x0s, in_x1s, offsets, offsets1, ds1):
w.write('for (int {} = {}; {} < {}; ++{}) {{'.format(
x, in_x0, x, in_x1, x), 'inc')
w.write(
'int {} = {} * ({} + {});'.format(offset, d1, x, offset1))
# Write main-part.
offset = offsets[-1]
for l in main(offset, xs).split('\n'):
w.write(l)
# Loop closings.
for _ in xs:
w.write('}', 'dec')
return [w.get()]
return bounds, _loop_main
def _compile_procedure(self, out_xs):
def _main(offset, xs):
return self.main(offset, xs)
before = [self.before()]
after = [self.after(out_xs)]
return before, _main, after
def _operation(self):
c0 = self._compile_c0()
out_x, out_xs = self._compile_out_x()
loop_bounds, loop_main = self._compile_loop(out_xs)
before, main, after = self._compile_procedure(out_xs)
return '\n'.join(
c0 + out_x + loop_bounds + before + loop_main(main) + after)
class PoolingNDKernelBackward(object):
def name(self):
raise NotImplementedError()
def in_params(self):
return []
def out_params(self):
return []
def before(self):
raise NotImplementedError()
def main(self, offset, xs, out_xs):
raise NotImplementedError()
def after(self, xs):
raise NotImplementedError()
@classmethod
@cuda.memoize()
def generate(klass, ndim):
return klass()._generate(ndim)
def _generate(self, ndim):
self.ndim = ndim
self.ds = conv_nd_kernel.vars('d', ndim)
self.outs = conv_nd_kernel.vars('out', ndim)
self.ks = conv_nd_kernel.vars('k', ndim)
self.ss = conv_nd_kernel.vars('s', ndim)
self.ps = conv_nd_kernel.vars('p', ndim)
in_params = self._in_params()
out_params = self._out_params()
operation = self._operation()
name = '{}_pool_{}d_bwd'.format(self.name(), self.ndim)
return in_params, out_params, operation, name
def _in_params(self):
# 2D: raw T gy, int32 d_0, int32 d_1, int32 out_0, int32 out_1,
# int32 k_0, int32 k_1, int32 s_0, int32 s_1, int32 p_0,
# int32 p_1, ...
def aux(x):
return 'int32 {}'.format(x)
in_params = self.in_params()
if type(in_params) is tuple:
raws = in_params[0]
in_params = in_params[1]
else:
raws = []
vars = self.ds + self.outs + self.ks + self.ss + self.ps
return ', '.join(
['raw T gy'] + raws + conv_nd_kernel.map_(aux, vars) + in_params)
def _out_params(self):
# T gx, ...
out_params = self.out_params()
return ', '.join(['T gx'] + out_params)
def _compile_c0(self):
# 2D: int c0 = i / (d_0 * d_1);
return ['int c0 = i / ({});'.format(conv_nd_kernel.mulexp(self.ds))]
def _compile_x(self):
# 2D: int x_0 = i / (d_1) % d_0 + p_0;
# int x_1 = i % d_1 + p_1;
def aux(x, ds, p):
head = ds[0]
tail = ds[1:]
if tail:
return 'int {} = i / ({}) % {} + {};'.format(
x, conv_nd_kernel.mulexp(tail), head, p)
else:
return 'int {} = i % {} + {};'.format(x, head, p)
xs = conv_nd_kernel.vars('x', self.ndim)
xs_decls = conv_nd_kernel.map_(
aux, xs, conv_nd_kernel.succ_sublists(self.ds), self.ps)
return xs_decls, xs
def _compile_loop(self, xs):
# 2D: int out_x0_0 = max(0, (x_0 - k_0 + s_0) / s_0);
# int out_x1_0 = min(out_0, (x_0 + s_0) / s_0);
# int out_x0_1 = max(0, (x_1 - k_1 + s_1) / s_1);
# int out_x1_1 = min(out_1, (x_1 + s_1) / s_1);
# ... Before-part here ...
# for (int out_x_0 = out_x0_0; out_x_0 < out_x1_0; ++out_x_0) {
# int offset_0 = out_1 * (out_x_0 + out_0 * c0);
# for (int out_x_1 = out_x0_1; out_x_1 < out_x1_1; ++out_x_1) {
# int offset_1 = 1 * (out_x_1 + offset_0);
# ... Main-part here ...
# }
# }
# ... After-part here ...
def aux(out_x0, out_x1, x, out, k, s):
return [
'int {} = max(0, ({} - {} + {}) / {});'.format(
out_x0, x, k, s, s),
'int {} = min({}, ({} + {}) / {});'.format(
out_x1, out, x, s, s)]
out_x0s = conv_nd_kernel.vars('out_x0', self.ndim)
out_x1s = conv_nd_kernel.vars('out_x1', self.ndim)
bounds = sum(conv_nd_kernel.map_(
aux, out_x0s, out_x1s, xs, self.outs, self.ks, self.ss), [])
def _loop_main(main):
w = conv_nd_kernel.Writer()
# Loop openings.
out_xs = conv_nd_kernel.vars('out_x', self.ndim)
offsets = conv_nd_kernel.vars('offset', self.ndim)
outs1 = self.outs[1:] + [1]
offsets1 = ['out_0 * c0'] + offsets[:-1]
for out_x, out_x0, out_x1, offset, offset1, out1 in moves.zip(
out_xs, out_x0s, out_x1s, offsets, offsets1, outs1):
w.write('for (int {} = {}; {} < {}; ++{}) {{'.format(
out_x, out_x0, out_x, out_x1, out_x), 'inc')
w.write('int {} = {} * ({} + {});'.format(
offset, out1, out_x, offset1))
# Write main-part.
offset = offsets[-1]
for l in main(offset, xs, out_xs).split('\n'):
w.write(l)
# Loop closings.
for _ in out_xs:
w.write('}', 'dec')
return [w.get()]
return bounds, _loop_main
def _compile_procedure(self, xs):
def _main(offset, xs, out_xs):
return self.main(offset, xs, out_xs)
before = [self.before()]
after = [self.after(xs)]
return before, _main, after
def _operation(self):
c0 = self._compile_c0()
x, xs = self._compile_x()
loop_bounds, loop_main = self._compile_loop(xs)
before, main, after = self._compile_procedure(xs)
return '\n'.join(
c0 + x + loop_bounds + before + loop_main(main) + after)
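# Usage sketch (not part of the original module; the subclass below is a
# made-up minimal example): generating the ElementwiseKernel fragments for
# a 2D sum-pooling forward pass. The private _generate() is called directly
# to sidestep the cuda.memoize cache, so no GPU is needed to inspect it.
if __name__ == '__main__':
    class SumPoolingNDKernelForward(PoolingNDKernelForward):
        def name(self):
            return 'sum'
        def before(self):
            return 'T val = 0;'
        def main(self, offset, xs):
            # Accumulate every input element visited by the loop nest.
            return 'val = val + in[{}];'.format(offset)
        def after(self, out_xs):
            return 'out = val;'
    in_params, out_params, operation, name = (
        SumPoolingNDKernelForward()._generate(2))
    print(name)       # sum_pool_2d_fwd
    print(operation)  # loop nest with bounds and before/main/after parts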
| 10,653
| 34.161716
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/pooling/spatial_pyramid_pooling_2d.py
|
import math
import six
import chainer
def spatial_pyramid_pooling_2d(x, pyramid_height, pooling=None):
"""Spatial pyramid pooling function.
It outputs a fixed-length vector regardless of input feature map size.
    It performs pooling operations on the input 4D array ``x`` with different
    kernel sizes and padding sizes, then flattens all dimensions except the
    first dimension of each pooling result, and finally concatenates the
    results along the second dimension.
At :math:`i`-th pyramid level, the kernel size
:math:`(k_h^{(i)}, k_w^{(i)})` and padding size
:math:`(p_h^{(i)}, p_w^{(i)})` of pooling operation are calculated as
below:
.. math::
k_h^{(i)} &= \\lceil b_h / 2^i \\rceil, \\\\
k_w^{(i)} &= \\lceil b_w / 2^i \\rceil, \\\\
p_h^{(i)} &= (2^i k_h^{(i)} - b_h) / 2, \\\\
p_w^{(i)} &= (2^i k_w^{(i)} - b_w) / 2,
where :math:`\\lceil \\cdot \\rceil` denotes the ceiling function, and
:math:`b_h, b_w` are height and width of input variable ``x``,
    respectively. Note that the index of the pyramid level :math:`i` is
    zero-based.
    See details in the paper: `Spatial Pyramid Pooling in Deep Convolutional
Networks for Visual Recognition
<https://arxiv.org/abs/1406.4729>`_.
Args:
x (~chainer.Variable): Input variable. The shape of ``x`` should be
``(batchsize, # of channels, height, width)``.
        pyramid_height (int): Number of pyramid levels.
pooling (str):
Currently, only ``max`` is supported, which performs a 2d max
pooling operation.
Returns:
~chainer.Variable: Output variable. The shape of the output variable
will be :math:`(batchsize, c \\sum_{h=0}^{H-1} 2^{2h}, 1, 1)`,
where :math:`c` is the number of channels of input variable ``x``
and :math:`H` is the number of pyramid levels.
"""
bottom_c, bottom_h, bottom_w = x.shape[1:]
ys = []
# create pooling functions for different pyramid levels and apply it
for pyramid_level in six.moves.range(pyramid_height):
n_bins = int(2 ** pyramid_level)
ksize_h = int(math.ceil(bottom_h / (float(n_bins))))
remainder_h = ksize_h * n_bins - bottom_h
pad_h = remainder_h // 2
ksize_w = int(math.ceil(bottom_w / (float(n_bins))))
remainder_w = ksize_w * n_bins - bottom_w
pad_w = remainder_w // 2
ksize = (ksize_h, ksize_w)
pad = (pad_h, pad_w)
if pooling != 'max':
            raise ValueError(
                'Unsupported pooling operation: {}'.format(pooling))
y_var = chainer.functions.max_pooling_2d(
x, ksize=ksize, stride=None, pad=pad, cover_all=True)
n, c, h, w = y_var.shape
ys.append(y_var.reshape((n, c * h * w, 1, 1)))
return chainer.functions.concat(ys)
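# Usage sketch (illustrative shapes, not part of the original module): with
# pyramid_height=3 the per-channel output length is 1 + 4 + 16 = 21 bins,
# independent of the input spatial size (13x17 here).
if __name__ == '__main__':
    import numpy as np
    x = np.random.randn(2, 64, 13, 17).astype(np.float32)
    y = spatial_pyramid_pooling_2d(x, pyramid_height=3, pooling='max')
    print(y.shape)  # (2, 64 * 21, 1, 1) == (2, 1344, 1, 1)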
| 2,825
| 34.772152
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/pooling/roi_max_align_2d.py
|
# Modified work:
# -----------------------------------------------------------------------------
# Copyright (c) 2018 Preferred Infrastructure, Inc.
# Copyright (c) 2018 Preferred Networks, Inc.
# -----------------------------------------------------------------------------
# Original work:
# -----------------------------------------------------------------------------
# Copyright (c) 2015 by Contributors
# \file roi_pooling.cu
# \brief roi pooling operator
# \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
# \changed to roi_align by Elaine Bao
# \file roi_align.cu
# \roi align operator described in Mask RCNN
# -----------------------------------------------------------------------------
import numbers
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function
from chainer.functions.pooling.roi_average_align_2d \
import _GET_BILINEAR_INTERP_KERNEL
from chainer.functions.pooling.roi_average_align_2d \
import _get_bilinear_interp_params
from chainer.functions.pooling.roi_average_align_2d import _get_bounds
from chainer import utils
from chainer.utils import type_check
def _pair(x):
if isinstance(x, chainer.utils.collections_abc.Iterable):
return x
return x, x
class ROIMaxAlign2D(function.Function):
"""ROI max align over a set of 2d planes."""
def __init__(self, outsize, spatial_scale, sampling_ratio=None):
outh, outw = _pair(outsize)
if not (isinstance(outh, numbers.Integral) and outh > 0):
raise TypeError(
'outsize[0] must be positive integer: {}, {}'
.format(type(outh), outh))
if not (isinstance(outw, numbers.Integral) and outw > 0):
raise TypeError(
'outsize[1] must be positive integer: {}, {}'
.format(type(outw), outw))
if isinstance(spatial_scale, numbers.Integral):
spatial_scale = float(spatial_scale)
if not (isinstance(spatial_scale, numbers.Real) and
spatial_scale > 0):
raise TypeError(
'spatial_scale must be a positive float number: {}, {}'
.format(type(spatial_scale), spatial_scale))
sampling_ratio = _pair(sampling_ratio)
if not all((isinstance(s, numbers.Integral) and s >= 1) or
s is None for s in sampling_ratio):
raise TypeError(
'sampling_ratio must be integer >= 1 or a pair of it: {}'
.format(sampling_ratio))
self.outh, self.outw = outh, outw
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, roi_type, roi_index_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim == 4,
roi_type.dtype == numpy.float32,
roi_type.ndim == 2,
roi_type.shape[1] == 4,
roi_index_type.dtype == numpy.int32,
roi_index_type.ndim == 1,
roi_type.shape[0] == roi_index_type.shape[0],
)
def forward_cpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = numpy.empty((n_rois, channels, self.outh,
self.outw), dtype=bottom_data.dtype)
self.argmax_data = numpy.empty(top_data.shape, numpy.int32)
pooled_width, pooled_height = self.outw, self.outh
spatial_scale = self.spatial_scale
for i in six.moves.range(top_data.size):
pw = i % pooled_width
ph = int(i / pooled_width) % pooled_height
c = int(i / pooled_width / pooled_height) % channels
n = int(i / pooled_width / pooled_height / channels)
roi_batch_ind = bottom_roi_indices[n]
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_width = max(roi_end_w - roi_start_w, 1.)
roi_height = max(roi_end_h - roi_start_h, 1.)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(numpy.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(numpy.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
max_val = - numpy.inf
max_index = -1
for iy in six.moves.range(roi_bin_grid_h):
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
for ix in six.moves.range(roi_bin_grid_w):
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear interpolation {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
tmp_val = 0.
if w1 > 0 and y_low >= 0 and x_low >= 0:
v1 = bottom_data[roi_batch_ind, c, y_low, x_low]
tmp_val += w1 * v1
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
v2 = bottom_data[roi_batch_ind, c, y_low, x_high]
tmp_val += w2 * v2
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
v3 = bottom_data[roi_batch_ind, c, y_high, x_low]
tmp_val += w3 * v3
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
v4 = bottom_data[roi_batch_ind, c, y_high, x_high]
tmp_val += w4 * v4
tmp_index = iy * roi_bin_grid_w + ix
if tmp_val > max_val:
max_val = tmp_val
max_index = tmp_index
# }}
top_data[n, c, ph, pw] = max_val
self.argmax_data[n, c, ph, pw] = max_index
return top_data,
def forward_gpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = cuda.cupy.empty((n_rois, channels, self.outh,
self.outw), dtype=bottom_data.dtype)
self.argmax_data = cuda.cupy.empty(top_data.shape, numpy.int32)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T bottom_data, T spatial_scale, int32 channels,
int32 height, int32 width, int32 pooled_height, int32 pooled_width,
int32 sampling_ratio_h, int32 sampling_ratio_w,
raw T bottom_rois, raw int32 bottom_roi_indices
''',
'T top_data, int32 argmax_data',
'''
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height)
/ static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width)
/ static_cast<T>(pooled_width);
int bottom_data_offset =
(roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
T max_val = - (T) (1.0 / 0.0);
int max_index = -1;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g. iy = 0, 1
{
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
int y_low, y_high;
bool y_ret = get_bounds(y, height, y_low, y_high);
if (!y_ret) continue;
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
int x_low, x_high;
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret) continue;
// bilinear_interpolation {{
T w1, w2, w3, w4;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
T tmp_val = 0.;
if (w1 > 0 && y_low >= 0 && x_low >= 0) {
T v1 = bottom_data[
bottom_data_offset + y_low * width + x_low];
tmp_val += w1 * v1;
}
if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
T v2 = bottom_data[
bottom_data_offset + y_low * width + x_high];
tmp_val += w2 * v2;
}
if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
T v3 = bottom_data[
bottom_data_offset + y_high * width + x_low];
tmp_val += w3 * v3;
}
if (w4 > 0 && y_high <= height - 1 &&
x_high <= width - 1) {
T v4 = bottom_data[
bottom_data_offset + y_high * width + x_high];
tmp_val += w4 * v4;
}
int tmp_index = iy * roi_bin_grid_w + ix;
if (tmp_val > max_val) {
max_val = tmp_val;
max_index = tmp_index;
}
// }}
}
}
top_data = max_val;
argmax_data = max_index;
''',
'roi_max_align_2d_fwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(bottom_data, self.spatial_scale, channels, height, width,
self.outh, self.outw, sampling_ratio_h, sampling_ratio_w,
bottom_rois, bottom_roi_indices, top_data, self.argmax_data)
return top_data,
def backward_cpu(self, inputs, gy):
bottom_rois, bottom_roi_indices = inputs[1:]
channels, height, width = self._bottom_data_shape[1:]
bottom_diff = numpy.zeros(self._bottom_data_shape, gy[0].dtype)
spatial_scale = self.spatial_scale
pooled_height = self.outh
pooled_width = self.outw
top_diff = gy[0]
for i in six.moves.range(top_diff.size):
pw = i % pooled_width
ph = int(i / pooled_width) % pooled_height
c = int(i / pooled_width / pooled_height) % channels
n = int(i / pooled_width / pooled_height / channels)
roi_batch_ind = bottom_roi_indices[n]
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 1.)
roi_width = max(roi_end_w - roi_start_w, 1.)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
top_diff_this_bin = top_diff[n, c, ph, pw]
max_index = self.argmax_data[n, c, ph, pw]
if max_index != -1:
if self.sampling_ratio[0] is None:
roi_bin_grid_h = numpy.ceil(roi_height / pooled_height)
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = numpy.ceil(roi_width / pooled_width)
else:
roi_bin_grid_w = self.sampling_ratio[1]
iy = int(max_index / roi_bin_grid_w)
ix = max_index % roi_bin_grid_w
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
# bilinear_interpolation_gradient {{
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
if w1 > 0 and y_low >= 0 and x_low >= 0:
g1 = top_diff_this_bin * w1
bottom_diff[roi_batch_ind, c, y_low, x_low] += g1
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
g2 = top_diff_this_bin * w2
bottom_diff[roi_batch_ind, c, y_low, x_high] += g2
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
g3 = top_diff_this_bin * w3
bottom_diff[roi_batch_ind, c, y_high, x_low] += g3
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
g4 = top_diff_this_bin * w4
bottom_diff[roi_batch_ind, c, y_high, x_high] += g4
# }}
return bottom_diff, None, None
def backward_gpu(self, inputs, gy):
utils.nondeterministic('atomicAdd')
bottom_rois, bottom_roi_indices = inputs[1:]
channels, height, width = self._bottom_data_shape[1:]
bottom_diff = cuda.cupy.zeros(self._bottom_data_shape, gy[0].dtype)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T top_diff, T spatial_scale,
int32 channels, int32 height, int32 width,
int32 pooled_height, int32 pooled_width,
int32 sampling_ratio_h, int32 sampling_ratio_w,
raw T bottom_rois, raw int32 bottom_roi_indices
''',
'raw T bottom_diff, raw int32 argmax_data',
'''
// (n, c, h, w) coords in bottom data
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
            // Do not use rounding; this implementation detail is critical
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) /
static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) /
static_cast<T>(pooled_width);
int bottom_diff_offset =
(roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
int max_index = argmax_data[top_offset + ph * pooled_width + pw];
if (max_index != -1) {
T top_diff_this_bin =
top_diff[top_offset + ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
int iy = max_index / roi_bin_grid_w;
int ix = max_index % roi_bin_grid_w;
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// bilinear_interpolation_gradient {{
int y_low, x_low, y_high, x_high;
T w1, w2, w3, w4;
bool y_ret = get_bounds(y, height, y_low, y_high);
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret || !y_ret) continue;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
if (w1 > 0 && y_low >= 0 && x_low >= 0) {
T g1 = top_diff_this_bin * w1;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_low * width + x_low], g1);
}
if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
T g2 = top_diff_this_bin * w2;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_low * width + x_high], g2);
}
if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
T g3 = top_diff_this_bin * w3;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_high * width + x_low], g3);
}
if (w4 > 0 && y_high <= height - 1 && x_high <= width - 1) {
T g4 = top_diff_this_bin * w4;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_high * width + x_high], g4);
}
}
// }}
''',
'roi_max_align_2d_bwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(gy[0], self.spatial_scale, channels, height, width,
self.outh, self.outw, sampling_ratio_h, sampling_ratio_w,
bottom_rois, bottom_roi_indices, bottom_diff, self.argmax_data,
size=gy[0].size)
return bottom_diff, None, None
def roi_max_align_2d(
x, rois, roi_indices, outsize, spatial_scale, sampling_ratio=None
):
"""Spatial Region of Interest (ROI) max align function.
This function acts similarly to
    :func:`~chainer.functions.roi_max_pooling_2d`, but it computes the
    maximum of the input spatial patch, using bilinear interpolation, for
    each channel within the region of interest.
Args:
x (~chainer.Variable): Input variable. The shape is expected to be
            4 dimensional: ``(n: batch, c: channel, h: height, w: width)``.
rois (~chainer.Variable): Input roi variable. The shape is expected to
be ``(n: data size, 4)``, and each datum is set as below:
``(y_min, x_min, y_max, x_max)``.
        roi_indices (~chainer.Variable): Input ROI index variable. The shape
            is expected to be ``(n: data size, )``.
        outsize ((int, int) or int): Expected output size after pooling,
            as ``(height, width)``. ``outsize=o`` and ``outsize=(o, o)``
            are equivalent.
        spatial_scale (float): Scale by which the ROI is resized.
        sampling_ratio ((int, int) or int): Sampling step for the alignment.
            It must be an integer of at least :math:`1` or :obj:`None`, and
            the value is decided automatically when :obj:`None` is passed.
            Using different ratios for the height and width axes is also
            supported by passing a tuple of ints as
            ``(sampling_ratio_h, sampling_ratio_w)``.
            ``sampling_ratio=s`` and ``sampling_ratio=(s, s)`` are equivalent.
Returns:
~chainer.Variable: Output variable.
See the original paper proposing ROIAlign:
`Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.
"""
return ROIMaxAlign2D(outsize, spatial_scale, sampling_ratio)(
x, rois, roi_indices)
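# Usage sketch (illustrative shapes, not part of the original module): two
# ROIs given as (y_min, x_min, y_max, x_max) in image coordinates, with
# per-ROI batch indices, aligned into 7x7 outputs on the CPU path.
if __name__ == '__main__':
    x = numpy.random.randn(1, 3, 38, 50).astype(numpy.float32)
    rois = numpy.array([[0., 0., 240., 320.],
                        [80., 100., 360., 500.]], dtype=numpy.float32)
    roi_indices = numpy.array([0, 0], dtype=numpy.int32)
    y = roi_max_align_2d(x, rois, roi_indices, outsize=7,
                         spatial_scale=1. / 16, sampling_ratio=2)
    print(y.shape)  # (2, 3, 7, 7)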
| 22,686
| 41.564728
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/pooling/roi_average_pooling_2d.py
|
# Modified work:
# -----------------------------------------------------------------------------
# Copyright (c) 2015 Preferred Infrastructure, Inc.
# Copyright (c) 2015 Preferred Networks, Inc.
# -----------------------------------------------------------------------------
# Original work of _roi_pooling_slice, forward_cpu and backward_cpu:
# -----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# Original work of forward_gpu and backward_gpu:
# -----------------------------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see fast-rcnn/LICENSE for details]
# Written by Ross Girshick
# -----------------------------------------------------------------------------
import numbers
import numpy
import six
from chainer.backends import cuda
from chainer import function
from chainer.functions.pooling.roi_pooling_2d import _roi_pooling_slice
from chainer import utils
from chainer.utils import collections_abc
from chainer.utils import type_check
def _pair(x):
if isinstance(x, collections_abc.Iterable):
return x
return x, x
class ROIAveragePooling2D(function.Function):
"""RoI average pooling over a set of 2d planes."""
def __init__(self, outsize, spatial_scale):
outh, outw = _pair(outsize)
if not (isinstance(outh, numbers.Integral) and outh > 0):
raise TypeError(
'outsize[0] must be positive integer: {}, {}'
.format(type(outh), outh))
if not (isinstance(outw, numbers.Integral) and outw > 0):
raise TypeError(
'outsize[1] must be positive integer: {}, {}'
.format(type(outw), outw))
if isinstance(spatial_scale, numbers.Integral):
spatial_scale = float(spatial_scale)
if not (isinstance(spatial_scale, numbers.Real) and
spatial_scale > 0):
raise TypeError(
'spatial_scale must be a positive float number: {}, {}'
.format(type(spatial_scale), spatial_scale))
self.outh, self.outw = outh, outw
self.spatial_scale = spatial_scale
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, roi_type, roi_index_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 4,
x_type.dtype == roi_type.dtype,
roi_type.ndim == 2,
roi_type.shape[1] == 4,
roi_index_type.dtype == numpy.int32,
roi_index_type.ndim == 1,
roi_type.shape[0] == roi_index_type.shape[0],
)
def forward_cpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = numpy.zeros((n_rois, channels, self.outh, self.outw),
dtype=bottom_data.dtype)
for i_roi in six.moves.range(n_rois):
idx = bottom_roi_indices[i_roi]
ymin, xmin, ymax, xmax = bottom_rois[i_roi]
ymin = int(round(ymin * self.spatial_scale))
xmin = int(round(xmin * self.spatial_scale))
ymax = int(round(ymax * self.spatial_scale))
xmax = int(round(xmax * self.spatial_scale))
roi_height = max(ymax - ymin, 1)
roi_width = max(xmax - xmin, 1)
strideh = 1. * roi_height / self.outh
stridew = 1. * roi_width / self.outw
for outh in six.moves.range(self.outh):
sliceh, lenh = _roi_pooling_slice(
outh, strideh, height, ymin)
if sliceh.stop <= sliceh.start:
continue
for outw in six.moves.range(self.outw):
slicew, lenw = _roi_pooling_slice(
outw, stridew, width, xmin)
if slicew.stop <= slicew.start:
continue
roi_data = bottom_data[int(idx), :, sliceh, slicew]\
.reshape(channels, -1)
top_data[i_roi, :, outh, outw] =\
numpy.average(roi_data, axis=1)
return top_data,
def forward_gpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = cuda.cupy.empty((n_rois, channels, self.outh,
self.outw), dtype=bottom_data.dtype)
cuda.elementwise(
'''
raw T bottom_data, raw T bottom_rois, raw int32 bottom_roi_indices,
T spatial_scale, int32 channels, int32 height, int32 width,
int32 pooled_height, int32 pooled_width
''',
'T top_data',
'''
// pos in output filter
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
int roi_batch_ind = bottom_roi_indices[n];
int roi_start_h = round(bottom_rois[n * 4 + 0] * spatial_scale);
int roi_start_w = round(bottom_rois[n * 4 + 1] * spatial_scale);
int roi_end_h = round(bottom_rois[n * 4 + 2] * spatial_scale);
int roi_end_w = round(bottom_rois[n * 4 + 3] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_height = max(roi_end_h - roi_start_h, 1);
int roi_width = max(roi_end_w - roi_start_w, 1);
T bin_size_h = static_cast<T>(roi_height)
/ static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width)
/ static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
T sumval = 0.;
T count = (hend - hstart) * (wend - wstart);
int data_offset = (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
sumval += bottom_data[data_offset + bottom_index];
}
}
top_data = is_empty ? 0. : sumval / count;
''', 'roi_average_pooling_2d_fwd'
)(bottom_data, bottom_rois, bottom_roi_indices, self.spatial_scale,
channels, height, width, self.outh, self.outw, top_data)
return top_data,
def backward_cpu(self, inputs, gy):
bottom_rois, bottom_roi_indices = inputs[1:]
channels, height, width = self._bottom_data_shape[1:]
n_rois = bottom_rois.shape[0]
bottom_diff = numpy.zeros(self._bottom_data_shape, gy[0].dtype)
for i_roi in six.moves.range(n_rois):
idx = bottom_roi_indices[i_roi]
ymin, xmin, ymax, xmax = bottom_rois[i_roi]
ymin = int(round(ymin * self.spatial_scale))
xmin = int(round(xmin * self.spatial_scale))
ymax = int(round(ymax * self.spatial_scale))
xmax = int(round(xmax * self.spatial_scale))
roi_height = max(ymax - ymin, 1)
roi_width = max(xmax - xmin, 1)
strideh = 1. * roi_height / self.outh
stridew = 1. * roi_width / self.outw
for outh in six.moves.range(self.outh):
sliceh, lenh = _roi_pooling_slice(
outh, strideh, height, ymin)
if sliceh.stop <= sliceh.start:
continue
for outw in six.moves.range(self.outw):
slicew, lenw = _roi_pooling_slice(
outw, stridew, width, xmin)
if slicew.stop <= slicew.start:
continue
diff_val = gy[0][i_roi, :, outh, outw]\
.reshape(channels, 1, 1)
diff_val = diff_val / lenh / lenw
bottom_diff[int(idx), :, sliceh, slicew] \
+= diff_val
return bottom_diff, None, None
def backward_gpu(self, inputs, gy):
utils.nondeterministic('atomicAdd')
bottom_rois, bottom_roi_indices = inputs[1:]
channels, height, width = self._bottom_data_shape[1:]
bottom_diff = cuda.cupy.zeros(
self._bottom_data_shape, gy[0].dtype)
cuda.elementwise(
'''
raw T top_diff, raw T bottom_rois, raw int32 bottom_roi_indices,
T spatial_scale, int32 channels, int32 height, int32 width,
int32 pooled_height, int32 pooled_width
''',
'raw T bottom_diff',
'''
// pos in output filter
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
int roi_batch_ind = bottom_roi_indices[n];
int roi_start_h = round(bottom_rois[n * 4 + 0] * spatial_scale);
int roi_start_w = round(bottom_rois[n * 4 + 1] * spatial_scale);
int roi_end_h = round(bottom_rois[n * 4 + 2] * spatial_scale);
int roi_end_w = round(bottom_rois[n * 4 + 3] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_height = max(roi_end_h - roi_start_h, 1);
int roi_width = max(roi_end_w - roi_start_w, 1);
T bin_size_h = static_cast<T>(roi_height)
/ static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width)
/ static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int bottom_diff_offset =
(roi_batch_ind * channels + c) * height * width;
int top_offset =
(n * channels + c) * pooled_height * pooled_width;
T count = (hend - hstart) * (wend - wstart);
T diff_val = is_empty ? 0. :
top_diff[top_offset + ph * pooled_width + pw] / count;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
atomicAdd(
&bottom_diff[bottom_diff_offset + bottom_index],
diff_val);
}
}
''', 'roi_average_pooling_2d_bwd'
)(gy[0], bottom_rois, bottom_roi_indices, self.spatial_scale,
channels, height, width, self.outh, self.outw,
bottom_diff, size=gy[0].size)
return bottom_diff, None, None
def roi_average_pooling_2d(x, rois, roi_indices, outsize, spatial_scale):
"""Spatial Region of Interest (ROI) average pooling function.
    This function acts similarly to
    :func:`~chainer.functions.average_pooling_2d`, but it computes the average
    of the input spatial patch for each channel within the region of interest.
    Args:
        x (~chainer.Variable): Input variable. The shape is expected to be
            4 dimensional: (n: batch, c: channel, h: height, w: width).
        rois (~chainer.Variable): Input roi variable. The shape is expected to
            be (n: data size, 4), and each datum is set as below:
            (y_min, x_min, y_max, x_max).
        roi_indices (~chainer.Variable): Input roi index variable. The shape
            is expected to be (n: data size, ).
        outsize ((int, int) or int): Expected output size after pooling
            (height, width). ``outsize=o`` and ``outsize=(o, o)``
            are equivalent.
        spatial_scale (float): Scale by which the roi is resized.
Returns:
~chainer.Variable: Output variable.
See the original paper proposing ROIPooling:
`Fast R-CNN <https://arxiv.org/abs/1504.08083>`_.
"""
return ROIAveragePooling2D(outsize, spatial_scale)(x, rois, roi_indices)
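

# A minimal usage sketch (editorial addition, not part of the original
# module). Shapes follow the docstring above; the random input values are
# illustrative assumptions.
if __name__ == '__main__':
    x = numpy.random.randn(1, 3, 12, 8).astype(numpy.float32)
    # One ROI covering the whole feature map, given as
    # (y_min, x_min, y_max, x_max), plus the batch index it refers to.
    rois = numpy.array([[0., 0., 12., 8.]], dtype=numpy.float32)
    roi_indices = numpy.array([0], dtype=numpy.int32)
    y = roi_average_pooling_2d(
        x, rois, roi_indices, outsize=(6, 4), spatial_scale=1.0)
    assert y.shape == (1, 3, 6, 4)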
chainer-master/chainer/functions/pooling/pooling_nd.py
import six
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import conv
from chainer.utils import conv_nd
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
class _PoolingND(function_node.FunctionNode):
"""Base class of pooling function over a set of N-dimensional planes."""
_cudnn_pool = None
def __init__(self, ndim, ksize, stride=None, pad=0, cover_all=True,
return_indices=False):
if stride is None:
stride = ksize
if ndim <= 0:
raise ValueError(
'pooling operation requires at least one spatial dimension.')
super(_PoolingND, self).__init__()
self.ndim = ndim
self.ksize = conv_nd.as_tuple(ksize, ndim)
self.stride = conv_nd.as_tuple(stride, ndim)
self.pad = conv_nd.as_tuple(pad, ndim)
self.cover_all = cover_all
self.return_indices = return_indices
@property
def is_cudnn_used(self):
return self._cudnn_pool is not None
def get_cudnn_pool_mode(self):
raise NotImplementedError()
def check_type_forward(self, in_types):
type_check.expect(
in_types.size() == 1,
in_types[0].dtype.kind == 'f',
in_types[0].ndim == 2 + self.ndim,
in_types[0].size > 0,
)
def forward_cudnn(self, x):
self._cudnn_pool = _CudnnPoolingNDImpl(self)
return self._cudnn_pool.forward(x)
def backward_cudnn(self, gy):
assert self._cudnn_pool is not None
return self._cudnn_pool.backward(gy)
class _CudnnPoolingNDImpl(object):
"""cuDNN pooling implementation"""
def __init__(self, func):
assert isinstance(func, _PoolingND)
self.func = func
def forward(self, x):
func = self.func
ksize = func.ksize
stride = func.stride
pad = func.pad
cover_all = func.cover_all
pool_mode = func.get_cudnn_pool_mode()
x = x[0]
n, c = x.shape[:2]
dims = x.shape[2:]
ys = tuple(conv.get_conv_outsize(d, k, s, p, cover_all)
for d, k, s, p in six.moves.zip(dims, ksize, stride, pad))
y_shape = (n, c) + ys
y = cuda.cupy.empty(y_shape, dtype=x.dtype)
cudnn.pooling_forward(x, y, ksize, stride, pad, pool_mode)
func.retain_inputs((0,))
func.retain_outputs((0,))
return y,
def backward(self, gy):
func = self.func
ksize = func.ksize
stride = func.stride
pad = func.pad
pool_mode = func.get_cudnn_pool_mode()
x = func.get_retained_inputs()[0].array
y = func.get_retained_outputs()[0].array
gx = cudnn.pooling_backward(x, y, gy[0], ksize, stride, pad, pool_mode)
return gx,
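
# Editorial sketch (not part of the original module): a concrete subclass
# such as AveragePoolingND (see average_pooling_nd.py below) implements
# ``get_cudnn_pool_mode`` and routes its GPU path through
# ``forward_cudnn``/``backward_cudnn``, e.g. (hypothetical):
#
#     class MyPoolingND(_PoolingND):
#         def get_cudnn_pool_mode(self):
#             return cuda.cuda.cudnn.CUDNN_POOLING_MAX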
chainer-master/chainer/functions/pooling/average_pooling_nd_kernel.py
from chainer.functions.pooling import pooling_nd_kernel
class AveragePoolingNDKernelForward(pooling_nd_kernel.PoolingNDKernelForward):
def name(self):
# avg_pool_{N}d_fwd
return 'avg'
def in_params(self):
# 2D: raw T in, int32 d_0, int32 d_1, int32 out_0, int32 out_1,
# int32 k_0, int32 k_1, int32 s_0, int32 s_1, int32 p_0,
# int32 p_1, T coeff
return ['T coeff']
def before(self):
return 'T val = 0;'
def main(self, offset, xs):
# 2D: val = val + in[offset_1];
return 'val = val + in[{}];'.format(offset)
def after(self, out_xs):
return 'out = val * coeff;'
class AveragePoolingNDKernelBackward(
pooling_nd_kernel.PoolingNDKernelBackward):
def name(self):
# avg_pool_{N}d_bwd
return 'avg'
def before(self):
return 'T val = 0;'
def main(self, offset, xs, out_xs):
# 2D: val = val + gy[offset_1];
return 'val = val + gy[{}];'.format(offset)
def after(self, xs):
return 'gx = val;'
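
# A small sketch (editorial addition) of how these generators are consumed;
# ``generate(ndim)`` is the classmethod used by AveragePoolingND in
# average_pooling_nd.py, returning the pieces handed to ``cuda.elementwise``.
if __name__ == '__main__':
    in_params, out_params, operation, name = \
        AveragePoolingNDKernelForward.generate(2)
    print(name)       # e.g. 'avg_pool_2d_fwd'
    print(operation)  # generated CUDA C for the 2-D forward kernel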
chainer-master/chainer/functions/pooling/average_pooling_nd.py
import functools
import operator
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.pooling import average_pooling_nd_kernel
from chainer.functions.pooling import pooling_nd
from chainer.utils import conv
from chainer.utils import conv_nd
import chainerx
def _get_conv_slices(
size, k, s, p, cover_all=False, d=1, include_pad=True, dtype='l'):
"""Returns the patch slices.
Returns:
        A tuple of two 1-D :class:`numpy.ndarray`\\ s.
Each represents starting and ending indices of the patches.
"""
n = conv.get_conv_outsize(size, k, s, p, cover_all, d)
starts = -p + numpy.arange(n, dtype=dtype) * s
ends = starts + k
if not include_pad:
starts = numpy.maximum(starts, 0)
ends = numpy.minimum(ends, size)
return starts, ends
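
# Worked example (editorial comment): with size=10, k=3, s=2, p=1 and
# include_pad=True there are five patches, so ``starts`` is
# [-1, 1, 3, 5, 7] and ``ends`` is [2, 4, 6, 8, 10]. With
# include_pad=False the first start is clipped to 0, so the patch widths
# (ends - starts) count only the non-padded elements.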
class AveragePoolingND(pooling_nd._PoolingND):
"""Average pooling over a set of N-dimensional planes.
.. warning::
This feature is experimental. The interface can change in the future.
"""
def __init__(
self, ndim, ksize, stride=None, pad=0, cover_all=False,
pad_value=0):
if not (pad_value is None or pad_value == 0):
raise ValueError(
'pad_value must be either 0 or None, not {}.'.format(
pad_value))
# TODO(takagi) Support cover_all mode.
if cover_all is True:
raise ValueError('`cover_all` mode is not supported yet.')
super(AveragePoolingND, self).__init__(
ndim, ksize, stride=stride, pad=pad, cover_all=cover_all)
self.pad_value = pad_value
def _get_pooling_width(self, xp, dims, dtype):
width = None
for d, k, s, p in six.moves.zip(
dims, self.ksize, self.stride, self.pad):
starts, ends = _get_conv_slices(
d, k, s, p, cover_all=self.cover_all, include_pad=False,
dtype=dtype)
w = ends - starts
if width is None:
width = w
else:
width = numpy.tensordot(width[..., None], w[None, ...], axes=1)
if xp is cuda.cupy:
width = cuda.cupy.array(width)
return width
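
    # Editorial note: for 2-D inputs the tensordot above builds the outer
    # product of the per-axis widths, i.e. width[i, j] = w_h[i] * w_w[j],
    # the number of non-padded elements in each pooling window.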
def forward_chainerx(self, inputs):
ndim = self.ndim
ksize = self.ksize
stride = self.stride
pad = self.pad
pad_value = self.pad_value
x, = inputs
if x.device.backend.name == 'cuda' and ndim not in (2, 3):
return chainer.Fallback
if pad_value == 0:
pad_mode = 'zero'
elif pad_value is None:
pad_mode = 'ignore'
else:
assert False
y = chainerx.average_pool(x, ksize, stride, pad, pad_mode)
return y,
def forward_cpu(self, inputs):
ksize = self.ksize
stride = self.stride
pad = self.pad
pad_value = self.pad_value
cover_all = self.cover_all
x, = inputs
in_shape = x.shape
in_dtype = x.dtype
col = conv_nd.im2col_nd_cpu(x, ksize, stride, pad, cover_all=cover_all)
# mean along (_, _, k_1, k_2, ..., k_N, _, ..., _)
y_axis = tuple(six.moves.range(2, 2 + len(ksize)))
if pad_value is None:
dims = x.shape[2:]
width = self._get_pooling_width(numpy, dims, x.dtype)
y = col.sum(axis=y_axis) / width
else:
assert pad_value == 0
y = col.mean(axis=y_axis)
width = None
self.width = width
self._in_shape = in_shape
self._in_dtype = in_dtype
return y,
def forward_gpu(self, inputs):
if chainer.should_use_cudnn('>=auto') and 2 <= self.ndim <= 3:
# With cuDNN v3 or greater, use cuDNN implementation for inputs
# with spatial dimensions of two or more.
return self.forward_cudnn(inputs)
ndim = self.ndim
ksize = self.ksize
stride = self.stride
pad = self.pad
pad_value = self.pad_value
cover_all = self.cover_all
x, = inputs
in_shape = x.shape
in_dtype = x.dtype
n, c = in_shape[:2]
idims = in_shape[2:]
odims = tuple(
conv.get_conv_outsize(d, k, s, p, cover_all=cover_all)
for (d, k, s, p) in six.moves.zip(idims, ksize, stride, pad))
# (n, c, y_1, y_2, ..., y_N)
y_shape = (n, c) + odims
y = cuda.cupy.empty(y_shape, dtype=x.dtype)
if pad_value is None:
coeff = self._get_pooling_width(cuda.cupy, idims, x.dtype)
coeff = cuda.cupy.reciprocal(coeff, out=coeff)
else:
assert pad_value == 0
coeff = 1. / functools.reduce(operator.mul, ksize)
in_params, out_params, operation, name = \
average_pooling_nd_kernel.AveragePoolingNDKernelForward.generate(
ndim)
cuda.elementwise(in_params, out_params, operation, name)(
x.reduced_view(),
*(idims + odims + ksize + stride + pad + (coeff, y)))
self.coeff = coeff
self._in_shape = in_shape
self._in_dtype = in_dtype
return y,
def backward(self, indexes, gy):
return AveragePoolingNDGrad(self).apply(gy)
def get_cudnn_pool_mode(self):
if self.pad_value is None:
return cuda.cuda.cudnn.CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING
else:
assert self.pad_value == 0
return cuda.cuda.cudnn.CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING
class AveragePoolingNDGrad(function_node.FunctionNode):
def __init__(self, apoolnd):
self.func = apoolnd
def forward_cpu(self, gys):
func = self.func
pad_value = func.pad_value
ksize = func.ksize
stride = func.stride
pad = func.pad
in_shape = func._in_shape
gy, = gys
idims = in_shape[2:]
odims = gy.shape[2:]
colon = slice(None, None, None)
is_pad_value_none = pad_value is None
if is_pad_value_none:
numpy.divide(gy, func.width, out=gy)
gy_index = (colon, colon) + (None,) * len(idims)
gcol_reps = (1, 1) + ksize + (1,) * len(odims)
gcol = numpy.tile(gy[gy_index], gcol_reps)
gx = conv_nd.col2im_nd_cpu(gcol, stride, pad, idims)
if not is_pad_value_none:
gx /= functools.reduce(operator.mul, ksize)
return gx,
def forward_gpu(self, gys):
func = self.func
if func.is_cudnn_used:
return func.backward_cudnn(gys)
ndim = func.ndim
pad_value = func.pad_value
ksize = func.ksize
stride = func.stride
pad = func.pad
in_shape = func._in_shape
in_dtype = func._in_dtype
is_pad_value_none = pad_value is None
gy, = gys
n, c = in_shape[:2]
idims = in_shape[2:]
odims = gy.shape[2:]
if is_pad_value_none:
# This conversion from chainerx to cupy exists here for
# double backward of chainerx on cuda.
coeff = backend.from_chx(func.coeff)
gy *= coeff
gx = cuda.cupy.empty(in_shape, in_dtype)
in_params, out_params, operation, name = \
average_pooling_nd_kernel.AveragePoolingNDKernelBackward.generate(
ndim)
cuda.elementwise(in_params, out_params, operation, name)(
gy.reduced_view(),
*(idims + odims + ksize + stride + pad + (gx,)))
if not is_pad_value_none:
gx /= functools.reduce(operator.mul, ksize)
return gx,
def backward(self, indexes, grad_outputs):
func = self.func
ndim = func.ndim
pad_value = func.pad_value
ksize = func.ksize
stride = func.stride
pad = func.pad
return AveragePoolingND(
ndim, ksize, stride, pad, cover_all=False, pad_value=pad_value
).apply(grad_outputs)
def average_pooling_nd(x, ksize, stride=None, pad=0, pad_value=0):
"""N-dimensionally spatial average pooling function.
.. warning::
This feature is experimental. The interface can change in the future.
    This function provides an N-dimensionally generalized version of
    :func:`~chainer.functions.average_pooling_2d`. This acts similarly to
    :func:`~chainer.functions.convolution_nd`, but it computes the average of
    the input spatial patch for each channel, without any parameter, instead
    of computing the inner products.
    Args:
        x (~chainer.Variable): Input variable.
        ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
            ``ksize=(k, k, ..., k)`` are equivalent.
        stride (int or tuple of ints or None): Stride of pooling applications.
            ``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent. If
            ``None`` is specified, the same stride as the pooling window
            size is used.
        pad (int or tuple of ints): Spatial padding width for the input array.
            ``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
        pad_value (0 or None):
            Value to fill the padded region when calculating average.
            If ``None`` is specified, the padded region is ignored.
            The default value is ``0``, so averages near the border are
            biased towards zero.
    Returns:
        ~chainer.Variable: Output variable.
    .. note::
        This function currently does not support ``cover_all`` mode,
        unlike :func:`max_pooling_nd`. Average pooling runs in
        non-cover-all mode.
"""
ndim = len(x.shape[2:])
return AveragePoolingND(
ndim, ksize, stride=stride, pad=pad, pad_value=pad_value
).apply((x,))[0]
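
# A minimal usage sketch (editorial addition) contrasting the two supported
# ``pad_value`` settings on a padded 1-D input.
if __name__ == '__main__':
    x = chainer.Variable(numpy.ones((1, 1, 4), dtype=numpy.float32))
    # pad_value=0: padded zeros are counted, biasing border averages.
    y0 = average_pooling_nd(x, ksize=3, stride=1, pad=1, pad_value=0)
    # pad_value=None: padding is ignored, so every output stays 1.
    y1 = average_pooling_nd(x, ksize=3, stride=1, pad=1, pad_value=None)
    print(y0.array)  # approximately [[[0.6667, 1., 1., 0.6667]]]
    print(y1.array)  # [[[1., 1., 1., 1.]]]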
def average_pooling_1d(x, ksize, stride=None, pad=0, pad_value=0):
"""1-dimensional spatial average pooling function.
.. warning::
This feature is experimental. The interface can change in the future.
.. note::
This function calls :func:`~chainer.functions.average_pooling_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.average_pooling_nd`.
"""
if len(x.shape[2:]) != 1:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 1. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return average_pooling_nd(x, ksize, stride, pad, pad_value)
def average_pooling_3d(x, ksize, stride=None, pad=0, pad_value=0):
"""3-dimensional spatial average pooling function.
.. warning::
This feature is experimental. The interface can change in the future.
.. note::
This function calls :func:`~chainer.functions.average_pooling_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.average_pooling_nd`.
"""
if len(x.shape[2:]) != 3:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 3. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return average_pooling_nd(x, ksize, stride, pad, pad_value)
chainer-master/chainer/functions/pooling/pooling_2d.py
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import collections_abc
from chainer.utils import conv
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
def _pair(x):
if isinstance(x, collections_abc.Iterable):
return x
return x, x
class Pooling2D(function_node.FunctionNode):
"""Base class of pooling function over a set of 2d planes."""
def __init__(self, ksize, stride=None, pad=0, cover_all=True,
return_indices=False):
if stride is None:
stride = ksize
self.kh, self.kw = _pair(ksize)
self.sy, self.sx = _pair(stride)
self.ph, self.pw = _pair(pad)
self.cover_all = cover_all
self.return_indices = return_indices
self._used_cudnn = False
def check_type_forward(self, in_types):
type_check.expect(
in_types.size() == 1,
in_types[0].dtype.kind == 'f',
in_types[0].ndim == 4
)
def forward_gpu(self, x):
self.retain_inputs((0,))
self._used_cudnn = True
# Implementation using cudnn
x = x[0]
n, c, h, w = x.shape
y_h = conv.get_conv_outsize(
h, self.kh, self.sy, self.ph, self.cover_all)
assert y_h > 0, 'Height in the output should be positive.'
y_w = conv.get_conv_outsize(
w, self.kw, self.sx, self.pw, self.cover_all)
assert y_w > 0, 'Width in the output should be positive.'
y = cuda.cupy.empty((n, c, y_h, y_w), dtype=x.dtype)
cudnn.pooling_forward(
x, y,
(self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),
self._get_pool_mode())
self.retain_outputs((0,))
return y,
def backward_gpu(self, x, gy):
# Implementation using cudnn
x = x[0]
y = self.get_retained_outputs()[0].array
gx = cudnn.pooling_backward(
x, y, gy[0],
(self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),
self._get_pool_mode())
return gx,
def _get_pool_mode(self):
raise NotImplementedError()
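
# Editorial sketch (not part of the original module): a concrete subclass
# supplies the cuDNN pooling mode, e.g. (hypothetical):
#
#     class MaxPooling2D(Pooling2D):
#         def _get_pool_mode(self):
#             return cuda.cuda.cudnn.CUDNN_POOLING_MAX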
chainer-master/chainer/functions/pooling/roi_max_pooling_2d.py
# Modified work:
# -----------------------------------------------------------------------------
# Copyright (c) 2015 Preferred Infrastructure, Inc.
# Copyright (c) 2015 Preferred Networks, Inc.
# -----------------------------------------------------------------------------
# Original work of _roi_pooling_slice, forward_cpu and backward_cpu:
# -----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# Original work of forward_gpu and backward_gpu:
# -----------------------------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see fast-rcnn/LICENSE for details]
# Written by Ross Girshick
# -----------------------------------------------------------------------------
import numbers
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function
from chainer import utils
from chainer.utils import type_check
from chainer.functions.pooling.roi_pooling_2d import _roi_pooling_slice
def _pair(x):
if isinstance(x, chainer.utils.collections_abc.Iterable):
return x
return x, x
class ROIMaxPooling2D(function.Function):
"""RoI max pooling over a set of 2d planes."""
def __init__(self, outsize, spatial_scale):
outh, outw = _pair(outsize)
        if not (isinstance(outh, numbers.Integral) and outh > 0):
            raise TypeError(
                'outsize[0] must be a positive integer: {}, {}'
                .format(type(outh), outh))
        if not (isinstance(outw, numbers.Integral) and outw > 0):
            raise TypeError(
                'outsize[1] must be a positive integer: {}, {}'
                .format(type(outw), outw))
if isinstance(spatial_scale, numbers.Integral):
spatial_scale = float(spatial_scale)
if not (isinstance(spatial_scale, numbers.Real) and
spatial_scale > 0):
raise TypeError(
'spatial_scale must be a positive float number: {}, {}'
.format(type(spatial_scale), spatial_scale))
self.outh, self.outw = outh, outw
self.spatial_scale = spatial_scale
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, roi_type, roi_index_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 4,
x_type.dtype == roi_type.dtype,
roi_type.ndim == 2,
roi_type.shape[1] == 4,
roi_index_type.dtype == numpy.int32,
roi_index_type.ndim == 1,
roi_type.shape[0] == roi_index_type.shape[0],
)
def forward_cpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = numpy.full(
(n_rois, channels, self.outh, self.outw),
- numpy.inf, dtype=bottom_data.dtype)
self.argmax_data = - numpy.ones(top_data.shape, numpy.int32)
for i_roi in six.moves.range(n_rois):
idx = bottom_roi_indices[i_roi]
ymin, xmin, ymax, xmax = bottom_rois[i_roi]
ymin = int(round(ymin * self.spatial_scale))
xmin = int(round(xmin * self.spatial_scale))
ymax = int(round(ymax * self.spatial_scale))
xmax = int(round(xmax * self.spatial_scale))
roi_height = max(ymax - ymin, 1)
roi_width = max(xmax - xmin, 1)
strideh = 1. * roi_height / self.outh
stridew = 1. * roi_width / self.outw
for outh in six.moves.range(self.outh):
sliceh, lenh = _roi_pooling_slice(
outh, strideh, height, ymin)
if sliceh.stop <= sliceh.start:
continue
for outw in six.moves.range(self.outw):
slicew, lenw = _roi_pooling_slice(
outw, stridew, width, xmin)
if slicew.stop <= slicew.start:
continue
roi_data = bottom_data[int(idx), :, sliceh, slicew]\
.reshape(channels, -1)
top_data[i_roi, :, outh, outw] =\
numpy.max(roi_data, axis=1)
                    # get the max idx with respect to feature map coordinates
max_idx_slice = numpy.unravel_index(
numpy.argmax(roi_data, axis=1), (lenh, lenw))
max_idx_slice_h = max_idx_slice[0] + sliceh.start
max_idx_slice_w = max_idx_slice[1] + slicew.start
max_idx_slice = max_idx_slice_h * width + max_idx_slice_w
self.argmax_data[i_roi, :, outh, outw] = max_idx_slice
return top_data,
def forward_gpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = cuda.cupy.empty((n_rois, channels, self.outh,
self.outw), dtype=bottom_data.dtype)
self.argmax_data = cuda.cupy.empty(top_data.shape, numpy.int32)
cuda.elementwise(
'''
raw T bottom_data, raw T bottom_rois, raw int32 bottom_roi_indices,
T spatial_scale, int32 channels, int32 height, int32 width,
int32 pooled_height, int32 pooled_width
''',
'T top_data, int32 argmax_data',
'''
// pos in output filter
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
int roi_batch_ind = bottom_roi_indices[n];
int roi_start_h = round(bottom_rois[n * 4 + 0] * spatial_scale);
int roi_start_w = round(bottom_rois[n * 4 + 1] * spatial_scale);
int roi_end_h = round(bottom_rois[n * 4 + 2] * spatial_scale);
int roi_end_w = round(bottom_rois[n * 4 + 3] * spatial_scale);
// Force malformed ROIs to be 1x1
            int roi_height = max(roi_end_h - roi_start_h, 1);
int roi_width = max(roi_end_w - roi_start_w, 1);
T bin_size_h = static_cast<T>(roi_height)
/ static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width)
/ static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
// Define an empty pooling region to be zero
T maxval = - (T) (1.0 / 0.0);
// If nothing is pooled, argmax=-1 causes nothing to be backprop'd
int maxidx = -1;
int data_offset = (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[data_offset + bottom_index] > maxval) {
maxval = bottom_data[data_offset + bottom_index];
maxidx = bottom_index;
}
}
}
top_data = maxval;
argmax_data = maxidx;
''', 'roi_max_pooling_2d_fwd'
)(bottom_data, bottom_rois, bottom_roi_indices,
self.spatial_scale, channels, height, width,
self.outh, self.outw, top_data, self.argmax_data)
return top_data,
def backward_cpu(self, inputs, gy):
bottom_rois, bottom_roi_indices = inputs[1:]
channels, height, width = self._bottom_data_shape[1:]
bottom_diff = numpy.zeros(self._bottom_data_shape, bottom_rois.dtype)
pooled_height = self.outh
pooled_width = self.outw
top_diff = gy[0]
for i in six.moves.range(top_diff.size):
pw = i % pooled_width
ph = int(i / pooled_width) % pooled_height
c = int(i / pooled_width / pooled_height) % channels
n = int(i / pooled_width / pooled_height / channels)
roi_batch_ind = int(bottom_roi_indices[n])
max_idx = self.argmax_data[n, c, ph, pw]
h = int(max_idx / width)
w = max_idx % width
if max_idx != -1:
bottom_diff[roi_batch_ind, c, h, w] += top_diff[
n, c, ph, pw]
return bottom_diff, None, None
def backward_gpu(self, inputs, gy):
utils.nondeterministic('atomicAdd')
bottom_rois, bottom_roi_indices = inputs[1:]
channels, height, width = self._bottom_data_shape[1:]
bottom_diff = cuda.cupy.zeros(
self._bottom_data_shape, bottom_rois.dtype)
cuda.elementwise(
'''
raw T top_diff, raw int32 argmax_data,
raw T bottom_rois, raw int32 bottom_roi_indices, int32 num_rois,
T spatial_scale, int32 channels, int32 height, int32 width,
int32 pooled_height, int32 pooled_width
''',
'raw T bottom_diff',
'''
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
int roi_batch_ind = bottom_roi_indices[n];
int bottom_diff_offset =
(roi_batch_ind * channels + c) * height * width;
int top_diff_offset =
(n * channels + c) * pooled_height * pooled_width;
int max_index =
argmax_data[top_diff_offset + ph * pooled_width + pw];
if (max_index != -1) {
atomicAdd(
&bottom_diff[bottom_diff_offset + max_index],
top_diff[top_diff_offset + ph * pooled_width + pw]);
}
''', 'roi_max_pooling_2d_bwd'
)(gy[0], self.argmax_data, bottom_rois, bottom_roi_indices,
bottom_rois.shape[0], self.spatial_scale, channels, height, width,
self.outh, self.outw, bottom_diff, size=gy[0].size)
return bottom_diff, None, None
def roi_max_pooling_2d(x, rois, roi_indices, outsize, spatial_scale):
"""Spatial Region of Interest (ROI) max pooling function.
    This function acts similarly to :func:`~chainer.functions.max_pooling_2d`,
    but it computes the maximum of the input spatial patch for each channel
    within the region of interest.
    Args:
        x (~chainer.Variable): Input variable. The shape is expected to be
            4 dimensional: (n: batch, c: channel, h: height, w: width).
        rois (~chainer.Variable): Input roi variable. The shape is expected to
            be (n: data size, 4), and each datum is set as below:
            (y_min, x_min, y_max, x_max).
        roi_indices (~chainer.Variable): Input roi index variable. The shape
            is expected to be (n: data size, ).
        outsize ((int, int) or int): Expected output size after pooling
            (height, width). ``outsize=o`` and ``outsize=(o, o)``
            are equivalent.
        spatial_scale (float): Scale by which the roi is resized.
Returns:
~chainer.Variable: Output variable.
See the original paper proposing ROIPooling:
`Fast R-CNN <https://arxiv.org/abs/1504.08083>`_.
"""
return ROIMaxPooling2D(outsize, spatial_scale)(x, rois, roi_indices)
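

# A minimal usage sketch (editorial addition, not part of the original
# module); the random input values are illustrative assumptions.
if __name__ == '__main__':
    x = numpy.random.randn(1, 3, 12, 8).astype(numpy.float32)
    rois = numpy.array([[0., 0., 12., 8.]], dtype=numpy.float32)
    roi_indices = numpy.array([0], dtype=numpy.int32)
    y = roi_max_pooling_2d(
        x, rois, roi_indices, outsize=(6, 4), spatial_scale=1.0)
    assert y.shape == (1, 3, 6, 4)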
chainer-master/chainer/functions/pooling/max_pooling_nd_kernel.py
import six
from chainer.functions.pooling import pooling_nd_kernel
from chainer.utils import conv_nd_kernel
class MaxPoolingNDKernelForward(pooling_nd_kernel.PoolingNDKernelForward):
def name(self):
# max_pool_{N}d_fwd
return 'max'
def out_params(self):
# T out, S indexes
return ['S indexes']
def before(self):
# 2D: T maxval = (T)-INFINITY;
# int argmax_0 = 0;
# int argmax_1 = 0;
def aux(argmax):
return 'int {} = 0;'.format(argmax)
self.argmaxs = conv_nd_kernel.vars('argmax', self.ndim)
argmax_decls = conv_nd_kernel.map_(aux, self.argmaxs)
return '\n'.join(['T maxval = (T)-(1.0/0.0);'] + argmax_decls)
def main(self, offset, xs):
# 2D: T v = in[offset_1];
# if (maxval < v) {
# maxval = v;
# argmax_0 = x_0;
# argmax_1 = x_1;
# }
w = conv_nd_kernel.Writer()
w.write('T v = in[{}];'.format(offset))
w.write('if (maxval < v) {', 'inc')
w.write('maxval = v;')
for argmax, x in six.moves.zip(self.argmaxs, xs):
w.write('{} = {};'.format(argmax, x))
w.write('}', 'dec')
return w.get()
def after(self, out_xs):
# 2D: out = maxval;
# int argmax_k_0 = argmax_0 + p_0 - out_x_0 * s_0;
# int argmax_k_1 = argmax_1 + p_1 - out_x_1 * s_1;
# indexes = (argmax_k_1 + k_1 * argmax_k_0);
def aux(argmax_k, argmax, p, out_x, s):
return 'int {} = {} + {} - {} * {};'.format(
argmax_k, argmax, p, out_x, s)
argmax_ks = conv_nd_kernel.vars('argmax_k', self.ndim)
argmax_k_decls = conv_nd_kernel.map_(
aux, argmax_ks, self.argmaxs, self.ps, out_xs, self.ss)
indexes_set = 'indexes = {};'.format(
conv_nd_kernel.muladdexp(self.ks[1:], argmax_ks[1:], argmax_ks[0]))
return '\n'.join(['out = maxval;'] + argmax_k_decls + [indexes_set])
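
# Editorial note: ``indexes`` stores the argmax position relative to the
# pooling window, row-major over the kernel dimensions; for a 2-D window,
# indexes = argmax_k_1 + k_1 * argmax_k_0. The backward kernel below
# recomputes the same encoding (``kx``) and matches it against ``indexes``
# to route each gradient to the element that produced the maximum.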
class MaxPoolingNDKernelBackward(pooling_nd_kernel.PoolingNDKernelBackward):
def name(self):
# max_pool_{N}d_bwd
return 'max'
def in_params(self):
# 2D: raw T gy, raw S indexes, int32 d_0, int32 d_1, int32 out_0,
# int32 out_1, int32 k_0, int32 k_1, int32 s_0, int32 s_1,
# int32 p_0, int32 p_1
return (['raw S indexes'], [])
def before(self):
return 'T val = 0;'
def main(self, offset, xs, out_xs):
# 2D: int kx = (x_1 - out_x_1 * s_1 + k_1 *
# (x_0 - out_x_0 * s_0 + k_0 * 0));
# if (indexes[offset_1] == kx) {
# val = val + gy[offset_1];
# }
def aux(x, out_x, s):
return '{} - {} * {}'.format(x, out_x, s)
w = conv_nd_kernel.Writer()
w.write('int kx = {};'.format(
conv_nd_kernel.muladdexp(self.ks, conv_nd_kernel.map_(
aux, xs, out_xs, self.ss), '0')))
w.write('if (indexes[{}] == kx) {{'.format(offset), 'inc')
w.write('val = val + gy[{}];'.format(offset))
w.write('}', 'dec')
return w.get()
def after(self, xs):
return 'gx = val;'
class MaxPoolingNDKernelForwardWithIndexes(
pooling_nd_kernel.PoolingNDKernelForward):
def name(self):
# max_index_pool_{N}d_fwd
return 'max_index'
def in_params(self):
# 2D: raw T in, int32 d_0, int32 d_1, int32 out_0, int32 out_1,
# int32 k_0, int32 k_1, int32 s_0, int32 s_1, int32 p_0, int32 p_1,
# raw S indexes
return ['raw S indexes']
def out_params(self):
# T out
return []
def _compile_max_x(self):
def aux(max_val, out_val, stride_val, pad_val, ksize_vals):
head = ksize_vals[0]
tail = ksize_vals[1:]
if tail:
command = 'int {} = max(0, {} * {} - {} + index / ({}) % {});'
return command.format(
max_val, out_val, stride_val, pad_val,
conv_nd_kernel.mulexp(tail), head)
else:
return 'int {} = max(0, {} * {} - {} + index % {});'.format(
max_val, out_val, stride_val, pad_val, head)
max_vals = conv_nd_kernel.vars('max', self.ndim)
out_vals = conv_nd_kernel.vars('out_x', self.ndim)
stride_vals = conv_nd_kernel.vars('s', self.ndim)
pad_vals = conv_nd_kernel.vars('p', self.ndim)
ksize_vals = conv_nd_kernel.vars('k', self.ndim)
offset_ks_decls = conv_nd_kernel.map_(
aux, max_vals, out_vals, stride_vals, pad_vals,
conv_nd_kernel.succ_sublists(ksize_vals))
return offset_ks_decls
def _compile_out(self):
def aux(offset, d_val, max_val, offset1):
return 'int {} = {} * ({} + {});'.format(
offset, d_val, max_val, offset1)
d_vals = conv_nd_kernel.vars('d', self.ndim)[1:] + [1]
max_vals = conv_nd_kernel.vars('max', self.ndim)
offsets = conv_nd_kernel.vars('offset', self.ndim)
offsets1 = ['d_0 * c0'] + offsets[:-1]
offset_strs = conv_nd_kernel.map_(
aux, offsets, d_vals, max_vals, offsets1)
offset_strs.append('out = in[offset_{}];'.format(self.ndim - 1))
return offset_strs
def _operation(self):
        # In the case of 2D, the kernel is the following:
        #
        # // result by self._compile_c0()
        # int c0 = i / (out_0 * out_1);
        #
        # // result by self._compile_out_x()
        # int out_x_0 = i / (out_1) % out_0;
        # int out_x_1 = i % out_1;
        #
        # int index = indexes[i];
        #
        # // result by self._compile_max_x()
        # int max_0 = max(0, out_x_0 * s_0 - p_0 + index / (k_1) % k_0);
        # int max_1 = max(0, out_x_1 * s_1 - p_1 + index % k_1);
        #
        # // result by self._compile_out()
        # int offset_0 = d_1 * (max_0 + d_0 * c0);
        # int offset_1 = 1 * (max_1 + offset_0);
        # out = in[offset_1];
c0 = self._compile_c0()
out_x, out_xs = self._compile_out_x()
max_x = self._compile_max_x()
index = ['int index = indexes[i];']
out = self._compile_out()
return '\n'.join(c0 + out_x + index + max_x + out)
class MaxPoolingNDKernelForwardWithIndexes1(MaxPoolingNDKernelForward):
def name(self):
# max_index1_pool_{N}d_fwd
return 'max_index1'
def in_params(self):
        # 2D: raw T in, int32 d_0, int32 d_1, int32 out_0, int32 out_1,
        #     int32 k_0, int32 k_1, int32 s_0, int32 s_1, int32 p_0,
        #     int32 p_1, raw T ggx
return ['raw T ggx']
def out_params(self):
# T out
return []
def after(self, out_xs):
# 2D: int offset_0 = d_1 * (argmax_0 + d_0 * c0);
# int offset_1 = 1 * (argmax_1 + offset_0);
# out = ggx[offset_1];
def aux(offset, d_val, max_val, offset1):
return 'int {} = {} * ({} + {});'.format(
offset, d_val, max_val, offset1)
d_vals = conv_nd_kernel.vars('d', self.ndim)[1:] + [1]
max_vals = conv_nd_kernel.vars('argmax', self.ndim)
offsets = conv_nd_kernel.vars('offset', self.ndim)
offsets1 = ['d_0 * c0'] + offsets[:-1]
offset_strs = conv_nd_kernel.map_(
aux, offsets, d_vals, max_vals, offsets1)
offset_strs.append('out = ggx[offset_{}];'.format(self.ndim - 1))
return '\n'.join(offset_strs)
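
# A small sketch (editorial addition) mirroring how these generators are
# consumed elsewhere in chainer: ``generate(ndim)`` returns the pieces that
# are handed to ``cuda.elementwise``.
if __name__ == '__main__':
    in_params, out_params, operation, name = \
        MaxPoolingNDKernelForward.generate(2)
    print(name)       # e.g. 'max_pool_2d_fwd'
    print(operation)  # generated CUDA C for the 2-D forward kernel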