| repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
|---|---|---|---|---|---|---|
| chainer | chainer-master/chainer/functions/loss/mean_squared_error.py |
import numpy
from chainer import function_node
import chainer.functions
from chainer.utils import type_check
class MeanSquaredError(function_node.FunctionNode):
"""Mean squared error (a.k.a. Euclidean loss) function."""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x0', 'x1'))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
in_types[0].shape == in_types[1].shape
)
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
diff = (inputs[0] - inputs[1]).ravel()
return numpy.array(diff.dot(diff) / diff.size, dtype=diff.dtype),
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
diff = (inputs[0] - inputs[1]).ravel()
return diff.dot(diff) / diff.dtype.type(diff.size),
def backward(self, indexes, gy):
x0, x1 = self.get_retained_inputs()
ret = []
diff = x0 - x1
gy0 = chainer.functions.broadcast_to(gy[0], diff.shape)
gx0 = gy0 * diff * (2. / diff.size)
if 0 in indexes:
ret.append(gx0)
if 1 in indexes:
ret.append(-gx0)
return ret
def mean_squared_error(x0, x1):
"""Mean squared error function.
The function computes the mean squared error between two variables. The
mean is taken over the minibatch. Args ``x0`` and ``x1`` must have the
same dimensions. Note that the error is not scaled by 1/2.
Args:
x0 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
x1 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable:
A variable holding an array representing the mean squared
error of two inputs.
.. admonition:: Example
1D array examples:
>>> x = np.array([1, 2, 3, 4]).astype(np.float32)
>>> y = np.array([0, 0, 0, 0]).astype(np.float32)
>>> F.mean_squared_error(x, y)
variable(7.5)
>>> x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
>>> y = np.array([7, 8, 9, 10, 11, 12]).astype(np.float32)
>>> F.mean_squared_error(x, y)
variable(36.)
2D array example:
In this example, there are 4 elements, and thus 4 errors
>>> x = np.array([[1, 2], [3, 4]]).astype(np.float32)
>>> y = np.array([[8, 8], [8, 8]]).astype(np.float32)
>>> F.mean_squared_error(x, y)
variable(31.5)
3D array example:
In this example, there are 8 elements, and thus 8 errors
>>> x = np.reshape(np.array([1, 2, 3, 4, 5, 6, 7, 8]), (2, 2, 2))
>>> y = np.reshape(np.array([8, 8, 8, 8, 8, 8, 8, 8]), (2, 2, 2))
>>> x = x.astype(np.float32)
>>> y = y.astype(np.float32)
>>> F.mean_squared_error(x, y)
variable(17.5)
"""
return MeanSquaredError().apply((x0, x1))[0]
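# Hedged example added for illustration (not part of the original Chainer
# source). A minimal sketch of how mean_squared_error averages over all
# elements; the helper name and the concrete inputs are assumptions.
def _mean_squared_error_example():
    import numpy as np
    x = np.array([[1., 2.], [3., 4.]], dtype=np.float32)
    y = np.zeros_like(x)
    # Squared differences are 1, 4, 9 and 16; their mean is 30 / 4 = 7.5,
    # matching the 1D docstring example above.
    return mean_squared_error(x, y)  # expected: variable(7.5)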
| 2,966 | 31.25 | 75 | py |
| chainer | chainer-master/chainer/functions/loss/negative_sampling.py |
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import argument
from chainer.utils import precision
from chainer.utils import type_check
def _sigmoid_grad(x, y, gy):
return chainer.functions.activation.sigmoid.SigmoidGrad((x,)).apply(
(y, gy))[0]
class NegativeSamplingFunction(function_node.FunctionNode):
ignore_label = -1
samples = None
def __init__(self, sampler, sample_size, reduce='sum'):
if reduce not in ('sum', 'no'):
raise ValueError(
'only \'sum\' and \'no\' are valid for \'reduce\', but \'%s\' '
'is given' % reduce)
self.sampler = sampler
self.sample_size = sample_size
self.reduce = reduce
self.wx = None
def _make_samples(self, t):
size = int(t.shape[0])
# first one is the positive, and others are sampled negatives
samples = self.sampler((size, self.sample_size + 1))
samples = backend.from_chx(samples)
samples[:, 0] = t
return samples
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 't', 'W'))
x_type, t_type, w_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 2,
t_type.dtype == numpy.int32,
t_type.ndim == 1,
x_type.shape[0] == t_type.shape[0],
w_type.dtype == x_type.dtype,
w_type.ndim == 2,
)
# Avoid fp16 computation to keep the precision in reduction operations.
@precision._fp16_mixed_precision_helper
def forward_cpu(self, inputs):
self.retain_inputs((0, 1, 2))
x, t, W = inputs
self.ignore_mask = (t != self.ignore_label)
samples = self._make_samples(t)
w = W[samples]
wx = numpy.einsum(
'ij,ikj->ik', x[self.ignore_mask], w[self.ignore_mask])
wx[:, 0] *= -1
loss = numpy.zeros(len(x), x.dtype)
loss[self.ignore_mask] = numpy.sum(numpy.logaddexp(wx, 0), axis=1)
if self.reduce == 'sum':
loss = numpy.array(loss.sum(), x.dtype)
self.samples = samples
return loss,
@precision._fp16_mixed_precision_helper
def forward_gpu(self, inputs):
self.retain_inputs((0, 1, 2))
x, t, W = inputs
self.ignore_mask = (t != self.ignore_label)
samples = self._make_samples(t)
n_in = x.shape[1]
self.wx = cuda.elementwise(
'raw T W, raw T x, bool mask, S k, int32 c, int32 m', 'T wx',
'''
T f = 0;
if (mask == 1) {
for (int j = 0; j < c; ++j) {
int x_ind[] = {(i / m), j};
int w_ind[] = {k, j};
f += x[x_ind] * W[w_ind];
}
}
wx = f;
''',
'negative_sampling_wx'
)(W, x, self.ignore_mask[:, None], samples, n_in,
self.sample_size + 1)
loss = cuda.elementwise(
'T wx, int32 c, int32 m, bool mask', 'T y',
'''
if (mask) {
T f = wx;
if (i % m == 0) {
f = -f;
}
if (f < 0) {
y = __logf(1 + __expf(f));
} else {
y = f + __logf(1 + __expf(-f));
}
} else {
y = 0;
}
''',
'negative_sampling_forward'
)(self.wx, n_in, self.sample_size + 1, self.ignore_mask[:, None])
if self.reduce == 'sum':
loss = loss.sum()
else: # 'no':
loss = loss.sum(axis=1)
self.samples = samples
return loss,
def backward(self, indexes, grad_outputs):
x, t, W = self.get_retained_inputs()
gy, = grad_outputs
return NegativeSamplingFunctionGrad(
self.reduce, self.ignore_mask, self.sample_size, self.samples,
self.wx).apply((x, W, gy))
class NegativeSamplingFunctionGrad(function_node.FunctionNode):
def __init__(self, reduce, ignore_mask, sample_size, samples, wx):
self.reduce = reduce
self.ignore_mask = ignore_mask
self.sample_size = sample_size
self.samples = samples
self.wx = wx
def forward_cpu(self, inputs):
self.retain_inputs((0, 1, 2))
x, W, gloss = inputs
samples = self.samples
gx = numpy.zeros_like(x)
gW = numpy.zeros_like(W)
for i in numpy.arange(len(self.ignore_mask))[self.ignore_mask]:
ix = x[i]
k = samples[i]
if self.reduce == 'sum':
igy = gloss
else:
igy = gloss[i]
w = W[k]
f = w.dot(ix)
# g == -y * gloss / (1 + exp(yf))
f[0] *= -1
g = igy / (1 + numpy.exp(-f))
g[0] *= -1
gx[i] = g.dot(w)
for ik, ig in six.moves.zip(k, g):
gW[ik] += ig * ix
return gx, None, gW
def forward_gpu(self, inputs):
utils.nondeterministic('atomicAdd')
self.retain_inputs((0, 1, 2))
x, W, gy = inputs
if self.reduce == 'no':
gy = gy[:, None]
samples = self.samples
wx = self.wx.astype(x.dtype, copy=False)
g = cuda.elementwise(
'T wx, T gy, int32 m', 'T g',
'''
T y;
if (i % m == 0) {
y = 1;
} else {
y = -1;
}
g = -y * gy / (1.0f + __expf(wx * y));
''',
'negative_sampling_calculate_g'
)(wx, gy, self.sample_size + 1)
cupy = cuda.cupy
gx = cupy.zeros_like(x)
n_in = x.shape[1]
cuda.elementwise(
'raw T g, raw T W, bool mask, raw S k, int32 c, int32 m', 'T gx',
'''
int d = i / c;
T w = 0;
if (mask == 1){
for (int j = 0; j < m; ++j) {
w += g[d * m + j] * W[k[d * m + j] * c + i % c];
}
}
gx = w;
''',
'negative_sampling_calculate_gx'
)(g, W, self.ignore_mask[:, None], samples, n_in,
self.sample_size + 1, gx)
gW = cupy.zeros_like(W)
cuda.elementwise(
'T g, raw T x, S k, bool mask, int32 c, int32 m',
'raw T gW',
'''
T gi = g;
if (mask == 1) {
for (int j = 0; j < c; ++j) {
atomicAdd(&gW[k * c + j], gi * x[(i / m) * c + j]);
}
}
''',
'negative_sampling_calculate_gw'
)(g, x, samples, self.ignore_mask[:, None], n_in,
self.sample_size + 1, gW)
return gx, None, gW
def backward(self, indexes, grad_outputs):
x, W, gy = self.get_retained_inputs()
device = backend.get_device_from_array(x.data)
xp = device.xp
if 0 in indexes:
gx = chainer.Variable(xp.zeros_like(x.data))
if 1 in indexes:
gW = chainer.Variable(xp.zeros_like(W.data))
if 2 in indexes:
ggy = chainer.Variable(xp.zeros_like(gy.data))
ggx, _, ggW = grad_outputs
pos_neg_mask = xp.ones(self.sample_size + 1)
pos_neg_mask[0] *= -1
with chainer.using_device(device):
arange = xp.arange(len(self.ignore_mask))
for i in arange[self.ignore_mask]:
# Partial forward pass to obtain intermediate `Variable`s
ix = x[i]
k = self.samples[i]
if self.reduce == 'sum':
igy = gy
else:
igy = gy[i]
w = W[k]
f = chainer.functions.flatten(
chainer.functions.matmul(w, ix[:, None])) * pos_neg_mask
sigf = chainer.functions.sigmoid(f)
g = chainer.functions.broadcast_to(igy, f.shape) * sigf \
* pos_neg_mask
dgW_dg = chainer.functions.flatten(
chainer.functions.matmul(ggW[k], ix[:, None])) * pos_neg_mask
dgW_df = chainer.functions.broadcast_to(igy, f.shape) \
* _sigmoid_grad(f, sigf, dgW_dg) * pos_neg_mask
dgx_dg = chainer.functions.flatten(
chainer.functions.matmul(ggx[i][None, :], w, transb=True))
dgx_df = chainer.functions.broadcast_to(igy, f.shape) \
* _sigmoid_grad(f, sigf, dgx_dg)
if 0 in indexes:
# derivative of gx
dgx = chainer.functions.matmul(w, dgx_df[:, None], transa=True)
# derivative of gW
dgx += chainer.functions.matmul(g[None, :], ggW[k]).T
dgx += chainer.functions.matmul(
w, dgW_df[:, None], transa=True)
gx = chainer.functions.scatter_add(
gx, i, chainer.functions.flatten(dgx))
if 1 in indexes:
# derivative of gx
shape = ggx[i].shape
for ik, ig, idgx_df in six.moves.zip(k, g, dgx_df):
ig = chainer.functions.broadcast_to(ig, shape)
idgx_df = chainer.functions.broadcast_to(idgx_df, shape)
gW = chainer.functions.scatter_add(
gW, ik, ig * ggx[i] + idgx_df * ix)
# derivative of gW
gW = chainer.functions.scatter_add(
gW, k,
chainer.functions.matmul(dgW_df[:, None], ix[None, :]))
if 2 in indexes:
dgx_dg *= pos_neg_mask
dggy = chainer.functions.sum((dgx_dg + dgW_dg) * sigf)
if self.reduce == 'sum':
ggy += dggy
else:
ggy = chainer.functions.scatter_add(ggy, i, dggy)
ret = []
if 0 in indexes:
ret.append(gx)
if 1 in indexes:
ret.append(gW)
if 2 in indexes:
ret.append(ggy)
return ret
def negative_sampling(x, t, W, sampler, sample_size, reduce='sum', **kwargs):
"""negative_sampling(x, t, W, sampler, sample_size, reduce='sum', *, \
return_samples=False)
Negative sampling loss function.
In natural language processing, especially language modeling, the number of
words in a vocabulary can be very large.
Therefore, you need to spend a lot of time calculating the gradient of the
embedding matrix.
By using the negative sampling trick you only need to calculate the
gradient for a few sampled negative examples.
The loss is defined as follows.
.. math::
f(x, p) = - \\log \\sigma(x^\\top w_p) - \\
k E_{i \\sim P(i)}[\\log \\sigma(- x^\\top w_i)]
where :math:`\\sigma(\\cdot)` is a sigmoid function, :math:`w_i` is the
weight vector for the word :math:`i`, and :math:`p` is a positive example.
It is approximated with a set :math:`N` of :math:`k` negative examples
sampled from the probability distribution :math:`P(i)`.
.. math::
f(x, p) \\approx - \\log \\sigma(x^\\top w_p) - \\
\\sum_{n \\in N} \\log \\sigma(-x^\\top w_n)
Each sample of :math:`N` is drawn from the word distribution
:math:`P(w) = \\frac{1}{Z} c(w)^\\alpha`, where :math:`c(w)` is the
unigram count of the word :math:`w`, :math:`\\alpha` is a hyper-parameter,
and :math:`Z` is the normalization constant.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Batch of input vectors.
t (:class:`~chainer.Variable` or :ref:`ndarray`):
Vector of ground truth labels.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Weight matrix.
sampler (~types.FunctionType): Sampling function. It takes a shape and
returns an integer array of the shape. Each element of this array
is a sample from the word distribution.
A :class:`~chainer.utils.WalkerAlias` object built with the power
distribution of word frequency is recommended.
sample_size (int): Number of samples.
reduce (str): Reduction option. Its value must be either
``'sum'`` or ``'no'``. Otherwise, :class:`ValueError` is raised.
return_samples (bool):
If ``True``, the sample array is also returned.
The sample array is a
:math:`(\\text{batch_size}, \\text{sample_size} + 1)`-array of
integers whose first column is fixed to the ground truth labels
and the other columns are drawn from the ``sampler``.
Returns:
~chainer.Variable or tuple:
If ``return_samples`` is ``False`` (default), the output
variable holding the loss value(s) calculated by the
above equation is returned. Otherwise, a tuple of the output
variable and the sample array is returned.
If ``reduce`` is ``'no'``, the output variable holds an array of
shape ``(batch_size,)``, one loss value per example in the minibatch.
If it is ``'sum'``, the output variable holds a scalar value.
See: `Distributed Representations of Words and Phrases and their
Compositionality <https://arxiv.org/abs/1310.4546>`_
.. seealso::
:class:`~chainer.links.NegativeSampling` to manage the model parameter
``W``.
"""
return_samples = False
if kwargs:
return_samples, = argument.parse_kwargs(
kwargs, ('return_samples', return_samples))
func = NegativeSamplingFunction(sampler, sample_size, reduce)
out = func.apply((x, t, W))[0]
if return_samples:
return out, func.samples
return out
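# Hedged example added for illustration (not part of the original Chainer
# source). A minimal sketch of the negative_sampling call signature; the
# deterministic toy sampler and the zero weight matrix are assumptions
# chosen so the loss can be verified by hand (a real model would use a
# WalkerAlias sampler built from word frequencies).
def _negative_sampling_example():
    import numpy as np
    x = np.ones((2, 3), dtype=np.float32)   # batch of 2, 3-dim features
    t = np.array([0, 1], dtype=np.int32)    # ground-truth word ids
    W = np.zeros((4, 3), dtype=np.float32)  # 4-word vocabulary
    def sampler(shape):
        # Stand-in sampler: always draws word 2 as the negative example.
        return np.full(shape, 2, dtype=np.int32)
    loss = negative_sampling(x, t, W, sampler, sample_size=2)
    # Every score w^T x is 0, so each of the 3 terms per example costs
    # log(1 + e^0) = log 2; with reduce='sum' the total is 6 log 2,
    # about 4.159.
    return loss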
| 13,928 | 32.085511 | 79 | py |
| chainer | chainer-master/chainer/functions/loss/vae.py |
import math
from chainer.functions.activation import softplus
from chainer.functions.math import average
from chainer.functions.math import exponential
from chainer.functions.math import sum
def gaussian_kl_divergence(mean, ln_var, reduce='sum'):
"""Computes the KL-divergence of Gaussian variables from the standard one.
Given two variable ``mean`` representing :math:`\\mu` and ``ln_var``
representing :math:`\\log(\\sigma^2)`, this function calculates
the KL-divergence in elementwise manner between the given multi-dimensional
Gaussian :math:`N(\\mu, S)` and the standard Gaussian :math:`N(0, I)`
.. math::
D_{\\mathbf{KL}}(N(\\mu, S) \\| N(0, I)),
where :math:`S` is a diagonal matrix such that :math:`S_{ii} = \\sigma_i^2`
and :math:`I` is an identity matrix.
The output is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds the elementwise
loss values. If it is ``'sum'`` or ``'mean'``, loss values are summed up
or averaged respectively.
Args:
mean (:class:`~chainer.Variable` or :ref:`ndarray`):
A variable representing mean of given
gaussian distribution, :math:`\\mu`.
ln_var (:class:`~chainer.Variable` or :ref:`ndarray`):
A variable representing logarithm of
variance of given gaussian distribution, :math:`\\log(\\sigma^2)`.
reduce (str): Reduction option. Its value must be either
``'sum'``, ``'mean'`` or ``'no'``. Otherwise, :class:`ValueError`
is raised.
Returns:
~chainer.Variable:
A variable representing KL-divergence between
given gaussian distribution and the standard gaussian.
If ``reduce`` is ``'no'``, the output variable holds array
whose shape is same as one of (hence both of) input variables.
If it is ``'sum'`` or ``'mean'``, the output variable holds a
scalar value.
"""
if reduce not in ('sum', 'mean', 'no'):
raise ValueError(
'only \'sum\', \'mean\' and \'no\' are valid for \'reduce\', but '
'\'%s\' is given' % reduce)
var = exponential.exp(ln_var)
mean_square = mean * mean
loss = (mean_square + var - ln_var - 1) * 0.5
if reduce == 'sum':
return sum.sum(loss)
elif reduce == 'mean':
return average.average(loss)
else:
return loss
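# Hedged example added for illustration (not part of the original Chainer
# source). A minimal sketch of the closed-form KL term
# 0.5 * (mu^2 + sigma^2 - log(sigma^2) - 1) that gaussian_kl_divergence
# sums; the inputs are assumptions.
def _gaussian_kl_divergence_example():
    import numpy as np
    mean = np.array([[1.0, 0.0]], dtype=np.float32)
    ln_var = np.zeros((1, 2), dtype=np.float32)  # sigma^2 = 1 everywhere
    # Elementwise losses are 0.5 * (1 + 1 - 0 - 1) = 0.5 and 0, so the
    # default reduce='sum' gives 0.5.
    return gaussian_kl_divergence(mean, ln_var)  # expected: variable(0.5)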
def bernoulli_nll(x, y, reduce='sum'):
"""Computes the negative log-likelihood of a Bernoulli distribution.
This function calculates the negative log-likelihood of a Bernoulli
distribution.
.. math::
-\\log B(x; p) = -\\sum_i \\{x_i \\log(p_i) + \
(1 - x_i)\\log(1 - p_i)\\},
where :math:`p = \\sigma(y)`, :math:`\\sigma(\\cdot)` is a sigmoid
function, and :math:`B(x; p)` is a Bernoulli distribution.
The output is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds the elementwise
loss values. If it is ``'sum'`` or ``'mean'``, loss values are summed up
or averaged respectively.
.. note::
As this function applies a sigmoid internally, you can pass the output
of a fully-connected layer (i.e. :class:`Linear`) to this function
directly.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
y (:class:`~chainer.Variable` or :ref:`ndarray`): A variable
representing the parameter of Bernoulli distribution.
reduce (str): Reduction option. Its value must be either
``'sum'``, ``'mean'`` or ``'no'``. Otherwise, :class:`ValueError`
is raised.
Returns:
~chainer.Variable:
A variable representing the negative log-likelihood.
If ``reduce`` is ``'no'``, the output variable holds array
whose shape is same as one of (hence both of) input variables.
If it is ``'sum'`` or ``'mean'``, the output variable holds a
scalar value.
"""
if reduce not in ('sum', 'mean', 'no'):
raise ValueError(
'only \'sum\', \'mean\' and \'no\' are valid for \'reduce\', but '
'\'%s\' is given' % reduce)
loss = softplus.softplus(y) - x * y
if reduce == 'sum':
return sum.sum(loss)
elif reduce == 'mean':
return average.average(loss)
else:
return loss
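# Hedged example added for illustration (not part of the original Chainer
# source). A minimal sketch showing that ``y`` is a pre-sigmoid logit; the
# inputs are assumptions.
def _bernoulli_nll_example():
    import numpy as np
    x = np.array([[1.0, 0.0]], dtype=np.float32)  # observed binary values
    y = np.zeros((1, 2), dtype=np.float32)        # logits, so p = 0.5
    # loss = softplus(y) - x * y = log 2 per element; reduce='sum' gives
    # 2 log 2, about 1.386.
    return bernoulli_nll(x, y)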
def gaussian_nll(x, mean, ln_var, reduce='sum'):
"""Computes the negative log-likelihood of a Gaussian distribution.
Given two variable ``mean`` representing :math:`\\mu` and ``ln_var``
representing :math:`\\log(\\sigma^2)`, this function computes in
elementwise manner the negative log-likelihood of :math:`x` on a
Gaussian distribution :math:`N(\\mu, S)`,
.. math::
-\\log N(x; \\mu, \\sigma^2) =
\\log\\left(\\sqrt{(2\\pi)^D |S|}\\right) +
\\frac{1}{2}(x - \\mu)^\\top S^{-1}(x - \\mu),
where :math:`D` is a dimension of :math:`x` and :math:`S` is a diagonal
matrix where :math:`S_{ii} = \\sigma_i^2`.
The output is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds the elementwise
loss values. If it is ``'sum'`` or ``'mean'``, loss values are summed up
or averaged respectively.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
mean (:class:`~chainer.Variable` or :ref:`ndarray`): A variable
representing mean of a Gaussian distribution, :math:`\\mu`.
ln_var (:class:`~chainer.Variable` or :ref:`ndarray`): A variable
representing logarithm of variance of a Gaussian distribution,
:math:`\\log(\\sigma^2)`.
reduce (str): Reduction option. Its value must be either
``'sum'``, ``'mean'`` or ``'no'``. Otherwise, :class:`ValueError`
is raised.
Returns:
~chainer.Variable:
A variable representing the negative log-likelihood.
If ``reduce`` is ``'no'``, the output variable holds array
whose shape is same as one of (hence both of) input variables.
If it is ``'sum'`` or ``'mean'``, the output variable holds a
scalar value.
"""
if reduce not in ('sum', 'mean', 'no'):
raise ValueError(
'only \'sum\', \'mean\' and \'no\' are valid for \'reduce\', but '
'\'%s\' is given' % reduce)
x_prec = exponential.exp(-ln_var)
x_diff = x - mean
x_power = (x_diff * x_diff) * x_prec * -0.5
loss = (ln_var + math.log(2 * math.pi)) / 2 - x_power
if reduce == 'sum':
return sum.sum(loss)
elif reduce == 'mean':
return average.average(loss)
else:
return loss
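# Hedged example added for illustration (not part of the original Chainer
# source). A minimal sketch evaluated at x = mean, so the quadratic term
# vanishes and only the log-normalizer remains; the inputs are assumptions.
def _gaussian_nll_example():
    import numpy as np
    x = np.zeros((1, 2), dtype=np.float32)
    mean = np.zeros((1, 2), dtype=np.float32)
    ln_var = np.zeros((1, 2), dtype=np.float32)  # sigma^2 = 1
    # Each element contributes 0.5 * log(2 * pi), about 0.919, so
    # reduce='sum' returns about 1.838.
    return gaussian_nll(x, mean, ln_var)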
| 6,812 | 36.85 | 79 | py |
| chainer | chainer-master/chainer/functions/loss/hinge.py |
import numpy
from chainer import backend
from chainer.backends import cuda
from chainer import function
from chainer.utils import type_check
def _hinge_fwd_kernel():
return cuda.elementwise(
'S t', 'raw T bottom_diff',
'int ind[] = {i, t}; bottom_diff[ind] *= -1',
'hinge_fwd')
class Hinge(function.Function):
"""Hinge loss."""
def __init__(self, norm='L1', reduce='mean'):
if norm in ['L1', 'L2']:
self.norm = norm
else:
raise NotImplementedError('norm should be either \'L1\' or \'L2\'')
if reduce in ['mean', 'no']:
self.reduce = reduce
else:
raise ValueError(
'only \'mean\' and \'no\' are valid for \'reduce\', but '
'\'%s\' is given' % reduce)
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 't'))
x_type, t_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
t_type.dtype.kind == 'i',
x_type.ndim == 2,
t_type.ndim == 1,
x_type.shape[0] == t_type.shape[0],
)
def forward_cpu(self, inputs):
x, t = inputs
num = len(x)
self.bottom_diff = numpy.copy(x)
self.bottom_diff[numpy.arange(num), t] *= -1
self.bottom_diff = numpy.maximum(0, 1 + self.bottom_diff)
if self.norm == 'L1':
loss = self.bottom_diff
elif self.norm == 'L2':
loss = self.bottom_diff ** 2
else:
raise NotImplementedError()
if self.reduce == 'mean':
loss = loss.sum() / num
return numpy.array(loss, dtype=x.dtype),
def forward_gpu(self, inputs):
x, t = inputs
num = x.dtype.type(len(x))
self.bottom_diff = cuda.cupy.maximum(
0, 1 + _hinge_fwd_kernel()(t, x.copy()))
if self.norm == 'L1':
loss = self.bottom_diff
elif self.norm == 'L2':
loss = self.bottom_diff ** 2
else:
raise NotImplementedError()
if self.reduce == 'mean':
loss = loss.sum() / num
return loss,
def backward_cpu(self, inputs, grad_outputs):
t, gloss = inputs[1], grad_outputs[0]
if self.reduce == 'mean':
gloss /= len(t)
self.bottom_diff[numpy.arange(len(t)), t] *= -1
if self.norm == 'L1':
gx = gloss * numpy.sign(self.bottom_diff)
elif self.norm == 'L2':
gx = 2 * gloss * self.bottom_diff
else:
raise NotImplementedError()
return gx, None
def backward_gpu(self, inputs, grad_outputs):
xp = backend.get_array_module(*inputs)
t, gloss = inputs[1], grad_outputs[0]
if self.reduce == 'mean':
gloss /= len(t)
self.bottom_diff = _hinge_fwd_kernel()(t, self.bottom_diff)
if self.norm == 'L1':
gx = gloss * xp.sign(self.bottom_diff)
elif self.norm == 'L2':
gx = 2 * gloss * self.bottom_diff
else:
raise NotImplementedError()
return gx, None
def hinge(x, t, norm='L1', reduce='mean'):
"""Computes the hinge loss for a one-of-many classification task.
.. math::
L = \\frac{1}{N} \\sum_{n=1}^N \\sum_{k=1}^K \\left[
\\max(0, 1 - \\delta\\{t_n = k\\} x_{nk}) \\right]^p
where :math:`N` denotes the batch size and :math:`K` is the number of
classes of interest,
.. math::
\\delta \\{ {\\rm condition} \\} = \\left \\{ \\begin{array}{cc}
1 & {\\rm if~condition\\ is\\ true} \\\\
-1 & {\\rm otherwise,}
\\end{array} \\right.
and
.. math::
p = \\left \\{ \\begin{array}{cc}
1 & {\\rm if~norm} = {\\rm L1} \\\\
2 & {\\rm if~norm} = {\\rm L2.}
\\end{array} \\right.
Let the hinge loss function :math:`l(x, \\delta)` be
:math:`\\left[\\max(0, 1 - \\delta x) \\right]^p`.
When :math:`x` and :math:`\\delta` have the same sign (meaning
:math:`x` predicts the proper score for classification) and
:math:`|x| \\geq 1`, the hinge loss :math:`l(x, \\delta) = 0`, but when
they have opposite sign, :math:`l(x, \\delta)` increases linearly
with :math:`x`.
The output is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds the elementwise
loss values. If it is ``'mean'``, it takes the mean of loss values.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
The shape of ``x`` should be (:math:`N`, :math:`K`).
t (:class:`~chainer.Variable` or :ref:`ndarray`):
The :math:`N`-dimensional label vector with values
:math:`t_n \\in \\{0, 1, 2, \\dots, K-1\\}`.
The shape of ``t`` should be (:math:`N`,).
norm (string): Specifies norm type. Either ``'L1'`` or ``'L2'`` is
acceptable.
reduce (str): Reduction option. Its value must be either
``'mean'`` or ``'no'``. Otherwise, :class:`ValueError` is raised.
Returns:
~chainer.Variable:
A variable object holding a scalar array of the
hinge loss :math:`L`.
If ``reduce`` is ``'no'``, the output variable holds array
whose shape is same as one of (hence both of) input variables.
If it is ``'mean'``, the output variable holds a scalar value.
.. admonition:: Example
In this case, the batch size ``N`` is 2 and the number of classes ``K``
is 3.
>>> x = np.array([[-2.0, 3.0, 0.5],
... [5.0, 2.0, -0.5]]).astype(np.float32)
>>> x
array([[-2. , 3. , 0.5],
[ 5. , 2. , -0.5]], dtype=float32)
>>> t = np.array([1, 0]).astype(np.int32)
>>> t
array([1, 0], dtype=int32)
>>> F.hinge(x, t)
variable(2.5)
>>> F.hinge(x, t, reduce='no')
variable([[0. , 0. , 1.5],
[0. , 3. , 0.5]])
>>> F.hinge(x, t, norm='L2')
variable(5.75)
"""
return Hinge(norm, reduce)(x, t)
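# Hedged example added for illustration (not part of the original Chainer
# source). A small sketch relating reduce='no' to the reduced L2 value
# quoted in the docstring example above; the inputs repeat that example.
def _hinge_example():
    import numpy as np
    x = np.array([[-2.0, 3.0, 0.5], [5.0, 2.0, -0.5]], dtype=np.float32)
    t = np.array([1, 0], dtype=np.int32)
    # reduce='no' yields [[0, 0, 1.5], [0, 3, 0.5]]; with norm='L2' each
    # entry is squared and the sum is averaged over the batch:
    # (2.25 + 9 + 0.25) / 2 = 5.75.
    return hinge(x, t, norm='L2')  # expected: variable(5.75)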
| 6,291 | 31.43299 | 79 | py |
| chainer | chainer-master/chainer/functions/loss/huber_loss.py |
import chainer
from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class HuberLoss(function_node.FunctionNode):
def __init__(self, delta, reduce='sum_along_second_axis'):
self.delta = delta
if reduce not in ('sum_along_second_axis', 'no'):
raise ValueError(
'Only \'sum_along_second_axis\' and \'no\' are valid '
'for \'reduce\', but \'%s\' is given' % reduce)
self.reduce = reduce
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 't'))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
in_types[0].shape == in_types[1].shape
)
def forward(self, inputs):
self.retain_inputs((0, 1))
xp = backend.get_array_module(*inputs)
x0, x1 = inputs
dtype = x0.dtype
linear_part = utils.force_array(x0 - x1, dtype)
delta = dtype.type(self.delta)
xp.abs(linear_part, out=linear_part)
square_part = utils.force_array(xp.square(linear_part), dtype)
linear_part *= 2 * delta
linear_part -= delta * delta
xp.maximum(linear_part, delta * delta, out=linear_part)
xp.minimum(square_part, linear_part, out=square_part)
y = square_part
y *= 0.5
if self.reduce == 'sum_along_second_axis':
return y.sum(axis=1),
else:
return y,
def backward(self, indexes, grad_outputs):
x0, x1 = self.get_retained_inputs()
gy, = grad_outputs
diff = x0 - x1
delta = self.delta
gx = chainer.functions.clip(diff, -delta, delta)
if self.reduce == 'sum_along_second_axis':
gy = chainer.functions.expand_dims(gy, 1)
gx = chainer.functions.broadcast_to(gy, gx.shape) * gx
return gx, -gx
def huber_loss(x, t, delta, reduce='sum_along_second_axis'):
"""Computes the Huber loss.
The Huber loss is similar to the :func:`mean_squared_error` but is less
sensitive to outliers in the data. It is defined as
.. math::
L_{\\delta}(a) = \\left \\{ \\begin{array}{cc}
\\frac{1}{2} a^2 & {\\rm if~|a| \\leq \\delta} \\\\
\\delta (|a| - \\frac{1}{2} \\delta) & {\\rm otherwise,}
\\end{array} \\right.
where :math:`a = x - t` is the difference between the input :math:`x`
and the target :math:`t`.
The loss is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds the elementwise
loss values. If it is ``'sum_along_second_axis'``, loss values are
summed up along the second axis (i.e. ``axis=1``).
See: `Huber loss - Wikipedia <https://en.wikipedia.org/wiki/Huber_loss>`_.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
The shape of ``x`` should be (:math:`N`, :math:`K`, ...) if
``reduce='sum_along_second_axis'``.
t (:class:`~chainer.Variable` or :ref:`ndarray`): Target variable for
regression. The shape of ``t`` should be
(:math:`N`, :math:`K`, ...) if ``reduce='sum_along_second_axis'``.
delta (float): Constant variable for Huber loss function
as used in definition.
reduce (str): Reduction option. Its value must be either
``'sum_along_second_axis'`` or ``'no'``. Otherwise,
:class:`ValueError` is raised.
Returns:
~chainer.Variable:
A variable object holding a scalar array of the
Huber loss :math:`L_{\\delta}`.
If ``reduce`` is ``'no'``, the output variable holds array
whose shape is same as one of (hence both of) input variables.
If it is ``'sum_along_second_axis'``, the shape of the array
is same as the input variables, except the second axis is removed.
.. admonition:: Example
Example without reduction, in which case the output ``y`` will have the
same shape as the inputs ``x`` and ``t``.
>>> import numpy as np
>>> from chainer import functions as F
>>> x = np.array([[-2.0, 3.0, 0.5], [5.0, 2.0, -0.5]]).\
astype(np.float32)
>>> x.shape
(2, 3)
>>> t = np.array([[-2.0, 3.0, 0.0], [10.0, 2.0, -0.5]]).\
astype(np.float32)
>>> t.shape
(2, 3)
>>> y = F.huber_loss(x, t, delta=1.0, reduce='no')
>>> y.shape
(2, 3)
>>> y
variable([[0. , 0. , 0.125],
[4.5 , 0. , 0. ]])
Example with reduction along the second axis.
>>> y = F.huber_loss(x, t, delta=1.0, reduce='sum_along_second_axis')
>>> y.shape
(2,)
>>> y
variable([0.125, 4.5 ])
"""
return HuberLoss(delta=delta, reduce=reduce).apply((x, t))[0]
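# Hedged example added for illustration (not part of the original Chainer
# source). A minimal sketch of the two branches of the Huber loss with
# delta = 1; the inputs are assumptions.
def _huber_loss_example():
    import numpy as np
    x = np.array([[0.5, 5.0]], dtype=np.float32)
    t = np.zeros_like(x)
    # |0.5| <= 1 takes the quadratic branch: 0.5 * 0.5**2 = 0.125;
    # |5.0| > 1 takes the linear branch: 1 * (5 - 0.5) = 4.5.
    return huber_loss(x, t, delta=1.0, reduce='no')  # values [[0.125, 4.5]]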
| 4,951 | 34.120567 | 79 | py |
| chainer | chainer-master/chainer/functions/loss/triplet.py |
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class Triplet(function_node.FunctionNode):
"""Triplet loss function."""
def __init__(self, margin, reduce='mean'):
if margin <= 0:
raise ValueError('margin should be positive value.')
self.margin = margin
if reduce not in ('mean', 'no'):
raise ValueError(
'only \'mean\' and \'no\' are valid for \'reduce\', but '
'\'%s\' is given' % reduce)
self.reduce = reduce
def check_type_forward(self, in_types):
type_check._argname(in_types, ('anchor', 'positive', 'negative'))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
in_types[0].dtype == in_types[2].dtype,
in_types[0].shape == in_types[1].shape,
in_types[0].shape == in_types[2].shape,
in_types[0].shape[0] > 0
)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
anchor, positive, negative = inputs
dist = xp.sum(
(anchor - positive) ** 2 - (anchor - negative) ** 2,
axis=1) + self.margin
self.dist_hinge = xp.maximum(dist, 0)
if self.reduce == 'mean':
N = anchor.shape[0]
loss = xp.sum(self.dist_hinge) / N
else:
loss = self.dist_hinge
self.retain_inputs((0, 1, 2))
return xp.array(loss, dtype=anchor.dtype),
def backward(self, indexes, grad_outputs):
anchor, positive, negative = self.get_retained_inputs()
N = anchor.shape[0]
x_dim = anchor.shape[1]
xp = backend.get_array_module(anchor)
tmp = xp.repeat(self.dist_hinge[:, None], x_dim, axis=1)
mask = xp.array(tmp > 0, dtype=anchor.dtype)
gy, = grad_outputs
if self.reduce == 'mean':
g = gy / N
else:
g = gy[:, None]
tmp = 2 * chainer.functions.broadcast_to(g, mask.shape) * mask
ret = []
if 0 in indexes:
ret.append(tmp * (negative - positive))
if 1 in indexes:
ret.append(tmp * (positive - anchor))
if 2 in indexes:
ret.append(tmp * (anchor - negative))
return ret
def triplet(anchor, positive, negative, margin=0.2, reduce='mean'):
"""Computes triplet loss.
It takes a triplet of variables as inputs, :math:`a`, :math:`p` and
:math:`n`: anchor, positive example and negative example respectively.
The triplet defines a relative similarity between samples.
Let :math:`N` and :math:`K` denote mini-batch size and the dimension of
input variables, respectively. The shape of all input variables should be
:math:`(N, K)`.
.. math::
L(a, p, n) = \\frac{1}{N} \\left( \\sum_{i=1}^N \\max \\{d(a_i, p_i)
- d(a_i, n_i) + {\\rm margin}, 0\\} \\right)
where :math:`d(x_i, y_i) = \\| {\\bf x}_i - {\\bf y}_i \\|_2^2`.
The output is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds the elementwise
loss values. If it is ``'mean'``, this function takes a mean of
loss values.
Args:
anchor (:class:`~chainer.Variable` or :ref:`ndarray`):
The anchor example variable. The shape
should be :math:`(N, K)`, where :math:`N` denotes the minibatch
size, and :math:`K` denotes the dimension of the anchor.
positive (:class:`~chainer.Variable` or :ref:`ndarray`):
The positive example variable. The shape
should be the same as anchor.
negative (:class:`~chainer.Variable` or :ref:`ndarray`):
The negative example variable. The shape
should be the same as anchor.
margin (float): A parameter for triplet loss. It should be a positive
value.
reduce (str): Reduction option. Its value must be either
``'mean'`` or ``'no'``. Otherwise, :class:`ValueError` is raised.
Returns:
~chainer.Variable:
A variable holding a scalar that is the loss value
calculated by the above equation.
If ``reduce`` is ``'no'``, the output variable holds array
whose shape is same as one of (hence both of) input variables.
If it is ``'mean'``, the output variable holds a scalar value.
.. note::
This cost can be used to train triplet networks. See `Learning
Fine-grained Image Similarity with Deep Ranking
<https://arxiv.org/abs/1404.4661>`_ for details.
.. admonition:: Example
>>> anchor = np.array([[-2.0, 3.0, 0.5], [5.0, 2.0, -0.5]]).\
astype(np.float32)
>>> pos = np.array([[-2.1, 2.8, 0.5], [4.9, 2.0, -0.4]]).\
astype(np.float32)
>>> neg = np.array([[-2.1, 2.7, 0.7], [4.9, 2.0, -0.7]]).\
astype(np.float32)
>>> F.triplet(anchor, pos, neg)
variable(0.14000003)
>>> y = F.triplet(anchor, pos, neg, reduce='no')
>>> y.shape
(2,)
>>> y.array
array([0.11000005, 0.17 ], dtype=float32)
>>> F.triplet(anchor, pos, neg, margin=0.5) # harder penalty
variable(0.44000003)
"""
return Triplet(margin, reduce).apply((anchor, positive, negative))[0]
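# Hedged example added for illustration (not part of the original Chainer
# source). A one-sample sketch of the margin term
# max(d(a, p) - d(a, n) + margin, 0); the inputs are assumptions.
def _triplet_example():
    import numpy as np
    anchor = np.array([[0.0, 0.0]], dtype=np.float32)
    positive = np.array([[1.0, 1.0]], dtype=np.float32)
    negative = np.array([[1.0, 0.0]], dtype=np.float32)
    # d(a, p) = 2 and d(a, n) = 1 (squared L2 distances), so the loss is
    # max(2 - 1 + 0.2, 0) = 1.2 for this single sample.
    return triplet(anchor, positive, negative, margin=0.2)  # loss value 1.2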
| 5,403 | 34.788079 | 77 | py |
| chainer | chainer-master/chainer/functions/loss/decov.py |
from chainer import backend
from chainer import function
from chainer import utils
from chainer.utils import type_check
class DeCov(function.Function):
"""DeCov loss (https://arxiv.org/abs/1511.06068)"""
def __init__(self, reduce='half_squared_sum'):
self.h_centered = None
self.covariance = None
if reduce not in ('half_squared_sum', 'no'):
raise ValueError(
'only \'half_squared_sum\' and \'no\' are valid '
'for \'reduce\', but \'%s\' is given' % reduce)
self.reduce = reduce
def check_type_forward(self, in_types):
type_check._argname(in_types, ('h',))
h_type, = in_types
type_check.expect(
h_type.dtype.kind == 'f',
h_type.ndim == 2,
)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
h, = inputs
self.h_centered = h - h.mean(axis=0, keepdims=True)
self.covariance = self.h_centered.T.dot(self.h_centered)
xp.fill_diagonal(self.covariance, 0.0)
self.covariance /= len(h)
if self.reduce == 'half_squared_sum':
cost = xp.vdot(self.covariance, self.covariance)
cost *= h.dtype.type(0.5)
return utils.force_array(cost),
else:
return self.covariance,
def backward(self, inputs, grad_outputs):
xp = backend.get_array_module(*inputs)
h, = inputs
gcost, = grad_outputs
gcost_div_n = gcost / gcost.dtype.type(len(h))
if self.reduce == 'half_squared_sum':
gh = 2.0 * self.h_centered.dot(self.covariance)
gh *= gcost_div_n
else:
xp.fill_diagonal(gcost_div_n, 0.0)
gh = self.h_centered.dot(gcost_div_n + gcost_div_n.T)
return gh,
def decov(h, reduce='half_squared_sum'):
"""Computes the DeCov loss of ``h``
The output is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds the covariance matrix,
an ``(N, N)`` array where ``N`` is the number of columns of ``h``.
If it is ``'half_squared_sum'``, it holds half of the
squared Frobenius norm (i.e. the squared L2 norm of the matrix flattened
to a vector) of that matrix.
Args:
h (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable holding a matrix where the first dimension
corresponds to the batches.
reduce (str): Reduction option. Its value must be either
``'half_squared_sum'`` or ``'no'``.
Otherwise, :class:`ValueError` is raised.
Returns:
~chainer.Variable:
A variable holding a scalar of the DeCov loss.
If ``reduce`` is ``'no'``, the output variable holds a
2-dimensional array of shape ``(N, N)`` where
``N`` is the number of columns of ``h``.
If it is ``'half_squared_sum'``, the output variable
holds a scalar value.
.. note::
See https://arxiv.org/abs/1511.06068 for details.
"""
return DeCov(reduce)(h)
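# Hedged example added for illustration (not part of the original Chainer
# source). A minimal sketch with two perfectly correlated hidden units;
# the inputs are assumptions.
def _decov_example():
    import numpy as np
    h = np.array([[1.0, 1.0], [-1.0, -1.0]], dtype=np.float32)
    # The off-diagonal covariance entries are both 1 (the diagonal is
    # zeroed out), so the loss is 0.5 * (1**2 + 1**2) = 1.
    return decov(h)  # expected: variable(1.)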
| 3,092 | 32.989011 | 77 | py |
| chainer | chainer-master/chainer/functions/loss/softmax_cross_entropy.py |
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.activation import log_softmax
from chainer.utils import type_check
from chainer import variable
import chainerx
def _broadcast_to(array, shape):
if hasattr(numpy, 'broadcast_to'):
return numpy.broadcast_to(array, shape)
dummy = numpy.empty(shape, array.dtype)
return numpy.broadcast_arrays(array, dummy)[0]
def _check_class_weight_option(class_weight):
if class_weight is not None:
if class_weight.ndim != 1:
raise ValueError('class_weight.ndim should be 1')
if class_weight.dtype.kind != 'f':
raise ValueError('The dtype of class_weight should be \'f\'')
if isinstance(class_weight, variable.Variable):
raise ValueError('class_weight should be a numpy.ndarray or '
'cupy.ndarray, not a chainer.Variable')
def _check_reduce_option(reduce):
if reduce not in ('mean', 'no'):
raise ValueError(
'only \'mean\' and \'no\' are valid for \'reduce\', but \'%s\' is '
'given' % reduce)
def _check_input_values(x, t, ignore_label):
# Extract the raw ndarray as Variable.__ge__ is not implemented.
# We assume that t is already an ndarray.
if isinstance(x, variable.Variable):
x = x.data
if not (((0 <= t) &
(t < x.shape[1])) |
(t == ignore_label)).all():
msg = ('Each label `t` need to satisfy '
'`0 <= t < x.shape[1] or t == %d`' % ignore_label)
raise ValueError(msg)
def _reduction_dtype(x_dtype):
# Returns the dtype for accumulation and output of reduction.
# For float16 input, float32 is used.
# Otherwise the same dtype as the input is used.
if x_dtype == numpy.float16:
return numpy.float32
return x_dtype
class SoftmaxCrossEntropy(function_node.FunctionNode):
"""Softmax activation followed by a cross entropy loss."""
normalize = True
y = None
# Coefficient of normalization. Only used if reduce='mean'.
_coeff = None
soft_target = False
eps = 1e-7
def __init__(self, normalize=True, cache_score=True, class_weight=None,
ignore_label=-1, reduce='mean',
soft_target_loss='cross-entropy'):
self.normalize = normalize
self.cache_score = cache_score
_check_class_weight_option(class_weight)
self.class_weight = class_weight
self.ignore_label = ignore_label
_check_reduce_option(reduce)
self.reduce = reduce
self.soft_target_loss = soft_target_loss
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 't'))
x_type, t_type = in_types
if t_type.dtype.kind == 'i':
type_check.expect(
x_type.dtype.kind == 'f',
t_type.dtype.kind == 'i',
t_type.ndim == x_type.ndim - 1,
x_type.shape[0] == t_type.shape[0],
x_type.shape[2:] == t_type.shape[1:],
)
else:
# assume t is soft_target
type_check.expect(
x_type.dtype.kind == 'f',
t_type.dtype.kind == 'f',
x_type.shape == t_type.shape,
)
def _is_chainerx_supported(self, input_arrays):
# Determines if the specified configuration of inputs and parameters
# are supported in `forward_chainerx` implementation.
# TODO(niboshi): Support these conditions.
if self.class_weight is not None:
return False
if self.ignore_label != -1:
return False
x, t = input_arrays
if x.ndim != 2:
return False
return True
def forward_chainerx(self, inputs):
if self.reduce == 'mean' and self.normalize:
x, t = inputs
n_classes = x.shape[1]
score = chainerx.log_softmax(x, axis=1)
mask = (t[:, chainerx.newaxis] == chainerx.arange(
n_classes, dtype=t.dtype, device=x.device)).astype(score.dtype)
y = (score * mask).sum() * (-1 / mask.sum())
return y,
x, t = inputs
y = chainerx.softmax_cross_entropy(x, t)
if self.reduce == 'mean':
return y.mean(),
return y,
def forward_cpu(self, inputs):
class_weight = backend.from_chx(self.class_weight)
self.retain_inputs((0, 1))
x, t = inputs
if x.ndim == t.ndim and x.shape == t.shape:
self.soft_target = True
if chainer.is_debug() and not self.soft_target:
_check_input_values(x, t, self.ignore_label)
log_y = log_softmax._log_softmax(x)
if self.cache_score:
self.y = numpy.exp(log_y)
if self.soft_target:
return self._soft_target_loss(numpy, x, t, log_y)
if class_weight is not None:
shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
log_y *= _broadcast_to(class_weight.reshape(shape), x.shape)
log_yd = numpy.rollaxis(log_y, 1)
log_yd = log_yd.reshape(len(log_yd), -1)
t_valid = t != self.ignore_label
t = t * t_valid
log_p = log_yd[t.ravel(), numpy.arange(t.size)]
log_p *= t_valid.ravel()
if self.reduce == 'mean':
if self.normalize:
count = t_valid.sum()
else:
count = len(x)
self._coeff = 1.0 / max(count, 1)
# Perform reduction in a promoted dtype
reduc_dtype = _reduction_dtype(x.dtype)
y = log_p.sum(keepdims=True, dtype=reduc_dtype)
y = y * (-self._coeff)
y = y.astype(x.dtype, copy=False)
return y.reshape(()),
else:
return -log_p.reshape(t.shape),
def forward_gpu(self, inputs):
class_weight = backend.from_chx(self.class_weight)
self.retain_inputs((0, 1))
x, t = inputs
if x.ndim == t.ndim and x.shape == t.shape:
self.soft_target = True
cupy = cuda.cupy
if chainer.is_debug() and not self.soft_target:
_check_input_values(x, t, self.ignore_label)
if x.size == 0:
y = cupy.zeros(t.shape, dtype=x.dtype)
if self.cache_score:
self.y = y
if self.reduce == 'mean':
return y.sum(),
else:
return y,
log_y = log_softmax._log_softmax(x)
if self.cache_score:
self.y = cupy.exp(log_y)
if self.soft_target:
return self._soft_target_loss(cupy, x, t, log_y)
if class_weight is not None:
shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
log_y *= cupy.broadcast_to(class_weight.reshape(shape), x.shape)
log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
if self.reduce == 'mean':
# Reduction is performed in a promoted dtype
reduc_dtype = _reduction_dtype(x.dtype)
if self.normalize:
count = (t != self.ignore_label).sum(dtype=reduc_dtype)
count = cupy.maximum(1, count)
coeff = 1. / count
else:
coeff = cupy.array(1. / max(1, len(t)), dtype=reduc_dtype)
self._coeff = coeff
ret = cuda.reduce(
'S t, raw T log_y, int32 n_channel, raw U coeff, '
'S ignore_label',
'U out',
't == ignore_label ? T(0) : log_y[_j * n_channel + t]',
'a + b', 'out = static_cast<U>(a * -coeff[0])', '0',
'crossent_fwd'
)(t, log_y.reduced_view(), log_y.shape[-1],
self._coeff, self.ignore_label)
ret = ret.astype(log_y.dtype, copy=False)
else:
ret = cuda.elementwise(
'S t, raw T log_y, int32 n_channel, T ignore', 'T out',
'''
if (t == ignore) {
out = 0;
} else {
out = -log_y[i * n_channel + t];
}
''',
'softmax_crossent_no_reduce_fwd'
)(t, log_y.reduced_view(), log_y.shape[-1], self.ignore_label)
ret = ret.reshape(t.shape)
return ret,
def _soft_target_loss(self, xp, x, t, log_y):
if self.soft_target_loss == 'kl-divergence':
ret = xp.sum(t * (xp.log(t + self.eps) - log_y), axis=1)
else:
ret = -xp.sum(t * log_y, axis=1)
if self.reduce == 'mean':
self._coeff = 1.0 / (x.size / x.shape[1])
ret = ret.sum(keepdims=True) * self._coeff
return ret.reshape(()),
else:
return ret,
def backward(self, input_indexes, grad_outputs):
func_grad = _SoftmaxCrossEntropyGrad_NoDoubleBackprop(
self.ignore_label, self.class_weight, self.y, self._coeff,
self.soft_target)
inputs = self.get_retained_inputs()
return func_grad.apply(inputs + grad_outputs) + (None,)
class _SoftmaxCrossEntropyGrad_NoDoubleBackprop(function_node.FunctionNode):
# A backward implementation which does not support double-backprop.
def __init__(self, ignore_label, class_weight, y, coeff, soft_target):
self.ignore_label = ignore_label
self.class_weight = class_weight
self.y = y
self.coeff = coeff
self.soft_target = soft_target
def forward_cpu(self, inputs_and_grad_outputs):
x, t, gloss = inputs_and_grad_outputs
if x.size == 0:
return numpy.zeros(x.shape, dtype=x.dtype), None
if self.y is not None:
y = self.y.copy()
else:
y = log_softmax._log_softmax(x)
numpy.exp(y, out=y)
t_valid = t != self.ignore_label
t = t * t_valid
if self.soft_target:
gx = y - t
elif y.ndim == 2:
gx = y
gx[numpy.arange(len(t)), t] -= 1
if self.class_weight is not None:
shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
c = _broadcast_to(self.class_weight.reshape(shape), x.shape)
c = c[numpy.arange(len(t)), t]
gx *= _broadcast_to(numpy.expand_dims(c, 1), gx.shape)
gx *= t_valid.reshape((len(t), 1))
else:
# in the case where y.ndim is higher than 2,
# we think that a current implementation is inefficient
# because it yields two provisional arrays for indexing.
n_unit = t.size // len(t)
gx = y.reshape(y.shape[0], y.shape[1], -1)
fst_index = numpy.arange(t.size) // n_unit
trd_index = numpy.arange(t.size) % n_unit
gx[fst_index, t.ravel(), trd_index] -= 1
if self.class_weight is not None:
shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
c = _broadcast_to(self.class_weight.reshape(shape), x.shape)
c = c.reshape(gx.shape)
c = c[fst_index, t.ravel(), trd_index]
c = c.reshape(y.shape[0], 1, -1)
gx *= _broadcast_to(c, gx.shape)
gx *= t_valid.reshape((len(t), 1, -1))
gx = gx.reshape(y.shape)
if self.coeff is not None:
gx *= gloss * self.coeff
else:
gx *= gloss[:, None]
return gx,
def forward_gpu(self, inputs_and_grad_outputs):
class_weight = cuda.to_gpu(self.class_weight)
cupy = cuda.cupy
x, t, gloss = inputs_and_grad_outputs
if x.size == 0:
return cupy.zeros(x.shape, dtype=x.dtype), None
if self.y is not None:
y = self.y
else:
y = log_softmax._log_softmax(x)
cupy.exp(y, out=y)
n_unit = t.size // len(t)
if self.coeff is not None:
coeff = self.coeff
else:
gloss = gloss[:, None, ...]
coeff = cupy.array(1, dtype=gloss.dtype) # dtype does not matter
if self.soft_target:
gx = gloss * coeff * (y - t)
elif self.class_weight is None:
gx = cuda.elementwise(
'T y, S t, T gloss, U coeff, S n_channel, S n_unit, '
'S ignore_label',
'T gx',
'''
const int c = (i / n_unit % n_channel);
if (t == ignore_label) {
gx = T(0);
} else {
gx = static_cast<T>(gloss * coeff * (y - (c == t)));
}
''',
'softmax_crossent_bwd')(
y, cupy.expand_dims(t, 1), gloss, coeff, x.shape[1],
n_unit, self.ignore_label)
else:
gx = cuda.elementwise(
'T y, raw T w, S t, T gloss, U coeff, '
'S n_channel, S n_unit, S ignore_label',
'T gx',
'''
const int c = (i / n_unit % n_channel);
if (t == ignore_label) {
gx = T(0);
} else {
gx = static_cast<T>(
gloss * coeff * (y - (c == t)) * w[t]);
}
''',
'softmax_crossent_weight_bwd')(
y, class_weight, cupy.expand_dims(t, 1), gloss, coeff,
x.shape[1], n_unit, self.ignore_label)
return gx,
def backward(self, input_indexes, grad_outputs):
raise RuntimeError(
'F.softmax_cross_entropy was called with '
'\'enable_double_backprop=False\' argument, but double-backprop '
'is actually being performed. Please specify '
'\'enable_double_backprop=True\' explicitly.')
def _double_backward_softmax_cross_entropy(x, t, normalize, class_weight,
ignore_label, reduce, is_chainerx):
if isinstance(t, variable.Variable):
t = t.data
F = chainer.functions
_check_class_weight_option(class_weight)
_check_reduce_option(reduce)
if chainer.is_debug():
_check_input_values(x, t, ignore_label)
loss = -chainer.functions.log_softmax(x)
if class_weight is not None:
shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
class_weight = F.broadcast_to(class_weight.reshape(shape), x.shape)
# TODO(niboshi): Remove this workaround after ChainerX supports
# type promotion.
if is_chainerx:
class_weight = F.cast(class_weight, x.dtype)
loss = loss * class_weight
in_use = (t != ignore_label).astype(x.dtype)
loss = F.rollaxis(loss, 1, loss.ndim)
loss = F.reshape(loss, (-1, loss.shape[-1]))
# Replace ignore_label value with one valid for F.select_item below.
t = t.clip(0, loss.shape[1] - 1)
loss = F.select_item(loss, t.ravel())
loss = F.reshape(loss, t.shape)
loss = loss * in_use
if reduce == 'mean':
reduc_dtype = _reduction_dtype(x.dtype)
if normalize:
# TODO(niboshi): Use in_use.sum(dtype=reduc_dtype) once chainerx
# supports dtype argument.
count = in_use.astype(reduc_dtype, copy=False).sum()
else:
count = len(x)
count = max(count, 1.)
if reduc_dtype == loss.dtype:
loss = F.sum(loss / count)
else:
# Sum in a promoted dtype
loss = F.cast(loss, reduc_dtype)
loss = F.sum(loss / count)
loss = F.cast(loss, x.dtype)
return loss
def softmax_cross_entropy(
x, t, normalize=True, cache_score=True, class_weight=None,
ignore_label=-1, reduce='mean', enable_double_backprop=False,
soft_target_loss='cross-entropy'):
"""Computes cross entropy loss for pre-softmax activations.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable holding a multidimensional array whose element indicates
unnormalized log probability: the first axis of the variable
represents the number of samples, and the second axis represents
the number of classes. While this function computes a usual softmax
cross entropy if the number of dimensions is equal to 2, it
computes a cross entropy of the replicated softmax if the number of
dimensions is greater than 2.
t (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable holding a signed integer vector of ground truth
labels. If ``t[i] == ignore_label``, corresponding ``x[i]`` is
ignored.
When the dtype is float, this function treats ``t`` as an array
holding probability distribution of labels, in other words, soft
targets. In this case, the shape of ``t`` must be the same as the
shape of ``x``. Note that the loss is calculated using cross
entropy or KL divergence.
normalize (bool): If ``True``, this function normalizes the cross
entropy loss across all instances. If ``False``, it only
normalizes along a batch size.
cache_score (bool): When it is ``True``, the function stores result
of forward computation to use it on backward computation. It
reduces computational cost though consumes more memory.
If ``enable_double_backprop`` option is ``True``, this option
is forcibly turned off and the function does not cache
the intermediate value.
class_weight (:ref:`ndarray`):
An array that contains constant weights that will be multiplied
with the loss values along with the second dimension. The shape of
this array should be ``(x.shape[1],)``. If this is not ``None``,
each class weight ``class_weight[i]`` is actually multiplied to
``y[:, i]`` that is the corresponding log-softmax output of ``x``
and has the same shape as ``x`` before calculating the actual loss
value.
ignore_label (int): Label value you want to ignore. Its default value
is ``-1``. See description of the argument `t`.
reduce (str): A string that determines whether to reduce the loss
values. If it is ``'mean'``, it computes the sum of the individual
cross entropy and normalize it according to ``normalize`` option.
If it is ``'no'``, this function computes cross entropy for each
instance and does not normalize it (``normalize`` option is
ignored). In this case, the loss value of the ignored instance,
which has ``ignore_label`` as its target value, is set to ``0``.
enable_double_backprop (bool): If ``True``, this function uses an
implementation that supports higher order differentiation.
If ``False``, it uses a single-backprop implementation.
This function uses the single-backprop version by default because we
expect it to be faster. So, if you need second or higher derivatives,
you need to turn this option on explicitly.
soft_target_loss (str): A string that determines which method is used
to calculate the soft target loss. If it is ``'cross-entropy'`` or
``'kl-divergence'``, cross-entropy or KL divergence is used,
respectively.
Returns:
~chainer.Variable: A variable holding a scalar array of the cross
entropy loss. If ``reduce`` is ``'mean'``, it is a scalar array.
If ``reduce`` is ``'no'``, the shape is same as that of ``t``.
.. note::
This function is differentiable only by ``x``.
.. admonition:: Example
>>> x = np.array([[-1, 0, 1, 2], [2, 0, 1, -1]]).astype(np.float32)
>>> x
array([[-1., 0., 1., 2.],
[ 2., 0., 1., -1.]], dtype=float32)
>>> t = np.array([3, 0]).astype(np.int32)
>>> t
array([3, 0], dtype=int32)
>>> y = F.softmax_cross_entropy(x, t)
>>> y
variable(0.44018972)
>>> log_softmax = -F.log_softmax(x)
>>> expected_loss = np.mean([log_softmax[row, column].data \
for row, column in enumerate(t)])
>>> y.array == expected_loss
True
"""
is_chainerx = (
chainerx.is_available() and backend.get_array_module(x) is chainerx)
if soft_target_loss not in ('cross-entropy', 'kl-divergence'):
raise ValueError('soft_target_loss must be \'cross-entropy\' or '
'\'kl-divergence\'.')
if is_chainerx or not enable_double_backprop:
# Optimized implementation.
# For non-ChainerX, forward and backward are supported but
# double-backprop is not supported.
# For ChainerX, even forward is supported for only specific
# configuration of inputs and parameters, which is tested with
# `SoftmaxCrossEntropy._is_chainerx_supported()`.
func = SoftmaxCrossEntropy(
normalize, cache_score, class_weight, ignore_label, reduce,
soft_target_loss)
if not is_chainerx or func._is_chainerx_supported((x, t)):
loss, = func.apply((x, t))
return loss
# Generic double-backprop-enabled but unoptimized implementation
return _double_backward_softmax_cross_entropy(
x, t, normalize, class_weight, ignore_label, reduce, is_chainerx)
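# Hedged example added for illustration (not part of the original Chainer
# source). A minimal sketch of the two target formats accepted by
# softmax_cross_entropy; the concrete numbers are assumptions chosen so
# every result is log 2.
def _softmax_cross_entropy_example():
    import numpy as np
    x = np.zeros((2, 2), dtype=np.float32)  # uniform logits
    # Integer targets: the second row uses ignore_label (-1) and is
    # skipped, so the normalized mean loss is log 2, about 0.693.
    hard = softmax_cross_entropy(x, np.array([0, -1], dtype=np.int32))
    # Float targets of the same shape as x are treated as soft targets
    # (label distributions); both rows again cost log 2 on average.
    t_soft = np.array([[1.0, 0.0], [0.5, 0.5]], dtype=np.float32)
    soft = softmax_cross_entropy(x, t_soft)
    return hard, soft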
| 21,806 | 37.802491 | 79 | py |
| chainer | chainer-master/chainer/functions/loss/discriminative_loss.py |
from chainer import backend
from chainer.functions.activation.relu import relu
from chainer.functions.array.broadcast import broadcast_to
from chainer.functions.math.basic_math import absolute
from chainer.functions.math.sqrt import sqrt
from chainer.functions.math.sum import sum as c_sum
class DiscriminativeMarginBasedClusteringLoss(object):
"""Discriminative margin-based clustering loss function
This is the implementation of the following paper:
https://arxiv.org/abs/1708.02551
This method is a semi-supervised solution to instance segmentation.
It calculates pixel embeddings, and calculates three different terms
based on those embeddings and applies them as loss.
The main idea is that the pixel embeddings
for same instances have to be closer to each other (pull force),
for different instances, they have to be further away (push force).
The loss also brings a weak regularization term to prevent overfitting.
This loss function calculates the following three parameters:
Variance Loss
Loss to penalize distances between pixels which are belonging
to the same instance. (Pull force)
Distance loss
Loss to penalize distances between the centers of instances.
(Push force)
Regularization loss
Small regularization loss to penalize weights against overfitting.
"""
def __init__(self, delta_v=0.5, delta_d=1.5,
max_embedding_dim=10, norm=1,
alpha=1.0, beta=1.0, gamma=0.001):
self.delta_v = delta_v
self.delta_d = delta_d
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.max_embedding_dim = max_embedding_dim
if self.max_embedding_dim <= 0:
raise ValueError('Max number of embeddings has to be positive!')
# L1 or L2 norm is allowed only
if norm == 1:
self.norm = lambda x, axis=None: c_sum(absolute(x), axis=axis)
elif norm == 2:
self.norm = lambda x, axis=None: sqrt(c_sum(x ** 2, axis=axis))
else:
raise ValueError('For discriminative loss, '
'norm can only be 1 or 2. '
'Obtained the value : {}'.format(norm))
def __call__(self, embeddings, labels):
"""
Args:
embeddings (:class:`~chainer.Variable` or :ref:`ndarray`):
predicted embedding vectors
(batch size, max embedding dimensions, height, width)
labels (:ref:`ndarray`):
instance segmentation ground truth
each unique value has to denote one instance
(batch size, height, width)
Returns:
:class:`tuple` of :class:`chainer.Variable`:
- *Variance loss*: Variance loss multiplied by ``alpha``
- *Distance loss*: Distance loss multiplied by ``beta``
- *Regularization loss*: Regularization loss multiplied by
``gamma``
"""
assert (self.max_embedding_dim == embeddings.shape[1])
l_dist = 0.0
count = 0
xp = backend.get_array_module(embeddings)
emb = embeddings[None, :]
emb = broadcast_to(emb, (emb.shape[1],
emb.shape[1],
emb.shape[2],
emb.shape[3],
emb.shape[4]))
ms = []
for c in range(self.max_embedding_dim):
# Create mask for instance
mask = xp.expand_dims(labels == c + 1, 1)
ms.append(mask)
if hasattr(xp, 'stack'):
ms = xp.stack(ms, 0)
else:
# Old numpy does not have numpy.stack.
ms = xp.concatenate([xp.expand_dims(x, 0) for x in ms], 0)
mns = c_sum(emb * ms, axis=(3, 4))
mns = mns / xp.maximum(xp.sum(ms, (2, 3, 4))[:, :, None], 1)
mns_exp = mns[:, :, :, None, None]
# Calculate regularization term
l_reg = c_sum(self.norm(mns, (1, 2)))
l_reg = l_reg / (self.max_embedding_dim * embeddings.shape[0])
# Calculate variance term
l_var = self.norm((mns_exp - emb) * ms, 2)
l_var = relu(l_var - self.delta_v) ** 2
l_var = c_sum(l_var, (1, 2, 3))
l_var = l_var / xp.maximum(xp.sum(ms, (1, 2, 3, 4)), 1)
l_var = c_sum(l_var) / self.max_embedding_dim
# Calculate distance loss
for c_a in range(len(mns)):
for c_b in range(c_a + 1, len(mns)):
m_a = mns[c_a]
m_b = mns[c_b]
dist = self.norm(m_a - m_b, 1) # N
l_dist += c_sum((relu(2 * self.delta_d - dist)) ** 2)
count += 1
l_dist /= max(count * embeddings.shape[0], 1)
rtn = self.alpha * l_var, self.beta * l_dist, self.gamma * l_reg
return rtn
def discriminative_margin_based_clustering_loss(
embeddings, labels,
delta_v, delta_d, max_embedding_dim,
norm=1, alpha=1.0, beta=1.0, gamma=0.001):
"""Discriminative margin-based clustering loss function
This is the implementation of the following paper:
https://arxiv.org/abs/1708.02551
This method is a semi-supervised solution to instance segmentation.
It calculates pixel embeddings, and calculates three different terms
based on those embeddings and applies them as loss.
The main idea is that the pixel embeddings
for same instances have to be closer to each other (pull force),
for different instances, they have to be further away (push force).
The loss also brings a weak regularization term to prevent overfitting.
This loss function calculates the following three terms:
Variance loss
Loss to penalize distances between pixels belonging
to the same instance. (Pull force)
Distance loss
Loss to penalize distances between the centers of instances.
(Push force)
Regularization loss
Small regularization loss that penalizes large cluster-center
embeddings to prevent overfitting.
Args:
embeddings (:class:`~chainer.Variable` or :ref:`ndarray`):
predicted embedding vectors
(batch size, max embedding dimensions, height, width)
labels (:ref:`ndarray`):
instance segmentation ground truth
each unique value has to denote one instance
(batch size, height, width)
delta_v (float): Minimum distance to start penalizing variance
delta_d (float): Maximum distance to stop penalizing distance
max_embedding_dim (int): Maximum number of embedding dimensions
norm (int): Norm to calculate pixels and cluster center distances
alpha (float): Weight for variance loss
beta (float): Weight for distance loss
gamma (float): Weight for regularization loss
Returns:
:class:`tuple` of :class:`chainer.Variable`:
- *Variance loss*: Variance loss multiplied by ``alpha``
- *Distance loss*: Distance loss multiplied by ``beta``
- *Regularization loss*: Regularization loss multiplied by ``gamma``
"""
loss = DiscriminativeMarginBasedClusteringLoss(
delta_v, delta_d, max_embedding_dim, norm, alpha, beta, gamma)
return loss(embeddings, labels)
| 7,389
| 38.731183
| 76
|
py
|
chainer
|
chainer-master/chainer/functions/loss/black_out.py
|
from chainer.functions.array import broadcast
from chainer.functions.array import concat
from chainer.functions.array import expand_dims
from chainer.functions.array import reshape
from chainer.functions.connection import embed_id
from chainer.functions.math import average
from chainer.functions.math import exponential
from chainer.functions.math import logsumexp
from chainer.functions.math import matmul
from chainer.functions.math import sum as _sum
def black_out(x, t, W, samples, reduce='mean'):
"""BlackOut loss function.
BlackOut loss function is defined as
.. math::
-\\log(p(t)) - \\sum_{s \\in S} \\log(1 - p(s)),
where :math:`t` is the correct label, :math:`S` is a set of negative
examples and :math:`p(\\cdot)` is likelihood of a given label.
And, :math:`p` is defined as
.. math::
p(y) = \\frac{\\exp(W_y^\\top x)}{
\\sum_{s \\in samples} \\exp(W_s^\\top x)}.
The output is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds the
elementwise loss values. If it is ``'mean'``, this function takes
a mean of loss values.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Batch of input vectors.
Its shape should be :math:`(N, D)`.
t (:class:`~chainer.Variable` or :ref:`ndarray`):
Vector of ground truth labels.
Its shape should be :math:`(N,)`. Each element :math:`v`
should satisfy :math:`0 \\leq v < V` or be :math:`-1`,
where :math:`V` is the number of label types.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Weight matrix.
Its shape should be :math:`(V, D)`
samples (~chainer.Variable): Negative samples.
Its shape should be :math:`(N, S)` where :math:`S` is
the number of negative samples.
reduce (str): Reduction option. Its value must be either
``'no'`` or ``'mean'``. Otherwise,
:class:`ValueError` is raised.
Returns:
~chainer.Variable:
A variable object holding loss value(s).
If ``reduce`` is ``'no'``, the output variable holds an
array whose shape is :math:`(N,)` .
If it is ``'mean'``, it holds a scalar.
See: `BlackOut: Speeding up Recurrent Neural Network Language Models With
Very Large Vocabularies <https://arxiv.org/abs/1511.06909>`_
.. seealso::
:class:`~chainer.links.BlackOut` to manage the model parameter ``W``.
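.. admonition:: Example
A minimal shape-level sketch with random inputs, assuming
:math:`V = 5` label types, :math:`D = 4` dimensions and
:math:`S = 2` negative samples per example.
>>> x = np.random.uniform(-1, 1, (2, 4)).astype(np.float32)
>>> t = np.array([1, 3]).astype(np.int32)
>>> W = np.random.uniform(-1, 1, (5, 4)).astype(np.float32)
>>> samples = np.array([[0, 2], [4, 0]]).astype(np.int32)
>>> F.black_out(x, t, W, samples, reduce='no').shape
(2,)
>>> F.black_out(x, t, W, samples).shape
()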
"""
batch_size = x.shape[0]
neg_emb = embed_id.embed_id(samples, W)
neg_y = matmul.matmul(neg_emb, x[:, :, None])
neg_y = reshape.reshape(neg_y, neg_y.shape[:-1])
pos_emb = expand_dims.expand_dims(embed_id.embed_id(t, W), 1)
pos_y = matmul.matmul(pos_emb, x[:, :, None])
pos_y = reshape.reshape(pos_y, pos_y.shape[:-1])
logz = logsumexp.logsumexp(concat.concat([pos_y, neg_y]), axis=1)
blogz, bneg_y = broadcast.broadcast(
reshape.reshape(logz, (batch_size, 1)), neg_y)
ny = exponential.log(1 - exponential.exp(bneg_y - blogz))
py = reshape.reshape(pos_y, (batch_size,))
loss = -(py - logz + _sum.sum(ny, axis=1))
if reduce == 'mean':
loss = average.average(loss)
return loss
| 3,306
| 35.744444
| 77
|
py
|
chainer
|
chainer-master/chainer/functions/loss/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainer/functions/loss/cross_covariance.py
|
import chainer
from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class CrossCovariance(function_node.FunctionNode):
"""Cross-covariance loss."""
def __init__(self, reduce='half_squared_sum'):
self.y_centered = None
self.z_centered = None
self.covariance = None
if reduce not in ('half_squared_sum', 'no'):
raise ValueError(
'Only \'half_squared_sum\' and \'no\' are valid '
'for \'reduce\', but \'%s\' is given' % reduce)
self.reduce = reduce
def check_type_forward(self, in_types):
type_check._argname(in_types, ('y', 'z'))
y_type, z_type = in_types
type_check.expect(
y_type.dtype.kind == 'f',
y_type.dtype == z_type.dtype,
y_type.ndim == 2,
z_type.ndim == 2,
y_type.shape[0] == z_type.shape[0]
)
def forward(self, inputs):
y, z = inputs
self.retain_inputs((0, 1))
y_centered = y - y.mean(axis=0, keepdims=True)
z_centered = z - z.mean(axis=0, keepdims=True)
covariance = y_centered.T.dot(z_centered)
covariance /= len(y)
if self.reduce == 'half_squared_sum':
xp = backend.get_array_module(*inputs)
cost = xp.vdot(covariance, covariance)
cost *= y.dtype.type(0.5)
return utils.force_array(cost),
else:
return covariance,
def backward(self, indexes, grad_outputs):
y, z = self.get_retained_inputs()
gcost, = grad_outputs
y_mean = chainer.functions.mean(y, axis=0, keepdims=True)
z_mean = chainer.functions.mean(z, axis=0, keepdims=True)
y_centered = y - chainer.functions.broadcast_to(y_mean, y.shape)
z_centered = z - chainer.functions.broadcast_to(z_mean, z.shape)
gcost_div_n = gcost / gcost.dtype.type(len(y))
ret = []
if self.reduce == 'half_squared_sum':
covariance = chainer.functions.matmul(y_centered.T, z_centered)
covariance /= len(y)
if 0 in indexes:
gy = chainer.functions.matmul(z_centered, covariance.T)
gy *= chainer.functions.broadcast_to(gcost_div_n, gy.shape)
ret.append(gy)
if 1 in indexes:
gz = chainer.functions.matmul(y_centered, covariance)
gz *= chainer.functions.broadcast_to(gcost_div_n, gz.shape)
ret.append(gz)
else:
if 0 in indexes:
gy = chainer.functions.matmul(z_centered, gcost_div_n.T)
ret.append(gy)
if 1 in indexes:
gz = chainer.functions.matmul(y_centered, gcost_div_n)
ret.append(gz)
return ret
def cross_covariance(y, z, reduce='half_squared_sum'):
"""Computes the sum-squared cross-covariance penalty between ``y`` and ``z``
The output is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds the covariance
matrix that has as many rows (resp. columns) as the dimension of
``y`` (resp. ``z``).
If it is ``'half_squared_sum'``, it holds half of the squared
Frobenius norm (i.e. the squared L2 norm of the matrix flattened
to a vector) of the covariance matrix.
Args:
y (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable holding a matrix where the first dimension
corresponds to the batches.
z (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable holding a matrix where the first dimension
corresponds to the batches.
reduce (str): Reduction option. Its value must be either
``'half_squared_sum'`` or ``'no'``.
Otherwise, :class:`ValueError` is raised.
Returns:
~chainer.Variable:
A variable holding the cross covariance loss.
If ``reduce`` is ``'no'``, the output variable holds
2-dimensional array matrix of shape ``(M, N)`` where
``M`` (resp. ``N``) is the number of columns of ``y``
(resp. ``z``).
If it is ``'half_squared_sum'``, the output variable
holds a scalar value.
.. note::
This cost can be used to disentangle variables.
See https://arxiv.org/abs/1412.6583v3 for details.
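.. admonition:: Example
A minimal sketch with random inputs; only the output shapes are
checked here.
>>> y = np.random.uniform(-1, 1, (5, 3)).astype(np.float32)
>>> z = np.random.uniform(-1, 1, (5, 2)).astype(np.float32)
>>> F.cross_covariance(y, z).shape
()
>>> F.cross_covariance(y, z, reduce='no').shape
(3, 2)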
"""
return CrossCovariance(reduce).apply((y, z))[0]
| 4,497
| 35.569106
| 80
|
py
|
chainer
|
chainer-master/chainer/functions/loss/contrastive.py
|
from chainer import backend
from chainer import function_node
import chainer.functions
from chainer.utils import type_check
class Contrastive(function_node.FunctionNode):
"""Contrastive loss function."""
def __init__(self, margin, reduce='mean'):
if margin <= 0:
raise ValueError('margin should be positive value.')
self.margin = margin
if reduce not in ('mean', 'no'):
raise ValueError(
'only \'mean\' and \'no\' are valid for \'reduce\', but '
'\'%s\' is given' % reduce)
self.reduce = reduce
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x0', 'x1', 'y'))
x0_type, x1_type, y_type = in_types
type_check.expect(
x0_type.dtype.kind == 'f',
x0_type.dtype == x1_type.dtype,
y_type.dtype.kind == 'i',
x0_type.shape == x1_type.shape,
x1_type.shape[0] == y_type.shape[0],
x1_type.shape[0] > 0,
x0_type.ndim == 2,
x1_type.ndim == 2,
y_type.ndim == 1
)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
self.retain_inputs((0, 1, 2))
x0, x1, y = inputs
diff = x0 - x1
dist_sq = xp.sum(diff ** 2, axis=1)
dist = xp.sqrt(dist_sq)
mdist = self.margin - dist
dist = xp.maximum(mdist, 0)
loss = (y * dist_sq + (1 - y) * dist * dist) * .5
if self.reduce == 'mean':
loss = xp.sum(loss) / x0.shape[0]
return xp.array(loss, dtype=x0.dtype),
def backward(self, indexes, grad_outputs):
x0, x1, y = self.get_retained_inputs()
gy, = grad_outputs
xp = backend.get_array_module(gy.data)
# Recompute intermediate variables as in forward.
diff = x0 - x1
dist_sq = chainer.functions.sum(diff ** 2, axis=1)
dist = chainer.functions.sqrt(dist_sq)
mdist = self.margin - dist
y = y.data
x_dim = x0.shape[1]
y = xp.repeat(y[:, None], x_dim, axis=1)
if self.reduce == 'mean':
alpha = gy / y.shape[0]
else:
alpha = gy[:, None]
alpha = chainer.functions.broadcast_to(alpha, y.shape)
dist = chainer.functions.repeat(dist[:, None], x_dim, axis=1)
# avoid division by zero; 1e-7 is not used here because it cannot be
# represented accurately in half precision.
eps = 5e-3 if dist.dtype == 'float16' else 1e-7
dist = chainer.functions.maximum(
dist, xp.full(dist.shape, eps, dtype=dist.dtype))
# similar pair
gx0 = alpha * y.astype(alpha.dtype) * diff
# dissimilar pair
d = chainer.functions.repeat(mdist[:, None], x_dim, axis=1)
mdist = chainer.functions.maximum(
d, xp.zeros(shape=d.shape, dtype=d.dtype))
gx0 += alpha * (1 - y) * mdist * -(diff / dist)
gx0 = chainer.functions.cast(gx0, x0.dtype)
return gx0, -gx0, None
def contrastive(x0, x1, y, margin=1, reduce='mean'):
"""Computes contrastive loss.
It takes a pair of samples and a label as inputs.
The label is :math:`1` when those samples are similar,
or :math:`0` when they are dissimilar.
Let :math:`N` and :math:`K` denote mini-batch size and the dimension
of input variables, respectively. The shape of both input variables
``x0`` and ``x1`` should be ``(N, K)``.
The loss value of the :math:`n`-th sample pair :math:`L_n` is
.. math::
L_n = \\frac{1}{2} \\left( y_n d_n^2
+ (1 - y_n) \\max ({\\rm margin} - d_n, 0)^2 \\right)
where :math:`d_n = \\| {\\bf x_0}_n - {\\bf x_1}_n \\|_2`,
:math:`{\\bf x_0}_n` and :math:`{\\bf x_1}_n` are :math:`n`-th
K-dimensional vectors of ``x0`` and ``x1``.
The output is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds the elementwise
loss values. If it is ``'mean'``, this function takes a mean of
loss values.
Args:
x0 (:class:`~chainer.Variable` or :ref:`ndarray`): The first input
variable. The shape should be (N, K), where N denotes the
mini-batch size, and K denotes the dimension of ``x0``.
x1 (:class:`~chainer.Variable` or :ref:`ndarray`): The second input
variable. The shape should be the same as ``x0``.
y (:class:`~chainer.Variable` or :ref:`ndarray`): Labels. All values
should be 0 or 1. The shape should be ``(N,)``, where N denotes the
mini-batch size.
margin (float): A parameter for contrastive loss. It should be a positive
value.
reduce (str): Reduction option. Its value must be either
``'mean'`` or ``'no'``. Otherwise, :class:`ValueError` is raised.
Returns:
~chainer.Variable:
A variable holding the loss value(s) calculated by the
above equation.
If ``reduce`` is ``'no'``, the output variable holds an array
whose shape is ``(N,)``, the same as the label variable ``y``.
If it is ``'mean'``, the output variable holds a scalar value.
.. note::
This cost can be used to train siamese networks. See `Learning a
Similarity Metric Discriminatively, with Application to Face
Verification <http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf>`_
for details.
.. admonition:: Example
>>> x0 = np.array([[-2.0, 3.0, 0.5], [5.0, 2.0, -0.5]]).\
astype(np.float32)
>>> x1 = np.array([[-1.0, 3.0, 1.0], [3.5, 0.5, -2.0]]).\
astype(np.float32)
>>> y = np.array([1, 0]).astype(np.int32)
>>> F.contrastive(x0, x1, y)
variable(0.3125)
>>> F.contrastive(x0, x1, y, margin=3.0) # harder penalty
variable(0.3528857)
>>> z = F.contrastive(x0, x1, y, reduce='no')
>>> z.shape
(2,)
>>> z.array
array([0.625, 0. ], dtype=float32)
"""
return Contrastive(margin, reduce).apply((x0, x1, y))[0]
| 6,113
| 36.740741
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/loss/squared_error.py
|
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class SquaredError(function_node.FunctionNode):
"""Squared error function."""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x0', 'x1'))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
in_types[0].shape == in_types[1].shape
)
def forward(self, inputs):
x0, x1 = inputs
diff = x0 - x1
self.retain_inputs((0, 1))
return utils.force_array(diff * diff, dtype=x0.dtype),
def backward(self, indexes, grad_outputs):
x0, x1 = self.get_retained_inputs()
gy, = grad_outputs
gx = gy * 2 * (x0 - x1)
return gx, -gx
def squared_error(x0, x1):
"""Squared error function.
This function computes the squared error between two variables:
.. math::
(x_0 - x_1)^2
where operation is done in elementwise manner.
Note that the error is not scaled by 1/2.
Args:
x0 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
x1 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable:
A variable holding an array representing the squared error of
two inputs.
.. note::
:func:`~chainer.functions.squared_error` and
:func:`~chainer.functions.squared_difference` are identical functions,
aside from the different argument names.
They are both kept for backward compatibility.
.. seealso:: :func:`~chainer.functions.squared_difference`
.. admonition:: Example
>>> x1 = np.arange(6).astype(np.float32)
>>> x1
array([0., 1., 2., 3., 4., 5.], dtype=float32)
>>> x2 = np.array([5, 4, 3, 2, 1, 0]).astype(np.float32)
>>> x2
array([5., 4., 3., 2., 1., 0.], dtype=float32)
>>> y = F.squared_error(x1, x2)
>>> y.shape
(6,)
>>> y.array
array([25., 9., 1., 1., 9., 25.], dtype=float32)
"""
return SquaredError().apply((x0, x1))[0]
def squared_difference(x1, x2):
"""Squared difference function.
This functions is identical to :func:`~chainer.functions.squared_error`
except for the names of the arguments.
.. seealso:: :func:`~chainer.functions.squared_error`
"""
return squared_error(x1, x2)
| 2,541
| 26.934066
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/loss/crf1d.py
|
from chainer.functions.array import broadcast
from chainer.functions.array import concat
from chainer.functions.array import reshape
from chainer.functions.array import select_item
from chainer.functions.array import split_axis
from chainer.functions.connection import embed_id
from chainer.functions.math import logsumexp
from chainer.functions.math import minmax
from chainer.functions.math import sum as _sum
def crf1d(cost, xs, ys, reduce='mean'):
"""Calculates negative log-likelihood of linear-chain CRF.
It takes a transition cost matrix, a sequence of costs, and a sequence of
labels. Let :math:`c_{st}` be a transition cost from a label :math:`s` to
a label :math:`t`, :math:`x_{it}` be a cost of a label :math:`t` at
position :math:`i`, and :math:`y_i` be an expected label at position
:math:`i`. The negative log-likelihood of linear-chain CRF is defined as
.. math::
L = -\\left( \\sum_{i=1}^l x_{iy_i} + \\
\\sum_{i=1}^{l-1} c_{y_i y_{i+1}} - {\\log(Z)} \\right) ,
where :math:`l` is the length of the input sequence and :math:`Z` is the
normalizing constant called partition function.
.. note::
When you want to calculate the negative log-likelihood of sequences
which have different lengths, sort the sequences in descending order of
lengths and transpose the sequences.
For example, you have three input sequences:
>>> a1 = a2 = a3 = a4 = np.random.uniform(-1, 1, 3).astype(np.float32)
>>> b1 = b2 = b3 = np.random.uniform(-1, 1, 3).astype(np.float32)
>>> c1 = c2 = np.random.uniform(-1, 1, 3).astype(np.float32)
>>> a = [a1, a2, a3, a4]
>>> b = [b1, b2, b3]
>>> c = [c1, c2]
where ``a1`` and all other variables are arrays with ``(K,)`` shape.
Make a transpose of the sequences:
>>> x1 = np.stack([a1, b1, c1])
>>> x2 = np.stack([a2, b2, c2])
>>> x3 = np.stack([a3, b3])
>>> x4 = np.stack([a4])
and make a list of the arrays:
>>> xs = [x1, x2, x3, x4]
You need to make label sequences in the same fashion.
And then, call the function:
>>> cost = chainer.Variable(
... np.random.uniform(-1, 1, (3, 3)).astype(np.float32))
>>> ys = [np.zeros(x.shape[0:1], dtype=np.int32) for x in xs]
>>> loss = F.crf1d(cost, xs, ys)
It calculates mean of the negative log-likelihood of the three
sequences.
The output is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds the elementwise
loss values. If it is ``'mean'``, it holds mean of the loss values.
Args:
cost (:class:`~chainer.Variable` or :ref:`ndarray`):
A :math:`K \\times K` matrix which holds transition
cost between two labels, where :math:`K` is the number of labels.
xs (list of Variable): Input vector for each label.
``len(xs)`` denotes the length of the sequence,
and each :class:`~chainer.Variable` holds a :math:`B \\times K`
matrix, where :math:`B` is mini-batch size, :math:`K` is the number
of labels.
Note that :math:`B`\\ s in all the variables are not necessarily
the same, i.e., it accepts the input sequences with different
lengths.
ys (list of Variable): Expected output labels. It needs to have the
same length as ``xs``. Each :class:`~chainer.Variable` holds a
:math:`B` integer vector.
When ``x`` in ``xs`` has a different :math:`B`, the corresponding
``y`` has the same :math:`B`. In other words, ``ys`` must satisfy
``ys[i].shape == xs[i].shape[0:1]`` for all ``i``.
reduce (str): Reduction option. Its value must be either
``'mean'`` or ``'no'``. Otherwise, :class:`ValueError` is raised.
Returns:
~chainer.Variable: A variable holding the average negative
log-likelihood of the input sequences.
.. note::
See detail in the original paper: `Conditional Random Fields:
Probabilistic Models for Segmenting and Labeling Sequence Data
<https://repository.upenn.edu/cis_papers/159/>`_.
"""
if reduce not in ('mean', 'no'):
raise ValueError(
'only \'mean\' and \'no\' are valid for \'reduce\', but \'%s\' is '
'given' % reduce)
assert xs[0].shape[1] == cost.shape[0]
n_label = cost.shape[0]
n_batch = xs[0].shape[0]
alpha = xs[0]
alphas = []
for x in xs[1:]:
batch = x.shape[0]
if alpha.shape[0] > batch:
alpha, alpha_rest = split_axis.split_axis(alpha, [batch], axis=0)
alphas.append(alpha_rest)
b_alpha, b_cost = broadcast.broadcast(alpha[..., None], cost)
alpha = logsumexp.logsumexp(b_alpha + b_cost, axis=1) + x
if alphas:
alphas.append(alpha)
alpha = concat.concat(alphas[::-1], axis=0)
logz = logsumexp.logsumexp(alpha, axis=1)
cost = reshape.reshape(cost, (cost.size, 1))
score = select_item.select_item(xs[0], ys[0])
scores = []
for x, y, y_prev in zip(xs[1:], ys[1:], ys[:-1]):
batch = x.shape[0]
if score.shape[0] > batch:
y_prev, _ = split_axis.split_axis(y_prev, [batch], axis=0)
score, score_rest = split_axis.split_axis(score, [batch], axis=0)
scores.append(score_rest)
score += (select_item.select_item(x, y) + reshape.reshape(
embed_id.embed_id(y_prev * n_label + y, cost), (batch,)))
if scores:
scores.append(score)
score = concat.concat(scores[::-1], axis=0)
loss = logz - score
if reduce == 'mean':
return _sum.sum(loss) / n_batch
else:
return loss
def argmax_crf1d(cost, xs):
"""Computes a state that maximizes a joint probability of the given CRF.
Args:
cost (:class:`~chainer.Variable` or :ref:`ndarray`):
A :math:`K \\times K` matrix which holds transition
cost between two labels, where :math:`K` is the number of labels.
xs (list of Variable): Input vector for each label.
``len(xs)`` denotes the length of the sequence,
and each :class:`~chainer.Variable` holds a :math:`B \\times K`
matrix, where :math:`B` is mini-batch size, :math:`K` is the number
of labels.
Note that :math:`B`\\ s in all the variables are not necessarily
the same, i.e., it accepts the input sequences with different
lengths.
Returns:
tuple: A tuple of :class:`~chainer.Variable` object ``s`` and a
:class:`list` ``ps``.
The shape of ``s`` is ``(B,)``, where ``B`` is the mini-batch size.
i-th element of ``s``, ``s[i]``, represents log-likelihood of i-th
data.
``ps`` is a list of :ref:`ndarray`, and denotes the state that
maximizes the point probability.
``len(ps)`` is equal to ``len(xs)``, and shape of each ``ps[i]`` is
the mini-batch size of the corresponding ``xs[i]``. That means,
``ps[i].shape == xs[i].shape[0:1]``.
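.. admonition:: Example
A minimal sketch for a batch of two sequences of equal length; only
the shapes of the returned score and path are checked here.
>>> cost = np.random.uniform(-1, 1, (3, 3)).astype(np.float32)
>>> xs = [np.random.uniform(-1, 1, (2, 3)).astype(np.float32)
...       for _ in range(4)]
>>> score, path = F.argmax_crf1d(cost, xs)
>>> score.shape
(2,)
>>> len(path), path[0].shape
(4, (2,))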
"""
alpha = xs[0]
alphas = []
max_inds = []
for x in xs[1:]:
batch = x.shape[0]
if alpha.shape[0] > batch:
alpha, alpha_rest = split_axis.split_axis(alpha, [batch], axis=0)
alphas.append(alpha_rest)
else:
alphas.append(None)
b_alpha, b_cost = broadcast.broadcast(alpha[..., None], cost)
scores = b_alpha + b_cost
max_ind = minmax.argmax(scores, axis=1)
max_inds.append(max_ind)
alpha = minmax.max(scores, axis=1) + x
inds = minmax.argmax(alpha, axis=1)
path = [inds.data]
for m, a in zip(max_inds[::-1], alphas[::-1]):
inds = select_item.select_item(m, inds)
if a is not None:
inds = concat.concat([inds, minmax.argmax(a, axis=1)], axis=0)
path.append(inds.data)
path.reverse()
score = minmax.max(alpha, axis=1)
for a in alphas[::-1]:
if a is None:
continue
score = concat.concat([score, minmax.max(a, axis=1)], axis=0)
return score, path
| 8,264
| 37.985849
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/fmod.py
|
from chainer import backend
from chainer import function_node
import chainer.functions
from chainer import utils
from chainer.utils import type_check
class Fmod(function_node.FunctionNode):
@property
def label(self):
return 'fmod'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 'divisor'))
type_check.expect(
in_types[0].dtype == in_types[1].dtype,
in_types[0].dtype.kind == 'f',
in_types[1].dtype.kind == 'f',
)
def forward(self, inputs):
self.retain_inputs((0, 1))
xp = backend.get_array_module(*inputs)
x, divisor = inputs
m = xp.fmod(x, divisor)
return utils.force_array(m, x.dtype),
def backward(self, indexes, grad_outputs):
x, divisor = self.get_retained_inputs()
gw, = grad_outputs
return gw, - chainer.functions.fix(x / divisor) * gw
def fmod(x, divisor):
"""Elementwise mod function.
.. math::
y_i = x_i \\bmod \\mathrm{divisor}.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
divisor (:class:`~chainer.Variable` or :ref:`ndarray`): Input divisor.
Returns:
~chainer.Variable: Output variable.
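.. admonition:: Example
A small sketch; as with :func:`numpy.fmod`, the result keeps the sign
of ``x``.
>>> x = np.array([3.5, 2.5, 4.5]).astype(np.float32)
>>> divisor = np.array([2.0, 2.0, 2.0]).astype(np.float32)
>>> F.fmod(x, divisor).array
array([1.5, 0.5, 0.5], dtype=float32)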
"""
return Fmod().apply((x, divisor))[0]
| 1,315
| 26.416667
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/math/einsum.py
|
import warnings
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import argument
from chainer.utils import type_check
def _enumerate_axes(subscripts):
if '@' in subscripts:
left_sub, right_sub = subscripts.split('@')
for i, s in enumerate(left_sub):
yield i, s
yield slice(len(left_sub), -len(right_sub) or None), '@'
for i, s in enumerate(right_sub):
yield i - len(right_sub), s
else:
for i, s in enumerate(subscripts):
yield i, s
def _einsum(xp, dtype, in_subscripts, out_subscript, *inputs, **kwargs):
check_undefined_ellipsis_sum, = argument.parse_kwargs(
kwargs, ('check_undefined_ellipsis_sum', False))
sum_ellipsis = '@' in in_subscripts and '@' not in out_subscript
if sum_ellipsis:
# einsum does not usually allow summing over '...'
subscripts = '{}->...{}'.format(
in_subscripts.replace('@', '...'),
out_subscript
)
else:
subscripts = '{}->{}'.format(
in_subscripts,
out_subscript
).replace('@', '...')
# Use optimize option whenever it is critical in speed.
# Otherwise avoid bugs in numpy>=1.12,<1.15.
einsum_kwargs = {}
if len(inputs) >= 3:
einsum_kwargs['optimize'] = True
try:
y = xp.einsum(subscripts, *inputs, **einsum_kwargs)
except TypeError:
warnings.warn(
'{xp}.einsum does not support optimize option. '
'Use newer version of {xp} to speed up.'
.format(xp=xp.__name__),
chainer.warnings.PerformanceWarning,
)
y = xp.einsum(subscripts, *inputs)
if sum_ellipsis:
sum_ndim = y.ndim - len(out_subscript)
if check_undefined_ellipsis_sum and sum_ndim > 0:
raise ValueError(
'einsum should not support summing over Ellipsis, '
'while NumPy 1.14 sometimes accidentally supports it. '
'This feature is no longer supported by Chainer. '
'See also NumPy issues #10926, #9984.',
)
y = xp.sum(y, axis=tuple(range(sum_ndim)))
return utils.force_array(y, dtype)
class EinSum(function_node.FunctionNode):
def __init__(self, in_subs, out_sub):
self.in_subs = in_subs
self.out_sub = out_sub
def check_type_forward(self, in_types):
for i, in_type in enumerate(in_types):
type_check._argname((in_type,), ('x{}'.format(i),))
type_check.expect(in_type.dtype.kind == 'f')
in_subs = self.in_subs.split(',')
type_check.expect(in_types.size() == len(in_subs))
shape_dict = {}
for in_sub, in_type in zip(in_subs, in_types):
for axis, char in _enumerate_axes(in_sub):
shape = in_type.shape[axis]
if char in shape_dict:
type_check.expect(shape_dict[char] == shape)
else:
shape_dict[char] = shape
def forward(self, inputs):
n_args = len(inputs)
# TODO(kataoka): Do not retain inputs if n_args == 1
self.retain_inputs(tuple(range(n_args)))
xp = backend.get_array_module(inputs[0])
dtype = xp.result_type(*[x.dtype for x in inputs])
y = _einsum(xp, dtype, self.in_subs, self.out_sub, *inputs,
check_undefined_ellipsis_sum=True)
return y,
def backward(self, indices, grad_outputs):
inputs = self.get_retained_inputs()
g, = grad_outputs
fwd_in_subs = self.in_subs.split(',')
fwd_out_sub = self.out_sub
return tuple(
DiagEinSum(
in_subs=','.join([
(fwd_out_sub if j == i else s)
for j, s in enumerate(fwd_in_subs)
]),
out_sub=fwd_in_subs[i],
out_shape=inputs[i].shape,
).apply(tuple(
(g if j == i else x)
for j, x in enumerate(inputs)
))[0]
for i in indices
)
class DiagEinSum(EinSum):
def __init__(self, in_subs, out_sub, out_shape):
self.in_subs = in_subs
self.out_sub = out_sub
self.out_shape = out_shape
def forward(self, inputs):
n_args = len(inputs)
# TODO(kataoka): Do not retain inputs if n_args == 1
self.retain_inputs(tuple(range(n_args)))
xp = backend.get_array_module(inputs[0])
dtype = xp.result_type(*[x.dtype for x in inputs])
out_set = set(self.out_sub)
# '@' is a single char, ',' is excluded.
io_set = out_set.intersection(set(self.in_subs))
if len(io_set) == len(self.out_sub):
y = _einsum(xp, dtype, self.in_subs, self.out_sub, *inputs)
else:
direct_sub = []
inverse_sub = []
expander = []
for c in sorted(out_set):
if c in io_set:
direct_sub.append(c)
expander.append(slice(None))
else:
expander.append(None)
inverse_sub.append(c)
y = xp.zeros(self.out_shape, dtype)
diag_y = _einsum(
xp, dtype, self.out_sub, ''.join(inverse_sub), y)
if diag_y.base is not y:
raise ValueError('Update CuPy to close CuPy Issue #1199')
# Make the view writeable as numpy PR #5410 for numpy<1.10.
if xp is not cuda.cupy: # no setflags in cupy
diag_y.setflags(write=True)
diag_y[...] = _einsum(
xp, dtype, self.in_subs, ''.join(direct_sub), *inputs
)[tuple(expander)]
return y,
def einsum(*operands):
"""Einstein summation
This function supports two formats of inputs:
- ``einsum(subscripts, op0, op1, ...)``
- ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``
See also :func:`numpy.einsum`
.. admonition:: Example
The following example computes a batched application of a bilinear
function with weight ``w``.
>>> x1 = np.arange(12).reshape(3, 4).astype(np.float32)
>>> x2 = np.arange(15).reshape(3, 5).astype(np.float32)
>>> w = np.arange(120).reshape(4, 5, 6).astype(np.float32)
>>> y = F.einsum('ij,ik,jkl->il', x1, x2, w)
>>> y.shape
(3, 6)
The batch axes can be denoted by ``...``. If the string of output
subscripts is omitted, the summation is taken over the subscript
letters that occur two (or more) times.
>>> np.allclose(y.array, F.einsum('...j,...k,jkl', x1, x2, w).array)
True
In the other format:
>>> y = F.einsum(x1, [0, 1], x2, [0, 2], w, [1, 2, 3], [0, 3])
>>> y.shape
(3, 6)
>>> y = F.einsum(x1, [Ellipsis, 1], x2, [Ellipsis, 2], w, [1, 2, 3])
>>> y.shape
(3, 6)
"""
input_subscripts, output_subscript, ioperands = \
_parse_einsum_input(operands)
return EinSum(
in_subs=input_subscripts,
out_sub=output_subscript,
).apply(ioperands)[0]
# #################### cupy.linalg.einsum ####################
# From cupy PR #873
einsum_symbols = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
einsum_symbols_set = set(einsum_symbols)
def _parse_einsum_input(operands):
"""Parses einsum operands.
This function is based on `numpy.core.einsumfunc._parse_einsum_input`
function in NumPy 1.14.
Returns
-------
input_strings : str
Parsed input strings
output_string : str
Parsed output string
operands : list of array_like
The operands to use in the numpy contraction
Examples
--------
The operand list is simplified to reduce printing:
>>> a = np.random.rand(4, 4)
>>> b = np.random.rand(4, 4, 4)
>>> _parse_einsum_input(('...a,...a->...', a, b))
('@a,@a', '@', [a, b])
>>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
('@a,@a', '@', [a, b])
"""
if not operands:
raise ValueError('No input operands')
if isinstance(operands[0], str):
subscripts = operands[0].replace(' ', '')
operands = operands[1:]
# Ensure all characters are valid
for s in subscripts:
if s in '.,->':
continue
if s not in einsum_symbols:
raise ValueError('Character %s is not a valid symbol.' % s)
# Check for proper "->"
if ('-' in subscripts) or ('>' in subscripts):
if any((
subscripts.count('-') > 1,
subscripts.count('>') > 1,
subscripts.count('->') != 1,
)):
raise ValueError('Subscripts can only contain one \'->\'.')
# Parse "..."
subscripts = subscripts.replace('...', '@')
if '.' in subscripts:
raise ValueError('Invalid Ellipses.')
else:
tmp_operands = list(operands)
operand_list = []
subscript_list = []
for p in range(len(operands) // 2):
operand_list.append(tmp_operands.pop(0))
subscript_list.append(tmp_operands.pop(0))
output_list = tmp_operands[-1] if len(tmp_operands) else None
operands = operand_list
subscripts = ''
last = len(subscript_list) - 1
for num, sub in enumerate(subscript_list):
for s in sub:
if s is Ellipsis:
subscripts += '@'
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError('For this input type lists must contain '
'either int or Ellipsis')
if num != last:
subscripts += ','
if output_list is not None:
subscripts += '->'
for s in output_list:
if s is Ellipsis:
subscripts += '@'
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError('For this input type lists must contain '
'either int or Ellipsis')
# Build output string if it does not exist
if '->' in subscripts:
input_subscripts, output_subscript = subscripts.split('->')
# Make sure output subscripts are in the input
for char in output_subscript:
if char not in input_subscripts:
raise ValueError(
'Output character %s did not appear in the input'
% ('...' if char == '@' else char))
else:
input_subscripts = subscripts
# Build output subscripts
tmp_subscripts = subscripts.replace(',', '')
output_subscript = ''
for s in sorted(set(tmp_subscripts)):
if s == '@' or tmp_subscripts.count(s) == 1:
output_subscript += s
# Make sure the number of operands is equal to the number of terms
if len(input_subscripts.split(',')) != len(operands):
raise ValueError('Number of einsum subscripts must be equal to the '
'number of operands.')
return input_subscripts, output_subscript, operands
| 11,517
| 32.289017
| 77
|
py
|
chainer
|
chainer-master/chainer/functions/math/cumprod.py
|
import six
import chainer
from chainer import backend
from chainer import function_node
from chainer.functions.array import flip
from chainer.utils import type_check
class Cumprod(function_node.FunctionNode):
"""Cumulative prod of array elements over a given axis."""
def __init__(self, axis):
if isinstance(axis, six.integer_types) or axis is None:
self.axis = axis
else:
raise TypeError('axis must be int or None')
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
if self.axis is not None:
if self.axis >= 0:
type_check.expect(self.axis < in_types[0].ndim)
else:
type_check.expect(-self.axis - 1 < in_types[0].ndim)
def forward(self, inputs):
self.retain_inputs((0,))
self.retain_outputs((0,))
x, = inputs
xp = backend.get_array_module(x)
return xp.cumprod(x, axis=self.axis),
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
xp = backend.get_array_module(x)
y, = self.get_retained_outputs()
gy, = grad_outputs
F = chainer.functions
axis = self.axis
if axis is None:
shape = x.shape
axis = 0
x = F.flatten(x)
else:
shape = None
if axis < 0:
axis += y.ndim
if y.shape[axis] <= 1:
gx = gy
else:
_, x = F.split_axis(x, (1,), axis)
gx = _flipcumprodsum(x, gy, axis)
y, ylast = F.split_axis(y, (-1,), axis)
gx *= F.concat([xp.ones_like(ylast.array), y], axis=axis)
if shape is not None:
gx = F.reshape(gx, shape)
return gx,
class Cumprodsum(function_node.FunctionNode):
def __init__(self, axis):
self.axis = axis
def forward(self, inputs):
self.retain_inputs((0,))
self.retain_outputs((0,))
xmul, xadd = inputs
xp = backend.get_array_module(xmul)
y = xp.empty_like(xadd)
axis = self.axis
expander = (slice(None),) * axis
cum = xp.zeros_like(xadd[expander + (0,)])
i = 0
while True:
ix = expander + (i,)
cum += xadd[ix]
y[ix] = cum
if i >= xmul.shape[axis]:
break
cum *= xmul[ix]
i += 1
return y,
def backward(self, indexes, grad_outputs):
F = chainer.functions
xmul, = self.get_retained_inputs()
y, = self.get_retained_outputs()
gy, = grad_outputs
axis = self.axis
gxadd = _flipcumprodsum(xmul, gy, axis)
_, gxmul = F.split_axis(gxadd, (1,), axis)
y, _ = F.split_axis(y, (-1,), axis)
gxmul *= y
return gxmul, gxadd
def _flipcumprodsum(xmul, xadd, axis):
z, = Cumprodsum(axis).apply((flip.flip(xmul, axis), flip.flip(xadd, axis)))
return flip.flip(z, axis)
def cumprod(x, axis=None):
"""Cumulative prod of array elements over a given axis.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Elements to calculate the cumulative prod.
axis (int or None):
Axis along which the cumulative prod is taken.
If it is not specified, the input is flattened.
Returns:
~chainer.Variable: Output variable.
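.. admonition:: Example
A small sketch; with ``axis=None`` the input is flattened before the
cumulative product is taken.
>>> x = np.array([1, 2, 3]).astype(np.float32)
>>> F.cumprod(x).array
array([1., 2., 6.], dtype=float32)
>>> F.cumprod(x.reshape(1, 3), axis=1).shape
(1, 3)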
"""
return Cumprod(axis).apply((x,))[0]
| 3,552
| 27.198413
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/maximum.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class Maximum(function_node.FunctionNode):
"""Element-wise maximum of input variables."""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x1', 'x2'))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
)
type_check.expect_broadcast_shapes(
in_types[0].shape, in_types[1].shape)
def forward_cpu(self, inputs):
# may broadcast
self.retain_inputs((0, 1))
x1, x2 = inputs
y = numpy.maximum(x1, x2)
return utils.force_array(y),
def forward_gpu(self, inputs):
# may broadcast
self.retain_inputs((0, 1))
x1, x2 = inputs
return cuda.cupy.maximum(x1, x2),
def backward(self, indexes, grad_outputs):
x1, x2 = self.get_retained_inputs()
return MaximumGrad(x1.data, x2.data).apply((grad_outputs[0],))
class MaximumGrad(function_node.FunctionNode):
def __init__(self, x1, x2):
self.cond = x1 >= x2
self.x1_shape = x1.shape
self.x2_shape = x2.shape
def forward_cpu(self, inputs):
gy, = inputs
gx1 = utils.force_array(numpy.where(self.cond, gy, gy.dtype.type(0)))
gx2 = utils.force_array(numpy.where(self.cond, gy.dtype.type(0), gy))
return (
utils.sum_to(gx1, self.x1_shape),
utils.sum_to(gx2, self.x2_shape))
def forward_gpu(self, inputs):
gy, = inputs
gx1, gx2 = cuda.elementwise(
'S cond, T gy', 'T gx1, T gx2',
'''
gx1 = cond ? gy : (T)0.0;
gx2 = cond ? (T)0.0 : gy;
''',
'maximum_bwd1')(self.cond, gy)
return (
utils.sum_to(gx1, self.x1_shape),
utils.sum_to(gx2, self.x2_shape))
def backward(self, indexes, grad_outputs):
return chainer.functions.where(
utils.force_array(self.cond), grad_outputs[0], grad_outputs[1]),
def maximum(x1, x2):
"""Element-wise maximum of input variables.
Args:
x1 (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variables to be compared.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
x2 (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variables to be compared.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
Returns:
~chainer.Variable: Output variable.
.. admonition:: Example
>>> x1 = np.arange(6).astype(np.float32)
>>> x1
array([0., 1., 2., 3., 4., 5.], dtype=float32)
>>> x2 = np.array([5, 4, 3, 2, 1, 0]).astype(np.float32)
>>> x2
array([5., 4., 3., 2., 1., 0.], dtype=float32)
>>> y = F.maximum(x1, x2)
>>> y.shape
(6,)
>>> y.array
array([5., 4., 3., 3., 4., 5.], dtype=float32)
"""
return Maximum().apply((x1, x2))[0]
| 3,106
| 28.875
| 77
|
py
|
chainer
|
chainer-master/chainer/functions/math/minmax.py
|
import numpy
import six
from chainer import backend
from chainer import function_node
import chainer.functions
import chainer.utils
from chainer.utils import type_check
import chainerx
class SelectorBase(function_node.FunctionNode):
"""Select an array element from a given axis or set of axes."""
def __init__(self, axis=None, keepdims=False):
self.keepdims = keepdims
if axis is None:
self.axis = None
elif isinstance(axis, six.integer_types):
self.axis = (axis,)
elif isinstance(axis, tuple) and all(
isinstance(a, six.integer_types) for a in axis):
if len(set(axis)) != len(axis):
raise ValueError('duplicate value in axis: ({})'.format(
', '.join(map(str, axis))))
self.axis = axis
else:
raise TypeError('None, int or tuple of int are required')
def _fwd(self, x, xp):
raise NotImplementedError('_fwd should be implemented in sub-class.')
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
if self.axis is not None:
for axis in self.axis:
if axis >= 0:
type_check.expect(
axis < in_types[0].ndim,
)
else:
type_check.expect(
-axis - 1 < in_types[0].ndim,
)
def forward(self, x):
self.retain_inputs((0,))
self.retain_outputs((0,))
xp = backend.get_array_module(*x)
return xp.asarray(self._fwd(x[0], xp)),
def backward(self, indexes, gy):
x = self.get_retained_inputs()[0]
y = self.get_retained_outputs()[0]
if self.axis is None:
axis = range(x.ndim)
else:
axis = [ax % x.ndim for ax in self.axis]
# Add broadcastable dimensions to y and gy
# for each one that was reduced in the forward operation
shape = [s if ax not in axis else 1 for ax, s in enumerate(x.shape)]
gy = gy[0].reshape(shape)
y = y.reshape(shape)
# Compute the gradient
cond = (x.data == y.data)
gy = chainer.functions.broadcast_to(gy, cond.shape)
return gy * cond,
class Max(SelectorBase):
def forward_chainerx(self, x):
return chainerx.amax(x[0], axis=self.axis, keepdims=self.keepdims),
def _fwd(self, x, xp):
return xp.amax(x, axis=self.axis, keepdims=self.keepdims)
class Min(SelectorBase):
def forward_chainerx(self, x):
return chainerx.amin(x[0], axis=self.axis, keepdims=self.keepdims),
def _fwd(self, x, xp):
return xp.amin(x, axis=self.axis, keepdims=self.keepdims)
class IndexSelectorBase(function_node.FunctionNode):
"""Select index of an array element from a given axis."""
def __init__(self, axis=None):
if axis is None:
self.axis = None
elif isinstance(axis, six.integer_types):
self.axis = axis
else:
raise TypeError('None or int are required')
def _fwd(self, x, xp):
raise NotImplementedError('_fwd should be implemented in sub-class.')
def check_type_forward(self, in_types):
type_check.expect(
in_types.size() == 1,
in_types[0].dtype.kind == 'f'
)
if self.axis is not None:
if self.axis >= 0:
type_check.expect(
self.axis < in_types[0].ndim,
)
else:
type_check.expect(
-self.axis - 1 < in_types[0].ndim,
)
def forward(self, x):
xp = backend.get_array_module(*x)
return xp.asarray(self._fwd(x[0], xp)),
def backward(self, indexes, grad_outputs):
return None,
class ArgMin(IndexSelectorBase):
def forward_chainerx(self, x):
return chainerx.argmin(x[0], axis=self.axis).astype(numpy.int32),
def _fwd(self, x, xp):
return xp.argmin(x, axis=self.axis).astype(numpy.int32)
class ArgMax(IndexSelectorBase):
def forward_chainerx(self, x):
return chainerx.argmax(x[0], axis=self.axis).astype(numpy.int32),
def _fwd(self, x, xp):
return xp.argmax(x, axis=self.axis).astype(numpy.int32)
def max(x, axis=None, keepdims=False):
"""Maximum of array elements over a given axis.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Array to be maximized.
axis (None, int, or tuple of int): Axis over which a max is performed.
The default (axis = None) is to perform a max over all the dimensions
of the input array.
keepdims (bool): If ``True``, the specified axes are retained as axes
of length one.
Returns:
~chainer.Variable: Output variable.
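.. admonition:: Example
A small sketch of reduction over all axes and over ``axis=1``.
>>> x = np.array([[1, 4, 3], [6, 5, 2]]).astype(np.float32)
>>> F.max(x)
variable(6.)
>>> F.max(x, axis=1).array
array([4., 6.], dtype=float32)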
"""
return Max(axis, keepdims).apply((x,))[0]
def min(x, axis=None, keepdims=False):
"""Minimum of array elements over a given axis.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Array to be minimized.
axis (None, int, or tuple of int): Axis over which a min is performed.
The default (axis = None) is to perform a min over all the dimensions
of the input array.
keepdims (bool): If ``True``, the specified axes are retained as axes
of length one.
Returns:
~chainer.Variable: Output variable.
"""
return Min(axis, keepdims).apply((x,))[0]
def argmax(x, axis=None):
"""Returns index which holds maximum of array elements over a given axis.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Array to find maximum elements.
axis (None or int): Axis over which a max is performed.
The default (axis = None) is to perform a max over all the dimensions
of the input array.
Returns:
~chainer.Variable: Output variable.
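.. admonition:: Example
A small sketch; the returned indices have dtype ``int32``.
>>> x = np.array([[1, 4, 3], [6, 5, 2]]).astype(np.float32)
>>> F.argmax(x, axis=1).array
array([1, 0], dtype=int32)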
"""
return ArgMax(axis).apply((x,))[0]
def argmin(x, axis=None):
"""Returns index which holds minimum of array elements over a given axis.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Array to find minimum elements.
axis (None or int): Axis over which a min is performed.
The default (axis = None) is to perform a min over all the dimensions
of the input array.
Returns:
~chainer.Variable: Output variable.
"""
return ArgMin(axis).apply((x,))[0]
| 6,399
| 29.331754
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/math/identity.py
|
from chainer import function_node
class Identity(function_node.FunctionNode):
"""Identity function."""
def forward(self, xs):
return xs
def backward(self, indexes, gys):
return gys
def identity(*inputs):
"""Just returns input variables."""
ret = Identity().apply(inputs)
return ret[0] if len(ret) == 1 else ret
| 358
| 17.894737
| 43
|
py
|
chainer
|
chainer-master/chainer/functions/math/erfcinv.py
|
try:
from scipy import special
available_cpu = True
except ImportError as e:
available_cpu = False
_import_error = e
import math
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
BACKWORDC = math.pi ** 0.5 / 2
class ErfcInv(function_node.FunctionNode):
@property
def label(self):
return 'erfcinv'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, x):
if not available_cpu:
raise ImportError('SciPy is not available. Forward computation'
' of erfcinv in CPU cannot be done. ' +
str(_import_error))
self.retain_outputs((0,))
return utils.force_array(special.erfcinv(x[0]), dtype=x[0].dtype),
def forward_gpu(self, x):
self.retain_outputs((0,))
return cuda.elementwise(
'T x', 'T y',
'y = erfcinv(x)',
'elementwise_erfcinv',
)(x[0]),
def backward(self, indexes, gy):
y, = self.get_retained_outputs()
return -BACKWORDC * chainer.functions.exp(y ** 2) * gy[0],
def erfcinv(x):
"""Elementwise inverse function of complementary error function.
.. note::
Forward computation in CPU cannot be done if
`SciPy <https://www.scipy.org/>`_ is not available.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return ErfcInv().apply((x,))[0]
| 1,699
| 25.5625
| 75
|
py
|
chainer
|
chainer-master/chainer/functions/math/lgamma.py
|
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
_lgamma_cpu = None
class LGamma(function_node.FunctionNode):
@property
def label(self):
return 'lgamma'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, x):
global _lgamma_cpu
if _lgamma_cpu is None:
try:
from scipy import special
_lgamma_cpu = special.gammaln
except ImportError:
raise ImportError('SciPy is not available. Forward computation'
' of lgamma can not be done.')
self.retain_inputs((0,))
return utils.force_array(_lgamma_cpu(x[0]), dtype=x[0].dtype),
def forward_gpu(self, x):
self.retain_inputs((0,))
return utils.force_array(
cuda.cupyx.scipy.special.gammaln(x[0]), dtype=x[0].dtype),
def backward(self, indexes, gy):
z = self.get_retained_inputs()[0]
return chainer.functions.digamma(z) * gy[0],
def lgamma(x):
"""logarithm of gamma function.
.. note::
Forward computation in CPU cannot be done if
`SciPy <https://www.scipy.org/>`_ is not available.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return LGamma().apply((x,))[0]
| 1,562
| 26.910714
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/floor.py
|
import chainer
from chainer import backend
from chainer import utils
def floor(x):
"""Elementwise floor function.
.. math::
y_i = \\lfloor x_i \\rfloor
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
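.. admonition:: Example
A small sketch; note that the output does not backpropagate a gradient
to ``x`` because the operation works directly on the underlying array.
>>> x = np.array([-1.5, 0.2, 2.9]).astype(np.float32)
>>> F.floor(x).array
array([-2.,  0.,  2.], dtype=float32)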
"""
if isinstance(x, chainer.variable.Variable):
x = x.array
xp = backend.get_array_module(x)
return chainer.as_variable(utils.force_array(xp.floor(x), x.dtype))
| 499
| 21.727273
| 73
|
py
|
chainer
|
chainer-master/chainer/functions/math/fft.py
|
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class FFT(function_node.FunctionNode):
"""Fast Fourier transform."""
def __init__(self, method):
self._method = method
def check_type_forward(self, in_types):
type_check._argname(in_types, ('real', 'imag'))
r_type, i_type = in_types
type_check.expect(
r_type.dtype.kind == 'f',
r_type.ndim > 0,
r_type.shape == i_type.shape,
r_type.dtype == i_type.dtype,
)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
real, imag = inputs
x = real + imag * 1j
y = getattr(xp.fft, self._method)(x)
real_y = y.real.astype(real.dtype, copy=False)
imag_y = y.imag.astype(imag.dtype, copy=False)
return real_y, imag_y
def backward(self, inputs, grads):
gr, gi = grads
xp = backend.get_array_module(*grads)
if gr is None:
gr = xp.zeros_like(gi.data)
if gi is None:
gi = xp.zeros_like(gr.data)
gxi, gxr = FFT(self._method).apply((gi, gr))
return gxr, gxi
def fft(x):
"""Fast Fourier transform.
Args:
x (tuple): ``(real, imag)`` where ``real`` is a
:class:`~chainer.Variable` or an :ref:`ndarray` storing the real
part and ``imag`` is a :class:`~chainer.Variable` or an
:ref:`ndarray` storing the imaginary part.
Returns:
tuple: Returns ``(ry, iy)`` where ``ry`` is the real part of
the result and ``iy`` is the imaginary part of the result.
.. note::
Currently this function supports a tuple as input. It will support
complex numbers directly in the future.
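.. admonition:: Example
A minimal shape-level sketch; the real and imaginary parts are passed
and returned as separate arrays.
>>> real = np.ones((2, 4), dtype=np.float32)
>>> imag = np.zeros((2, 4), dtype=np.float32)
>>> ry, iy = F.fft((real, imag))
>>> ry.shape, iy.shape
((2, 4), (2, 4))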
"""
real, imag = x
return FFT('fft').apply((real, imag))
def ifft(x):
"""Inverse fast Fourier transform.
Args:
x (tuple): ``(real, imag)`` where ``real`` is a
:class:`~chainer.Variable` or an :ref:`ndarray` storing the real
part and ``imag`` is a :class:`~chainer.Variable` or an
:ref:`ndarray` storing the imaginary part.
Returns:
tuple: Returns ``(ry, iy)`` where ``ry`` is the real part of
the result and ``iy`` is the imaginary part of the result.
.. note::
Currently this function supports a tuple as input. It will support
complex numbers directly in the future.
"""
real, imag = x
return FFT('ifft').apply((real, imag))
| 2,544
| 28.941176
| 76
|
py
|
chainer
|
chainer-master/chainer/functions/math/average.py
|
import six
import chainer
from chainer import function_node
from chainer.functions.array import broadcast
from chainer.functions.array import reshape
from chainer.functions.math import sum as sum_mod
from chainer import utils
from chainer.utils import type_check
class Mean(function_node.FunctionNode):
"""Mean of array elements over a given axis."""
def __init__(self, axis, keepdims):
if axis is None:
self.axis = None
elif isinstance(axis, six.integer_types):
self.axis = (axis,)
elif isinstance(axis, tuple) and all(
isinstance(a, six.integer_types) for a in axis):
if len(set(axis)) != len(axis):
raise ValueError('duplicate value in axis: ({})'.format(
', '.join(map(str, axis))))
self.axis = axis
else:
raise TypeError('None, int or tuple of int are required')
self.keepdims = keepdims
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
if self.axis is not None:
for axis in self.axis:
if axis >= 0:
type_check.expect(
axis < in_types[0].ndim,
)
else:
type_check.expect(
-axis - 1 < in_types[0].ndim,
)
# TODO(kataoka): override `forward_chainerx` if `chainerx.mean` does not
# overflow for large float16 inputs
def forward(self, inputs):
x, = inputs
if self.axis is None:
self.multiplier = 1.0 / x.size
else:
divider = 1
for axis in self.axis:
divider *= x.shape[axis]
self.multiplier = 1.0 / divider
ret = utils.force_array(
x.mean(axis=self.axis, keepdims=self.keepdims))
return ret,
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
gy *= self.multiplier
ndim = len(self.inputs[0].shape)
if not (ndim == 0 or self.axis is None or self.keepdims):
actual_axis = [
axis if axis >= 0 else axis + ndim
for axis in self.axis]
shape = list(gy.shape)
for axis in sorted(actual_axis):
shape.insert(axis, 1)
gy = chainer.functions.reshape(gy, shape)
return chainer.functions.broadcast_to(gy, self.inputs[0].shape),
# TODO(kataoka): consider making the function public.
def _mean(x, axis, keepdims=False):
y, = Mean(axis, keepdims).apply((x,))
return y
def average(x, axis=None, weights=None, keepdims=False):
"""Calculate weighted average of array elements over a given axis.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Elements to sum.
axis (None or int or tuple of int): Axis along which the average is performed.
With the default (axis = None) it performs a mean over all the
dimensions of the input array.
weights (None or :class:`~chainer.Variable` or :ref:`ndarray`):
An array holding weights to calculate weighted average.
If it is ``None``, all weights are assumed to be one.
When ``axis`` is ``None``, ``weights`` must have the same shape
of ``x``. And when ``axis`` is ``int``, it must be 1-D array
satisfying ``weights.shape == (x.shape[axis],)``.
keepdims (bool): If ``True``, the specified axes are retained as axes
of length one.
Returns:
~chainer.Variable: Output variable.
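.. admonition:: Example
A small sketch of the unweighted and the weighted forms; here
``weights`` is 1-D and matches ``x.shape[axis]``.
>>> x = np.arange(6).reshape(2, 3).astype(np.float32)
>>> F.average(x)
variable(2.5)
>>> w = np.array([1, 1, 2]).astype(np.float32)
>>> F.average(x, axis=1, weights=w).shape
(2,)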
"""
if weights is None:
return _mean(x, axis, keepdims)
if axis is None:
pass
elif isinstance(axis, tuple):
axis = [a + x.ndim if a < 0 else a for a in axis]
axis.sort()
for a, b in six.moves.zip(axis, axis[1:]):
if a == b:
raise ValueError('duplicate value in \'axis\'')
axis = tuple(axis)
else:
if axis < 0:
axis += x.ndim
axis = (axis,)
if axis is not None and len(axis) > 1:
raise ValueError(
'tuple axis is not supported when weights is given')
divider = sum_mod.sum(weights)
if axis is not None:
w_shape = [d if i in axis else 1 for i, d in enumerate(x.shape)]
weights = broadcast.broadcast_to(
reshape.reshape(weights, w_shape), x.shape)
x = x * weights
x_sum = sum_mod.sum(x, axis, keepdims)
divider = broadcast.broadcast_to(divider, x_sum.shape)
return x_sum / divider
| 4,665
| 33.820896
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/hyperbolic.py
|
from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class Cosh(function_node.FunctionNode):
@property
def label(self):
return 'cosh'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_inputs((0,))
xp = backend.get_array_module(*x)
return utils.force_array(xp.cosh(x[0])),
def backward(self, indexes, gy):
x = self.get_retained_inputs()
gx = sinh(x[0])
gx *= gy[0]
return gx,
def cosh(x):
"""Elementwise hyperbolic cosine function.
.. math::
y_i = \\cosh x_i.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
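.. admonition:: Example
A small sketch; :math:`\\cosh 0 = 1`.
>>> x = np.array([0.0]).astype(np.float32)
>>> F.cosh(x).array
array([1.], dtype=float32)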
"""
return Cosh().apply((x,))[0]
class Sinh(function_node.FunctionNode):
@property
def label(self):
return 'sinh'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_inputs((0,))
xp = backend.get_array_module(*x)
return utils.force_array(xp.sinh(x[0])),
def backward(self, x, gy):
x = self.get_retained_inputs()
gx = cosh(x[0])
gx *= gy[0]
return gx,
def sinh(x):
"""Elementwise hyperbolic sine function.
.. math::
y_i = \\sinh x_i.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Sinh().apply((x,))[0]
| 1,767
| 21.379747
| 73
|
py
|
chainer
|
chainer-master/chainer/functions/math/batch_l2_norm_squared.py
|
import six
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.math import sum as _sum
from chainer.utils import type_check
class BatchL2NormSquared(function_node.FunctionNode):
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type, = in_types
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim >= 2,
)
def forward_cpu(self, inputs):
self.retain_inputs((0,))
x = inputs[0].reshape(len(inputs[0]), -1)
return (x * x).sum(axis=1),
def forward_gpu(self, inputs):
self.retain_inputs((0,))
x = inputs[0].reshape(len(inputs[0]), -1)
l2normsquared_kernel = cuda.reduce(
'T x', 'T y', 'x * x', 'a + b', 'y = a', '0', 'l2normsquared'
)
return l2normsquared_kernel(x, axis=1),
def backward(self, indexes, gy):
x = self.get_retained_inputs()
return BatchL2NormSquaredGrad().apply((x[0], gy[0]))
class BatchL2NormSquaredGrad(function_node.FunctionNode):
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
x, gy0 = inputs
gy0 = gy0.reshape(-1, *((1,) * (x.ndim - 1)))
gx = 2 * x * gy0
return gx,
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
x, gy0 = inputs
gy0 = gy0.reshape(-1, *((1,) * (x.ndim - 1)))
kernel = cuda.elementwise(
'T x, T gy', 'T gx', 'gx = 2 * x * gy',
'l2normsquared_bwd')
gx = kernel(x, gy0)
return gx,
def backward(self, indexes, grad_outputs):
x, gy0 = self.get_retained_inputs()
gy0 = gy0.reshape(-1, *((1,) * (x.ndim - 1)))
gy0 = chainer.functions.broadcast_to(gy0, x.shape)
ggx2 = 2 * grad_outputs[0]
gx = ggx2 * gy0
ggy0 = ggx2 * x
return gx, _sum.sum(ggy0, axis=tuple(six.moves.range(1, ggy0.ndim)))
def batch_l2_norm_squared(x):
"""L2 norm (a.k.a.\\ Euclidean norm) squared.
This function implements the square of L2 norm on a vector. No reduction
along batch axis is done.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
The first dimension is assumed to be the *minibatch dimension*.
If ``x`` has more than two dimensions all but the first dimension
are flattened to one dimension.
Returns:
        ~chainer.Variable: One-dimensional output variable of shape ``(N,)``,
        where ``N`` is the size of the minibatch.
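    .. admonition:: Example
        An illustrative sketch; the minibatch of two 2-dimensional vectors
        below is chosen arbitrarily.
        >>> x = np.array([[1., 2.], [3., 4.]], dtype=np.float32)
        >>> y = F.batch_l2_norm_squared(x)
        >>> y.shape
        (2,)
        >>> y.array
        array([ 5., 25.], dtype=float32)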
"""
return BatchL2NormSquared().apply((x,))[0]
| 2,585
| 29.423529
| 77
|
py
|
chainer
|
chainer-master/chainer/functions/math/cholesky.py
|
import chainer
from chainer import function_node
from chainer.utils import type_check
import chainerx
class Cholesky(function_node.FunctionNode):
@property
def label(self):
return 'cholesky'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('a', ))
a_type, = in_types
type_check.expect(
a_type.dtype.kind == 'f',
a_type.ndim == 2,
)
def forward(self, inputs):
a, = inputs
self.retain_outputs((0,))
xp = chainer.backend.get_array_module(a)
return xp.linalg.cholesky(a),
def forward_chainerx(self, inputs):
return chainerx.linalg.cholesky(*inputs),
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
xp = chainer.backend.get_array_module(gy)
y, = self.get_retained_outputs()
n = y.shape[0]
dtype = y.dtype
F = chainer.functions
y_inv = F.inv(y)
mask = xp.tri(n, dtype=dtype) - 0.5 * xp.eye(n, dtype=dtype)
phi = mask * F.matmul(y, gy, transa=True)
s = F.matmul(F.matmul(y_inv, phi, transa=True), y_inv)
gx = 0.5 * (s + s.T)
return gx,
def cholesky(a):
"""Cholesky Decomposition
Args:
a (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
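    .. admonition:: Example
        A minimal sketch, assuming the function is exposed as
        ``F.cholesky``; the diagonal positive-definite input is chosen so
        that the result is easy to verify by hand.
        >>> a = np.array([[4., 0.], [0., 9.]], dtype=np.float32)
        >>> l = F.cholesky(a)
        >>> l.array
        array([[2., 0.],
               [0., 3.]], dtype=float32)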
"""
return Cholesky().apply((a,))[0]
| 1,435
| 24.192982
| 73
|
py
|
chainer
|
chainer-master/chainer/functions/math/clip.py
|
import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class Clip(function_node.FunctionNode):
"""Clips (limits) elements of input variable."""
def __init__(self, x_min, x_max):
if x_min is None and x_max is None:
raise ValueError('must set either max or min')
# x_min must be less than x_max.
if (x_min is not None) and (x_max is not None) and (x_min >= x_max):
raise ValueError('x_min must be less than x_max.')
self.x_min = x_min
self.x_max = x_max
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type, = in_types
type_check.expect(x_type.dtype.kind == 'f')
def forward_cpu(self, inputs):
self.retain_inputs((0,))
x, = inputs
return utils.force_array(
numpy.clip(x, self.x_min, self.x_max),
x.dtype),
def forward_gpu(self, x):
self.retain_inputs((0,))
return cuda.cupy.clip(x[0], self.x_min, self.x_max),
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
return ClipGrad(x.data, self.x_min, self.x_max).apply(grad_outputs)
class ClipGrad(function_node.FunctionNode):
def __init__(self, x, x_min, x_max):
if x_min is None and x_max is None:
raise ValueError('must set either max or min')
self.cond = True
if x_min is not None:
self.cond *= (x_min <= x)
if x_max is not None:
self.cond *= (x <= x_max)
def check_type_forward(self, in_types):
type_check._argname(in_types, ('gy',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, inputs):
gy, = inputs
gx = utils.force_array(gy * self.cond, gy.dtype)
return gx,
def forward_gpu(self, inputs):
gx = cuda.elementwise(
'T gy, bool cond', 'T gx',
'gx = cond ? gy : T(0)',
'clip_bwd')(inputs[0], self.cond)
return gx,
def backward(self, indexes, grad_outputs):
return grad_outputs[0] * self.cond,
def clip(x, x_min, x_max):
"""Clips (limits) elements of input variable.
    Given an interval ``[x_min, x_max]``, elements outside the interval are
clipped to the interval edges.
Its gradients at ``x_min`` and ``x_max`` are regarded as 1.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable to be clipped.
x_min (float): Minimum value.
x_max (float): Maximum value.
Returns:
~chainer.Variable: Output variable.
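    .. admonition:: Example
        A small sketch with arbitrary values clipped to ``[-1, 1]``:
        >>> x = np.array([-2., -1., 0., 1., 2.], dtype=np.float32)
        >>> y = F.clip(x, -1., 1.)
        >>> y.array
        array([-1., -1.,  0.,  1.,  1.], dtype=float32)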
"""
return Clip(x_min, x_max).apply((x,))[0]
| 2,762
| 28.393617
| 76
|
py
|
chainer
|
chainer-master/chainer/functions/math/erfc.py
|
import math
import warnings
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
_erfc_cpu = None
class Erfc(function_node.FunctionNode):
@property
def label(self):
return 'erfc'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, x):
global _erfc_cpu
if _erfc_cpu is None:
try:
from scipy import special
_erfc_cpu = special.erfc
except ImportError:
warnings.warn(
'SciPy is not available. Forward computation of erfc in'
' CPU can be slow without SciPy.',
chainer.warnings.PerformanceWarning)
_erfc_cpu = numpy.vectorize(math.erfc)
self.retain_inputs((0,))
return utils.force_array(_erfc_cpu(x[0]), dtype=x[0].dtype),
def forward_gpu(self, x):
self.retain_inputs((0,))
return cuda.elementwise(
'T x', 'T y',
'y = erfc(x)',
'elementwise_erfc',
)(x[0]),
def backward(self, indexes, gy):
x = self.get_retained_inputs()[0]
return -2 / numpy.pi ** 0.5 * chainer.functions.exp(-x ** 2) * gy[0],
def erfc(x):
"""Elementwise complementary error function.
.. note::
Forward computation in CPU can be slow if
`SciPy <https://www.scipy.org/>`_ is not available.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
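    .. admonition:: Example
        A minimal sketch (``erfc(0) = 1`` exactly):
        >>> x = np.array([0.], dtype=np.float32)
        >>> y = F.erfc(x)
        >>> y.array
        array([1.], dtype=float32)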
"""
return Erfc().apply((x,))[0]
| 1,779
| 25.176471
| 77
|
py
|
chainer
|
chainer-master/chainer/functions/math/digamma.py
|
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
_digamma_cpu = None
class DiGamma(function_node.FunctionNode):
@property
def label(self):
return 'digamma'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, x):
global _digamma_cpu
if _digamma_cpu is None:
try:
from scipy import special
_digamma_cpu = special.digamma
except ImportError:
raise ImportError('SciPy is not available. Forward computation'
' of digamma can not be done.')
self.retain_inputs((0,))
return utils.force_array(_digamma_cpu(x[0]), dtype=x[0].dtype),
def forward_gpu(self, x):
self.retain_inputs((0,))
return utils.force_array(
cuda.cupyx.scipy.special.digamma(x[0]), dtype=x[0].dtype),
def backward(self, indexes, gy):
z = self.get_retained_inputs()[0]
xp = backend.get_array_module(*gy)
return chainer.functions.polygamma(xp.array(1), z) * gy[0],
def digamma(x):
"""Digamma function.
.. note::
Forward computation in CPU can not be done if
`SciPy <https://www.scipy.org/>`_ is not available.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return DiGamma().apply((x,))[0]
| 1,648
| 26.949153
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/scale.py
|
import chainer
from chainer.functions.array import broadcast
from chainer.functions.array import reshape
def scale(x, y, axis=1):
"""Elementwise product with broadcasting.
    Computes an elementwise product of two input variables, with the shape of
the latter variable broadcasted to match the shape of the former. ``axis``
is the first axis of the first variable along which the second variable is
applied.
The term "broadcasting" here comes from Caffe's scale layer so the
"broadcasting" with the following arguments::
x : 100 x 3 x 40 x 5 x 6
y : 3 x 40
axis : 1
is equivalent to the following numpy broadcasting::
x : 100 x 3 x 40 x 5 x 6
y : (1 x) 3 x 40 x 1 x 1
Note that the axis of ``x`` to which we apply ``y`` is specified by the
argument ``axis``, whose meaning is different from numpy's ``axis``.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable to be scaled.
y (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable to scale, broadcasted.
axis (int): The first axis of ``x`` along which ``y`` is applied.
Returns:
~chainer.Variable: Output variable.
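    .. admonition:: Example
        A shape-only sketch of the broadcasting described above, with
        arbitrarily chosen sizes:
        >>> x = np.ones((2, 3, 4), dtype=np.float32)
        >>> y = np.full((3,), 2., dtype=np.float32)
        >>> z = F.scale(x, y, axis=1)
        >>> z.shape
        (2, 3, 4)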
"""
x_shape = x.shape
y_shape = y.shape
if chainer.is_debug():
assert x_shape[axis:axis + len(y_shape)] == y_shape
y1_shape = tuple([1] * axis + list(y_shape) +
[1] * (len(x_shape) - axis - len(y_shape)))
y1 = reshape.reshape(y, y1_shape)
y2 = broadcast.broadcast_to(y1, x_shape)
return x * y2
| 1,607
| 31.816327
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/math/arctanh.py
|
from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class Arctanh(function_node.FunctionNode):
"""Elementwise inverse hyperbolic tangent function."""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type, = in_types
type_check.expect(x_type.dtype.kind == 'f')
def forward(self, inputs):
self.retain_inputs((0,))
x, = inputs
xp = backend.get_array_module(x)
y = xp.arctanh(x)
return utils.force_array(y, dtype=x.dtype),
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
gy, = grad_outputs
gx = 1. / (1 - x ** 2) * gy
return gx,
def arctanh(x):
"""Elementwise inverse hyperbolic tangent function.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
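    .. admonition:: Example
        A minimal sketch (``arctanh(0) = 0`` exactly):
        >>> x = np.array([0.], dtype=np.float32)
        >>> F.arctanh(x).array
        array([0.], dtype=float32)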
"""
return Arctanh().apply((x,))[0]
| 1,037
| 23.714286
| 73
|
py
|
chainer
|
chainer-master/chainer/functions/math/sum.py
|
import numpy
import six
import chainer
from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
import chainerx
class Sum(function_node.FunctionNode):
"""Sum of array elements over a given axis."""
keepdims = False
def __init__(self, axis=None, keepdims=False):
if axis is None:
self.axis = None
elif isinstance(axis, six.integer_types):
self.axis = (axis,)
elif isinstance(axis, tuple) and all(
isinstance(a, six.integer_types) for a in axis):
if len(set(axis)) != len(axis):
raise ValueError('duplicate value in axis: ({})'.format(
', '.join(map(str, axis))))
self.axis = axis
else:
raise TypeError('None, int or tuple of int are required')
self.keepdims = keepdims
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
if self.axis is not None:
for axis in self.axis:
if axis >= 0:
type_check.expect(
axis < in_types[0].ndim,
)
else:
type_check.expect(
-axis - 1 < in_types[0].ndim,
)
def forward_chainerx(self, inputs):
x, = inputs
return chainerx.sum(x, axis=self.axis, keepdims=self.keepdims),
def forward(self, inputs):
x, = inputs
ret = x.sum(axis=self.axis, keepdims=self.keepdims)
if backend.get_array_module(x) is numpy:
ret = numpy.asarray(ret)
return ret,
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
ndim = len(self.inputs[0].shape)
if not (ndim == 0 or self.axis is None or self.keepdims):
actual_axis = [
axis if axis >= 0 else axis + ndim
for axis in self.axis]
shape = list(gy.shape)
for axis in sorted(actual_axis):
shape.insert(axis, 1)
gy = chainer.functions.reshape(gy, shape)
return chainer.functions.broadcast_to(gy, self.inputs[0].shape),
def sum(x, axis=None, keepdims=False):
"""Sum of array elements over a given axis.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Elements to sum.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
axis (None, int, or tuple of int): Axis along which a sum is performed.
            The default (``axis=None``) is to perform a sum over all the dimensions
of the input array.
        keepdims (bool): If ``True``, the specified axes are retained as axes
of length one.
Returns:
~chainer.Variable: Output variable.
.. admonition:: Example
>>> x = np.arange(6).reshape(2,3).astype(np.float32)
>>> x
array([[0., 1., 2.],
[3., 4., 5.]], dtype=float32)
>>> y = F.sum(x)
>>> y.shape
()
>>> y.array
array(15., dtype=float32)
>>> y = F.sum(x, axis=1)
>>> y.shape
(2,)
>>> y.array
array([ 3., 12.], dtype=float32)
>>> y = F.sum(x, keepdims=True)
>>> y.shape
(1, 1)
>>> y.array
array([[15.]], dtype=float32)
"""
y, = Sum(axis, keepdims).apply((x,))
return y
class SumTo(function_node.FunctionNode):
"""Sum axes to output an array of a given shape."""
def __init__(self, shape):
self._shape = shape
def forward(self, inputs):
x, = inputs
return utils.sum_to(x, self._shape),
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
x_node, = self.inputs
return chainer.functions.broadcast_to(gy, x_node.shape),
def sum_to(x, shape):
"""Sum elements along axes to output an array of a given shape.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
shape (tuple of int): The target shape.
Returns:
~chainer.Variable: Output variable of shape ``shape``.
.. admonition:: Example
>>> x = np.array([[1., 2., 3.], [4., 5., 6.]])
>>> x
array([[1., 2., 3.],
[4., 5., 6.]])
>>> y = F.sum_to(x, (1, 3))
>>> y
variable([[5., 7., 9.]])
>>> z = F.sum_to(x, (2, 1))
>>> z
variable([[ 6.],
[15.]])
"""
if x.shape == shape:
return chainer.as_variable(x)
y, = SumTo(shape).apply((x,))
return y
| 4,692
| 28.149068
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/trigonometric.py
|
import numpy
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
import chainer.functions
from chainer import utils
from chainer.utils import type_check
class Sin(function_node.FunctionNode):
@property
def label(self):
return 'sin'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_inputs((0,))
xp = backend.get_array_module(*x)
return utils.force_array(xp.sin(x[0])),
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
return SinGrad().apply((x, grad_outputs[0]))
class SinGrad(function_node.FunctionNode):
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
gx = utils.force_array(numpy.cos(x))
gx *= gy
return gx,
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
gx = cuda.elementwise(
'T x, T gy', 'T gx', 'gx = cos(x) * gy', 'sin_bwd'
)(x, gy)
return gx,
def backward(self, indexes, grad_outputs):
x, gy = self.get_retained_inputs()
ret = []
if 0 in indexes:
ret.append(- sin(x) * gy * grad_outputs[0])
if 1 in indexes:
ret.append(cos(x) * grad_outputs[0])
return ret
def sin(x):
"""Elementwise sin function.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Sin().apply((x,))[0]
class Cos(function_node.FunctionNode):
@property
def label(self):
return 'cos'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_inputs((0,))
xp = backend.get_array_module(*x)
return utils.force_array(xp.cos(x[0])),
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
return CosGrad().apply((x, grad_outputs[0]))
class CosGrad(function_node.FunctionNode):
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
gx = utils.force_array(numpy.sin(x))
numpy.negative(gx, out=gx)
gx *= gy
return gx,
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
gx = cuda.elementwise(
'T x, T gy', 'T gx', 'gx = -sin(x) * gy', 'cos_bwd'
)(x, gy)
return gx,
def backward(self, indexes, grad_outputs):
x, gy = self.get_retained_inputs()
ret = []
if 0 in indexes:
ret.append(- cos(x) * gy * grad_outputs[0])
if 1 in indexes:
ret.append(- sin(x) * grad_outputs[0])
return ret
def cos(x):
"""Elementwise cos function.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Cos().apply((x,))[0]
class Tan(function_node.FunctionNode):
@property
def label(self):
return 'tan'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_inputs((0,))
xp = backend.get_array_module(*x)
return utils.force_array(xp.tan(x[0])),
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
return grad_outputs[0] / chainer.functions.square(cos(x)),
def tan(x):
"""Elementwise tan function.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Tan().apply((x,))[0]
class Arcsin(function_node.FunctionNode):
@property
def label(self):
return 'arcsin'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_inputs((0,))
xp = backend.get_array_module(*x)
return utils.force_array(xp.arcsin(x[0])),
def backward(self, indexes, grad_outputs):
x = self.get_retained_inputs()
return ArcsinGrad().apply((x[0], grad_outputs[0]))
class ArcsinGrad(function_node.FunctionNode):
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
gx = utils.force_array(numpy.square(x))
numpy.negative(gx, out=gx)
gx += 1
numpy.sqrt(gx, out=gx)
numpy.reciprocal(gx, out=gx)
gx *= gy
return gx,
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
gx = cuda.elementwise(
'T x, T gy', 'T gx',
'gx = rsqrt((T)1.0 - x * x) * gy',
'arcsin_bwd'
)(x, gy)
return gx,
def backward(self, indexes, grad_outputs):
x, gy = self.get_retained_inputs()
ret = []
if 0 in indexes:
ret.append(grad_outputs[0] * gy * x / ((1 - x ** 2) ** 1.5))
if 1 in indexes:
ret.append(ArcsinGrad().apply((x, grad_outputs[0]))[0])
return ret
def arcsin(x):
"""Elementwise arcsine function.
.. math::
y_i = \\arcsin x_i.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Arcsin().apply((x,))[0]
class Arccos(function_node.FunctionNode):
@property
def label(self):
return 'arccos'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_inputs((0,))
xp = backend.get_array_module(*x)
return utils.force_array(xp.arccos(x[0])),
def backward(self, indexes, grad_outputs):
x = self.get_retained_inputs()
return ArccosGrad().apply((x[0], grad_outputs[0]))
class ArccosGrad(function_node.FunctionNode):
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
gx = utils.force_array(numpy.square(x))
numpy.negative(gx, out=gx)
gx += 1
numpy.sqrt(gx, out=gx)
numpy.reciprocal(gx, out=gx)
numpy.negative(gx, out=gx)
gx *= gy
return gx,
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
gx = cuda.elementwise(
'T x, T gy', 'T gx',
'gx = -rsqrt((T)1.0 - x * x) * gy',
'arccos_bwd'
)(x, gy)
return gx,
def backward(self, indexes, grad_outputs):
x, gy = self.get_retained_inputs()
ret = []
if 0 in indexes:
ret.append(- grad_outputs[0] * (gy * x) / ((1 - x ** 2) ** 1.5))
if 1 in indexes:
ret.append(ArccosGrad().apply((x, grad_outputs[0]))[0])
return ret
def arccos(x):
"""Elementwise arccosine function.
.. math::
y_i = \\arccos x_i.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Arccos().apply((x,))[0]
class Arctan(function_node.FunctionNode):
@property
def label(self):
return 'arctan'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_inputs((0,))
xp = backend.get_array_module(*x)
return utils.force_array(xp.arctan(x[0])),
def backward(self, indexes, grad_outputs):
x = self.get_retained_inputs()
return ArctanGrad().apply((x[0], grad_outputs[0]))
class ArctanGrad(function_node.FunctionNode):
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
gx = utils.force_array(numpy.square(x))
gx += 1
numpy.reciprocal(gx, out=gx)
gx *= gy
return gx,
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
gx = cuda.elementwise(
'T x, T gy', 'T gx',
'gx = (T)1.0 / ((T)1.0 + x * x) * gy',
'arctan_bwd'
)(x, gy)
return gx,
def backward(self, indexes, grad_outputs):
x, gy = self.get_retained_inputs()
ret = []
x_sq = chainer.functions.square(x)
if 0 in indexes:
ret.append(
-2 * gy * x * grad_outputs[0] /
(chainer.functions.square(x_sq) + 2 * x_sq + 1))
if 1 in indexes:
ret.append(grad_outputs[0] / (x_sq + 1))
return ret
def arctan(x):
"""Elementwise arctangent function.
.. math::
y_i = \\arctan x_i.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Arctan().apply((x,))[0]
class Arctan2(function_node.FunctionNode):
@property
def label(self):
return 'arctan2'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x1', 'x2'))
type_check.expect(in_types[0].dtype.kind == 'f')
type_check.expect(in_types[1].dtype.kind == 'f')
def forward(self, inputs):
self.retain_inputs((0, 1))
xp = backend.get_array_module(*inputs)
x1, x2 = inputs
return utils.force_array(xp.arctan2(x1, x2)),
def backward(self, indexes, grad_outputs):
x1, x2 = self.get_retained_inputs()
return Arctan2Grad().apply((x1, x2, grad_outputs[0]))
class Arctan2Grad(function_node.FunctionNode):
def forward_cpu(self, inputs):
self.retain_inputs((0, 1, 2))
x1, x2, gy = inputs
sqnorm = x1 ** 2 + x2 ** 2
gx1 = utils.force_array(x2 / sqnorm * gy)
gx2 = utils.force_array(-x1 / sqnorm * gy)
return gx1, gx2
def forward_gpu(self, inputs):
self.retain_inputs((0, 1, 2))
x1, x2, gy = inputs
gx1, gx2 = cuda.elementwise(
'T x1, T x2, T gy',
'T gx1, T gx2',
('T sqnorm = x1 * x1 + x2 * x2;'
'gx1 = x2 / sqnorm * gy;'
'gx2 = -x1 / sqnorm * gy;'),
'arctan2_bwd'
)(x1, x2, gy)
return gx1, gx2
def backward(self, indexes, grad_outputs):
x1, x2, gy = self.get_retained_inputs()
ggx1, ggx2 = grad_outputs
x1_sq = x1 ** 2
x2_sq = x2 ** 2
sqnorm = x1_sq + x2_sq
ret = []
if 0 in indexes:
ret.append(
(- ggx1 * 2 * x1 * x2 + ggx2 * (x1_sq - x2_sq)) * gy /
sqnorm ** 2)
if 1 in indexes:
ret.append(
(ggx1 * (x1_sq - x2_sq) + ggx2 * (2 * x1 * x2)) * gy /
sqnorm ** 2)
if 2 in indexes:
ret.append((ggx1 * x2 - ggx2 * x1) / sqnorm)
return ret
def arctan2(x1, x2):
"""Elementwise arctangent function with two arguments.
Args:
x1 (:class:`~chainer.Variable` or :ref:`ndarray`):
Y-coordinates.
x2 (:class:`~chainer.Variable` or :ref:`ndarray`):
X-coordinates.
Returns:
~chainer.Variable: Angles in radians, in the range [-pi, pi].
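    .. admonition:: Example
        A shape-only sketch with arbitrary coordinates:
        >>> x1 = np.array([1., 1., -1.], dtype=np.float32)
        >>> x2 = np.array([1., -1., 1.], dtype=np.float32)
        >>> y = F.arctan2(x1, x2)
        >>> y.shape
        (3,)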
"""
return Arctan2().apply((x1, x2))[0]
| 11,758
| 25.306488
| 76
|
py
|
chainer
|
chainer-master/chainer/functions/math/cumsum.py
|
import six
from chainer import backend
from chainer import function_node
from chainer.functions.array import flip
from chainer.utils import type_check
class Cumsum(function_node.FunctionNode):
"""Cumulative sum of array elements over a given axis."""
def __init__(self, axis=None):
if isinstance(axis, six.integer_types) or axis is None:
self.axis = axis
else:
raise TypeError('axis must be int or None')
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
if self.axis is not None:
if self.axis >= 0:
type_check.expect(self.axis < in_types[0].ndim)
else:
type_check.expect(-self.axis - 1 < in_types[0].ndim)
def forward(self, inputs):
x, = inputs
self._in_shape = x.shape
xp = backend.get_array_module(x)
return xp.cumsum(x, axis=self.axis),
def backward(self, indexes, grad_outputs):
gy = grad_outputs[0]
axis = self.axis
if axis is not None:
gx = flip.flip(cumsum(flip.flip(gy, axis), axis), axis)
else:
gx = flip.flip(cumsum(flip.flip(gy, 0), 0), 0)
gx = gx.reshape(self._in_shape)
return gx,
def cumsum(x, axis=None):
"""Cumulative sum of array elements over a given axis.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Elements to calculate the cumulative sum.
axis (int or None):
Axis along which the cumulative sum is taken.
If it is not specified, the input is flattened.
Returns:
~chainer.Variable: Output variable.
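    .. admonition:: Example
        A small sketch with arbitrary values, accumulating along ``axis=1``:
        >>> x = np.array([[1., 2., 3.], [4., 5., 6.]], dtype=np.float32)
        >>> y = F.cumsum(x, axis=1)
        >>> y.array
        array([[ 1.,  3.,  6.],
               [ 4.,  9., 15.]], dtype=float32)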
"""
return Cumsum(axis).apply((x,))[0]
| 1,792
| 27.919355
| 68
|
py
|
chainer
|
chainer-master/chainer/functions/math/inv.py
|
import numpy.linalg
import chainer
from chainer.backends import cuda
from chainer import function_node
import chainer.functions
from chainer.functions.math import matmul
from chainer import utils
from chainer.utils import precision
from chainer.utils import type_check
def _inv_gpu(b):
# We do a batched LU decomposition on the GPU to compute the inverse
# Change the shape of the array to be size=1 minibatch if necessary
# Also copy the matrix as the elements will be modified in-place
a = matmul._as_batch_mat(b).copy()
n = a.shape[1]
n_matrices = len(a)
# Pivot array
p = cuda.cupy.empty((n, n_matrices), dtype=numpy.int32)
# Output array
c = cuda.cupy.empty_like(a)
# These arrays hold information on the execution success
# or if the matrix was singular
info = cuda.cupy.empty(n_matrices, dtype=numpy.int32)
ap = matmul._mat_ptrs(a)
cp = matmul._mat_ptrs(c)
_, lda = matmul._get_ld(a)
_, ldc = matmul._get_ld(c)
handle = cuda.Device().cublas_handle
if b.dtype == numpy.float32:
cuda.cublas.sgetrfBatched(
handle, n, ap.data.ptr, lda, p.data.ptr, info.data.ptr, n_matrices)
cuda.cublas.sgetriBatched(
handle, n, ap.data.ptr, lda, p.data.ptr, cp.data.ptr, ldc,
info.data.ptr, n_matrices)
elif b.dtype == numpy.float64:
cuda.cublas.dgetrfBatched(
handle, n, ap.data.ptr, lda, p.data.ptr, info.data.ptr, n_matrices)
cuda.cublas.dgetriBatched(
handle, n, ap.data.ptr, lda, p.data.ptr, cp.data.ptr, ldc,
info.data.ptr, n_matrices)
else:
assert False
return c, info
class Inv(function_node.FunctionNode):
def check_type_forward(self, in_types):
type_check._argname(in_types, ('a',))
a_type, = in_types
type_check.expect(a_type.dtype.kind == 'f')
# Only 2D array shapes allowed
type_check.expect(a_type.ndim == 2)
# Matrix inversion only allowed for square matrices
type_check.expect(a_type.shape[0] == a_type.shape[1])
@precision._fp16_mixed_precision_helper
def forward_cpu(self, x):
self.retain_outputs((0,))
try:
invx = utils.force_array(numpy.linalg.inv(x[0]))
except numpy.linalg.LinAlgError:
raise ValueError('Input has singular matrices.')
return invx,
@precision._fp16_mixed_precision_helper
def forward_gpu(self, x):
self.retain_outputs((0,))
shape = x[0].shape
invx, info = _inv_gpu(x[0].reshape(1, *shape))
if chainer.is_debug():
if cuda.cupy.any(info != 0):
raise ValueError('Input has singular matrices.')
invx = invx.reshape(shape)
return invx,
def backward(self, x, gy):
invx, = self.get_retained_outputs()
# Gradient is - x^-T (dx) x^-T
invxT = chainer.functions.transpose(invx)
gx = chainer.functions.matmul(
chainer.functions.matmul(- invxT, gy[0]), invxT)
return gx,
class BatchInv(function_node.FunctionNode):
def check_type_forward(self, in_types):
type_check._argname(in_types, ('a',))
a_type, = in_types
type_check.expect(a_type.dtype.kind == 'f')
# Only a minibatch of 2D array shapes allowed
type_check.expect(a_type.ndim == 3)
# Matrix inversion only allowed for square matrices
# so assert the last two dimensions are equal
type_check.expect(a_type.shape[-1] == a_type.shape[-2])
@precision._fp16_mixed_precision_helper
def forward_cpu(self, x):
self.retain_outputs((0,))
try:
invx = utils.force_array(numpy.linalg.inv(x[0]))
except numpy.linalg.LinAlgError:
raise ValueError('Input has singular matrices.')
return invx,
@precision._fp16_mixed_precision_helper
def forward_gpu(self, x):
self.retain_outputs((0,))
invx, info = _inv_gpu(x[0])
if chainer.is_debug():
if cuda.cupy.any(info != 0):
raise ValueError('Input has singular matrices.')
return invx,
def backward(self, x, gy):
invx, = self.get_retained_outputs()
# Unpack 1-length tuples
gy, = gy
# Gradient is - x^-T (dx) x^-T
ret = chainer.functions.matmul(-invx, gy, transa=True)
ret2 = chainer.functions.matmul(ret, invx, transb=True)
return ret2,
def inv(a):
"""Computes the inverse of square matrix.
a (:class:`~chainer.Variable` or :ref:`ndarray`):
Input array to compute the inverse for. Shape of
the array should be ``(n, n)`` where ``n`` is the dimensionality of
a square matrix.
Returns:
~chainer.Variable: Matrix inverse of ``a``.
"""
return Inv().apply((a,))[0]
def batch_inv(a):
"""Computes the inverse of a batch of square matrices.
Args:
a (:class:`~chainer.Variable` or :ref:`ndarray`):
Input array to compute the inverse for. Shape of
the array should be ``(m, n, n)`` where ``m`` is the number of
matrices in the batch, and ``n`` is the dimensionality of a square
matrix.
Returns:
~chainer.Variable: Inverse of every matrix in the batch of matrices.
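    .. admonition:: Example
        A shape-only sketch with a batch of one arbitrary invertible matrix:
        >>> a = np.array([[[1., 2.], [3., 4.]]], dtype=np.float32)
        >>> y = F.batch_inv(a)
        >>> y.shape
        (1, 2, 2)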
"""
return BatchInv().apply((a,))[0]
| 5,386
| 33.312102
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/basic_math.py
|
import math
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function_node
import chainer.functions
from chainer.functions.math import floor as _floor
from chainer import utils
from chainer.utils import type_check
from chainer import variable
def _convert_value_to_string(value):
if isinstance(value, variable.Variable):
value = value.data
if numpy.isscalar(value):
if value < 0:
return '({})'.format(value)
else:
return str(value)
array_types = chainer.get_array_types()
if isinstance(value, array_types):
return 'constant array'
else:
raise ValueError(
'Value must be a Variable, scalar, {} or {}. Actual: {}'.format(
', '.join([str(at) for at in array_types[:-1]]),
array_types[-1], type(value)))
def _preprocess_const(x, value):
return x.dtype.type(value)
def _chainerx_preprocess_const(x, value, label):
# Allow mixing of numpy/cupy array and chainerx array as long as
# conversion without copy is possible.
if isinstance(value, (numpy.ndarray, cuda.ndarray)):
# TODO(niboshi): force zero-copy
return backend.to_chx(value)
if isinstance(value, (six.integer_types, float)):
return value
if isinstance(value, numpy.generic):
return value.item()
if isinstance(value, variable.Variable):
value = variable.as_array(value)
utils._check_arrays_forward_compatible((x, value), label)
return value
def _preprocess_rhs(x, value):
if isinstance(value, chainer.Variable):
return value
if not (numpy.isscalar(value)
or isinstance(value, chainer.get_array_types())):
raise TypeError(
'Value must be a scalar, `numpy.ndarray`, `cupy.ndarray` '
'or a `Variable`.\nActual: {}'.format(type(value)))
return value.astype(x.dtype, copy=False)
class Neg(function_node.FunctionNode):
is_elementwise = True
@property
def label(self):
return '__neg__'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
def forward_chainerx(self, x):
return -x[0],
def forward(self, x):
self.retain_inputs(())
return utils.force_array(-x[0]),
def backward(self, indexes, gy):
return -gy[0],
def neg(self): # -x
"""Element-wise negation.
Returns:
~chainer.Variable: Output variable.
"""
return Neg().apply((self,))[0]
class Absolute(function_node.FunctionNode):
is_elementwise = True
@property
def label(self):
return '|_|'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_inputs((0,))
return utils.force_array(abs(x[0])),
def backward(self, indexes, grad_outputs):
x = self.get_retained_inputs()[0]
return AbsoluteGrad(x.data).apply(grad_outputs)
class AbsoluteGrad(function_node.FunctionNode):
is_elementwise = True
def __init__(self, x):
super(AbsoluteGrad, self).__init__()
self.x = x
def check_type_forward(self, in_types):
type_check._argname(in_types, ('gy',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, inputs):
return utils.force_array(numpy.sign(self.x) * inputs[0]),
def forward_gpu(self, inputs):
gx0 = cuda.elementwise(
'T x0, T gy', 'T gx0',
'gx0 = ((x0 > 0) - (x0 < 0)) * gy',
'abs_bwd')(self.x, inputs[0])
return gx0,
def backward(self, indexes, grad_outputs):
return AbsoluteGrad(self.x).apply(grad_outputs)
def absolute(self):
"""Element-wise absolute.
Returns:
~chainer.Variable: Output variable.
"""
return Absolute().apply((self,))[0]
class Add(function_node.FunctionNode):
is_elementwise = True
@property
def label(self):
return '_ + _'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('lhs', 'rhs'))
type_check.expect(
in_types[0].dtype == in_types[1].dtype,
)
type_check.expect_broadcast_shapes(
in_types[0].shape, in_types[1].shape)
def forward_chainerx(self, x):
return x[0] + x[1],
def forward(self, x):
# may broadcast
y = utils.force_array(x[0] + x[1])
return y,
def backward(self, indexes, gy):
return tuple(chainer.functions.sum_to(gy[0], self.inputs[i].shape)
for i in indexes)
class AddConstant(function_node.FunctionNode):
is_elementwise = True
def __init__(self, value):
self.value = value
@property
def label(self):
return '_ + %s' % _convert_value_to_string(self.value)
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types.size() == 1)
def forward_chainerx(self, x):
value = _chainerx_preprocess_const(x[0], self.value, 'add')
return x[0] + value,
def forward(self, x):
value = _preprocess_const(x[0], self.value)
return utils.force_array(x[0] + value),
def backward(self, indexes, gy):
x_node, = self.inputs
return gy
class MultiAdd(function_node.FunctionNode):
is_elementwise = True
def check_type_forward(self, in_types):
for i, in_type in enumerate(in_types):
type_check._argname((in_type,), ('x{}'.format(i),))
type_check.expect(in_types[0].dtype == in_type.dtype)
def forward(self, xs):
self.len = len(xs)
if len(xs) == 1:
return xs
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(xs)
and all(x.shape == xs[0].shape for x in xs[1:])):
y = intel64.ideep.multi_add(xs)
else:
# The output should be a new array. Add the first 2 arrays
# and get the result y. Then add the rest arrays to y.
y = xs[0] + xs[1]
for x in xs[2:]:
if x.shape == y.shape:
y += x
else:
y = x + y
return utils.force_array(y),
def backward(self, indexes, gy):
return tuple(chainer.functions.sum_to(gy[0], x_node.shape)
for x_node in self.inputs)
# TODO(hvy): Implement multi-add with chainerx.ndarrays.
def add(*xs): # lhs + rhs or add more than 2 variables
"""Element-wise addition.
Returns:
~chainer.Variable: Output variable.
"""
if len(xs) == 2:
lhs, rhs = xs
if numpy.isscalar(rhs):
return AddConstant(rhs).apply((lhs,))[0]
rhs = _preprocess_rhs(lhs, rhs)
return Add().apply((lhs, rhs))[0]
else:
return MultiAdd().apply(xs)[0]
class Sub(function_node.FunctionNode):
is_elementwise = True
@property
def label(self):
return '_ - _'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('lhs', 'rhs'))
type_check.expect(in_types[0].dtype == in_types[1].dtype)
type_check.expect_broadcast_shapes(
in_types[0].shape, in_types[1].shape)
def forward_chainerx(self, x):
return x[0] - x[1],
def forward(self, x):
# may broadcast
return utils.force_array(x[0] - x[1]),
def backward(self, indexes, gy):
x1, x2 = self.inputs
g, = gy
return (
chainer.functions.sum_to(g, x1.shape) if 0 in indexes else None,
-chainer.functions.sum_to(g, x2.shape) if 1 in indexes else None,
)
def sub(self, rhs): # lhs - rhs
"""Element-wise subtraction.
Returns:
~chainer.Variable: Output variable.
"""
if numpy.isscalar(rhs):
return AddConstant(-rhs).apply((self,))[0]
rhs = _preprocess_rhs(self, rhs)
return Sub().apply((self, rhs))[0]
class SubFromConstant(function_node.FunctionNode):
is_elementwise = True
def __init__(self, value):
self.value = value
@property
def label(self):
return '%s - _' % _convert_value_to_string(self.value)
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
def forward(self, x):
value = _preprocess_const(x[0], self.value)
return utils.force_array(value - x[0]),
def backward(self, indexes, gy):
g, = gy
return -g,
def rsub(self, rhs): # rhs - lhs
"""Element-wise subtraction.
Returns:
~chainer.Variable: Output variable.
"""
if numpy.isscalar(rhs):
return SubFromConstant(rhs).apply((self,))[0]
rhs = _preprocess_rhs(self, rhs)
return Sub().apply((rhs, self))[0]
class Mul(function_node.FunctionNode):
is_elementwise = True
@property
def label(self):
return '_ * _'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('lhs', 'rhs'))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
)
type_check.expect_broadcast_shapes(
in_types[0].shape, in_types[1].shape)
def forward_chainerx(self, x):
return x[0] * x[1],
def forward(self, x):
self.retain_inputs((0, 1))
# may broadcast
return utils.force_array(x[0] * x[1]),
def backward(self, indexes, gy):
xs = self.get_retained_inputs()
return tuple(
chainer.functions.sum_to(gy[0] * xs[1 - i], xs[i].shape)
for i in indexes
)
class MulConstant(function_node.FunctionNode):
is_elementwise = True
def __init__(self, value):
self.value = value
@property
def label(self):
return '_ * %s' % _convert_value_to_string(self.value)
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
def forward_chainerx(self, x):
value = _chainerx_preprocess_const(x[0], self.value, 'mul')
return x[0] * value,
def forward(self, x):
value = _preprocess_const(x[0], self.value)
return utils.force_array(value * x[0]),
def backward(self, indexes, gy):
g, = gy
return self.value * g,
def mul(self, rhs): # lhs * rhs
"""Element-wise multiplication.
Returns:
~chainer.Variable: Output variable.
"""
if numpy.isscalar(rhs):
return MulConstant(rhs).apply((self,))[0]
rhs = _preprocess_rhs(self, rhs)
return Mul().apply((self, rhs))[0]
class Div(function_node.FunctionNode):
is_elementwise = True
@property
def label(self):
return '_ / _'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('lhs', 'rhs'))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
)
type_check.expect_broadcast_shapes(
in_types[0].shape, in_types[1].shape)
def forward_chainerx(self, x):
return x[0] / x[1],
def forward(self, x):
self.retain_inputs((0, 1))
# may broadcast
return utils.force_array(x[0] / x[1]),
def backward(self, indexes, grad_outputs):
x0, x1 = self.get_retained_inputs()
is_grad_elementwise = x0.shape == x1.shape
divgrad = DivGrad(is_grad_elementwise)
return divgrad.apply((x0, x1, grad_outputs[0]))
class DivGrad(function_node.FunctionNode):
def __init__(self, is_elementwise):
self.is_elementwise = is_elementwise
def forward_cpu(self, inputs):
self.retain_inputs((0, 1, 2))
x0, x1, gy = inputs
gx0 = utils.force_array(gy / x1)
gx1 = utils.force_array(-gx0 * x0 / x1)
return utils.sum_to(gx0, x0.shape), utils.sum_to(gx1, x1.shape)
def forward_gpu(self, inputs):
self.retain_inputs((0, 1, 2))
x0, x1, gy = inputs
gx0, gx1 = cuda.elementwise(
'T x0, T x1, T gy',
'T gx0, T gx1',
'''
gx0 = gy / x1;
gx1 = -gx0 * x0 / x1;
''', 'div_bwd')(x0, x1, gy)
return utils.sum_to(gx0, x0.shape), utils.sum_to(gx1, x1.shape)
def backward(self, indexes, grad_outputs):
x0, x1, gy = self.get_retained_inputs()
ggx0, ggx1 = grad_outputs
ret = []
x1_square = x1 * x1
if 0 in indexes:
if ggx1 is None:
ret.append(None)
else:
gx0 = -ggx1 * gy / x1_square
ret.append(chainer.functions.sum_to(gx0, x0.shape))
if 1 in indexes:
gx1 = None if ggx0 is None else -ggx0 * gy / x1_square
gx1_1 = (None if ggx1 is None else
ggx1 * 2 * gy * x0 / (x1_square * x1))
if gx1 is None:
gx1 = gx1_1
elif gx1_1 is not None:
gx1 += gx1_1
ret.append(None if gx1 is None else
chainer.functions.sum_to(gx1, x1.shape))
if 2 in indexes:
ggy = None if ggx0 is None else ggx0 / x1
ggy_1 = None if ggx1 is None else ggx1 * x0 / x1_square
if ggy is None:
ggy = -ggy_1
elif ggy_1 is not None:
ggy -= ggy_1
ret.append(ggy)
return ret
def div(self, rhs): # lhs / rhs
"""Element-wise division
Returns:
~chainer.Variable: Output variable.
"""
if numpy.isscalar(rhs):
return MulConstant(1. / rhs).apply((self,))[0]
rhs = _preprocess_rhs(self, rhs)
return Div().apply((self, rhs))[0]
# TODO(sonots): Support chainerx
class DivFromConstant(function_node.FunctionNode):
is_elementwise = True
def __init__(self, value):
self.value = value
@property
def label(self):
return '%s / _' % _convert_value_to_string(self.value)
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_inputs((0,))
value = _preprocess_const(x[0], self.value)
return utils.force_array(value / x[0]),
def backward(self, indexes, grad_outputs):
x = self.get_retained_inputs()
return DivFromConstantGrad(self.value).apply((x[0], grad_outputs[0]))
class DivFromConstantGrad(function_node.FunctionNode):
def __init__(self, value):
super(DivFromConstantGrad, self).__init__()
self.value = value
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
value = _preprocess_const(x, self.value)
return utils.force_array(-value * gy / (x ** 2)),
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
# TODO(beam2d): Make it not use the input
value = _preprocess_const(x, self.value)
return cuda.elementwise('T x, T gy, T value', 'T gx',
'gx = -value * gy / (x * x)',
'div_from_const_bwd')(x, gy, value),
def backward(self, indexes, grad_outputs):
x, gy = self.get_retained_inputs()
value = _preprocess_const(x.data, self.value)
ret = []
if 0 in indexes:
ret.append(grad_outputs[0] * 2 * value * gy / (x ** 3))
if 1 in indexes:
ret.append(grad_outputs[0] * -value / (x ** 2))
return ret
def rdiv(self, rhs): # rhs / lhs
"""Element-wise division.
Returns:
~chainer.Variable: Output variable.
"""
if numpy.isscalar(rhs):
return DivFromConstant(rhs).apply((self,))[0]
rhs = _preprocess_rhs(self, rhs)
return Div().apply((rhs, self))[0]
def floordiv(self, rhs): # lhs // rhs
"""Element-wise floor division.
Returns:
~chainer.Variable: Output variable.
"""
return _floor.floor(div(self, rhs))
def rfloordiv(self, rhs): # rhs // lhs
"""Element-wise floor division.
Returns:
~chainer.Variable: Output variable.
"""
return _floor.floor(rdiv(self, rhs))
class PowVarVar(function_node.FunctionNode):
is_elementwise = True
@property
def label(self):
return '_ ** _'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('lhs', 'rhs'))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
)
type_check.expect_broadcast_shapes(
in_types[0].shape, in_types[1].shape)
def forward(self, x):
self.retain_inputs((0, 1))
# may broadcast
self.y = x[0] ** x[1]
return utils.force_array(self.y),
def backward(self, indexes, gy):
x0, x1 = self.get_retained_inputs()
is_grad_elementwise = x0.shape == x1.shape
return PowVarVarGrad(
is_grad_elementwise, self.y).apply((x0, x1, gy[0]))
class PowVarVarGrad(function_node.FunctionNode):
def __init__(self, is_elementwise, y):
self.is_elementwise = is_elementwise
self.y = y
def check_type_forward(self, in_types):
type_check._argname(in_types, ('lhs', 'rhs', 'gy'))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
in_types[0].dtype == in_types[2].dtype,
)
def forward_cpu(self, inputs):
self.retain_inputs((0, 1, 2))
x0, x1, gy = inputs
one = x1.dtype.type(1)
gx0 = utils.sum_to(
utils.force_array(x1 * (x0 ** (x1 - one)) * gy), x0.shape)
gx1 = utils.sum_to(
utils.force_array(numpy.log(x0) * self.y * gy), x1.shape)
return gx0, gx1
def forward_gpu(self, inputs):
self.retain_inputs((0, 1, 2))
x0, x1, gy = inputs
gx0, gx1 = cuda.elementwise(
'T x0, T x1, T gy, T y', 'T gx0, T gx1',
'''
gx0 = x1 * pow(x0, x1 - 1) * gy;
gx1 = log(x0) * y * gy;
''', 'pow_var_var_bwd')(x0, x1, gy, self.y)
gx0 = utils.sum_to(gx0, x0.shape)
gx1 = utils.sum_to(gx1, x1.shape)
return gx0, gx1
def backward(self, indexes, ggx):
x0, x1, gy = self.get_retained_inputs()
ggx0, ggx1 = ggx
log_x0 = chainer.functions.log(x0)
pow_x0_x1 = x0 ** x1
pow_x0_x1_1 = x0 ** (x1 - 1)
pow_x0_x1_2 = x0 ** (x1 - 2)
ret = []
if 0 in indexes:
gx0_0 = (0 if ggx0 is None else
ggx0 * x1 * (x1 - 1) * pow_x0_x1_2)
gx0_1 = (0 if ggx1 is None else
ggx1 * pow_x0_x1_1 * (log_x0 * x1 + 1))
gx0 = (gx0_0 + gx0_1) * gy
ret.append(chainer.functions.sum_to(gx0, x0.shape))
if 1 in indexes:
gx1_0 = (0 if ggx0 is None else
ggx0 * pow_x0_x1_1 * (log_x0 * x1 + 1))
gx1_1 = (0 if ggx1 is None else
ggx1 * log_x0 * log_x0 * pow_x0_x1)
gx1 = (gx1_0 + gx1_1) * gy
ret.append(chainer.functions.sum_to(gx1, x1.shape))
if 2 in indexes:
ggy_0 = 0 if ggx0 is None else ggx0 * x1 * pow_x0_x1_1
ggy_1 = 0 if ggx1 is None else ggx1 * log_x0 * pow_x0_x1
ggy = ggy_0 + ggy_1
ret.append(ggy)
return ret
class PowVarConst(function_node.FunctionNode):
is_elementwise = True
def __init__(self, value):
self.value = value
@property
def label(self):
return '_ ** %s' % _convert_value_to_string(self.value)
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_inputs((0,))
y = x[0] ** _preprocess_const(x[0], self.value)
return utils.force_array(y, x[0].dtype),
def backward(self, indexes, gy):
inputs = self.get_retained_inputs()
return PowVarConstGrad(self.value).apply((inputs[0], gy[0]))
class PowVarConstGrad(function_node.FunctionNode):
is_elementwise = True
def __init__(self, value):
self.value = value
self.val = self.val_1 = None
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 'gy'))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
in_types[0].shape == in_types[1].shape
)
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
self.val_1 = _preprocess_const(x, self.value - 1)
gx = utils.force_type(x.dtype, self.value) * (x ** self.val_1) * gy
gx = utils.force_array(gx)
return gx,
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
self.val = _preprocess_const(x, self.value)
gx = cuda.elementwise(
'T x, T gy, T value', 'T gx',
'gx = value * pow(x, value - 1) * gy',
'pow_var_const_bwd')(x, gy, self.val)
return gx,
def backward(self, indexes, ggx):
x, gy = self.get_retained_inputs()
if self.val is None:
self.val = _preprocess_const(x.data, self.value)
if self.val_1 is None:
self.val_1 = _preprocess_const(x.data, self.value - 1)
val_2 = _preprocess_const(x.data, self.value - 2)
ret = []
if 0 in indexes:
ret.append(ggx[0] * self.val * gy * self.val_1 * x ** val_2)
if 1 in indexes:
ret.append(ggx[0] * self.val * x ** self.val_1)
return ret
def pow(self, rhs): # lhs ** rhs
"""Element-wise power function.
Returns:
~chainer.Variable: Output variable.
"""
if numpy.isscalar(rhs):
return PowVarConst(rhs).apply((self,))[0]
rhs = _preprocess_rhs(self, rhs)
return PowVarVar().apply((self, rhs))[0]
class PowConstVar(function_node.FunctionNode):
is_elementwise = True
def __init__(self, value):
self.value = value
@property
def label(self):
return '%s ** _' % _convert_value_to_string(self.value)
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_outputs((0,))
value = _preprocess_const(x[0], self.value)
y = value ** x[0]
return utils.force_array(y),
def backward(self, indexes, gy):
outputs = self.get_retained_outputs()
return PowConstVarGrad(self.value).apply((outputs[0], gy[0]))
class PowConstVarGrad(function_node.FunctionNode):
is_elementwise = True
def __init__(self, value):
self.value = value
self.log_value = math.log(value)
def check_type_forward(self, in_types):
type_check._argname(in_types, ('y', 'gy'))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
in_types[0].shape == in_types[1].shape
)
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
y, gy = inputs
gx = utils.force_array(y.dtype.type(self.log_value) * y * gy)
return gx,
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
y, gy = inputs
value = _preprocess_const(y, self.value)
gx = cuda.elementwise(
'T y, T gy, T value', 'T gx',
'gx = log(value) * y * gy',
'pow_const_var_bwd')(y, gy, value)
return gx,
def backward(self, indexes, ggx):
y, gy = self.get_retained_inputs()
gygy = y.dtype.type(self.log_value) * ggx[0]
ret = []
if 0 in indexes:
ret.append(gygy * gy)
if 1 in indexes:
ret.append(gygy * y)
return ret
def rpow(self, rhs): # rhs ** lhs
"""Element-wise power function.
Returns:
~chainer.Variable: Output variable.
"""
if numpy.isscalar(rhs):
return PowConstVar(rhs).apply((self,))[0]
rhs = _preprocess_rhs(self, rhs)
return PowVarVar().apply((rhs, self))[0]
def matmul(self, rhs): # lhs @ rhs
"""Matrix multiplication.
Returns:
~chainer.Variable: Output variable.
"""
rhs = _preprocess_rhs(self, rhs)
return chainer.functions.matmul(self, rhs)
def rmatmul(self, rhs): # rhs @ lhs
"""Matrix multiplication.
Returns:
~chainer.Variable: Output variable.
"""
rhs = _preprocess_rhs(self, rhs)
return chainer.functions.matmul(rhs, self)
def install_variable_arithmetics():
variable.Variable.__neg__ = neg
variable.Variable.__abs__ = absolute
variable.Variable.__add__ = add
variable.Variable.__radd__ = add
variable.Variable.__sub__ = sub
variable.Variable.__rsub__ = rsub
variable.Variable.__mul__ = mul
variable.Variable.__rmul__ = mul
variable.Variable.__div__ = div
variable.Variable.__truediv__ = div
variable.Variable.__rdiv__ = rdiv
variable.Variable.__rtruediv__ = rdiv
variable.Variable.__floordiv__ = floordiv
variable.Variable.__rfloordiv__ = rfloordiv
variable.Variable.__pow__ = pow
variable.Variable.__rpow__ = rpow
variable.Variable.__matmul__ = matmul
variable.Variable.__rmatmul__ = rmatmul
| 25,827
| 26.712446
| 77
|
py
|
chainer
|
chainer-master/chainer/functions/math/minimum.py
|
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
import chainer.functions
from chainer import utils
from chainer.utils import type_check
class Minimum(function_node.FunctionNode):
"""Element-wise minimum of input variables."""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x1', 'x2'))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
)
type_check.expect_broadcast_shapes(
in_types[0].shape, in_types[1].shape)
def forward(self, inputs):
# may broadcast
self.retain_inputs((0, 1))
x1, x2 = inputs
xp = backend.get_array_module(x1, x2)
return utils.force_array(xp.minimum(x1, x2)),
def backward(self, indexes, grad_outputs):
x1, x2 = self.get_retained_inputs()
return MinimumGrad(x1.data, x2.data).apply((grad_outputs[0],))
class MinimumGrad(function_node.FunctionNode):
def __init__(self, x1, x2):
self.x1 = x1
self.x2 = x2
def forward_cpu(self, inputs):
gy, = inputs
x1, x2 = self.x1, self.x2
gx1 = utils.force_array(gy * (x1 <= x2))
gx2 = utils.force_array(gy * (x1 > x2))
return utils.sum_to(gx1, x1.shape), utils.sum_to(gx2, x2.shape)
def forward_gpu(self, inputs):
gy, = inputs
x1, x2 = self.x1, self.x2
gx1 = cuda.elementwise(
'T x1, T x2, T gy', 'T gx1',
'gx1 = (x1 <= x2) ? gy : (T)0.0',
'minimum_bwd1')(x1, x2, gy)
gx2 = cuda.elementwise(
'T x1, T x2, T gy', 'T gx1',
'gx1 = (x1 > x2) ? gy : (T)0.0',
'minimum_bwd2')(x1, x2, gy)
return utils.sum_to(gx1, x1.shape), utils.sum_to(gx2, x2.shape)
def backward(self, indexes, grad_outputs):
x1, x2 = self.x1, self.x2
cond = utils.force_array(x1 <= x2)
ggy = chainer.functions.where(cond, grad_outputs[0], grad_outputs[1])
return ggy,
def minimum(x1, x2):
"""Element-wise minimum of input variables.
Args:
x1 (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variables to be compared.
x2 (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variables to be compared.
Returns:
~chainer.Variable: Output variable.
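    .. admonition:: Example
        A small sketch with arbitrary values:
        >>> x1 = np.array([1., 4., 3.], dtype=np.float32)
        >>> x2 = np.array([2., 2., 5.], dtype=np.float32)
        >>> y = F.minimum(x1, x2)
        >>> y.array
        array([1., 2., 3.], dtype=float32)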
"""
return Minimum().apply((x1, x2))[0]
| 2,462
| 30.177215
| 77
|
py
|
chainer
|
chainer-master/chainer/functions/math/linear_interpolate.py
|
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class LinearInterpolate(function_node.FunctionNode):
def check_type_forward(self, in_types):
type_check._argname(in_types, ('p', 'x', 'y'))
p_type, x_type, y_type = in_types
type_check.expect(
p_type.dtype.kind == 'f',
x_type.dtype == p_type.dtype,
y_type.dtype == p_type.dtype,
p_type.shape == x_type.shape,
p_type.shape == y_type.shape,
)
def forward_cpu(self, inputs):
self.retain_inputs((0, 1, 2))
p, x, y = inputs
one = p.dtype.type(1)
return utils.force_array(p * x + (one - p) * y),
def forward_gpu(self, inputs):
self.retain_inputs((0, 1, 2))
p, x, y = inputs
return cuda.elementwise(
'T p, T x, T y', 'T z',
'z = p * x + (1 - p) * y',
'linear_interpolate_fwd',
)(p, x, y),
def backward(self, indexes, grad_outputs):
p, x, y = self.get_retained_inputs()
gz, = grad_outputs
return LinearInterpolateGrad().apply((p, x, y, gz))
class LinearInterpolateGrad(function_node.FunctionNode):
def forward_cpu(self, inputs):
self.retain_inputs((0, 1, 2, 3))
p, x, y, gz = inputs
pg = p * gz
return (utils.force_array((x - y) * gz),
utils.force_array(pg),
utils.force_array(gz - pg))
def forward_gpu(self, inputs):
self.retain_inputs((0, 1, 2, 3))
p, x, y, gz = inputs
return cuda.elementwise(
'T p, T x, T y, T gz', 'T gp, T gx, T gy',
'''
gp = (x - y) * gz;
gx = gz * p;
gy = gz * (1 - p);
''',
'linear_interpolate_bwd'
)(p, x, y, gz)
def backward(self, indexes, grad_outputs):
p, x, y, gz = self.get_retained_inputs()
ggp, ggx, ggy = grad_outputs
gp = gz * (ggx - ggy)
gx = gz * ggp
gy = - gx
ggz = (x - y) * ggp + p * ggx + (1 - p) * ggy
return gp, gx, gy, ggz
def linear_interpolate(p, x, y):
"""Elementwise linear-interpolation function.
This function is defined as
.. math::
f(p, x, y) = p x + (1 - p) y.
Args:
p (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
y (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
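    .. admonition:: Example
        A small sketch with arbitrary values; ``p = 0`` selects ``y`` and
        ``p = 1`` selects ``x``:
        >>> p = np.array([0., 0.5, 1.], dtype=np.float32)
        >>> x = np.array([10., 10., 10.], dtype=np.float32)
        >>> y = np.array([20., 20., 20.], dtype=np.float32)
        >>> F.linear_interpolate(p, x, y).array
        array([20., 15., 10.], dtype=float32)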
"""
return LinearInterpolate().apply((p, x, y))[0]
| 2,707
| 27.505263
| 73
|
py
|
chainer
|
chainer-master/chainer/functions/math/zeta.py
|
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
_zeta_cpu = None
class Zeta(function_node.FunctionNode):
def __init__(self, x):
self._x = x
@property
def label(self):
return 'zeta'
def check_type_forward(self, in_types):
        type_check._argname(in_types, ('q',))
q_type, = in_types
type_check.expect(
q_type.dtype.kind == 'f'
)
def forward_cpu(self, inputs):
q, = inputs
global _zeta_cpu
if _zeta_cpu is None:
try:
from scipy import special
_zeta_cpu = special.zeta
except ImportError:
                raise ImportError('SciPy is not available. Forward computation'
' of zeta cannot be done.')
self.retain_inputs((0,))
return utils.force_array(_zeta_cpu(self._x, q), dtype=q.dtype),
def forward_gpu(self, inputs):
q, = inputs
self.retain_inputs((0,))
return utils.force_array(
cuda.cupyx.scipy.special.zeta(self._x, q), dtype=q.dtype),
def backward(self, indexes, gy):
q, = self.get_retained_inputs()
return gy[0] * -self._x * zeta(self._x + 1, q),
def zeta(x, q):
"""Zeta function.
    This function is differentiable only with respect to ``q``.
.. note::
Forward computation in CPU can not be done if
`SciPy <https://www.scipy.org/>`_ is not available.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
q (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Zeta(x).apply((q,))[0]
| 1,776
| 25.522388
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/square.py
|
from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class Square(function_node.FunctionNode):
@property
def label(self):
return 'square'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_inputs((0,))
xp = backend.get_array_module(*x)
return utils.force_array(xp.square(x[0], dtype=x[0].dtype)),
def backward(self, indexes, gy):
x = self.get_retained_inputs()[0]
gx = gy[0] * 2.0 * x
return gx,
def square(x):
"""Elementwise square function.
.. math::
y_i = x_i ^ 2.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
Returns:
~chainer.Variable: Output variable.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
.. admonition:: Example
>>> x = np.arange(6).reshape(2,3).astype(np.float32)
>>> x
array([[0., 1., 2.],
[3., 4., 5.]], dtype=float32)
>>> y = F.square(x)
>>> y.shape
(2, 3)
>>> y.array
array([[ 0., 1., 4.],
[ 9., 16., 25.]], dtype=float32)
"""
return Square().apply((x,))[0]
| 1,419
| 23.912281
| 73
|
py
|
chainer
|
chainer-master/chainer/functions/math/sparse_matmul.py
|
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
try:
from scipy import sparse
_scipy_available = True
except ImportError:
_scipy_available = False
def _coo_matmul(sp_data, sp_row, sp_col, sp_shape, sp_order,
dn, transa, transb, transc, dtype=None):
if dtype is None:
dtype = numpy.result_type(sp_data.dtype, dn.dtype)
A_data = sp_data
if transa:
A_row = sp_col
A_col = sp_row
A_shape = (sp_shape[1], sp_shape[0])
if sp_order == 'C':
A_order = 'F'
elif sp_order == 'F':
A_order = 'C'
else:
A_order = sp_order
else:
A_row = sp_row
A_col = sp_col
A_shape = sp_shape
A_order = sp_order
if transb:
B = dn.swapaxes(-1, -2)
else:
B = dn
xp = backend.get_array_module(A_data, B)
if xp is numpy:
C = _coo_matmul_cpu(A_data, A_row, A_col, A_shape, B, dtype)
else:
C = _coo_matmul_gpu(A_data, A_row, A_col, A_shape, A_order,
B, dtype)
if transc:
C = C.swapaxes(-1, -2)
return C
def _coo_matmul_cpu(A_data, A_row, A_col, A_shape, B, dtype):
# A_shape: (_m, _k)
# B.shape: ((nb,) _k, _n)
# A_data/row/col.shape: ((nb,) ldnz)
if not _scipy_available:
msg = 'SciPy seems to be unavailable on your system. A CPU' \
' implementation of sparse_matmul uses SciPy, so you' \
' cannot use sparse_matmul on the CPU.'
raise RuntimeError(msg)
_m, _k = A_shape
_n = B.shape[-1]
if B.ndim == 2:
sp_A = sparse.coo_matrix((A_data, (A_row, A_col)), shape=(_m, _k))
C = sp_A.dot(B).astype(dtype, copy=False)
else:
nb = B.shape[0]
C = numpy.empty((nb, _m, _n), dtype=dtype)
for i in range(nb):
nnz = len(numpy.where(A_row[i] >= 0)[0])
sp_A = sparse.coo_matrix((A_data[i, :nnz],
(A_row[i, :nnz], A_col[i, :nnz])),
shape=(_m, _k))
C[i] = sp_A.dot(B[i]).astype(dtype, copy=False)
return C
def _coo_matmul_gpu(A_data, A_row, A_col, A_shape, A_order, B, dtype):
cupy_dtype = dtype
if cupy_dtype == numpy.float16:
cupy_dtype = numpy.float32
# fp32 is used in cupy kernel because fp16 atomicAdd is not supported
# A_shape: (_m, _k)
# B.shape: ((nb,) _k, _n)
# A_data/row/col.shape: ((nb,) ldnz)
_m, _k = A_shape
_n = B.shape[-1]
ldnz = A_data.shape[-1]
if B.ndim == 2:
nb = 1
C = cuda.cupy.zeros((_m, _n), dtype=cupy_dtype)
else:
nb = B.shape[0]
C = cuda.cupy.zeros((nb, _m, _n), dtype=cupy_dtype)
if A_order == 'C':
# A chunk is the number of non-zero elements handled by a single GPU
        # thread. If contiguous non-zero elements are related to the same
# location of the output matrix and they are processed in the same
# thread, number of atomic-add operations can be reduced.
chunk = max(ldnz // _m, 1)
else:
chunk = 1
nthreads = (nb * ldnz + chunk - 1) // chunk * _n
_cupy_coo_matmul()(nb, _m, _n, _k, ldnz, chunk,
A_data, A_row, A_col, B, C,
size=nthreads)
return C.astype(dtype, copy=False)
def _cupy_coo_matmul():
utils.nondeterministic('atomicAdd')
return cuda.elementwise(
'int32 nb, int32 _m, int32 _n, int32 _k, int32 nnz, int32 chunk, \
raw A A_data, raw T A_row, raw T A_col, \
raw B _B',
'raw C _C',
'''
int i_n = (i % _n);
int i0 = (i / _n) * chunk;
int i_C = -1;
C val_C = 0;
for (int i1 = 0; i1 < chunk; i1++) {
int i_A = i0 + i1;
int i_b = i_A / nnz;
if (i_b >= nb) {
continue;
}
int i_k = A_col[i_A];
if (i_k < 0) {
continue;
}
assert(i_k < _k);
int i_m = A_row[i_A];
if (i_m < 0) {
continue;
}
assert(i_m < _m);
int i_B = i_n + _n * (i_k + _k * i_b);
int i_C_now = i_n + _n * (i_m + _m * i_b);
A val_A = A_data[i_A];
B val_B = _B[i_B];
C val_C_now = static_cast<C>(val_A * val_B);
if (i_C >= 0 && i_C != i_C_now) {
atomicAdd(&_C[i_C], val_C);
val_C = 0;
}
i_C = i_C_now;
val_C += val_C_now;
}
if (i_C >= 0) {
atomicAdd(&_C[i_C], val_C);
}
''',
'coo_matmul')
class CooMatMul(function_node.FunctionNode):
def __init__(self, sp_row, sp_col, sp_shape, sp_order='other',
transa=False, transb=False, transc=False, dtype=None):
if sp_row.ndim != sp_col.ndim:
raise ValueError('ndim of sp_row and sp_col must be the same.')
if sp_row.ndim != 1 and sp_row.ndim != 2:
raise ValueError('ndim of sp_row and sp_col must be one or two.')
for i in range(sp_row.ndim):
if sp_row.shape[i] != sp_col.shape[i]:
msg = 'shape of sp_row and sp_col must be the same.'
raise ValueError(msg)
if len(sp_shape) != 2:
raise ValueError('len(sp_shape) must be two.')
self.sp_row = sp_row # ((nb,) ldnz)
self.sp_col = sp_col # ((nb,) ldnz)
self.sp_shape = sp_shape # (_m, _k) when transa is False
self.sp_order = sp_order
self.transa = transa
self.transb = transb
self.transc = transc
self.dtype = dtype
def check_type_forward(self, in_types):
type_check._argname(in_types, ('sp', 'dn'))
sp_type, dn_type = in_types
# sp_type.shape: ((nb,) ldnz)
# dn_type.shape: ((nb,) _k, _n) when transb is False
sp_k_axis = -1
if self.transa:
sp_k_axis = -2
dn_k_axis = -2
if self.transb:
dn_k_axis = -1
type_check.expect(
sp_type.dtype.kind == 'f',
dn_type.dtype.kind == 'f',
dn_type.ndim >= 2,
dn_type.ndim <= 3,
sp_type.ndim == dn_type.ndim - 1,
sp_type.shape[-1] == self.sp_row.shape[-1],
self.sp_shape[sp_k_axis] == dn_type.shape[dn_k_axis],
)
dn_ndim = type_check.eval(dn_type.ndim)
if dn_ndim == 3:
type_check.expect(
sp_type.shape[0] == self.sp_row.shape[0],
dn_type.shape[0] == self.sp_row.shape[0],
)
def forward(self, inputs):
self.retain_inputs((0, 1))
sp, dn = inputs
c = _coo_matmul(sp, self.sp_row, self.sp_col, self.sp_shape,
self.sp_order, dn,
self.transa, self.transb, self.transc, self.dtype)
return utils.force_array(c, self.dtype),
def backward(self, indexes, grad_outputs):
sp, dn = self.get_retained_inputs()
g_c, = grad_outputs
ret = []
if 0 in indexes:
g_sp = CooMatMulGradSP(self.sp_row, self.sp_col, self.sp_shape,
self.sp_order,
self.transc, not self.transb, self.transa,
dtype=sp.dtype).apply((g_c, dn))[0]
ret.append(g_sp)
if 1 in indexes:
g_dn = CooMatMul(self.sp_row, self.sp_col, self.sp_shape,
self.sp_order,
not self.transa, self.transc, self.transb,
dtype=dn.dtype).apply((sp, g_c))[0]
ret.append(g_dn)
return ret
def _coo_matmul_gradsp(a, b, c_row, c_col, c_shape, transa, transb, transc,
dtype):
if dtype is None:
dtype = numpy.result_type(a.dtype, b.dtype)
if transa:
A = a.swapaxes(-1, -2)
else:
A = a
if transb:
B = b.swapaxes(-1, -2)
else:
B = b
if transc:
C_row = c_col
C_col = c_row
else:
C_row = c_row
C_col = c_col
xp = backend.get_array_module(A, B)
if xp is numpy:
return _coo_matmul_gradsp_cpu(A, B, C_row, C_col, dtype)
else:
return _coo_matmul_gradsp_gpu(A, B, C_row, C_col, dtype)
def _coo_matmul_gradsp_cpu(A, B, C_row, C_col, dtype):
# A.shape: ((nb,) _m, _k)
# B.shape: ((nb,) _k, _n)
# C_row/col.shape: ((nb,) ldnz)
_m, _k = A.shape[-2:]
ldnz = C_row.shape[-1]
if hasattr(numpy, 'matmul'):
C = numpy.matmul(A, B)
elif A.ndim == 2:
C = numpy.dot(A, B)
else:
C = numpy.einsum('...ij,...jk->...ik', A, B)
C = C.astype(dtype, copy=False)
if A.ndim == 2:
C_data = numpy.zeros((ldnz), dtype=dtype)
nnz = len(numpy.where(C_row >= 0)[0])
C_data[:nnz] = C[C_row[:nnz], C_col[:nnz]]
else:
nb = A.shape[0]
C_data = numpy.zeros((nb, ldnz), dtype=dtype)
for i in range(nb):
nnz = len(numpy.where(C_row[i] >= 0)[0])
C_data[i, :nnz] = C[i, C_row[i, :nnz], C_col[i, :nnz]]
return C_data
def _coo_matmul_gradsp_gpu(A, B, C_row, C_col, dtype):
# A.shape: ((nb,) _m, _k)
# B.shape: ((nb,) _k, _n)
# C_row/col.shape: ((nb,) ldnz)
_m, _k = A.shape[-2:]
_n = B.shape[-1]
ldnz = C_row.shape[-1]
if A.ndim == 2:
nb = 1
C_data = cuda.cupy.zeros((ldnz), dtype=dtype)
else:
nb = A.shape[0]
C_data = cuda.cupy.zeros((nb, ldnz), dtype=dtype)
nthreads = nb * ldnz
_cupy_coo_matmul_gradsp()(nb, _m, _n, _k, ldnz, A, B, C_row, C_col, C_data,
size=nthreads)
return C_data
def _cupy_coo_matmul_gradsp():
return cuda.elementwise(
'int32 nb, int32 _m, int32 _n, int32 _k, int32 nnz, \
raw A _A, raw B _B, \
raw T C_row, raw T C_col',
'raw C C_data',
'''
int i_nz = (i % nnz);
int i_b = (i / nnz);
if (i_b >= nb) {
continue;
}
int i_C = i;
int i_m = C_row[i_C];
if (i_m < 0) {
continue;
}
assert(i_m < _m);
int i_n = C_col[i_C];
if (i_n < 0) {
continue;
}
assert(i_n < _n);
C val_C = 0.0;
for (int i_k = 0; i_k < _k; i_k++) {
int i_A = i_k + _k * (i_m + _m * i_b);
int i_B = i_n + _n * (i_k + _k * i_b);
A val_A = _A[i_A];
B val_B = _B[i_B];
val_C += static_cast<C>(val_A * val_B);
}
C_data[i_C] = val_C;
''',
'coo_matmul_gradsp')
class CooMatMulGradSP(function_node.FunctionNode):
def __init__(self, sp_row, sp_col, sp_shape, sp_order='other',
transa=False, transb=False, transc=False,
dtype=None):
if sp_row.ndim != sp_col.ndim:
raise ValueError('ndim of sp_row and sp_col must be the same.')
if sp_row.ndim != 1 and sp_row.ndim != 2:
raise ValueError('ndim of sp_row and sp_col must be one or two.')
for i in range(sp_row.ndim):
if sp_row.shape[i] != sp_col.shape[i]:
msg = 'shape of sp_row and sp_col must be the same.'
raise ValueError(msg)
if len(sp_shape) != 2:
raise ValueError('len(sp_shape) must be two.')
self.sp_row = sp_row # ((nb,) ldnz)
self.sp_col = sp_col # ((nb,) ldnz)
self.sp_shape = sp_shape # (_m, _n) when transc is False
self.sp_order = sp_order
self.transa = transa
self.transb = transb
self.transc = transc
self.dtype = dtype
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
a_type, b_type = in_types
# a_type.shape: ((nb,) _m, _k) when transa is False
# b_type.shape: ((nb,) _k, _n) when transb is False
a_m_axis, a_k_axis = -2, -1
b_k_axis, b_n_axis = -2, -1
sp_m_axis, sp_n_axis = -2, -1
if self.transa:
a_m_axis, a_k_axis = -1, -2
if self.transb:
b_k_axis, b_n_axis = -1, -2
if self.transc:
sp_m_axis, sp_n_axis = -1, -2
type_check.expect(
a_type.dtype.kind == 'f',
b_type.dtype.kind == 'f',
a_type.ndim >= 2,
a_type.ndim <= 3,
a_type.ndim == b_type.ndim,
a_type.shape[a_m_axis] == self.sp_shape[sp_m_axis],
b_type.shape[b_n_axis] == self.sp_shape[sp_n_axis],
a_type.shape[a_k_axis] == b_type.shape[b_k_axis],
)
a_ndim = type_check.eval(a_type.ndim)
if a_ndim == 3:
type_check.expect(
a_type.shape[0] == self.sp_row.shape[0],
b_type.shape[0] == self.sp_row.shape[0],
)
def forward(self, inputs):
self.retain_inputs((0, 1))
a, b = inputs
c = _coo_matmul_gradsp(a, b, self.sp_row, self.sp_col, self.sp_shape,
self.transa, self.transb, self.transc,
self.dtype)
return utils.force_array(c),
def backward(self, indexes, grad_outputs):
a, b = self.get_retained_inputs()
g_sp, = grad_outputs
ret = []
if 0 in indexes:
g_a = CooMatMul(self.sp_row, self.sp_col, self.sp_shape,
self.sp_order,
self.transc, not self.transb, self.transa,
dtype=a.dtype).apply((g_sp, b))[0]
ret.append(g_a)
if 1 in indexes:
g_b = CooMatMul(self.sp_row, self.sp_col, self.sp_shape,
self.sp_order,
not self.transc, self.transa, not self.transb,
dtype=b.dtype).apply((g_sp, a))[0]
ret.append(g_b)
return ret
def sparse_matmul(a, b, transa=False, transb=False):
    """Computes the batched multiplication of a sparse and a dense matrix.
The following use cases are supported:
1. C (dense) = A (sparse) * B (dense)
2. C (dense) = A (dense) * B (sparse)
Args:
a (~chainer.Variable or ~chainer.utils.CooMatrix): The left operand of
matrix multiplication.
b (~chainer.Variable or ~chainer.utils.CooMatrix): The right operand of
matrix multiplication.
transa (bool): If ``True``, each matrix in ``a`` will be transposed.
transb (bool): If ``True``, each matrix in ``b`` will be transposed.
Returns:
~chainer.Variable: Result of batched mat-mul.
.. seealso::
See :func:`~chainer.utils.to_coo` for how to construct a COO matrix
from an array.
.. note::
Performance of this function on GPU can be improved by using the
``order`` argument of :class:`~chainer.utils.CooMatrix` when the sparse
matrix is created.
"""
if (isinstance(a, utils.CooMatrix) and
isinstance(b, (chainer.Variable, numpy.ndarray, cuda.ndarray))):
return CooMatMul(a.row, a.col, a.shape, a.order,
transa=transa,
transb=transb,
transc=False).apply((a.data, b))[0]
elif (isinstance(a, (chainer.Variable, numpy.ndarray, cuda.ndarray)) and
isinstance(b, utils.CooMatrix)):
return CooMatMul(b.row, b.col, b.shape, b.order,
transa=not transb,
transb=not transa,
transc=True).apply((b.data, a))[0]
else:
msg = 'This combination of type of inputs is not supported.\n'
msg += ' a: {}\n'.format(type(a))
msg += ' b: {}\n'.format(type(b))
raise ValueError(msg)
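# Hedged usage sketch, not part of the upstream file: multiplying a sparse COO
# matrix by a dense one with the `sparse_matmul` defined above. It relies on
# `chainer.utils.to_coo` (referenced in the docstring) plus NumPy, and the CPU
# path additionally needs SciPy.
if __name__ == '__main__':
    a_dense = numpy.array([[0, 2, 0],
                           [1, 0, 0]], dtype=numpy.float32)  # mostly zeros
    b = numpy.arange(12, dtype=numpy.float32).reshape(3, 4)
    a_coo = utils.to_coo(a_dense)             # COO view of a_dense
    c = sparse_matmul(a_coo, b)               # C = A (sparse) * B (dense)
    print(c.array)                            # same values as a_dense.dot(b)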
| 16,155
| 32.588358
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/matmul.py
|
import warnings
import numpy
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
import chainer.functions
from chainer import utils
from chainer.utils import type_check
import chainerx
def _mat_ptrs(a):
"""Creates an array of pointers to matrices
Args:
a: A batch of matrices on GPU.
Returns:
GPU array of pointers to matrices.
"""
if len(a) == 1:
return cuda.cupy.full((1,), a.data.ptr, dtype=numpy.uintp)
else:
stride = a.strides[0]
ptr = a.data.ptr
return cuda.cupy.arange(ptr, ptr + stride * len(a), stride,
dtype=numpy.uintp)
def _as_batch_mat(x):
return x.reshape(len(x), x.shape[1], -1)
def _get_ld(a):
strides = a.strides[-2:]
trans = numpy.argmin(strides)
return trans, int(max(a.shape[trans - 2], max(strides) // a.itemsize))
def _matmul(a, b, transa=False, transb=False, transout=False):
if transout:
transa, transb = not transb, not transa
a, b = b, a
if transa and a.ndim != 1:
a = a.swapaxes(-1, -2)
if transb and b.ndim != 1:
b = b.swapaxes(-1, -2)
xp = backend.get_array_module(a)
if hasattr(xp, 'matmul'): # numpy.matmul is supported from version 1.10.0
return xp.matmul(a, b)
if a.ndim <= 2 or b.ndim <= 2:
return numpy.dot(a, b)
else:
return numpy.einsum('...ij,...jk->...ik', a, b)
def _check_ndim(in_type, lower=1, upper=2):
type_check.expect(
in_type.ndim >= lower,
in_type.ndim <= upper
)
def _get_check_index(trans, right, row_idx=0, col_idx=1):
if trans ^ right:
return row_idx
else:
return col_idx
class MatMul(function_node.FunctionNode):
def __init__(self, transa=False, transb=False, transc=False, dtype=None):
self.transa = transa
self.transb = transb
self.transc = transc
self.dtype = dtype
def check_type_forward(self, in_types):
type_check._argname(in_types, ('a', 'b'))
a_type, b_type = in_types
type_check.expect(
a_type.dtype.kind == 'f',
b_type.dtype.kind == 'f',
a_type.ndim >= 1,
b_type.ndim >= 1,
)
a_ndim = type_check.eval(a_type.ndim)
b_ndim = type_check.eval(b_type.ndim)
if b_ndim == 1:
a_idx = -2 if self.transa and a_ndim > 1 else -1
type_check.expect(a_type.shape[a_idx] == b_type.shape[0])
elif a_ndim == 1:
b_idx = -1 if self.transb and b_ndim > 1 else -2
type_check.expect(a_type.shape[0] == b_type.shape[b_idx])
else:
a_idx = _get_check_index(self.transa, False,
row_idx=-2, col_idx=-1)
b_idx = _get_check_index(self.transb, True,
row_idx=-2, col_idx=-1)
type_check.expect(a_type.shape[a_idx] == b_type.shape[b_idx])
type_check.expect_broadcast_shapes(
a_type.shape[:-2], b_type.shape[:-2])
def forward_chainerx(self, x):
a, b = x
# TODO(sonots): Support transa and transb in ChainerX
if self.transa or self.transb or self.transc:
return chainer.Fallback
# TODO(sonots): Support dtype promotion in ChainerX
if a.dtype != b.dtype:
return chainer.Fallback
# TODO(sonots): Support ndim > 2 in ChainerX
if a.ndim != 2 or b.ndim != 2:
return chainer.Fallback
# TODO(niboshi): Support it
if self.dtype is not None and self.dtype != a.dtype:
return chainer.Fallback
return chainerx.dot(a, b),
def forward(self, x):
self.retain_inputs((0, 1))
a, b = x
# may broadcast
y = _matmul(a, b, self.transa, self.transb, self.transc)
if self.dtype is not None:
dtype = self.dtype
else:
dtype = y.dtype
return utils.force_array(y, dtype),
def backward(self, indexes, grad_outputs):
a, b = self.get_retained_inputs()
gy, = grad_outputs
is_a_vector = a.ndim == 1
is_b_vector = b.ndim == 1
ret = []
if 0 in indexes:
if is_b_vector:
u, v = chainer.functions.cast(gy, b.dtype), b
if not is_a_vector:
if self.transa:
u, v = v, u
u = chainer.functions.expand_dims(u, -1)
v = (chainer.functions.expand_dims(v, -2) if v.ndim > 1
else v)
ga = chainer.functions.cast(u * v, a.dtype)
elif is_a_vector:
bt = chainer.functions.rollaxis(b, -1 if self.transb else -2)
ga = chainer.functions.tensordot(bt, gy, axes=gy.ndim)
ga = chainer.functions.cast(ga, a.dtype)
else:
ga, = MatMul(self.transc, not self.transb, self.transa,
a.dtype).apply((gy, b))
ga = chainer.functions.sum_to(ga, a.shape)
ret.append(ga)
if 1 in indexes:
if is_a_vector:
u, v = a, chainer.functions.cast(gy, a.dtype)
if not is_b_vector:
if self.transb:
u, v = v, u
u = chainer.functions.expand_dims(u, -1)
v = (chainer.functions.expand_dims(v, -2) if v.ndim > 1
else v)
gb = chainer.functions.cast(u * v, b.dtype)
elif is_b_vector:
at = chainer.functions.rollaxis(a, -2 if self.transa else -1)
gb = chainer.functions.tensordot(at, gy, axes=gy.ndim)
gb = chainer.functions.cast(gb, b.dtype)
else:
gb, = MatMul(not self.transa, self.transc, self.transb,
b.dtype).apply((a, gy))
gb = chainer.functions.sum_to(gb, b.shape)
ret.append(gb)
return ret
def matmul(a, b, transa=False, transb=False):
"""Computes the matrix multiplication of two arrays.
Args:
a (:class:`~chainer.Variable` or :ref:`ndarray`):
The left operand of the matrix multiplication.
            If ``a`` and ``b`` are both 1-D arrays, ``matmul`` returns a dot
            product of vector ``a`` and vector ``b``. If they are 2-D arrays,
            ``matmul`` returns the matrix product of ``a`` and ``b``. If either
            argument has more than two dimensions, it is treated as a stack of
            matrices residing in the last two indexes, and ``matmul`` returns a
            stack of products of the corresponding matrices. In this case,
            ``a`` and ``b`` are broadcasted along axes except the last two.
b (:class:`~chainer.Variable` or :ref:`ndarray`):
The right operand of the matrix multiplication.
Its array is treated as a matrix in the same way as ``a``'s array.
        transa (bool): If ``True``, each matrix in ``a`` will be transposed.
If ``a.ndim == 1``, do nothing.
        transb (bool): If ``True``, each matrix in ``b`` will be transposed.
If ``b.ndim == 1``, do nothing.
Returns:
~chainer.Variable: The result of the matrix multiplication.
.. admonition:: Example
>>> a = np.array([[1, 0], [0, 1]], np.float32)
>>> b = np.array([[4, 1], [2, 2]], np.float32)
>>> F.matmul(a, b).array
array([[4., 1.],
[2., 2.]], dtype=float32)
"""
return MatMul(transa=transa, transb=transb).apply((a, b))[0]
def _get_size(typ, index):
if index == 2 and type_check.eval(typ.ndim) == 2:
return 1
else:
return typ.shape[index]
def _batch_matmul(a, b, transa, transb, transout):
a = a.reshape(a.shape[:2] + (-1,))
b = b.reshape(b.shape[:2] + (-1,))
return _matmul(a, b, transa, transb, transout)
class BatchMatMul(function_node.FunctionNode):
def __init__(self, transa=False, transb=False):
self.transa = transa
self.transb = transb
def check_type_forward(self, in_types):
type_check._argname(in_types, ('a', 'b'))
a_type, b_type = in_types
type_check.expect(
a_type.dtype == numpy.float32,
b_type.dtype == numpy.float32
)
_check_ndim(a_type, lower=2, upper=3)
_check_ndim(b_type, lower=2, upper=3)
a_idx = _get_check_index(self.transa, False, row_idx=1, col_idx=2)
b_idx = _get_check_index(self.transb, True, row_idx=1, col_idx=2)
a_size = _get_size(a_type, a_idx)
b_size = _get_size(b_type, b_idx)
type_check.expect(
a_size == b_size
)
def forward(self, x):
self.retain_inputs((0, 1))
a, b = x
return _batch_matmul(a, b, self.transa, self.transb, False),
def backward(self, indexes, grad_outputs):
a, b = self.get_retained_inputs()
return BatchMatMulGrad(self.transa, self.transb).apply(
(a, b, grad_outputs[0]))
class BatchMatMulGrad(function_node.FunctionNode):
def __init__(self, transa=False, transb=False):
self.transa = transa
self.transb = transb
def forward(self, inputs):
self.retain_inputs((0, 1, 2))
a, b, gy = inputs
ga = _batch_matmul(gy, b, False, not self.transb,
self.transa).reshape(a.shape)
gb = _batch_matmul(a, gy, not self.transa, False,
self.transb).reshape(b.shape)
return ga, gb
def backward(self, indexes, grad_outputs):
a, b, gy = self.get_retained_inputs()
gga, ggb = grad_outputs
ret = []
if 0 in indexes or 1 in indexes:
ga, gb = BatchMatMulGrad(self.transa, self.transb).apply(
(gga, ggb, gy))
if 0 in indexes:
ret.append(ga)
if 1 in indexes:
ret.append(gb)
if 2 in indexes:
ret.append(
BatchMatMul(self.transa, self.transb).apply((gga, b))[0] +
BatchMatMul(self.transa, self.transb).apply((a, ggb))[0])
return ret
def batch_matmul(a, b, transa=False, transb=False):
"""Computes the batch matrix multiplications of two sets of arrays.
Args:
a (:class:`~chainer.Variable` or :ref:`ndarray`):
The left operand of the batch matrix multiplications.
A 2-D array of shape ``(B, N)`` is considered as B
:math:`N \\times 1` matrices.
A 3-D array of shape ``(B, M, N)`` is considered as B
:math:`M \\times N` matrices.
b (:class:`~chainer.Variable` or :ref:`ndarray`):
The right operand of the batch matrix multiplications.
Its array is treated as matrices in the same way as ``a``'s array.
transa (bool): If ``True``, transpose each matrix in ``a``.
transb (bool): If ``True``, transpose each matrix in ``b``.
Returns:
~chainer.Variable: The result of the batch matrix multiplications as a
3-D array.
.. deprecated:: v3.0.0
batch_matmul is deprecated. Use ``matmul`` instead.
"""
warnings.warn('batch_matmul is deprecated. Use matmul instead.',
DeprecationWarning)
return BatchMatMul(transa=transa, transb=transb).apply((a, b))[0]
| 11,485
| 33.389222
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/prod.py
|
import numpy
import six
from chainer import backend
from chainer import function_node
import chainer.functions
from chainer.utils import type_check
class Prod(function_node.FunctionNode):
"""Product of array elements over a given axis."""
keepdims = False
def __init__(self, axis=None, keepdims=False):
if axis is None:
self.axis = None
elif isinstance(axis, six.integer_types):
self.axis = (axis,)
elif isinstance(axis, tuple) and all(
isinstance(a, six.integer_types) for a in axis):
if len(set(axis)) != len(axis):
raise ValueError('duplicate value in axis: ({})'.format(
', '.join(map(str, axis))))
self.axis = axis
else:
raise TypeError('None, int or tuple of int are required')
self.keepdims = keepdims
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
if self.axis is not None:
for axis in self.axis:
if axis >= 0:
type_check.expect(
axis < in_types[0].ndim,
)
else:
type_check.expect(
-axis - 1 < in_types[0].ndim,
)
def forward(self, x):
self.retain_inputs((0,))
xp = backend.get_array_module(*x)
return xp.asarray(x[0].prod(axis=self.axis, keepdims=self.keepdims)),
def backward(self, indexes, gy):
x, = self.get_retained_inputs()
gy, = gy
F = chainer.functions
if self.axis is None:
axes = tuple(six.moves.range(x.ndim))
else:
axes = tuple([
axis if axis >= 0 else axis + x.ndim
for axis in self.axis
])
if not self.keepdims:
for axis in sorted(axes):
gy = F.expand_dims(gy, axis=axis)
# indices of axes that are not reduced
axes_kept = tuple(a for a in six.moves.range(x.ndim) if a not in axes)
n_reduced_elements = 1
for axis in axes:
n_reduced_elements *= x.shape[axis]
transpose_axes = axes + axes_kept
x = x.transpose(transpose_axes)
transposed_shape = x.shape
kept_shape = transposed_shape[len(axes):]
x = x.reshape((n_reduced_elements,) + kept_shape)
def shifted_cumprod(a):
a, _ = F.split_axis(
F.concat([a.xp.ones((1,) + kept_shape, a.dtype), a], 0),
(-1,), 0)
return F.cumprod(a, 0)
gx = shifted_cumprod(x) * F.flip(shifted_cumprod(F.flip(x, 0)), 0)
gx = gx.reshape(transposed_shape)
gx = gx.transpose(list(numpy.argsort(transpose_axes)))
gx = gx * gy
return gx,
def prod(x, axis=None, keepdims=False):
"""Product of array elements over a given axis.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Elements to calculate the product.
        axis (None, int, or tuple of int): Axis along which the product is
            performed. The default (axis = None) computes the product over
            all the dimensions of the input array.
keepdims (bool): If ``True``, the specified axes are remained as axes
of length one.
Returns:
~chainer.Variable: Output variable.
"""
return Prod(axis, keepdims).apply((x,))[0]
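# Hedged usage sketch, not part of the upstream file: the `prod` function
# defined above, reducing along one axis while keeping it as a length-1
# dimension. Assumes NumPy and Chainer are importable.
if __name__ == '__main__':
    x_demo = numpy.arange(1, 7, dtype=numpy.float32).reshape(2, 3)
    y_demo = prod(x_demo, axis=1, keepdims=True)   # row-wise products
    print(y_demo.array)                            # [[6.], [120.]]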
| 3,528
| 30.792793
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/math/det.py
|
import chainer
from chainer import function_node
import chainer.functions
from chainer.utils import precision
from chainer.utils import type_check
class BatchDet(function_node.FunctionNode):
@property
def label(self):
return 'det'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
a_type, = in_types
type_check.expect(a_type.dtype.kind == 'f')
# Only a minibatch of 2D array shapes allowed.
type_check.expect(a_type.ndim == 3)
# Matrix inversion only allowed for square matrices
# so assert the last two dimensions are equal.
type_check.expect(a_type.shape[-1] == a_type.shape[-2])
@precision._fp16_mixed_precision_helper
def forward(self, inputs):
self.retain_inputs((0,))
self.retain_outputs((0,))
x, = inputs
xp = chainer.backend.get_array_module(x)
detx = xp.linalg.det(x)
return detx,
def backward(self, indexes, gy):
x, = self.get_retained_inputs()
detx, = self.get_retained_outputs()
gy, = gy
inv_x = chainer.functions.batch_inv(
chainer.functions.transpose(x, (0, 2, 1)))
gy = chainer.functions.broadcast_to(gy[:, None, None], inv_x.shape)
detx = chainer.functions.broadcast_to(detx[:, None, None], inv_x.shape)
grad = gy * detx * inv_x
return grad,
def batch_det(a):
"""Computes the determinant of a batch of square matrices.
Args:
a (:class:`~chainer.Variable` or :ref:`ndarray`):
Input array to compute the determinant for.
The first dimension should iterate over each matrix and be
of the batchsize.
Returns:
~chainer.Variable: vector of determinants for every matrix
in the batch.
"""
return BatchDet().apply((a,))[0]
def det(a):
"""Computes the determinant of a single square matrix.
Args:
a (:class:`~chainer.Variable` or :ref:`ndarray`):
Input array to compute the determinant for.
Returns:
~chainer.Variable: Scalar determinant of the matrix a.
"""
shape = (1, a.shape[0], a.shape[1])
batched_a = chainer.functions.reshape(a, shape)
batched_det = BatchDet().apply((batched_a,))[0]
return chainer.functions.reshape(batched_det, ())
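# Hedged usage sketch, not part of the upstream file: `det` and `batch_det`
# defined above on small matrices. Assumes NumPy and Chainer are importable.
if __name__ == '__main__':
    import numpy
    a_demo = numpy.array([[2.0, 0.0], [0.0, 3.0]], dtype=numpy.float32)
    print(det(a_demo).array)                        # 6.0
    batch = numpy.stack([a_demo, numpy.eye(2, dtype=numpy.float32)])
    print(batch_det(batch).array)                   # [6. 1.]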
| 2,352
| 29.558442
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/erfcx.py
|
import numpy
try:
from scipy import special
available_cpu = True
except ImportError as e:
available_cpu = False
_import_error = e
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class Erfcx(function_node.FunctionNode):
@property
def label(self):
return 'erfcx'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, x):
if not available_cpu:
raise ImportError('SciPy is not available. Forward computation'
' of erfcx in CPU cannot be done. ' +
str(_import_error))
self.retain_inputs((0,))
self.retain_outputs((0,))
return utils.force_array(special.erfcx(x[0]), dtype=x[0].dtype),
def forward_gpu(self, x):
self.retain_inputs((0,))
self.retain_outputs((0,))
return cuda.elementwise(
'T x', 'T y',
'y = erfcx(x)',
'elementwise_erfcx',
)(x[0]),
def backward(self, indexes, gy):
x = self.get_retained_inputs()[0]
y = self.get_retained_outputs()[0]
return 2 * (x * y - numpy.pi ** -0.5) * gy[0],
def erfcx(x):
"""Elementwise scaled complementary error function.
.. note::
Forward computation in CPU cannot be done if
`SciPy <https://www.scipy.org/>`_ is not available.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Erfcx().apply((x,))[0]
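# Hedged usage sketch, not part of the upstream file: `erfcx` defined above is
# the scaled complementary error function exp(x**2) * erfc(x). The CPU forward
# pass needs SciPy.
if __name__ == '__main__':
    x_demo = numpy.array([0.0, 1.0, 5.0], dtype=numpy.float32)
    print(erfcx(x_demo).array)   # erfcx(0) == 1; large x approaches 1/(x*sqrt(pi))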
| 1,722
| 25.921875
| 75
|
py
|
chainer
|
chainer-master/chainer/functions/math/tensordot.py
|
import numpy
import six
from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import collections_abc
from chainer.utils import type_check
def _tensordot(a, b, a_axes, b_axes, c_axes=None):
a_col_ndim = len(a_axes[1])
b_row_ndim = len(b_axes[0])
if a_col_ndim != b_row_ndim:
raise ValueError('axes count mismatch')
if a.ndim < a_col_ndim or b.ndim < b_row_ndim:
raise ValueError('dimension of input tensors must be '
                         'greater than or equal to dot-axes count ({})'
.format(a_col_ndim))
for a_axis, b_axis in zip(a_axes[1], b_axes[0]):
if a.shape[a_axis] != b.shape[b_axis]:
raise ValueError('shape mismatch')
xp = backend.get_array_module(a)
y = xp.tensordot(a, b, axes=(tuple(a_axes[1]), tuple(b_axes[0])))
if c_axes is not None:
a_row_ndim = len(a_axes[0])
b_col_ndim = len(b_axes[1])
c_row_ndim = len(c_axes[0])
c_col_ndim = len(c_axes[1])
if a_row_ndim != c_row_ndim:
raise ValueError('axes count mismatch')
if b_col_ndim != c_col_ndim:
raise ValueError('axes count mismatch')
trans = [None for i in six.moves.range(y.ndim)]
table_a = [1 if i in a_axes[0] else 0 for i in six.moves.range(a.ndim)]
table_a = numpy.cumsum(table_a) - 1
for i, c_axis in enumerate(c_axes[0]):
trans[c_axis] = table_a[a_axes[0][i]]
table_b = [1 if i in b_axes[1] else 0 for i in six.moves.range(b.ndim)]
table_b = numpy.cumsum(table_b) - 1
for i, c_axis in enumerate(c_axes[1]):
trans[c_axis] = table_b[b_axes[1][i]] + len(a_axes[0])
for i, c_axis in enumerate(trans):
if i != c_axis:
y = xp.transpose(y, trans)
break
return y
class TensorDot(function_node.FunctionNode):
def __init__(self, axes=2, a_axes=None, b_axes=None, c_axes=None,
dtype=None):
self.axes = axes
self.a_axes = a_axes
self.b_axes = b_axes
self.c_axes = c_axes
self.dtype = dtype
if isinstance(axes, collections_abc.Sequence):
if len(axes) != 2:
raise ValueError('axes must be a pair of sequence of integers '
'when it is a list or tuple.')
elif isinstance(axes, six.integer_types):
pass
else:
raise TypeError('axes must be a pair of sequence of integers or '
'an integer')
def check_type_forward(self, in_types):
type_check._argname(in_types, ('a', 'b'))
a_type, b_type = in_types
type_check.expect(
a_type.dtype.kind == 'f',
b_type.dtype.kind == 'f',
)
def forward(self, inputs):
self.retain_inputs((0, 1))
a, b = inputs
if self.a_axes is None or self.b_axes is None:
a_axes = [[], []] # 0:row axes, 1:col axes
b_axes = [[], []] # 0:row axes, 1:col axes
axes = self.axes
if isinstance(axes, collections_abc.Sequence):
a_axes[1], b_axes[0] = axes
if numpy.isscalar(a_axes[1]):
a_axes[1] = a_axes[1],
if numpy.isscalar(b_axes[0]):
b_axes[0] = b_axes[0],
else:
a_axes[1] = six.moves.range(a.ndim - axes, a.ndim)
b_axes[0] = six.moves.range(axes)
a_range = six.moves.range(a.ndim)
a_axes[0] = [i for i in a_range if i not in a_axes[1]]
b_range = six.moves.range(b.ndim)
b_axes[1] = [i for i in b_range if i not in b_axes[0]]
self.a_axes = a_axes
self.b_axes = b_axes
c = _tensordot(a, b, self.a_axes, self.b_axes, self.c_axes)
if self.c_axes is None:
c_axes = [[], []] # 0:row axes, 1:col axes
c_row_ndim = len(self.a_axes[0])
c_col_ndim = len(self.b_axes[1])
c_axes[0] = six.moves.range(c_row_ndim)
c_axes[1] = six.moves.range(c_row_ndim, c_row_ndim + c_col_ndim)
self.c_axes = c_axes
return utils.force_array(c, self.dtype),
def backward(self, indexes, grad_outputs):
a, b = self.get_retained_inputs()
gc, = grad_outputs
ga = None
if 0 in indexes:
ga, = TensorDot(a_axes=self.c_axes,
b_axes=[self.b_axes[1], self.b_axes[0]],
c_axes=self.a_axes,
dtype=a.dtype).apply((gc, b))
gb = None
if 1 in indexes:
gb, = TensorDot(a_axes=[self.a_axes[1], self.a_axes[0]],
b_axes=self.c_axes,
c_axes=self.b_axes,
dtype=b.dtype).apply((a, gc))
return ga, gb
def tensordot(a, b, axes=2):
"""Returns the tensor dot product of two arrays along specified axes.
    This is equivalent to computing the dot product along the specified axes,
    which are treated as one axis by reshaping.
Args:
a (:class:`~chainer.Variable` or :ref:`ndarray`): The first argument.
b (:class:`~chainer.Variable` or :ref:`ndarray`): The second argument.
axes:
- If it is an integer, then ``axes`` axes at the last of ``a`` and
the first of ``b`` are used.
- If it is a pair of sequences of integers, then these two
sequences specify the list of axes for ``a`` and ``b``. The
corresponding axes are paired for sum-product.
Returns:
~chainer.Variable: The tensor dot product of ``a`` and ``b`` along the
axes specified by ``axes``.
.. admonition:: Example
>>> a = np.random.rand(5, 3, 2)
>>> b = np.random.rand(3, 2, 4)
>>> c = F.tensordot(a, b, axes=2)
>>> c.shape
(5, 4)
.. seealso:: :func:`numpy.tensordot`
"""
return TensorDot(axes=axes).apply((a, b))[0]
| 6,126
| 34.622093
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/ndtri.py
|
try:
from scipy import special
available_cpu = True
except ImportError as e:
available_cpu = False
_import_error = e
import math
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class Ndtri(function_node.FunctionNode):
@property
def label(self):
return 'ndtri'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, x):
if not available_cpu:
raise ImportError('SciPy is not available. Forward computation'
' of ndtri in CPU can not be done.' +
str(_import_error))
self.retain_outputs((0,))
return utils.force_array(special.ndtri(x[0]), dtype=x[0].dtype),
def forward_gpu(self, x):
self.retain_outputs((0,))
return cuda.elementwise(
'T x', 'T y',
'y = normcdfinv(x)',
'elementwise_ndtri',
)(x[0]),
def backward(self, indexes, gy):
y, = self.get_retained_outputs()
sqrt_2pi = (2 * math.pi) ** 0.5
return sqrt_2pi * chainer.functions.exp(0.5 * y ** 2) * gy[0],
def ndtri(x):
"""Elementwise inverse function of ndtr.
.. note::
Forward computation in CPU can not be done if
`SciPy <https://www.scipy.org/>`_ is not available.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Ndtri().apply((x,))[0]
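# Hedged usage sketch, not part of the upstream file: `ndtri` defined above
# maps probabilities to standard-normal quantiles (the inverse of `ndtr`).
# The CPU forward pass needs SciPy.
if __name__ == '__main__':
    import numpy
    p_demo = numpy.array([0.025, 0.5, 0.975], dtype=numpy.float32)
    print(ndtri(p_demo).array)   # roughly [-1.96, 0.0, 1.96]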
| 1,676
| 26.048387
| 75
|
py
|
chainer
|
chainer-master/chainer/functions/math/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainer/functions/math/ndtr.py
|
import math
import warnings
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.math import exponential
from chainer import utils
from chainer.utils import type_check
_ndtr_cpu = None
def _slow_ndtr_cpu(x):
return 0.5 * math.erfc(-x / 2 ** 0.5)
class Ndtr(function_node.FunctionNode):
@property
def label(self):
return 'ndtr'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, x):
global _ndtr_cpu
if _ndtr_cpu is None:
try:
from scipy import special
_ndtr_cpu = special.ndtr
except ImportError:
warnings.warn(
'SciPy is not available. Forward computation of ndtr in'
' CPU can be slow without SciPy.',
chainer.warnings.PerformanceWarning)
_ndtr_cpu = numpy.vectorize(_slow_ndtr_cpu)
self.retain_inputs((0,))
return utils.force_array(_ndtr_cpu(x[0]), dtype=x[0].dtype),
def forward_gpu(self, x):
self.retain_inputs((0,))
return cuda.elementwise(
'T x', 'T y',
'y = normcdf(x)',
'elementwise_ndtr',
)(x[0]),
def backward(self, indexes, gy):
x = self.get_retained_inputs()[0]
return (2 * numpy.pi) ** -0.5 * exponential.exp(-0.5 * x ** 2) * gy[0],
def ndtr(x):
"""Elementwise cumulative distribution function of normal distribution.
.. note::
Forward computation in CPU can be slow if
`SciPy <https://www.scipy.org/>`_ is not available.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Ndtr().apply((x,))[0]
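# Hedged usage sketch, not part of the upstream file: `ndtr` defined above is
# the standard-normal CDF; without SciPy the CPU path falls back to a slower
# vectorized math.erfc implementation.
if __name__ == '__main__':
    x_demo = numpy.array([-1.96, 0.0, 1.96], dtype=numpy.float32)
    print(ndtr(x_demo).array)   # roughly [0.025, 0.5, 0.975]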
| 1,932
| 25.479452
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/sqrt.py
|
import numpy
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
import chainerx
class Sqrt(function_node.FunctionNode):
@property
def label(self):
return 'sqrt'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_chainerx(self, x):
return chainerx.sqrt(x[0]),
def forward(self, x):
self.retain_outputs((0,))
xp = backend.get_array_module(*x)
return utils.force_array(xp.sqrt(x[0], dtype=x[0].dtype)),
def backward(self, indexes, grad_outputs):
gx = self.get_retained_outputs()[0]
gy = grad_outputs[0]
return gy / (gx * 2.0),
class RsqrtGPU(function_node.FunctionNode):
@property
def label(self):
return 'rsqrt'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_gpu(self, inputs):
self.retain_outputs((0,))
x, = inputs
out = cuda.cupyx.rsqrt(x, dtype=x.dtype)
return utils.force_array(out),
def backward(self, indexes, grad_outputs):
y, = self.get_retained_outputs()
gy, = grad_outputs
return gy * (y ** 3) * -0.5,
def sqrt(x):
"""Elementwise square root function.
.. math::
y_i = \\sqrt x_i.
    If the value of :math:`x_i` is negative, it returns ``NaN`` for
    :math:`y_i`, following the underlying NumPy and CuPy specification.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Sqrt().apply((x,))[0]
def rsqrt(x):
"""Computes elementwise reciprocal of square root of input :math:`x_i`.
.. math::
y_i = {1 \\over \\sqrt x_i}.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
.. seealso:: :func:`~chainer.functions.sqrt`
"""
xp = backend.get_array_module(x)
if xp is numpy or xp is chainerx:
return 1.0 / sqrt(x)
# CuPy provides `rsqrt` which is faster than `1.0 / sqrt(x)`.
return RsqrtGPU().apply((x,))[0]
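# Hedged usage sketch, not part of the upstream file: `sqrt` and `rsqrt`
# defined above. On NumPy/ChainerX inputs, `rsqrt` is computed as 1 / sqrt(x).
if __name__ == '__main__':
    x_demo = numpy.array([1.0, 4.0, 9.0], dtype=numpy.float32)
    print(sqrt(x_demo).array)    # [1. 2. 3.]
    print(rsqrt(x_demo).array)   # [1. 0.5 0.33333334]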
| 2,385
| 24.115789
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/log_ndtr.py
|
import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.math import erfcx
from chainer import utils
from chainer.utils import type_check
_log_ndtr_cpu = None
class LogNdtr(function_node.FunctionNode):
@property
def label(self):
return 'log_ndtr'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, x):
global _log_ndtr_cpu
if _log_ndtr_cpu is None:
try:
from scipy import special
_log_ndtr_cpu = special.log_ndtr
except ImportError:
raise ImportError('SciPy is not available. Forward computation'
' of log_ndtr can not be done.')
self.retain_inputs((0,))
return utils.force_array(_log_ndtr_cpu(x[0]), dtype=x[0].dtype),
def forward_gpu(self, x):
self.retain_inputs((0,))
return cuda.elementwise(
'T x', 'T y',
'''
if (x > 0) {
y = log1p(-normcdf(-x));
} else {
y = log(0.5 * erfcx(-sqrt(0.5) * x)) - 0.5 * x * x;
}
''',
'elementwise_log_ndtr',
)(x[0]),
def backward(self, indexes, gy):
x = self.get_retained_inputs()[0]
return (2 / numpy.pi) ** 0.5 / erfcx.erfcx(- x / 2 ** 0.5) * gy[0],
def log_ndtr(x):
"""Logarithm of cumulative distribution function of normal distribution.
.. note::
Forward computation in CPU can not be done if
`SciPy <https://www.scipy.org/>`_ is not available.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return LogNdtr().apply((x,))[0]
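# Hedged usage sketch, not part of the upstream file: `log_ndtr` defined above
# stays finite deep in the lower tail, where log(ndtr(x)) would underflow.
# The CPU forward pass needs SciPy.
if __name__ == '__main__':
    x_demo = numpy.array([-10.0, 0.0, 10.0], dtype=numpy.float32)
    print(log_ndtr(x_demo).array)   # roughly [-53.2, -0.693, ~0.0]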
| 1,898
| 26.521739
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/polygamma.py
|
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
_polygamma_cpu = None
class PolyGamma(function_node.FunctionNode):
@property
def label(self):
return 'polygamma'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('n', 'x'))
n_type, x_type = in_types
type_check.expect(
n_type.dtype.kind == 'i',
x_type.dtype.kind == 'f',
)
def forward_cpu(self, inputs):
n, x = inputs
global _polygamma_cpu
if _polygamma_cpu is None:
try:
from scipy import special
_polygamma_cpu = special.polygamma
except ImportError:
raise ImportError('SciPy is not available. Forward computation'
' of polygamma can not be done.')
self.retain_inputs((0, 1))
return utils.force_array(_polygamma_cpu(n, x), dtype=x.dtype),
def forward_gpu(self, inputs):
n, x = inputs
self.retain_inputs((0, 1))
return utils.force_array(
cuda.cupyx.scipy.special.polygamma(n, x), dtype=x.dtype),
def backward(self, indexes, gy):
n, x = self.get_retained_inputs()
return None, polygamma(n + 1, x) * gy[0],
def polygamma(n, x):
"""Polygamma function.
.. note::
Forward computation in CPU can not be done if
`SciPy <https://www.scipy.org/>`_ is not available.
Args:
n (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return PolyGamma().apply((n, x))[0]
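# Hedged usage sketch, not part of the upstream file: `polygamma` defined
# above. `n` must be an integer array and only `x` receives a gradient; the
# CPU forward pass needs SciPy.
if __name__ == '__main__':
    import numpy
    n_demo = numpy.array([0, 1], dtype=numpy.int32)
    x_demo = numpy.array([1.0, 1.0], dtype=numpy.float32)
    print(polygamma(n_demo, x_demo).array)   # digamma(1) ~ -0.577, pi**2/6 ~ 1.645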
| 1,796
| 27.078125
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/sign.py
|
import chainer
from chainer import backend
from chainer import utils
def sign(x):
"""Elementwise sign function.
For a given input :math:`x`, this function returns :math:`sgn(x)`
defined as
.. math::
sgn(x) = \\left \\{ \\begin{array}{cc}
-1 & {\\rm if~x < 0} \\\\
0 & {\\rm if~x = 0} \\\\
1 & {\\rm if~x > 0} \\\\
\\end{array} \\right.
.. note::
The gradient of this function is ``None`` everywhere and therefore
unchains the computational graph.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable for which the sign is computed.
Returns:
~chainer.Variable: Output variable.
"""
if isinstance(x, chainer.variable.Variable):
x = x.array
xp = backend.get_array_module(x)
return chainer.as_variable(utils.force_array(xp.sign(x)))
| 893
| 23.162162
| 74
|
py
|
chainer
|
chainer-master/chainer/functions/math/bias.py
|
import chainer
from chainer.functions.array import broadcast
from chainer.functions.array import reshape
def bias(x, y, axis=1):
"""Elementwise summation with broadcasting.
    Computes an elementwise summation of two input variables, with the shape of
the latter variable broadcasted to match the shape of the former. ``axis``
is the first axis of the first variable along which the second variable is
applied.
The term "broadcasting" here comes from Caffe's bias layer so the
"broadcasting" with the following arguments::
x : 100 x 3 x 40 x 5 x 6
y : 3 x 40
axis : 1
is equivalent to the following numpy broadcasting::
x : 100 x 3 x 40 x 5 x 6
y : (1 x) 3 x 40 x 1 x 1
Note that the axis of ``x`` to which we apply ``y`` is specified by the
argument ``axis``, whose meaning is different from numpy's ``axis``.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable to be summed.
y (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable to sum, broadcasted.
axis (int): The first axis of ``x`` along which ``y`` is applied.
Returns:
~chainer.Variable: Output variable.
"""
x_shape = x.shape
y_shape = y.shape
if chainer.is_debug():
assert x_shape[axis:axis + len(y_shape)] == y_shape
y1_shape = tuple([1] * axis + list(y_shape) +
[1] * (len(x_shape) - axis - len(y_shape)))
y1 = reshape.reshape(y, y1_shape)
y2 = broadcast.broadcast_to(y1, x_shape)
return x + y2
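# Hedged usage sketch, not part of the upstream file: the Caffe-style
# broadcasting performed by `bias` above, applying `y` starting at axis 1 of
# `x`. Assumes NumPy and Chainer are importable.
if __name__ == '__main__':
    import numpy
    x_demo = numpy.zeros((2, 3, 4, 5), dtype=numpy.float32)
    y_demo = numpy.arange(12, dtype=numpy.float32).reshape(3, 4)
    z_demo = bias(x_demo, y_demo, axis=1)   # y is reshaped to (1, 3, 4, 1) and added
    print(z_demo.shape)                     # (2, 3, 4, 5)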
| 1,607
| 31.816327
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/math/exponential.py
|
import math
import numpy
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
import chainerx
class Exp(function_node.FunctionNode):
@property
def label(self):
return 'exp'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_chainerx(self, x):
return chainerx.exp(x[0]),
def forward_cpu(self, x):
self.retain_outputs((0,))
return utils.force_array(numpy.exp(x[0])),
def forward_gpu(self, x):
self.retain_outputs((0,))
return cuda.cupy.exp(x[0]),
def backward(self, indexes, gy):
y = self.get_retained_outputs()[0]
return y * gy[0],
def exp(x):
"""Elementwise exponential function.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Exp().apply((x,))[0]
class Log(function_node.FunctionNode):
@property
def label(self):
return 'log'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_chainerx(self, x):
return chainerx.log(x[0]),
def forward_cpu(self, x):
self.retain_inputs((0,))
return utils.force_array(numpy.log(x[0])),
def forward_gpu(self, x):
self.retain_inputs((0,))
return cuda.cupy.log(x[0]),
def backward(self, indexes, gy):
x = self.get_retained_inputs()[0]
return utils.force_array(gy[0] / x),
def log(x):
"""Elementwise natural logarithm function.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Log().apply((x,))[0]
class Log2(function_node.FunctionNode):
@property
def label(self):
return 'log2'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, inputs):
self.retain_inputs((0,))
x = inputs[0]
xp = backend.get_array_module(x)
return utils.force_array(xp.log2(x)),
def backward(self, indexes, gy):
x = self.get_retained_inputs()[0]
return gy[0] / x * (1 / math.log(2)),
def log2(x):
"""Elementwise logarithm function to the base 2.
.. math::
y_i = \\log_2 x_i.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Log2().apply((x,))[0]
class Log10(function_node.FunctionNode):
@property
def label(self):
return 'log10'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, inputs):
self.retain_inputs((0,))
x = inputs[0]
xp = backend.get_array_module(x)
return utils.force_array(xp.log10(x)),
def backward(self, indexes, gy):
x = self.get_retained_inputs()[0]
return gy[0] / x * (1 / math.log(10)),
def log10(x):
"""Elementwise logarithm function to the base 10.
.. math::
y_i = \\log_{10} x_i.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Log10().apply((x,))[0]
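# Hedged usage sketch, not part of the upstream file: the elementwise
# exponential/logarithm family defined above. Assumes NumPy and Chainer.
if __name__ == '__main__':
    x_demo = numpy.array([1.0, 2.0, 4.0], dtype=numpy.float32)
    print(exp(log(x_demo)).array)   # recovers x up to float32 rounding
    print(log2(x_demo).array)       # [0. 1. 2.]
    print(log10(numpy.array([100.0], dtype=numpy.float32)).array)   # [2.]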
| 3,676
| 22.125786
| 73
|
py
|
chainer
|
chainer-master/chainer/functions/math/logsumexp.py
|
import six
import chainer
from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
import chainerx
class LogSumExp(function_node.FunctionNode):
def __init__(self, axis=None):
if axis is None:
self.axis = None
elif isinstance(axis, six.integer_types):
self.axis = (axis,)
elif isinstance(axis, tuple) and all(
isinstance(a, six.integer_types) for a in axis):
if len(set(axis)) != len(axis):
raise ValueError('duplicate value in axis: ({})'.format(
', '.join(map(str, axis))))
self.axis = axis
else:
raise TypeError('None, int or tuple of int are required')
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
if self.axis is not None:
for axis in self.axis:
if axis >= 0:
type_check.expect(
axis < in_types[0].ndim,
)
else:
type_check.expect(
-axis - 1 < in_types[0].ndim,
)
def forward_chainerx(self, inputs):
return chainerx.logsumexp(inputs[0], self.axis),
def forward(self, inputs):
self.retain_inputs((0,))
self.retain_outputs((0,))
xp = backend.get_array_module(*inputs)
x, = inputs
m = x.max(axis=self.axis, keepdims=True)
y = utils.force_array(x - m)
xp.exp(y, out=y)
y_sum = y.sum(axis=self.axis)
y = xp.asarray(xp.log(y_sum) + m.reshape(y_sum.shape))
return y,
def backward(self, indexes, grads):
x, = self.get_retained_inputs()
y, = self.get_retained_outputs()
gy, = grads
if self.axis is not None:
actual_axis = []
for axis in self.axis:
if axis < 0:
axis = len(x.shape) + axis
actual_axis.append(axis)
for axis in sorted(actual_axis):
gy = chainer.functions.expand_dims(gy, axis=axis)
y = chainer.functions.expand_dims(y, axis=axis)
gy = chainer.functions.broadcast_to(gy, x.shape)
y = chainer.functions.broadcast_to(y, x.shape)
gx = gy * chainer.functions.exp(x - y)
return gx,
def logsumexp(x, axis=None):
"""Log-sum-exp of array elements over a given axis.
This function calculates logarithm of sum of exponential of array elements.
.. math::
y_i = \\log\\left(\\sum_j \\exp(x_{ij})\\right)
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Elements to log-sum-exp.
        axis (None, int, or tuple of int): Axis along which the sum is
            performed. The default (axis = None) computes the sum over all
            the dimensions of the input array.
Returns:
~chainer.Variable: Output variable.
"""
return LogSumExp(axis).apply((x,))[0]
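# Hedged usage sketch, not part of the upstream file: `logsumexp` defined above
# subtracts the per-axis maximum before exponentiating, so it stays finite
# where a naive log(sum(exp(x))) would overflow in float32.
if __name__ == '__main__':
    import numpy
    x_demo = numpy.array([[1000.0, 1000.0], [0.0, 0.0]], dtype=numpy.float32)
    print(logsumexp(x_demo, axis=1).array)   # [1000.6931, 0.6931], no overflow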
| 3,116
| 30.484848
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/math/ceil.py
|
import chainer
from chainer import backend
from chainer import utils
def ceil(x):
"""Elementwise ceil function.
.. math::
y_i = \\lceil x_i \\rceil
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
if isinstance(x, chainer.variable.Variable):
x = x.data
xp = backend.get_array_module(x)
return chainer.as_variable(utils.force_array(xp.ceil(x), x.dtype))
| 493
| 21.454545
| 73
|
py
|
chainer
|
chainer-master/chainer/functions/math/logarithm_1p.py
|
import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class Log1p(function_node.FunctionNode):
@property
def label(self):
return 'log1p'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, x):
self.retain_inputs((0,))
return utils.force_array(numpy.log1p(x[0])),
def forward_gpu(self, x):
self.retain_inputs((0,))
return cuda.cupy.log1p(x[0]),
def backward(self, indexes, gy):
x = self.get_retained_inputs()
return gy[0] / (x[0] + 1.0),
def log1p(x):
"""Elementwise natural logarithm plus one function.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Log1p().apply((x,))[0]
| 985
| 22.47619
| 73
|
py
|
chainer
|
chainer-master/chainer/functions/math/exponential_m1.py
|
import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class Expm1(function_node.FunctionNode):
@property
def label(self):
return 'expm1'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, x):
self.retain_outputs((0,))
return utils.force_array(numpy.expm1(x[0])),
def forward_gpu(self, x):
self.retain_outputs((0,))
return cuda.cupy.expm1(x[0]),
def backward(self, indexes, gy):
y = self.get_retained_outputs()[0]
return (y + 1.0) * gy[0],
def expm1(x):
"""Elementwise exponential minus one function.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Expm1().apply((x,))[0]
| 983
| 22.428571
| 73
|
py
|
chainer
|
chainer-master/chainer/functions/math/erfinv.py
|
try:
from scipy import special
available_cpu = True
except ImportError as e:
available_cpu = False
_import_error = e
import math
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
BACKWORDC = math.pi ** 0.5 / 2
class ErfInv(function_node.FunctionNode):
@property
def label(self):
return 'erfinv'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, x):
if not available_cpu:
raise ImportError('SciPy is not available. Forward computation'
' of erfinv in CPU cannot be done. ' +
str(_import_error))
self.retain_outputs((0,))
return utils.force_array(special.erfinv(x[0]), dtype=x[0].dtype),
def forward_gpu(self, x):
self.retain_outputs((0,))
return cuda.elementwise(
'T x', 'T y',
'y = erfinv(x)',
'elementwise_erfinv',
)(x[0]),
def backward(self, indexes, gy):
y, = self.get_retained_outputs()
return BACKWORDC * chainer.functions.exp(y ** 2) * gy[0],
def erfinv(x):
"""Elementwise inverse function of error function.
.. note::
Forward computation in CPU cannot be done if
`SciPy <https://www.scipy.org/>`_ is not available.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return ErfInv().apply((x,))[0]
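# Hedged usage sketch, not part of the upstream file: `erfinv` defined above
# inverts the error function; the CPU forward pass needs SciPy.
if __name__ == '__main__':
    import numpy
    x_demo = numpy.array([-0.5, 0.0, 0.5], dtype=numpy.float32)
    print(erfinv(x_demo).array)   # roughly [-0.4769, 0.0, 0.4769]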
| 1,674
| 25.171875
| 75
|
py
|
chainer
|
chainer-master/chainer/functions/math/fix.py
|
import chainer
from chainer import backend
from chainer import utils
def fix(x):
    """Elementwise fix function (rounding towards zero).
    .. math::
        y_i = \\mathrm{fix}(x_i)
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
if isinstance(x, chainer.variable.Variable):
x = x.array
xp = backend.get_array_module(x)
return chainer.as_variable(utils.force_array(xp.fix(x), x.dtype))
| 490
| 20.347826
| 73
|
py
|
chainer
|
chainer-master/chainer/functions/math/erf.py
|
import math
import warnings
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
_erf_cpu = None
class Erf(function_node.FunctionNode):
@property
def label(self):
return 'erf'
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, x):
global _erf_cpu
if _erf_cpu is None:
try:
from scipy import special
_erf_cpu = special.erf
except ImportError:
warnings.warn(
'SciPy is not available. Forward computation of erf in CPU'
' can be slow without SciPy.',
chainer.warnings.PerformanceWarning)
_erf_cpu = numpy.vectorize(math.erf)
self.retain_inputs((0,))
return utils.force_array(_erf_cpu(x[0]), dtype=x[0].dtype),
def forward_gpu(self, x):
self.retain_inputs((0,))
return cuda.elementwise(
'T x', 'T y',
'y = erf(x)',
'elementwise_erf',
)(x[0]),
def backward(self, indexes, gy):
x = self.get_retained_inputs()[0]
return 2 / numpy.pi ** 0.5 * chainer.functions.exp(-x ** 2) * gy[0],
def erf(x):
"""Elementwise error function.
.. note::
Forward computation in CPU can be slow if
`SciPy <https://www.scipy.org/>`_ is not available.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Erf().apply((x,))[0]
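# Hedged usage sketch, not part of the upstream file: `erf` defined above;
# without SciPy the CPU path falls back to a vectorized math.erf.
if __name__ == '__main__':
    x_demo = numpy.array([-1.0, 0.0, 1.0], dtype=numpy.float32)
    print(erf(x_demo).array)   # roughly [-0.8427, 0.0, 0.8427]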
| 1,749
| 24.735294
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/noise/simplified_dropconnect.py
|
import numpy
from chainer import backend
from chainer import function_node
import chainer.functions
from chainer.utils import type_check
from chainer import variable
def _as_mat(x):
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
def _matmul(a, b, xp):
if xp is numpy:
# numpy 1.9 does not support matmul.
# So we use numpy.einsum instead of numpy.matmul.
return xp.einsum('...jk,...kl->...jl', a, b)
else:
return xp.matmul(a, b)
class SimplifiedDropconnect(function_node.FunctionNode):
"""Linear unit regularized by simplified dropconnect."""
def __init__(self, ratio, mask=None, use_batchwise_mask=True):
self.ratio = ratio
self.mask = mask
self.use_batchwise_mask = use_batchwise_mask
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type, w_type = in_types[:2]
type_check._argname((x_type, w_type), ('x', 'W'))
type_check.expect(
x_type.dtype.kind == 'f',
w_type.dtype.kind == 'f',
x_type.ndim >= 2,
w_type.ndim == 2,
type_check.prod(x_type.shape[1:]) == w_type.shape[1],
)
if type_check.eval(n_in) == 3:
b_type = in_types[2]
type_check._argname((b_type,), ('b',))
type_check.expect(
b_type.dtype == x_type.dtype,
b_type.ndim == 1,
b_type.shape[0] == w_type.shape[0],
)
if self.mask is not None:
if self.use_batchwise_mask:
type_check.expect(
self.mask.shape[0] == x_type.shape[0],
self.mask.shape[1:] == w_type.shape,
)
else:
type_check.expect(self.mask.shape == w_type.shape)
def forward(self, inputs):
self.retain_inputs((0, 1))
scale = inputs[1].dtype.type(1. / (1 - self.ratio))
xp = backend.get_array_module(*inputs)
if self.mask is None:
if self.use_batchwise_mask:
mask_shape = (inputs[0].shape[0], inputs[1].shape[0],
inputs[1].shape[1])
else:
mask_shape = (inputs[1].shape[0], inputs[1].shape[1])
if xp == numpy:
self.mask = xp.random.rand(*mask_shape) >= self.ratio
else:
self.mask = xp.random.rand(*mask_shape,
dtype=numpy.float32) >= self.ratio
elif isinstance(self.mask, variable.Variable):
self.mask = self.mask.data
x = _as_mat(inputs[0])
W = inputs[1] * scale * self.mask
# (i)jk,ik->ij
y = _matmul(W, x[:, :, None], xp)
y = y.reshape(y.shape[0], y.shape[1]).astype(x.dtype, copy=False)
if len(inputs) == 3:
b = inputs[2]
y += b
return y,
def backward(self, indexes, grad_outputs):
inputs = self.get_retained_inputs()
ret = []
scale = inputs[1].dtype.type(1. / (1 - self.ratio))
x = _as_mat(inputs[0])
W = inputs[1]
if self.use_batchwise_mask:
W = chainer.functions.broadcast_to(
W, self.mask.shape) * scale * self.mask
else:
W = chainer.functions.broadcast_to(
W * scale * self.mask, (x.shape[0],) + self.mask.shape)
gy = grad_outputs[0]
if 0 in indexes:
# ij,(i)jk->ik
gx = chainer.functions.matmul(
gy[:, None, :], W).reshape(inputs[0].shape)
gx = chainer.functions.cast(gx, x.dtype)
ret.append(gx)
if 1 in indexes:
# ij,ik,ijk->jk
gy2 = gy[:, :, None]
x2 = x[:, None, :]
shape = (gy2.shape[0], gy2.shape[1], x2.shape[2])
gy2 = chainer.functions.broadcast_to(gy2, shape)
x2 = chainer.functions.broadcast_to(x2, shape)
gW = chainer.functions.sum(gy2 * x2 * self.mask, axis=0) * scale
gW = chainer.functions.cast(gW, W.dtype)
ret.append(gW)
if 2 in indexes:
gb = chainer.functions.sum(gy, axis=0)
ret.append(gb)
return ret
def simplified_dropconnect(x, W, b=None, ratio=.5, train=True, mask=None,
use_batchwise_mask=True):
"""Linear unit regularized by simplified dropconnect.
Simplified dropconnect drops weight matrix elements randomly with
probability ``ratio`` and scales the remaining elements by factor
``1 / (1 - ratio)``.
It accepts two or three arguments: an input minibatch ``x``, a weight
matrix ``W``, and optionally a bias vector ``b``. It computes
:math:`Y = xW^\\top + b`.
In testing mode, zero will be used as simplified dropconnect ratio instead
of ``ratio``.
    Notice:
        This implementation cannot be used to reproduce the results of the
        paper, because it differs from the original method: the original
        version samples from a Gaussian distribution before applying the
        activation function, whereas the current implementation averages
        before the activation.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. Its first dimension ``n`` is assumed
to be the *minibatch dimension*. The other dimensions are treated
            as one concatenated dimension whose size must be ``N``.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Weight variable of shape ``(M, N)``.
b (:class:`~chainer.Variable` or :ref:`ndarray`):
Bias variable (optional) of shape ``(M,)``.
ratio (float):
Dropconnect ratio.
train (bool):
If ``True``, executes simplified dropconnect.
Otherwise, simplified dropconnect function works as a linear
function.
mask (None or :class:`~chainer.Variable` or :ref:`ndarray`):
            If ``None``, a randomized dropconnect mask is generated.
            Otherwise, the mask must be an ``(n, M, N)`` or ``(M, N)`` shaped
            array, and ``use_batchwise_mask`` is ignored.
            The main purpose of this option is debugging; the given ``mask``
            array is used as the dropconnect mask.
use_batchwise_mask (bool):
If ``True``, dropped connections depend on each sample in
mini-batch.
Returns:
~chainer.Variable: Output variable.
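    .. admonition:: Example

        A minimal sketch of the expected shapes (the dropped connections are
        chosen at random, so only the output shape is checked here):

        >>> x = np.random.rand(5, 4).astype(np.float32)
        >>> W = np.random.rand(3, 4).astype(np.float32)
        >>> b = np.zeros(3, np.float32)
        >>> y = F.simplified_dropconnect(x, W, b, ratio=0.5)
        >>> y.shape
        (5, 3)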
.. seealso:: :class:`~chainer.links.Dropconnect`
.. seealso::
Li, W., Matthew Z., Sixin Z., Yann L., Rob F. (2013).
Regularization of Neural Network using DropConnect.
International Conference on Machine Learning.
`URL <https://cs.nyu.edu/~wanli/dropc/>`_
"""
if not train:
ratio = 0
if b is None:
return SimplifiedDropconnect(
ratio, mask, use_batchwise_mask).apply((x, W))[0]
else:
return SimplifiedDropconnect(
ratio, mask, use_batchwise_mask).apply((x, W, b))[0]
| 7,164
| 34.295567
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/noise/gumbel_softmax.py
|
from chainer import backend
import chainer.functions
from chainer import variable
def gumbel_softmax(log_pi, tau=0.1, axis=1):
"""Gumbel-Softmax sampling function.
This function draws samples :math:`y_i` from Gumbel-Softmax distribution,
.. math::
y_i = {\\exp((g_i + \\log\\pi_i)/\\tau)
\\over \\sum_{j}\\exp((g_j + \\log\\pi_j)/\\tau)},
    where :math:`\\tau` is a temperature parameter and the :math:`g_i` are
    samples drawn from the Gumbel distribution :math:`Gumbel(0, 1)`.
See `Categorical Reparameterization with Gumbel-Softmax
<https://arxiv.org/abs/1611.01144>`_.
Args:
log_pi (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable
representing pre-normalized log-probability :math:`\\log\\pi`.
        tau (:class:`~float` or :class:`~chainer.Variable` or :ref:`ndarray`):
            Input variable representing temperature :math:`\\tau`.
        axis (int): The axis along which the softmax normalization is
            applied.
Returns:
~chainer.Variable: Output variable.
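    .. admonition:: Example

        A minimal sketch (the draw is random, so only the output shape is
        checked here):

        >>> log_pi = np.log(np.array([[0.2, 0.8], [0.5, 0.5]], np.float32))
        >>> y = F.gumbel_softmax(log_pi, tau=0.5)
        >>> y.shape
        (2, 2)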
"""
xp = backend.get_array_module(log_pi)
if log_pi.ndim < 1:
return variable.Variable(xp.ones((), log_pi.dtype))
dtype = log_pi.dtype
g = xp.random.gumbel(size=log_pi.shape).astype(dtype)
y = chainer.functions.softmax((log_pi + g) / tau, axis=axis)
return y
| 1,281
| 31.05
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/noise/gaussian.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import argument
from chainer.utils import type_check
class Gaussian(function_node.FunctionNode):
"""Gaussian sampling function.
.. note::
In forward calculation, this function takes a mean and the logarithm of
a variance as inputs, and draws a sample from a Gaussian distribution
accordingly.
"""
def __init__(self, eps=None):
        # When ``eps`` is None, per-instance noise is generated once during
        # the first forward pass and then reused in subsequent calls.
self.eps = eps
def check_type_forward(self, in_types):
type_check._argname(in_types, ('mean', 'ln_var'))
m_type, v_type = in_types
type_check.expect(
m_type.dtype.kind == 'f',
m_type.dtype == v_type.dtype,
m_type.shape == v_type.shape,
)
def forward_cpu(self, inputs):
self.retain_inputs((1,))
mean, ln_var = inputs
if self.eps is None:
self.eps = (
numpy.random.standard_normal(ln_var.shape)
.astype(mean.dtype, copy=False)
)
self.noise = numpy.exp(ln_var * mean.dtype.type(0.5)) * self.eps
return utils.force_array(mean + self.noise),
def forward_gpu(self, inputs):
self.retain_inputs((1,))
mean, ln_var = inputs
if self.eps is None:
if mean.dtype != numpy.float16:
self.eps = cuda.cupy.random.standard_normal(
ln_var.shape, dtype=mean.dtype)
else:
# Draw samples in FP32 then cast them to FP16 because
# cupy.random does not support FP16 currently.
self.eps = cuda.cupy.random.standard_normal(
ln_var.shape, dtype=numpy.float32).astype(numpy.float16)
self.noise = cuda.cupy.empty_like(mean)
self.noise = cuda.elementwise(
'T v, T e', 'T noise',
'noise = exp(v / 2) * e',
'gaussian_forward'
)(ln_var, self.eps)
return mean + self.noise,
def backward(self, indexes, grad_outputs):
ln_var, = self.get_retained_inputs()
gy, = grad_outputs
ret = []
if 0 in indexes:
ret.append(gy)
if 1 in indexes:
noise = chainer.functions.exp(ln_var * 0.5) * self.eps
ret.append(gy * noise * 0.5)
return ret
def gaussian(mean, ln_var, **kwargs):
"""gaussian(mean, ln_var, *, eps=None, return_eps=False)
Gaussian sampling function.
This function takes a mean :math:`\\mu` and the logarithm of a variance
:math:`\\log(\\sigma^2)` as inputs and outputs a sample drawn from a
Gaussian distribution :math:`N(\\mu, \\sigma)`.
The inputs must have the same shape.
Args:
mean (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable representing the mean :math:`\\mu`.
ln_var (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable representing the logarithm of a variance
:math:`\\log(\\sigma^2)`.
eps (:ref:`ndarray` or None):
The eps value to be used.
You do not have to specify this value, unless you need to make
results deterministic.
If ``eps`` is not specified or set to ``None``, an eps value will
be generated randomly.
The shape and dtype must be the same as ``ln_var`` and should be
on the same device.
return_eps (bool):
If ``True``, the eps value used in this function is returned
together with the output variable.
The returned eps can later be reused by passing it to the ``eps``
argument.
Returns:
~chainer.Variable or tuple:
When ``return_eps`` is ``False`` (default), returns the output
variable with the shape of ``mean`` and/or ``ln_var``.
When ``True``, returns the tuple of the output variable and eps
(:ref:`ndarray`).
The eps will be on the same device as the input (``ln_var``).
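    .. admonition:: Example

        A minimal sketch (the sample is random, so only the shapes are
        checked here):

        >>> mean = np.zeros((2, 3), np.float32)
        >>> ln_var = np.zeros((2, 3), np.float32)
        >>> y, eps = F.gaussian(mean, ln_var, return_eps=True)
        >>> y.shape
        (2, 3)
        >>> eps.shape
        (2, 3)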
"""
eps = None
return_eps = False
if kwargs:
eps, return_eps = argument.parse_kwargs(
kwargs, ('eps', eps), ('return_eps', return_eps))
func = Gaussian(eps)
out = func.apply((mean, ln_var))[0]
if return_eps:
return out, func.eps
return out
| 4,588
| 32.253623
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/noise/dropout.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import configuration
from chainer import function_node
from chainer.utils import argument
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
class Dropout(function_node.FunctionNode):
"""Dropout regularization."""
def __init__(self, dropout_ratio, mask=None, return_mask=False):
if not 0.0 <= dropout_ratio < 1.0:
raise ValueError('dropout_ratio must be in the range [0, 1)')
self.dropout_ratio = dropout_ratio
self.mask = mask
self.return_mask = return_mask
self._use_cudnn = False
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, x):
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(x)
and self.mask is None):
return self._forward_ideep(x)
if self.mask is not None:
y = x[0] * self.mask
else:
scale = x[0].dtype.type(1. / (1 - self.dropout_ratio))
flag = numpy.random.rand(*x[0].shape) >= self.dropout_ratio
self.mask = scale * flag
y = x[0] * self.mask
return y,
def forward_gpu(self, x):
if (chainer.should_use_cudnn('>=auto', 5000)
and x[0].flags.c_contiguous
and self.mask is None
and not self.return_mask):
self._use_cudnn = True
if hasattr(self, 'states'):
# if we already have a dropout mask,
# the forward operation is equal to backward.
return cuda.get_cudnn_dropout_states().backward(
None, x[0], self.dropout_ratio, self.states),
self.states, y = cuda.get_cudnn_dropout_states().forward(
None, x[0], self.dropout_ratio)
return y,
else:
if self.mask is not None:
y = x[0] * self.mask
else:
rand = cuda.cupy.random.rand(*x[0].shape, dtype=numpy.float32)
scale = x[0].dtype.type(1. / (1 - self.dropout_ratio))
self.mask, y = cuda.elementwise(
'T x, R r, T scale, T ratio', 'T mask, T y',
'''
mask = (r >= ratio) * scale;
y = x * mask;
''',
'dropout_fwd',
)(x[0], rand, scale, self.dropout_ratio)
return y,
def _forward_ideep(self, x):
mask, y = intel64.ideep.dropout.Forward(
intel64.ideep.array(x[0]),
self.dropout_ratio)
self.mask = mask
return y,
def backward(self, x, gy):
if chainer.should_use_cudnn('>=auto', 5000) and self._use_cudnn:
return DropoutGradCuDNN(self.states, self.dropout_ratio).apply(gy)
else:
return DropoutGrad(self.mask).apply(gy)
class DropoutGrad(function_node.FunctionNode):
"""Computes the gradient of the Dropout function."""
def __init__(self, mask):
self.mask = mask
def forward(self, inputs):
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs)):
return self._forward_ideep(inputs)
y = inputs[0] * self.mask
return y,
def _forward_ideep(self, inputs):
return intel64.ideep.dropout.Backward(
intel64.ideep.array(self.mask),
intel64.ideep.array(inputs[0])),
def backward(self, indexes, gy):
return DropoutGrad(self.mask).apply(gy)
class DropoutGradCuDNN(function_node.FunctionNode):
"""Computes the gradient of the Dropout function with cuDNN support."""
def __init__(self, states, dropout_ratio):
self.states = states
self.dropout_ratio = dropout_ratio
def forward(self, inputs):
return cuda.get_cudnn_dropout_states().backward(
None, inputs[0], self.dropout_ratio, self.states),
def backward(self, indexes, gy):
return DropoutGradCuDNN(self.states, self.dropout_ratio).apply(gy)
def dropout(x, ratio=.5, **kwargs):
"""dropout(x, ratio=.5, *, mask=None, return_mask=False)
Drops elements of input variable randomly.
This function drops input elements randomly with probability ``ratio`` and
scales the remaining elements by factor ``1 / (1 - ratio)``. In testing
mode (i.e., ``chainer.config.train`` is set to ``False``), it does nothing
and just returns ``x``.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
ratio (float):
Dropout ratio. The ``ratio`` must be ``0.0 <= ratio < 1.0``.
mask (:ref:`ndarray` or None):
The mask to be used for dropout.
You do not have to specify this value, unless you need to make
results deterministic.
If ``mask`` is not specified or set to ``None``, a mask will be
generated randomly according to the given ``ratio``.
If ``mask`` is specified, ``ratio`` will be ignored.
The shape and dtype must be the same as ``x`` and should be on the
same device.
Note that iDeep and cuDNN will not be used for this function if
mask is specified, as iDeep and cuDNN do not support it.
return_mask (bool):
If ``True``, the mask used for dropout is returned together with
the output variable.
            The returned mask can later be reused by passing it to the
            ``mask`` argument.
Returns:
~chainer.Variable or tuple:
When ``return_mask`` is ``False`` (default), returns the output
variable.
When ``True``, returns the tuple of the output variable and
mask (:ref:`ndarray`). The mask will be on the same device as the
input. The mask will become ``None`` when ``chainer.config.train``
is set to ``False``.
See the paper by G. Hinton: `Improving neural networks by preventing
co-adaptation of feature detectors <https://arxiv.org/abs/1207.0580>`_.
.. admonition:: Example
>>> x = np.array([[-1, 0], [2, -3], [-2, 1]], np.float32)
>>> with chainer.using_config('train', True):
... y = F.dropout(x)
>>> y.array
array([[-2., 0.],
[ 4., -6.],
[-0., 2.]], dtype=float32)
>>> with chainer.using_config('train', True):
... y = F.dropout(x, ratio=0.0) \
# dropout returns original input if ratio=0.0
>>> (x == y.array).all()
True
>>> with chainer.using_config('train', False):
... y = F.dropout(x) \
# dropout in test mode returns original input
>>> (x == y.array).all()
True
"""
mask = None
return_mask = False
if kwargs:
mask, return_mask = argument.parse_kwargs(
kwargs, ('mask', mask), ('return_mask', return_mask),
train='train argument is not supported anymore. '
'Use chainer.using_config')
if configuration.config.train:
func = Dropout(ratio, mask, return_mask)
out, = func.apply((x,))
mask = func.mask
else:
out = chainer.as_variable(x)
mask = None
if return_mask:
return out, mask
return out
| 7,599
| 34.514019
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/noise/zoneout.py
|
import numpy
from chainer import backend
from chainer import configuration
from chainer import function_node
from chainer.utils import argument
from chainer.utils import type_check
class Zoneout(function_node.FunctionNode):
"""Zoneout regularization."""
def __init__(self, zoneout_ratio):
self.zoneout_ratio = zoneout_ratio
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
def forward(self, inputs):
self.retain_inputs(())
h, x = inputs
xp = backend.get_array_module(*x)
if xp is numpy:
flag_x = xp.random.rand(*x.shape) >= self.zoneout_ratio
else:
flag_x = (xp.random.rand(*x.shape) >=
self.zoneout_ratio)
self.flag_h = xp.ones_like(flag_x) ^ flag_x
self.flag_x = flag_x
return h * self.flag_h + x * self.flag_x,
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
ret = []
if 0 in indexes:
ret.append(gy * self.flag_h)
if 1 in indexes:
ret.append(gy * self.flag_x)
return ret
def zoneout(h, x, ratio=.5, **kwargs):
"""zoneout(h, x, ratio=.5)
    Drops elements of the input variable randomly and replaces them with the
    corresponding elements of a previous variable.
    This function drops input elements randomly with probability ``ratio``
    and replaces the dropped elements with the corresponding elements of the
    previous variable ``h``. In testing mode, it does nothing and just
    returns ``x``.
Args:
h (:class:`~chainer.Variable` or :ref:`ndarray`): Previous variable.
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
ratio (float): Zoneout ratio.
Returns:
~chainer.Variable: Output variable.
See the paper: `Zoneout: Regularizing RNNs by Randomly Preserving Hidden
Activations <https://arxiv.org/abs/1606.01305>`_.
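    .. admonition:: Example

        A minimal sketch (the zoneout mask is random, so the output is only
        checked to be taken element-wise from either ``h`` or ``x``):

        >>> h = np.zeros((2, 3), np.float32)
        >>> x = np.ones((2, 3), np.float32)
        >>> with chainer.using_config('train', True):
        ...     y = F.zoneout(h, x, ratio=0.5)
        >>> bool(np.all((y.array == 0) | (y.array == 1)))
        True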
"""
if kwargs:
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
if configuration.config.train:
return Zoneout(ratio).apply((h, x))[0]
return x
| 2,199
| 28.72973
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/noise/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainer/testing/doctest_helper.py
|
import os
import pkg_resources
_gpu_limit = int(os.getenv('CHAINER_TEST_GPU_LIMIT', '-1'))
def skipif(condition):
# In the readthedocs build, doctest should never be skipped, because
# otherwise the code would disappear from the documentation.
if os.environ.get('READTHEDOCS') == 'True':
return False
return condition
def skipif_requires_satisfied(*requirements):
ws = pkg_resources.WorkingSet()
try:
ws.require(*requirements)
except pkg_resources.ResolutionError:
return False
return skipif(True)
def skipif_not_enough_cuda_devices(device_count):
return skipif(0 <= _gpu_limit < device_count)
| 663
| 22.714286
| 72
|
py
|
chainer
|
chainer-master/chainer/testing/attr.py
|
import os
import unittest
try:
import pytest
_error = None
except ImportError as e:
_error = e
def is_available():
return _error is None
def check_available():
if _error is not None:
raise RuntimeError('''\
{} is not available.
Reason: {}: {}'''.format(__name__, type(_error).__name__, _error))
def get_error():
return _error
if _error is None:
_gpu_limit = int(os.getenv('CHAINER_TEST_GPU_LIMIT', '-1'))
def chainerx(*args, **kwargs):
return pytest.mark.chainerx(*args, **kwargs)
def cudnn(*args, **kwargs):
return pytest.mark.cudnn(*args, **kwargs)
def ideep(*args, **kwargs):
return pytest.mark.ideep(*args, **kwargs)
def slow(*args, **kwargs):
return pytest.mark.slow(*args, **kwargs)
else:
def _dummy_callable(*args, **kwargs):
check_available()
assert False # Not reachable
chainerx = _dummy_callable
cudnn = _dummy_callable
ideep = _dummy_callable
slow = _dummy_callable
def multi_gpu(gpu_num):
"""Decorator to indicate number of GPUs required to run the test.
Tests can be annotated with this decorator (e.g., ``@multi_gpu(2)``) to
declare number of GPUs required to run. When running tests, if
``CHAINER_TEST_GPU_LIMIT`` environment variable is set to value greater
than or equals to 0, test cases that require GPUs more than the limit will
be skipped.
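    .. admonition:: Example

        A minimal sketch with a hypothetical test class:

        >>> import unittest
        >>> from chainer.testing import attr
        >>> class TestMyFunc(unittest.TestCase):
        ...     @attr.multi_gpu(2)
        ...     def test_uses_two_gpus(self):
        ...         pass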
"""
check_available()
def deco(f):
return unittest.skipIf(
0 <= _gpu_limit < gpu_num,
reason='{} GPUs required'.format(gpu_num)
)(pytest.mark.gpu(f))
return deco
def gpu(f):
"""Decorator to indicate that GPU is required to run the test.
Tests can be annotated with this decorator (e.g., ``@gpu``) to
declare that one GPU is required to run.
"""
return multi_gpu(1)(f)
| 1,878
| 21.638554
| 78
|
py
|
chainer
|
chainer-master/chainer/testing/_bundle.py
|
import collections
import inspect
import sys
# A tuple that represents a test case.
# For bare (non-generated) test cases, [1] and [2] are None.
# [0] Test case class
# [1] Module name in which the class is defined
# [2] Class name
_TestCaseTuple = collections.namedtuple(
'_TestCaseTuple', ('klass', 'module_name', 'class_name'))
class _ParameterizedTestCaseBundle(object):
def __init__(self, cases):
# cases is a list of _TestCaseTuple's
assert isinstance(cases, list)
assert all(isinstance(tup, _TestCaseTuple) for tup in cases)
self.cases = cases
def make_decorator(test_case_generator):
# `test_case_generator` is a callable that receives the source test class
# (typically a subclass of unittest.TestCase) and returns an iterable of
# generated test cases.
# Each element of the iterable is a 3-element tuple:
# [0] Generated class name
# [1] Dict of members
# [2] Method generator
# The method generator is also a callable that receives an original test
# method and returns a new test method.
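    # A hypothetical generator sketch: given the base class, it could yield
    # two generated classes, e.g.
    #
    #     def my_generator(base):
    #         for suffix, value in [('A', 1), ('B', 2)]:
    #             yield (base.__name__ + '_' + suffix,
    #                    {'param': value},
    #                    lambda method: method)
    #
    # and would then be applied as ``make_decorator(my_generator)``.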
def f(cases):
if isinstance(cases, _ParameterizedTestCaseBundle):
# The input is a parameterized test case.
cases = cases.cases
else:
# Input is a bare test case, i.e. not one generated from another
# parameterize.
cases = [_TestCaseTuple(cases, None, None)]
generated_cases = []
for klass, mod_name, cls_name in cases:
if mod_name is not None:
# The input is a parameterized test case.
# Remove it from its module.
delattr(sys.modules[mod_name], cls_name)
else:
# The input is a bare test case
mod_name = klass.__module__
# Generate parameterized test cases out of the input test case.
l = _generate_test_cases(mod_name, klass, test_case_generator)
generated_cases += l
# Return the bundle of generated cases to allow repeated application of
# parameterize decorators.
return _ParameterizedTestCaseBundle(generated_cases)
return f
def _generate_case(base, module, cls_name, mb, method_generator):
# Returns a _TestCaseTuple.
members = mb.copy()
# ismethod for Python 2 and isfunction for Python 3
base_methods = inspect.getmembers(
base, predicate=lambda m: inspect.ismethod(m) or inspect.isfunction(m))
for name, value in base_methods:
if not name.startswith('test_'):
continue
value = method_generator(value)
# If the return value of method_generator is None, None is assigned
# as the value of the test method and it is ignored by pytest.
members[name] = value
cls = type(cls_name, (base,), members)
# Add new test class to module
setattr(module, cls_name, cls)
return _TestCaseTuple(cls, module.__name__, cls_name)
def _generate_test_cases(module_name, base_class, test_case_generator):
# Returns a list of _TestCaseTuple's holding generated test cases.
module = sys.modules[module_name]
generated_cases = []
for cls_name, members, method_generator in (
test_case_generator(base_class)):
c = _generate_case(
base_class, module, cls_name, members, method_generator)
generated_cases.append(c)
return generated_cases
| 3,437
| 33.727273
| 79
|
py
|
chainer
|
chainer-master/chainer/testing/condition.py
|
import functools
import unittest
import six
try:
import _pytest.outcomes
_error = None
except ImportError as e:
_error = e
class QuietTestRunner(object):
def run(self, suite):
result = unittest.TestResult()
suite(result)
return result
def repeat_with_success_at_least(times, min_success):
"""Decorator for multiple trial of the test case.
The decorated test case is launched multiple times.
The case is judged as passed at least specified number of trials.
If the number of successful trials exceeds `min_success`,
the remaining trials are skipped.
Args:
times(int): The number of trials.
min_success(int): Threshold that the decorated test
case is regarded as passed.
"""
assert times >= min_success
def _repeat_with_success_at_least(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
assert len(args) > 0
instance = args[0]
assert isinstance(instance, unittest.TestCase)
success_counter = 0
failure_counter = 0
results = []
def fail():
msg = '\nFail: {0}, Success: {1}'.format(
failure_counter, success_counter)
if results:
first = results[0]
errs = first.failures + first.errors
if errs:
err_msg = '\n'.join(fail[1] for fail in errs)
msg += '\n\nThe first error message:\n' + err_msg
instance.fail(msg)
# Wrapper to convert pytest.skip() to unittest.SkipTest
def f_wrap(ins, args, kwargs):
try:
f(ins, *args[1:], **kwargs)
except _pytest.outcomes.Skipped as e:
ins.skipTest(e.msg)
for _ in six.moves.range(times):
suite = unittest.TestSuite()
# Create new instance to call the setup and the teardown only
# once.
ins = type(instance)(instance._testMethodName)
suite.addTest(
unittest.FunctionTestCase(
lambda: f_wrap(ins, args, kwargs),
setUp=ins.setUp,
tearDown=ins.tearDown))
result = QuietTestRunner().run(suite)
if len(result.skipped) == 1:
# "Skipped" is a special case of "Successful".
# When the test has been skipped, immediately quit the
# test regardless of `times` and `min_success` by raising
# SkipTest exception using the original reason.
instance.skipTest(result.skipped[0][1])
elif result.wasSuccessful():
success_counter += 1
else:
results.append(result)
failure_counter += 1
if success_counter >= min_success:
instance.assertTrue(True)
return
if failure_counter > times - min_success:
fail()
return
fail()
return wrapper
return _repeat_with_success_at_least
def repeat(times):
"""Decorator that imposes the test to be successful in a row.
Decorated test case is launched multiple times.
The case is regarded as passed only if it is successful
specified times in a row.
.. note::
In current implementation, this decorator grasps the
failure information of each trial.
Args:
times(int): The number of trials.
"""
return repeat_with_success_at_least(times, times)
def retry(times):
"""Decorator that imposes the test to be successful at least once.
Decorated test case is launched multiple times.
The case is regarded as passed if it is successful
at least once.
.. note::
In current implementation, this decorator grasps the
failure information of each trial.
Args:
times(int): The number of trials.
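    .. admonition:: Example

        A minimal sketch annotating a hypothetical flaky test:

        >>> import unittest
        >>> from chainer.testing import condition
        >>> class TestFlaky(unittest.TestCase):
        ...     @condition.retry(3)
        ...     def test_sometimes_fails(self):
        ...         pass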
"""
return repeat_with_success_at_least(times, 1)
| 4,230
| 31.05303
| 77
|
py
|
chainer
|
chainer-master/chainer/testing/array.py
|
import numpy
import six
import chainer
from chainer import backend
from chainer import utils
import chainerx
def assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):
"""Asserts if some corresponding element of x and y differs too much.
This function can handle both CPU and GPU arrays simultaneously.
Args:
x: Left-hand-side array.
y: Right-hand-side array.
atol (float): Absolute tolerance.
rtol (float): Relative tolerance.
verbose (bool): If ``True``, it outputs verbose messages on error.
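    .. admonition:: Example

        A minimal sketch (passes silently because the arrays agree within the
        default tolerances):

        >>> from chainer import testing
        >>> testing.assert_allclose(np.array([1.0, 2.0]),
        ...                         np.array([1.0, 2.0000001]))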
"""
x = backend.CpuDevice().send(utils.force_array(x))
y = backend.CpuDevice().send(utils.force_array(y))
try:
numpy.testing.assert_allclose(
x, y, atol=atol, rtol=rtol, verbose=verbose)
except AssertionError as e:
f = six.StringIO()
f.write(str(e) + '\n\n')
f.write(
'assert_allclose failed:\n' +
' shape: {} {}\n'.format(x.shape, y.shape) +
' dtype: {} {}\n'.format(x.dtype, y.dtype))
if x.shape == y.shape:
xx = numpy.atleast_1d(x)
yy = numpy.atleast_1d(y)
err = numpy.abs(xx - yy)
tol_rtol = rtol * numpy.abs(yy).astype(numpy.float64)
tol_err = atol + tol_rtol
i = numpy.unravel_index(
numpy.argmax(err.astype(numpy.float64) - tol_err), err.shape)
if yy[i] == 0:
rel_err = 'inf'
else:
rel_err = err[i] / numpy.abs(yy[i])
f.write(
' i: {}\n'.format(i) +
' x[i]: {}\n'.format(xx[i]) +
' y[i]: {}\n'.format(yy[i]) +
' relative error[i]: {}\n'.format(rel_err) +
' absolute error[i]: {}\n'.format(err[i]) +
' relative tolerance * |y[i]|: {}\n'.format(tol_rtol[i]) +
' absolute tolerance: {}\n'.format(atol) +
' total tolerance: {}\n'.format(tol_err[i]))
opts = numpy.get_printoptions()
try:
numpy.set_printoptions(threshold=10000)
f.write('x: ' + numpy.array2string(x, prefix='x: ') + '\n')
f.write('y: ' + numpy.array2string(y, prefix='y: ') + '\n')
finally:
numpy.set_printoptions(**opts)
raise AssertionError(f.getvalue())
def _as_noncontiguous_array(array):
# This is a temporary function used by tests to convert contiguous arrays
# to non-contiguous arrays.
#
    # This function can be removed if e.g. BackendConfig starts supporting
# contiguousness configurations and the array conversion method takes that
# into account. Note that that would also mean rewriting tests to use the
# backend injector in the first place.
def as_noncontiguous_array(a):
if a is None:
return None
if a.size <= 1:
return a
device = backend.get_device_from_array(a)
xp = device.xp
slices = (slice(None, None, -2),) * a.ndim
with chainer.using_device(device):
ret = xp.empty(tuple([s * 2 for s in a.shape]), dtype=a.dtype)
ret[slices] = a
ret = ret[slices]
if device.xp is chainerx:
assert not ret.is_contiguous
else:
assert not ret.flags.c_contiguous
return ret
if isinstance(array, (list, tuple)):
return type(array)([_as_noncontiguous_array(arr) for arr in array])
else:
return as_noncontiguous_array(array)
| 3,556
| 32.87619
| 78
|
py
|
chainer
|
chainer-master/chainer/testing/unary_math_function_test.py
|
import unittest
import warnings
import numpy
from chainer.backends import cuda
from chainer import function
from chainer import functions
from chainer import variable
try:
from chainer.testing import attr
_error = attr.get_error()
except ImportError as e:
_error = e
def is_available():
return _error is None
def check_available():
if _error is not None:
raise RuntimeError('''\
{} is not available.
Reason: {}: {}'''.format(__name__, type(_error).__name__, _error))
def _func_name(func):
if isinstance(func, function.Function):
return func.__class__.__name__.lower()
else:
return func.__name__
def _func_class(func):
if isinstance(func, function.Function):
return func.__class__
else:
name = func.__name__.capitalize()
return getattr(functions, name, None)
def _make_data_default(shape, dtype):
x = numpy.random.uniform(-1, 1, shape).astype(dtype, copy=False)
gy = numpy.random.uniform(-1, 1, shape).astype(dtype, copy=False)
ggx = numpy.random.uniform(-1, 1, shape).astype(dtype, copy=False)
return x, gy, ggx
def _nonlinear(func):
def aux(x):
y = func(x)
return y * y
return aux
def unary_math_function_unittest(func, func_expected=None, label_expected=None,
make_data=None, is_linear=None,
forward_options=None,
backward_options=None,
double_backward_options=None):
"""Decorator for testing unary mathematical Chainer functions.
This decorator makes test classes test unary mathematical Chainer
functions. Tested are forward and backward, including double backward,
computations on CPU and GPU across parameterized ``shape`` and ``dtype``.
Args:
func(function or ~chainer.Function): Chainer function to be tested by
the decorated test class. Taking :class:`~chainer.Function` is for
backward compatibility.
func_expected: Function used to provide expected values for
            testing forward computation. If not given, a corresponding numpy
function for ``func`` is implicitly picked up by its name.
label_expected(string): String used to test labels of Chainer
functions. If not given, the name of ``func`` is implicitly used.
make_data: Function to customize input and gradient data used
in the tests. It takes ``shape`` and ``dtype`` as its arguments,
and returns a tuple of input, gradient and double gradient data. By
            default, a uniform distribution over ``[-1, 1]`` is used for all of
them.
is_linear: Tells the decorator that ``func`` is a linear function
so that it wraps ``func`` as a non-linear function to perform
double backward test. This argument is left for backward
compatibility. Linear functions can be tested by default without
specifying ``is_linear`` in Chainer v5 or later.
forward_options(dict): Options to be specified as an argument of
:func:`chainer.testing.assert_allclose` function.
If not given, preset tolerance values are automatically selected.
backward_options(dict): Options to be specified as an argument of
:func:`chainer.gradient_check.check_backward` function.
If not given, preset tolerance values are automatically selected
depending on ``dtype``.
double_backward_options(dict): Options to be specified as an argument
of :func:`chainer.gradient_check.check_double_backward` function.
If not given, preset tolerance values are automatically selected
depending on ``dtype``.
The decorated test class tests forward, backward and double backward
computations on CPU and GPU across the following
:func:`~chainer.testing.parameterize` ed parameters:
- shape: rank of zero, and rank of more than zero
- dtype: ``numpy.float16``, ``numpy.float32`` and ``numpy.float64``
Additionally, it tests the label of the Chainer function.
Chainer functions tested by the test class decorated with the decorator
should have the following properties:
- Unary, taking one parameter and returning one value
- ``dtype`` of input and output are the same
- Elementwise operation for the supplied ndarray
.. admonition:: Example
The following code defines a test class that tests
:func:`~chainer.functions.sin` Chainer function, which takes a parameter
with ``dtype`` of float and returns a value with the same ``dtype``.
.. doctest::
>>> import unittest
>>> from chainer import testing
>>> from chainer import functions as F
>>>
>>> @testing.unary_math_function_unittest(F.sin)
... class TestSin(unittest.TestCase):
... pass
Because the test methods are implicitly injected to ``TestSin`` class by
the decorator, it is enough to place ``pass`` in the class definition.
To customize test data, ``make_data`` optional parameter can be used.
The following is an example of testing ``sqrt`` Chainer function, which
is tested in positive value domain here instead of the default input.
.. doctest::
>>> import numpy
>>>
>>> def make_data(shape, dtype):
... x = numpy.random.uniform(0.1, 1, shape).astype(dtype)
... gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
... ggx = numpy.random.uniform(-1, 1, shape).astype(dtype)
... return x, gy, ggx
...
>>> @testing.unary_math_function_unittest(F.sqrt,
... make_data=make_data)
... class TestSqrt(unittest.TestCase):
... pass
``make_data`` function which returns input, gradient and double gradient
data generated in proper value domains with given ``shape`` and
``dtype`` parameters is defined, then passed to the decorator's
``make_data`` parameter.
"""
check_available()
# TODO(takagi) In the future, the Chainer functions that could be tested
# with the decorator would be extended as:
#
# - Multiple input parameters
# - Multiple output values
# - Other types than float: integer
# - Other operators other than analytic math: basic math
# Import here to avoid mutual import.
from chainer import gradient_check
from chainer import testing
is_new_style = not isinstance(func, function.Function)
func_name = _func_name(func)
func_class = _func_class(func)
if func_expected is None:
try:
func_expected = getattr(numpy, func_name)
except AttributeError:
raise ValueError('NumPy has no functions corresponding '
'to Chainer function \'{}\'.'.format(func_name))
if label_expected is None:
label_expected = func_name
elif func_class is None:
raise ValueError('Expected label is given even though Chainer '
'function does not have its label.')
if make_data is None:
if is_new_style:
make_data = _make_data_default
else:
def aux(shape, dtype):
return _make_data_default(shape, dtype)[0:2]
make_data = aux
if is_linear is not None:
warnings.warn('is_linear option is deprecated', DeprecationWarning)
def f(klass):
assert issubclass(klass, unittest.TestCase)
def setUp(self):
if is_new_style:
self.x, self.gy, self.ggx = make_data(self.shape, self.dtype)
else:
self.x, self.gy = make_data(self.shape, self.dtype)
if self.dtype == numpy.float16:
self.forward_options = {
'atol': numpy.finfo('float16').eps, # = 0.000977
'rtol': numpy.finfo('float16').eps, # = 0.000977
}
self.backward_options = {
'eps': 2 ** -4, 'atol': 2 ** -4, 'rtol': 2 ** -4,
'dtype': numpy.float64}
self.double_backward_options = {
'eps': 2 ** -4, 'atol': 2 ** -4, 'rtol': 2 ** -4,
'dtype': numpy.float64}
else:
self.forward_options = {'atol': 1e-4, 'rtol': 1e-4}
self.backward_options = {
'dtype': numpy.float64, 'atol': 1e-4, 'rtol': 1e-4}
self.double_backward_options = {
'dtype': numpy.float64, 'atol': 1e-4, 'rtol': 1e-4}
if forward_options is not None:
self.forward_options.update(forward_options)
if backward_options is not None:
self.backward_options.update(backward_options)
if double_backward_options is not None:
self.double_backward_options.update(double_backward_options)
setattr(klass, 'setUp', setUp)
def check_forward(self, x_data):
x = variable.Variable(x_data)
y = func(x)
self.assertEqual(y.data.dtype, x_data.dtype)
y_expected = func_expected(cuda.to_cpu(x_data), dtype=x_data.dtype)
testing.assert_allclose(y_expected, y.data, **self.forward_options)
setattr(klass, 'check_forward', check_forward)
def test_forward_cpu(self):
self.check_forward(self.x)
setattr(klass, 'test_forward_cpu', test_forward_cpu)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
setattr(klass, 'test_forward_gpu', test_forward_gpu)
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
func, x_data, y_grad, **self.backward_options)
setattr(klass, 'check_backward', check_backward)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
setattr(klass, 'test_backward_cpu', test_backward_cpu)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
setattr(klass, 'test_backward_gpu', test_backward_gpu)
if is_new_style:
def check_double_backward(self, x_data, y_grad, x_grad_grad):
func1 = _nonlinear(func) if is_linear else func
gradient_check.check_double_backward(
func1, x_data, y_grad,
x_grad_grad, **self.double_backward_options)
setattr(klass, 'check_double_backward', check_double_backward)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx)
setattr(klass, 'test_double_backward_cpu',
test_double_backward_cpu)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx))
setattr(klass, 'test_double_backward_gpu',
test_double_backward_gpu)
if func_class is not None:
def test_label(self):
self.assertEqual(func_class().label, label_expected)
setattr(klass, 'test_label', test_label)
# Return parameterized class.
return testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))(klass)
return f
| 11,829
| 38.302326
| 79
|
py
|
chainer
|
chainer-master/chainer/testing/training.py
|
from __future__ import division
from chainer import training
try:
import mock
_error = None
except ImportError as e:
_error = e
def is_available():
return _error is None
def check_available():
if _error is not None:
raise RuntimeError('''\
{} is not available.
Reason: {}: {}'''.format(__name__, type(_error).__name__, _error))
def get_error():
return _error
def get_trainer_with_mock_updater(
stop_trigger=(10, 'iteration'), iter_per_epoch=10, extensions=None):
"""Returns a :class:`~chainer.training.Trainer` object with mock updater.
The returned trainer can be used for testing the trainer itself and the
extensions. A mock object is used as its updater. The update function set
to the mock correctly increments the iteration counts (
``updater.iteration``), and thus you can write a test relying on it.
Args:
stop_trigger: Stop trigger of the trainer.
iter_per_epoch: The number of iterations per epoch.
extensions: Extensions registered to the trainer.
Returns:
Trainer object with a mock updater.
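    .. admonition:: Example

        A minimal sketch (assumes ``mock`` is installed):

        >>> from chainer import testing
        >>> trainer = testing.get_trainer_with_mock_updater(
        ...     stop_trigger=(2, 'iteration'))
        >>> trainer.updater.update()
        >>> trainer.updater.iteration
        1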
"""
if extensions is None:
extensions = []
check_available()
updater = mock.Mock()
updater.get_all_optimizers.return_value = {}
updater.iteration = 0
updater.epoch = 0
updater.epoch_detail = 0
updater.is_new_epoch = True
updater.previous_epoch_detail = None
def update():
updater.update_core()
updater.iteration += 1
updater.epoch = updater.iteration // iter_per_epoch
updater.epoch_detail = updater.iteration / iter_per_epoch
updater.is_new_epoch = (updater.iteration - 1) // \
iter_per_epoch != updater.epoch
updater.previous_epoch_detail = (updater.iteration - 1) \
/ iter_per_epoch
updater.update = update
trainer = training.Trainer(updater, stop_trigger, extensions=extensions)
return trainer
| 1,956
| 26.56338
| 77
|
py
|
chainer
|
chainer-master/chainer/testing/helper.py
|
import contextlib
import sys
import unittest
import warnings
import pkg_resources
try:
import mock
_mock_error = None
except ImportError as e:
_mock_error = e
def _check_mock_available():
if _mock_error is not None:
raise RuntimeError(
'mock is not available: Reason: {}'.format(_mock_error))
def with_requires(*requirements):
"""Run a test case only when given requirements are satisfied.
.. admonition:: Example
This test case runs only when `numpy>=1.10` is installed.
>>> import unittest
>>> from chainer import testing
>>> class Test(unittest.TestCase):
... @testing.with_requires('numpy>=1.10')
... def test_for_numpy_1_10(self):
... pass
Args:
requirements: A list of string representing requirement condition to
run a given test case.
"""
ws = pkg_resources.WorkingSet()
try:
ws.require(*requirements)
skip = False
except pkg_resources.ResolutionError:
skip = True
msg = 'requires: {}'.format(','.join(requirements))
return unittest.skipIf(skip, msg)
def without_requires(*requirements):
"""Run a test case only when given requirements are not satisfied.
.. admonition:: Example
This test case runs only when `numpy>=1.10` is not installed.
        >>> import unittest
        >>> from chainer import testing
        >>> class Test(unittest.TestCase):
        ...     @testing.without_requires('numpy>=1.10')
        ...     def test_without_numpy_1_10(self):
        ...         pass
Args:
requirements: A list of string representing requirement condition to
run a given test case.
"""
ws = pkg_resources.WorkingSet()
try:
ws.require(*requirements)
skip = True
except pkg_resources.ResolutionError:
skip = False
msg = 'requires: {}'.format(','.join(requirements))
return unittest.skipIf(skip, msg)
@contextlib.contextmanager
def assert_warns(expected):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
yield
# Python 2 does not raise warnings multiple times from the same stack
# frame.
if sys.version_info >= (3, 0):
if not any(isinstance(m.message, expected) for m in w):
try:
exc_name = expected.__name__
except AttributeError:
exc_name = str(expected)
            raise AssertionError('%s not triggered' % exc_name)
def _import_object_from_name(fullname):
comps = fullname.split('.')
obj = sys.modules.get(comps[0])
if obj is None:
raise RuntimeError('Can\'t import {}'.format(comps[0]))
for i, comp in enumerate(comps[1:]):
obj = getattr(obj, comp)
if obj is None:
raise RuntimeError(
'Can\'t find object {}'.format('.'.join(comps[:i + 1])))
return obj
def patch(target, *args, **kwargs):
"""A wrapper of mock.patch which appends wraps argument.
.. note::
        Unbound methods are not supported as the ``wraps`` argument.
Args:
target(str): Full name of target object.
wraps: Wrapping object which will be passed to ``mock.patch`` as
``wraps`` argument.
If omitted, the object specified by ``target`` is used.
*args: Passed to ``mock.patch``.
**kwargs: Passed to ``mock.patch``.
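    .. admonition:: Example

        A minimal sketch (assumes ``mock`` is installed; thanks to ``wraps``
        the patched call is still forwarded to the real function):

        >>> from chainer import testing
        >>> with testing.patch('chainer.functions.relu') as patched:
        ...     y = chainer.functions.relu(np.zeros((2, 3), np.float32))
        >>> patched.call_count
        1
        >>> y.shape
        (2, 3)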
"""
_check_mock_available()
try:
wraps = kwargs.pop('wraps')
except KeyError:
wraps = _import_object_from_name(target)
return mock.patch(target, *args, wraps=wraps, **kwargs)
| 3,610
| 26.356061
| 76
|
py
|
chainer
|
chainer-master/chainer/testing/serializer.py
|
import os
from chainer import serializers
from chainer import utils
def save_and_load(src, dst, filename, saver, loader):
"""Saves ``src`` and loads it to ``dst`` using a de/serializer.
This function simply runs a serialization and deserialization to check if
the serialization code is correctly implemented. The save and load are
done within a temporary directory.
Args:
src: An object to save from.
dst: An object to load into.
filename (str): File name used during the save/load.
saver (callable): Function that saves the source object.
loader (callable): Function that loads the file into the destination
object.
"""
with utils.tempdir() as tempdir:
path = os.path.join(tempdir, filename)
saver(path, src)
loader(path, dst)
def save_and_load_npz(src, dst):
"""Saves ``src`` to an NPZ file and loads it to ``dst``.
This is a short cut of :func:`save_and_load` using NPZ de/serializers.
Args:
src: An object to save.
dst: An object to load to.
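    .. admonition:: Example

        A minimal sketch using two links of the same structure:

        >>> import chainer.links as L
        >>> from chainer import testing
        >>> src = L.Linear(3, 2)
        >>> dst = L.Linear(3, 2)
        >>> testing.save_and_load_npz(src, dst)
        >>> bool(np.array_equal(src.W.array, dst.W.array))
        True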
"""
save_and_load(src, dst, 'tmp.npz',
serializers.save_npz, serializers.load_npz)
def save_and_load_hdf5(src, dst):
"""Saves ``src`` to an HDF5 file and loads it to ``dst``.
This is a short cut of :func:`save_and_load` using HDF5 de/serializers.
Args:
src: An object to save.
dst: An object to load to.
"""
save_and_load(src, dst, 'tmp.h5',
serializers.save_hdf5, serializers.load_hdf5)
| 1,562
| 27.418182
| 77
|
py
|
chainer
|
chainer-master/chainer/testing/distribution_test.py
|
import unittest
import chainer.types
try:
import pytest # NOQA
_error = None
except ImportError as e:
_error = e
if _error is None:
from chainer.testing._distribution_test import distribution_unittest
elif not chainer.types.TYPE_CHECKING:
class distribution_unittest(unittest.TestCase):
def test_dummy(self):
raise RuntimeError('''\
{} is not available.
Reason: {}: {}'''.format(__name__, type(_error).__name__, _error))
| 468
| 20.318182
| 72
|
py
|
chainer
|
chainer-master/chainer/testing/random.py
|
from __future__ import absolute_import
import atexit
import functools
import os
import random
import types
import numpy
from chainer.backends import cuda
from chainer.testing import _bundle
_old_python_random_state = None
_old_numpy_random_state = None
def _numpy_do_setup(deterministic=True):
global _old_python_random_state
global _old_numpy_random_state
_old_python_random_state = random.getstate()
_old_numpy_random_state = numpy.random.get_state()
if not deterministic:
numpy.random.seed()
else:
numpy.random.seed(100)
def _numpy_do_teardown():
global _old_python_random_state
global _old_numpy_random_state
random.setstate(_old_python_random_state)
numpy.random.set_state(_old_numpy_random_state)
_old_python_random_state = None
_old_numpy_random_state = None
def _cupy_testing_random():
testing = cuda.cupy.testing
if hasattr(testing, 'random'):
return testing.random
return testing._random
def do_setup(deterministic=True):
if cuda.available:
_cupy_testing_random().do_setup(deterministic)
else:
_numpy_do_setup(deterministic)
def do_teardown():
if cuda.available:
_cupy_testing_random().do_teardown()
else:
_numpy_do_teardown()
# In some tests (which utilize condition.repeat or condition.retry),
# setUp/tearDown is nested. _setup_random() and _teardown_random() do their
# work only in the outermost setUp/tearDown pair.
_nest_count = 0
@atexit.register
def _check_teardown():
assert _nest_count == 0, ('_setup_random() and _teardown_random() '
'must be called in pairs.')
def _setup_random():
"""Sets up the deterministic random states of ``numpy`` and ``cupy``.
"""
global _nest_count
if _nest_count == 0:
nondeterministic = bool(int(os.environ.get(
'CHAINER_TEST_RANDOM_NONDETERMINISTIC', '0')))
do_setup(not nondeterministic)
_nest_count += 1
def _teardown_random():
"""Tears down the deterministic random states set up by ``_setup_random``.
"""
global _nest_count
assert _nest_count > 0, '_setup_random has not been called'
_nest_count -= 1
if _nest_count == 0:
do_teardown()
def generate_seed():
assert _nest_count > 0, 'random is not set up'
return numpy.random.randint(0xffffffff)
def _fix_random(setup_method_name, teardown_method_name):
# TODO(niboshi): Prevent this decorator from being applied within
# condition.repeat or condition.retry decorators. That would repeat
# tests with the same random seeds. It's okay to apply this outside
# these decorators.
def decorator(impl):
if (isinstance(impl, types.FunctionType) and
impl.__name__.startswith('test_')):
# Applied to test method
@functools.wraps(impl)
def test_func(self, *args, **kw):
_setup_random()
try:
impl(self, *args, **kw)
finally:
_teardown_random()
return test_func
if isinstance(impl, _bundle._ParameterizedTestCaseBundle):
cases = impl
else:
tup = _bundle._TestCaseTuple(impl, None, None)
cases = _bundle._ParameterizedTestCaseBundle([tup])
for klass, _, _ in cases.cases:
# Applied to test case class
def make_methods():
# make_methods is required to bind the variables prev_setup and
# prev_teardown.
prev_setup = getattr(klass, setup_method_name)
prev_teardown = getattr(klass, teardown_method_name)
@functools.wraps(prev_setup)
def new_setup(self):
_setup_random()
prev_setup(self)
@functools.wraps(prev_teardown)
def new_teardown(self):
try:
prev_teardown(self)
finally:
_teardown_random()
return new_setup, new_teardown
setup, teardown = make_methods()
setattr(klass, setup_method_name, setup)
setattr(klass, teardown_method_name, teardown)
return cases
return decorator
def fix_random(*, setup_method='setUp', teardown_method='tearDown'):
"""Decorator that fixes random numbers in a test.
This decorator can be applied to either a test case class or a test method.
It should not be applied within ``condition.retry`` or
``condition.repeat``.
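    .. admonition:: Example

        A minimal sketch applying the decorator to a hypothetical test case
        class:

        >>> import unittest
        >>> from chainer import testing
        >>> @testing.fix_random()
        ... class TestWithFixedSeed(unittest.TestCase):
        ...     def test_something(self):
        ...         pass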
"""
return _fix_random(setup_method, teardown_method)
| 4,706
| 27.527273
| 79
|
py
|
chainer
|
chainer-master/chainer/testing/backend.py
|
import functools
import chainer
from chainer import backend
from chainer.testing import _bundle
from chainer.testing import attr
import chainerx
# TODO(hvy): BackendConfig.__enter__ does not have to modify the current
# device. Change it so that it does not.
class BackendConfig(object):
_props = [
# ChainerX
('use_chainerx', False),
('chainerx_device', None),
# CuPy
('use_cuda', False),
('cuda_device', None), # 0 by default, if use_cuda=True
('use_cudnn', 'never'),
('cudnn_deterministic', False),
('autotune', False),
('cudnn_fast_batch_normalization', False),
# Intel64
('use_ideep', 'never'),
]
_device = None
def __init__(self, params):
if not isinstance(params, dict):
raise TypeError('params must be a dict.')
self._contexts = []
# Default values
for k, v in self._props:
setattr(self, k, v)
# Specified values
for k, v in params.items():
if not hasattr(self, k):
raise ValueError('Parameter {} is not defined'.format(k))
setattr(self, k, v)
self._check_params()
self._adjust_params()
def _check_params(self):
# Checks consistency of parameters
if self.use_chainerx:
assert isinstance(self.chainerx_device, str), (
'\'chainerx_device\' parameter is expected to be a string '
'representing a ChainerX device specifier')
def _adjust_params(self):
# Adjusts parameters, e.g. fill the default values
if self.use_cuda:
if self.cuda_device is None:
self.cuda_device = 0
@property
def xp(self):
return self.device.xp
@property
def device(self):
if self._device is None:
if self.use_cuda:
device = backend.GpuDevice.from_device_id(self.cuda_device)
elif self.use_chainerx:
device = backend.ChainerxDevice(
chainerx.get_device(self.chainerx_device))
elif self.use_ideep != 'never':
device = backend.Intel64Device()
else:
device = backend.CpuDevice()
self._device = device
return self._device
def __enter__(self):
contexts = [
chainer.using_config(
'use_cudnn', self.use_cudnn),
chainer.using_config(
'cudnn_deterministic', self.cudnn_deterministic),
chainer.using_config(
'autotune', self.autotune),
chainer.using_config(
'use_ideep', self.use_ideep),
chainer.using_device(self.device),
]
for c in contexts:
c.__enter__()
self._contexts.append(contexts)
return self
def __exit__(self, typ, value, traceback):
contexts = self._contexts.pop()
for c in reversed(contexts):
c.__exit__(typ, value, traceback)
def __repr__(self):
lst = []
for k, _ in self._props:
lst.append('{}={!r}'.format(k, getattr(self, k)))
return '<BackendConfig {}>'.format(' '.join(lst))
def get_func_str(self):
"""Returns a string that can be used in method name"""
lst = []
for k, _ in self._props:
val = getattr(self, k)
if val is True:
val = 'true'
elif val is False:
val = 'false'
else:
val = str(val)
lst.append('{}_{}'.format(k, val))
return '__'.join(lst)
def get_pytest_marks(self):
marks = []
if self.use_chainerx:
marks.append(attr.chainerx)
backend_name, device_index = self.chainerx_device.split(':')
device_index = int(device_index)
if backend_name == 'cuda':
marks.append(attr.gpu)
if device_index >= 1:
marks.append(attr.multi_gpu(device_index + 1))
elif self.use_cuda:
marks.append(attr.gpu)
if self.use_cudnn != 'never':
marks.append(attr.cudnn)
if self.cuda_device >= 1:
marks.append(attr.multi_gpu(self.cuda_device + 1))
else:
if self.use_ideep != 'never':
marks.append(attr.ideep)
assert all(callable(_) for _ in marks)
return marks
def get_array(self, np_array):
return self.device.send(np_array)
def _test_case_generator(base, method_names, params):
# Defines the logic to generate test case classes parameterized with
# backends.
if method_names is not None:
def method_generator(base_method):
if base_method.__name__ in method_names:
return None
return base_method
yield (base.__name__, {}, method_generator)
for i_param, param in enumerate(params):
backend_config = BackendConfig(param)
marks = backend_config.get_pytest_marks()
cls_name = '{}_{}'.format(base.__name__, backend_config.get_func_str())
def method_generator(base_method):
# Generates a wrapped test method
if (method_names is not None
and base_method.__name__ not in method_names):
return None
# Bind to a new variable.
backend_config2 = backend_config
@functools.wraps(base_method)
def new_method(self, *args, **kwargs):
return base_method(self, backend_config2, *args, **kwargs)
# Apply test marks
for mark in marks:
new_method = mark(new_method)
return new_method
yield (cls_name, {}, method_generator)
def inject_backend_tests(method_names, params):
if not (method_names is None or isinstance(method_names, list)):
raise TypeError('method_names must be either None or a list.')
if not isinstance(params, list):
raise TypeError('params must be a list of dicts.')
if not all(isinstance(d, dict) for d in params):
raise TypeError('params must be a list of dicts.')
return _bundle.make_decorator(
lambda base: _test_case_generator(base, method_names, params))
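# A hypothetical usage sketch: parameterize a test case over plain NumPy and
# CuPy backends. With ``method_names=None`` every test method of the class is
# generated for each parameter dict and receives a ``backend_config``
# argument, e.g.
#
#     @inject_backend_tests(None, [
#         {},                                     # NumPy (CPU)
#         {'use_cuda': True, 'cuda_device': 0},   # CuPy on GPU 0
#     ])
#     class TestMyFunction(unittest.TestCase):
#         def test_forward(self, backend_config):
#             x = backend_config.get_array(numpy.ones((2, 3), numpy.float32))
#             ...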
| 6,385
| 30.93
| 79
|
py
|
chainer
|
chainer-master/chainer/testing/__init__.py
|
from chainer.testing.array import assert_allclose # NOQA
from chainer.testing.backend import BackendConfig # NOQA
from chainer.testing.backend import inject_backend_tests # NOQA
from chainer.testing.distribution_test import distribution_unittest # NOQA
from chainer.testing.function_link import FunctionTestCase # NOQA
from chainer.testing.function_link import FunctionTestError # NOQA
from chainer.testing.function_link import InitializerArgument # NOQA
from chainer.testing.function_link import LinkInitializersTestCase # NOQA
from chainer.testing.function_link import LinkTestCase # NOQA
from chainer.testing.function_link import LinkTestError # NOQA
from chainer.testing.helper import assert_warns # NOQA
from chainer.testing.helper import patch # NOQA
from chainer.testing.helper import with_requires # NOQA
from chainer.testing.helper import without_requires # NOQA
from chainer.testing.matrix import generate_matrix # NOQA
from chainer.testing.parameterized import from_pytest_parameterize # NOQA
from chainer.testing.parameterized import parameterize # NOQA
from chainer.testing.parameterized import parameterize_pytest # NOQA
from chainer.testing.parameterized import product # NOQA
from chainer.testing.parameterized import product_dict # NOQA
from chainer.testing.random import fix_random # NOQA
from chainer.testing.random import generate_seed # NOQA
from chainer.testing.serializer import save_and_load # NOQA
from chainer.testing.serializer import save_and_load_hdf5 # NOQA
from chainer.testing.serializer import save_and_load_npz # NOQA
from chainer.testing.training import get_trainer_with_mock_updater # NOQA
from chainer.testing.unary_math_function_test import unary_math_function_unittest # NOQA
def run_module(name, file):
"""Run current test cases of the file.
Args:
name: __name__ attribute of the file.
file: __file__ attribute of the file.
"""
if name == '__main__':
import pytest
pytest.main([file, '-vvs', '-x', '--pdb'])
| 2,027
| 48.463415
| 89
|
py
|
chainer
|
chainer-master/chainer/testing/_distribution_test.py
|
import functools
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer.testing import array
from chainer.testing import attr
from chainer import utils
def skip_not_in_test_target(test_target):
def decorator(f):
@functools.wraps(f)
def new_f(self, *args, **kwargs):
if test_target not in self.test_targets:
self.skipTest(
'\'%s\' does not exist in test_targets.' % test_target)
else:
f(self, *args, **kwargs)
return new_f
return decorator
class distribution_unittest(unittest.TestCase):
scipy_onebyone = False
def setUp(self):
self.support = 'real'
if not hasattr(self, 'event_shape'):
self.event_shape = ()
self.continuous = True
self.test_targets = set()
self.options = {}
self.setUp_configure()
targets_not_found = self.test_targets - {
'batch_shape', 'cdf', 'entropy', 'event_shape', 'icdf', 'log_cdf',
'log_prob', 'log_survival', 'mean', 'prob', 'sample', 'stddev',
'support', 'survival', 'variance'}
if targets_not_found:
raise ValueError(
'invalid target(s): {}'.format(targets_not_found))
if self.is_variable:
self.params = {k: chainer.Variable(v)
for k, v in self.params.items()}
def scipy_onebyone_params_iter(self):
for index in numpy.ndindex(self.shape):
yield {k: v[index] for k, v in self.scipy_params.items()}
@property
def cpu_dist(self):
params = self.params
params.update(self.options)
return self.dist(**params)
@property
def gpu_dist(self):
if self.is_variable:
gpu_params = {k: cuda.to_gpu(v.data)
for k, v in self.params.items()}
gpu_params = {k: chainer.Variable(v)
for k, v in gpu_params.items()}
else:
gpu_params = {k: cuda.to_gpu(v)
for k, v in self.params.items()}
gpu_params.update(self.options)
return self.dist(**gpu_params)
@skip_not_in_test_target('batch_shape')
def test_batch_shape_cpu(self):
self.assertEqual(self.cpu_dist.batch_shape, self.shape)
@attr.gpu
@skip_not_in_test_target('batch_shape')
def test_batch_shape_gpu(self):
self.assertEqual(self.gpu_dist.batch_shape, self.shape)
def check_cdf(self, is_gpu):
smp = self.sample_for_test()
if is_gpu:
cdf1 = self.gpu_dist.cdf(cuda.to_gpu(smp)).data
else:
cdf1 = self.cpu_dist.cdf(smp).data
cdf2 = self.scipy_dist.cdf(smp, **self.scipy_params)
array.assert_allclose(cdf1, cdf2)
@skip_not_in_test_target('cdf')
def test_cdf_cpu(self):
self.check_cdf(False)
@attr.gpu
@skip_not_in_test_target('cdf')
def test_cdf_gpu(self):
self.check_cdf(True)
def check_entropy(self, is_gpu):
if is_gpu:
ent1 = self.gpu_dist.entropy.data
else:
ent1 = self.cpu_dist.entropy.data
if self.scipy_onebyone:
ent2 = []
for one_params in self.scipy_onebyone_params_iter():
ent2.append(self.scipy_dist.entropy(**one_params))
ent2 = numpy.vstack(ent2).reshape(self.shape)
else:
ent2 = self.scipy_dist.entropy(**self.scipy_params)
array.assert_allclose(ent1, ent2)
@skip_not_in_test_target('entropy')
def test_entropy_cpu(self):
self.check_entropy(False)
@attr.gpu
@skip_not_in_test_target('entropy')
def test_entropy_gpu(self):
self.check_entropy(True)
@skip_not_in_test_target('event_shape')
def test_event_shape_cpu(self):
self.assertEqual(self.cpu_dist.event_shape, self.event_shape)
@attr.gpu
@skip_not_in_test_target('event_shape')
def test_event_shape_gpu(self):
self.assertEqual(self.gpu_dist.event_shape, self.event_shape)
def check_icdf(self, is_gpu):
smp = numpy.random.uniform(
1e-5, 1 - 1e-5, self.sample_shape + self.shape
).astype(numpy.float32)
if is_gpu:
icdf1 = self.gpu_dist.icdf(cuda.to_gpu(smp)).data
else:
icdf1 = self.cpu_dist.icdf(smp).data
icdf2 = self.scipy_dist.ppf(smp, **self.scipy_params)
array.assert_allclose(icdf1, icdf2)
@skip_not_in_test_target('icdf')
def test_icdf_cpu(self):
self.check_icdf(False)
@attr.gpu
@skip_not_in_test_target('icdf')
def test_icdf_gpu(self):
self.check_icdf(True)
def check_log_cdf(self, is_gpu):
smp = self.sample_for_test()
if is_gpu:
log_cdf1 = self.gpu_dist.log_cdf(cuda.to_gpu(smp)).data
else:
log_cdf1 = self.cpu_dist.log_cdf(smp).data
log_cdf2 = self.scipy_dist.logcdf(smp, **self.scipy_params)
array.assert_allclose(log_cdf1, log_cdf2)
@skip_not_in_test_target('log_cdf')
def test_log_cdf_cpu(self):
self.check_log_cdf(False)
@attr.gpu
@skip_not_in_test_target('log_cdf')
def test_log_cdf_gpu(self):
self.check_log_cdf(True)
def check_log_prob(self, is_gpu):
smp = self.sample_for_test()
if is_gpu:
log_prob1 = self.gpu_dist.log_prob(cuda.to_gpu(smp)).data
else:
log_prob1 = self.cpu_dist.log_prob(smp).data
if self.continuous:
scipy_prob = self.scipy_dist.logpdf
else:
scipy_prob = self.scipy_dist.logpmf
if self.scipy_onebyone:
onebyone_smp = smp.reshape(*[
utils.size_of_shape(sh)
for sh in [self.sample_shape, self.shape, self.event_shape]])
onebyone_smp = numpy.swapaxes(onebyone_smp, 0, 1)
onebyone_smp = onebyone_smp.reshape((-1,) + self.sample_shape
+ self.event_shape)
log_prob2 = []
for one_params, one_smp in zip(
self.scipy_onebyone_params_iter(), onebyone_smp):
log_prob2.append(scipy_prob(one_smp, **one_params))
log_prob2 = numpy.vstack(log_prob2)
log_prob2 = log_prob2.reshape(
utils.size_of_shape(self.shape), -1).T
log_prob2 = log_prob2.reshape(self.sample_shape + self.shape)
else:
log_prob2 = scipy_prob(smp, **self.scipy_params)
array.assert_allclose(log_prob1, log_prob2)
@skip_not_in_test_target('log_prob')
def test_log_prob_cpu(self):
self.check_log_prob(False)
@attr.gpu
@skip_not_in_test_target('log_prob')
def test_log_prob_gpu(self):
self.check_log_prob(True)
def check_log_survival(self, is_gpu):
smp = self.sample_for_test()
if is_gpu:
log_survival1 = \
self.gpu_dist.log_survival_function(cuda.to_gpu(smp)).data
else:
log_survival1 = self.cpu_dist.log_survival_function(smp).data
log_survival2 = self.scipy_dist.logsf(smp, **self.scipy_params)
array.assert_allclose(log_survival1, log_survival2)
@skip_not_in_test_target('log_survival')
def test_log_survival_cpu(self):
self.check_log_survival(False)
@attr.gpu
@skip_not_in_test_target('log_survival')
def test_log_survival_gpu(self):
self.check_log_survival(True)
def check_mean(self, is_gpu):
if is_gpu:
mean1 = self.gpu_dist.mean.data
else:
mean1 = self.cpu_dist.mean.data
if self.scipy_onebyone:
mean2 = []
for one_params in self.scipy_onebyone_params_iter():
mean2.append(self.scipy_dist.mean(**one_params))
mean2 = numpy.vstack(mean2).reshape(
self.shape + self.cpu_dist.event_shape)
else:
mean2 = self.scipy_dist.mean(**self.scipy_params)
array.assert_allclose(mean1, mean2)
@skip_not_in_test_target('mean')
def test_mean_cpu(self):
self.check_mean(False)
@attr.gpu
@skip_not_in_test_target('mean')
def test_mean_gpu(self):
self.check_mean(True)
def check_prob(self, is_gpu):
smp = self.sample_for_test()
if is_gpu:
prob1 = self.gpu_dist.prob(cuda.to_gpu(smp)).data
else:
prob1 = self.cpu_dist.prob(smp).data
if self.continuous:
prob2 = self.scipy_dist.pdf(smp, **self.scipy_params)
else:
prob2 = self.scipy_dist.pmf(smp, **self.scipy_params)
array.assert_allclose(prob1, prob2)
@skip_not_in_test_target('prob')
def test_prob_cpu(self):
self.check_prob(False)
@attr.gpu
@skip_not_in_test_target('prob')
def test_prob_gpu(self):
self.check_prob(True)
def check_sample(self, is_gpu):
if is_gpu:
smp1 = self.gpu_dist.sample(
sample_shape=(100000,)+self.sample_shape).data
else:
smp1 = self.cpu_dist.sample(
sample_shape=(100000,)+self.sample_shape).data
if self.scipy_onebyone:
smp2 = []
for one_params in self.scipy_onebyone_params_iter():
smp2.append(self.scipy_dist.rvs(
size=(100000,)+self.sample_shape, **one_params))
smp2 = numpy.vstack(smp2)
smp2 = smp2.reshape((utils.size_of_shape(self.shape), 100000)
+ self.sample_shape
+ self.cpu_dist.event_shape)
smp2 = numpy.rollaxis(
smp2, 0, smp2.ndim-len(self.cpu_dist.event_shape))
smp2 = smp2.reshape((100000,) + self.sample_shape + self.shape
+ self.cpu_dist.event_shape)
else:
smp2 = self.scipy_dist.rvs(
size=(100000,) + self.sample_shape + self.shape,
**self.scipy_params)
array.assert_allclose(smp1.mean(axis=0), smp2.mean(axis=0),
atol=3e-2, rtol=3e-2)
array.assert_allclose(smp1.std(axis=0), smp2.std(axis=0),
atol=3e-2, rtol=3e-2)
@skip_not_in_test_target('sample')
def test_sample_cpu(self):
self.check_sample(False)
@attr.gpu
@skip_not_in_test_target('sample')
def test_sample_gpu(self):
self.check_sample(True)
def check_stddev(self, is_gpu):
if is_gpu:
stddev1 = self.gpu_dist.stddev.data
else:
stddev1 = self.cpu_dist.stddev.data
stddev2 = self.scipy_dist.std(**self.scipy_params)
array.assert_allclose(stddev1, stddev2)
@skip_not_in_test_target('stddev')
def test_stddev_cpu(self):
self.check_stddev(False)
@attr.gpu
@skip_not_in_test_target('stddev')
def test_stddev_gpu(self):
self.check_stddev(True)
@skip_not_in_test_target('support')
def test_support_cpu(self):
self.assertEqual(self.cpu_dist.support, self.support)
@attr.gpu
@skip_not_in_test_target('support')
def test_support_gpu(self):
self.assertEqual(self.gpu_dist.support, self.support)
def check_survival(self, is_gpu):
smp = self.sample_for_test()
if is_gpu:
survival1 = self.gpu_dist.survival_function(
cuda.to_gpu(smp)).data
else:
survival1 = self.cpu_dist.survival_function(smp).data
survival2 = self.scipy_dist.sf(smp, **self.scipy_params)
array.assert_allclose(survival1, survival2)
@skip_not_in_test_target('survival')
def test_survival_cpu(self):
self.check_survival(False)
@attr.gpu
@skip_not_in_test_target('survival')
def test_survival_gpu(self):
self.check_survival(True)
def check_variance(self, is_gpu):
if is_gpu:
variance1 = self.gpu_dist.variance.data
else:
variance1 = self.cpu_dist.variance.data
if self.scipy_onebyone:
variance2 = []
for one_params in self.scipy_onebyone_params_iter():
variance2.append(self.scipy_dist.var(**one_params))
variance2 = numpy.vstack(variance2).reshape(
self.shape + self.cpu_dist.event_shape)
else:
variance2 = self.scipy_dist.var(**self.scipy_params)
array.assert_allclose(variance1, variance2)
@skip_not_in_test_target('variance')
def test_variance_cpu(self):
self.check_variance(False)
@attr.gpu
@skip_not_in_test_target('variance')
def test_variance_gpu(self):
self.check_variance(True)
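# A minimal sketch of a concrete test case (hypothetical; the actual tests
# live under tests/chainer_tests/distributions_tests). `setUp_configure`
# pairs a chainer distribution with its SciPy counterpart and fills in the
# parameters; `sample_for_test` provides points for the cdf/log_prob checks.
#
# @chainer.testing.parameterize(*chainer.testing.product({
# 'shape': [(2, 3), ()],
# 'is_variable': [True, False],
# 'sample_shape': [(3, 2), ()],
# }))
# class TestNormal(distribution_unittest):
#
# def setUp_configure(self):
# from scipy import stats
# self.dist = chainer.distributions.Normal
# self.scipy_dist = stats.norm
# self.test_targets = set(['batch_shape', 'log_prob', 'mean'])
# loc = numpy.random.uniform(
# -1, 1, self.shape).astype(numpy.float32)
# scale = numpy.exp(numpy.random.uniform(
# -1, 1, self.shape)).astype(numpy.float32)
# self.params = {'loc': loc, 'scale': scale}
# self.scipy_params = {'loc': loc, 'scale': scale}
#
# def sample_for_test(self):
# return numpy.random.normal(
# size=self.sample_shape + self.shape).astype(numpy.float32)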
| 12,806
| 32.264935
| 78
|
py
|
chainer
|
chainer-master/chainer/testing/parameterized.py
|
import functools
import itertools
import types
import typing as tp # NOQA
import unittest
import numpy
import six
from chainer.testing import _bundle
from chainer import utils
def _param_to_str(obj):
if isinstance(obj, type):
return obj.__name__
elif hasattr(obj, '__name__') and isinstance(obj.__name__, str):
# print __name__ attribute for classes, functions and modules
return obj.__name__
return repr(obj)
def _shorten(s, maxlen):
# Shortens the string down to maxlen, by replacing the middle part with
# a 3-dots string '...'.
ellipsis = '...'
if len(s) <= maxlen:
return s
n1 = (maxlen - len(ellipsis)) // 2
n2 = maxlen - len(ellipsis) - n1
s = s[:n1] + ellipsis + s[-n2:]
assert len(s) == maxlen
return s
def _make_class_name(base_class_name, i_param, param):
# Creates a class name for a single combination of parameters.
SINGLE_PARAM_MAXLEN = 100 # Length limit of a single parameter value
PARAMS_MAXLEN = 5000 # Length limit of the whole parameters part
param_strs = [
'{}={}'.format(k, _shorten(_param_to_str(v), SINGLE_PARAM_MAXLEN))
for k, v in sorted(param.items())]
param_strs = _shorten(', '.join(param_strs), PARAMS_MAXLEN)
cls_name = '{}_param_{}_{{{}}}'.format(
base_class_name, i_param, param_strs)
return cls_name
def _parameterize_test_case_generator(base, params):
# Defines the logic to generate parameterized test case classes.
for i, param in enumerate(params):
yield _parameterize_test_case(base, i, param)
def _parameterize_test_case(base, i, param):
cls_name = _make_class_name(base.__name__, i, param)
def __str__(self):
name = base.__str__(self)
return '%s parameter: %s' % (name, param)
mb = {'__str__': __str__}
for k, v in sorted(param.items()):
if isinstance(v, types.FunctionType):
def create_new_v():
f = v
def new_v(self, *args, **kwargs):
return f(*args, **kwargs)
return new_v
mb[k] = create_new_v()
else:
mb[k] = v
def method_generator(base_method):
# Generates a wrapped test method
@functools.wraps(base_method)
def new_method(self, *args, **kwargs):
try:
return base_method(self, *args, **kwargs)
except unittest.SkipTest:
raise
except Exception as e:
s = six.StringIO()
s.write('Parameterized test failed.\n\n')
s.write('Base test method: {}.{}\n'.format(
base.__name__, base_method.__name__))
s.write('Test parameters:\n')
for k, v in sorted(param.items()):
s.write(' {}: {}\n'.format(k, v))
err_class = e.__class__
if err_class.__name__ == 'OutOfMemoryError':
err_class = MemoryError
utils._raise_from(err_class, s.getvalue(), e)
return new_method
return (cls_name, mb, method_generator)
def parameterize(*params):
# TODO(niboshi): Add documentation
return _bundle.make_decorator(
lambda base: _parameterize_test_case_generator(base, params))
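# A short usage sketch (hypothetical test case): each dict passed to
# ``parameterize`` produces one generated TestCase class, and the dict items
# become attributes available on the test instance.
#
# @parameterize({'shape': (2, 3)}, {'shape': (5,)})
# class TestZeros(unittest.TestCase):
#
# def test_shape(self):
# self.assertEqual(numpy.zeros(self.shape).shape, self.shape)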
def _values_to_dicts(names, values):
assert isinstance(names, six.string_types)
assert isinstance(values, (tuple, list))
def safe_zip(ns, vs):
if len(ns) == 1:
return [(ns[0], vs)]
assert isinstance(vs, (tuple, list)) and len(ns) == len(vs)
return zip(ns, vs)
names = names.split(',')
params = [dict(safe_zip(names, value_list)) for value_list in values]
return params
def from_pytest_parameterize(names, values):
# Pytest-style parameterization.
# TODO(niboshi): Add documentation
return _values_to_dicts(names, values)
def parameterize_pytest(names, values):
# Pytest-style parameterization.
# TODO(niboshi): Add documentation
return parameterize(*from_pytest_parameterize(names, values))
def product(parameter):
# TODO(niboshi): Add documentation
if isinstance(parameter, dict):
return product_dict(*[
_values_to_dicts(names, values)
for names, values in sorted(parameter.items())])
elif isinstance(parameter, list):
# list of lists of dicts
if not all(isinstance(_, list) for _ in parameter):
raise TypeError('parameter must be list of lists of dicts')
if not all(isinstance(_, dict) for l in parameter for _ in l):
raise TypeError('parameter must be list of lists of dicts')
return product_dict(*parameter)
else:
raise TypeError(
'parameter must be either dict or list. Actual: {}'.format(
type(parameter)))
def product_dict(*parameters):
# TODO(niboshi): Add documentation
return [
{k: v for dic in dicts for k, v in six.iteritems(dic)}
for dicts in itertools.product(*parameters)]
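# For reference, a sketch of what these helpers expand to (the order of the
# resulting list follows sorted keys and itertools.product):
#
# product({'a': [1, 2], 'b': ['x', 'y']})
# -> [{'a': 1, 'b': 'x'}, {'a': 1, 'b': 'y'},
# {'a': 2, 'b': 'x'}, {'a': 2, 'b': 'y'}]
#
# product_dict([{'a': 1}, {'a': 2}], [{'b': 'x'}])
# -> [{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'x'}]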
# TODO(kataoka): product_dict is patched by tests/conftest.py while tests are
# collected if CHAINER_TEST_PAIRWISE_PARAMETERIZATION is configured
# accordingly. Also used in
# tests/chainer_tests/testing_tests/test_parameterized.py
_product_dict_orig = product_dict
def _pairwise_product_dict(*parameters):
if len(parameters) <= 2:
return _product_dict_orig(*parameters)
return list(_pairwise_product_dict_iter(*parameters))
def _pairwise_product_dict_iter(
*parameters: tp.Iterable[tp.Dict[str, tp.Any]]
) -> tp.Iterator[tp.Dict[str, tp.Any]]:
"""Generate combinations that cover all pairs.
The argument is the same as `chainer.testing.product_dict`.
"""
parameter_lists = [list(dicts) for dicts in parameters] # type: tp.List[tp.List[tp.Dict[str, tp.Any]]] # NOQA
for nd_index in sorted(_nd_indices_to_cover_each_2d(
[len(dicts) for dicts in parameter_lists])):
yield {
k: v
for i, dicts in zip(nd_index, parameter_lists)
for k, v in dicts[i].items()}
def _nd_indices_to_cover_each_2d(
shape: tp.Sequence[int]
) -> tp.Iterator[tp.Tuple[int, ...]]:
rs = numpy.random.RandomState(seed=0)
n = len(shape)
indices = [list(range(length)) for length in shape] # type: tp.List[tp.List[int]] # NOQA
    # `(k_i, k_j) in uncovered[(i, j)]` iff no `nd_index` with
    # `(nd_index[i], nd_index[j]) == (k_i, k_j)` has been yielded yet.
uncovered = {} # type: tp.Dict[tp.Tuple[int, int], tp.Set[tp.Tuple[int, int]]] # NOQA
for i, j in itertools.combinations(range(n), 2):
uncovered[(i, j)] = set(itertools.product(indices[i], indices[j]))
nd_indices = list(itertools.product(*indices)) # type: tp.List[tp.Tuple[int, ...]] # NOQA
rs.shuffle(nd_indices)
for nd_index in nd_indices:
count = 0
for i, j in itertools.combinations(range(n), 2):
try:
uncovered[(i, j)].remove((nd_index[i], nd_index[j]))
except KeyError:
pass
else:
count += 1
if count > 0:
yield nd_index
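# A brief sketch of the effect: the greedy cover above only keeps nd-indices
# that add at least one previously uncovered value pair for some pair of
# axes, while still guaranteeing that every pair eventually appears.
#
# list(_pairwise_product_dict_iter(
# [{'a': 0}, {'a': 1}],
# [{'b': 0}, {'b': 1}],
# [{'c': 0}, {'c': 1}]))
#
# yields dicts in which every (a, b), (a, c) and (b, c) value pair occurs at
# least once; for larger parameter spaces this is far smaller than the full
# product.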
| 7,227
| 31.558559
| 115
|
py
|
chainer
|
chainer-master/chainer/testing/matrix.py
|
import numpy
from chainer.utils import argument
def generate_matrix(shape, dtype=float, **kwargs):
r"""generate_matrix(shape, dtype=float, *, singular_values)
Generates a random matrix with given singular values.
    This function generates a random NumPy matrix (or a stack of matrices)
    that has the specified singular values. It can be used to generate inputs
    for a test that would be numerically unstable if the input were
    ill-conditioned.
    Notation: denote the shape of the generated array by :math:`(B..., M, N)`,
    and :math:`K = \min\{M, N\}`. :math:`B...` may be an empty sequence.
Args:
shape (tuple of int): Shape of the generated array, i.e.,
:math:`(B..., M, N)`.
dtype: Dtype of the generated array.
singular_values (array-like): Singular values of the generated
matrices. It must be broadcastable to shape :math:`(B..., K)`.
"""
singular_values, = argument.parse_kwargs(
kwargs, ('singular_values', None),
)
if len(shape) <= 1:
raise ValueError(
'shape {} is invalid for matrices: too few axes'.format(shape)
)
# TODO(beam2d): consider supporting integer/boolean matrices
dtype = numpy.dtype(dtype)
if dtype.kind not in 'fc':
raise TypeError('dtype {} is not supported'.format(dtype))
if singular_values is None:
raise TypeError('singular_values is not given')
singular_values = numpy.asarray(singular_values)
if not numpy.isrealobj(singular_values):
raise TypeError('singular_values is not real')
if (singular_values < 0).any():
raise ValueError('negative singular value is given')
if 0 in shape:
# NumPy<1.16 does not support zero-sized matrices in svd, so skip it.
# Try broadcast first to raise an error on shape mismatch.
_broadcast_to(singular_values, shape[:-2] + (min(shape[-2:]),))
return numpy.empty(shape, dtype=dtype)
# Generate random matrices with given singular values. We simply generate
# orthogonal vectors using SVD on random matrices and then combine them
# with the given singular values.
a = numpy.random.randn(*shape)
if dtype.kind == 'c':
a = a + 1j * numpy.random.randn(*shape)
u, s, vh = numpy.linalg.svd(a, full_matrices=False)
sv = _broadcast_to(singular_values, s.shape)
a = numpy.einsum('...ik,...k,...kj->...ij', u, sv, vh)
return a.astype(dtype)
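# A minimal usage sketch (shapes and values are arbitrary): build a stack of
# 5x4 matrices with fixed singular values, so that SVD- or solve-based tests
# stay numerically well-behaved.
#
# a = generate_matrix(
# (2, 5, 4), dtype=numpy.float32,
# singular_values=numpy.linspace(1.0, 2.0, 4))
# # a.shape == (2, 5, 4); every 5x4 slice has singular values 1.0 ... 2.0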
def _broadcast_to(array, shape):
if hasattr(numpy, 'broadcast_to'):
return numpy.broadcast_to(array, shape)
# NumPy 1.9 does not support broadcast_to.
dummy = numpy.empty(shape, dtype=numpy.int8)
ret, _ = numpy.broadcast_arrays(array, dummy)
return ret
| 2,736
| 37.013889
| 79
|
py
|
chainer
|
chainer-master/chainer/testing/function_link.py
|
import contextlib
import typing as tp # NOQA
import unittest
import numpy
import six
import chainer
from chainer import backend
from chainer import initializers
from chainer.testing import array as array_module
from chainer import utils
class _TestError(AssertionError):
"""Parent class to Chainer test errors."""
@classmethod
def check(cls, expr, message):
if not expr:
raise cls(message)
@classmethod
def fail(cls, message, exc=None):
if exc is not None:
utils._raise_from(cls, message, exc)
raise cls(message)
@classmethod
@contextlib.contextmanager
def raise_if_fail(cls, message, error_types=AssertionError):
try:
yield
except error_types as e:
cls.fail(message, e)
class FunctionTestError(_TestError):
"""Raised when the target function is implemented incorrectly."""
pass
class LinkTestError(_TestError):
"""Raised when the target link is implemented incorrectly."""
pass
class InitializerArgument(object):
"""Class to hold a pair of initializer argument value and actual
initializer-like.
This class is meant to be included in the return value from
:meth:`chainer.testing.LinkTestCase.get_initializers` in
:class:`chainer.testing.LinkTestCase` if the argument and the actual
initializer in the link do not directly correspond.
    In that case, the first element should correspond to the argument passed
    to the constructor of the link, and the second element should correspond
    to the actual initializer-like object used by the link.
"""
def __init__(self, argument_value, expected_initializer):
if expected_initializer is None:
            raise ValueError('Expected initializer cannot be None.')
initializers._check_is_initializer_like(expected_initializer)
self.argument_value = argument_value
self.expected_initializer = expected_initializer
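# A short sketch (hypothetical link): when passing ``None`` to a link
# constructor means "fall back to the default initializer", wrap it so the
# test still knows which values to expect, e.g.
#
# InitializerArgument(None, initializers.Constant(0))
#
# passes ``None`` to the link but checks the parameter against a constant
# zero initializer.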
class FunctionTestBase(object):
backend_config = None
check_forward_options = None
check_backward_options = None
check_double_backward_options = None
skip_forward_test = False
skip_backward_test = False
skip_double_backward_test = False
dodge_nondifferentiable = False
numerical_grad_dtype = numpy.float64
contiguous = None
def __init__(self, *args, **kwargs):
super(FunctionTestBase, self).__init__(*args, **kwargs)
self.check_forward_options = {}
self.check_backward_options = {}
self.check_double_backward_options = {}
def before_test(self, test_name):
pass
def forward(self, inputs, device):
raise NotImplementedError('forward() is not implemented.')
def forward_expected(self, inputs):
raise NotImplementedError('forward_expected() is not implemented.')
def generate_inputs(self):
raise NotImplementedError('generate_inputs() is not implemented.')
def generate_grad_outputs(self, outputs_template):
grad_outputs = tuple([
numpy.random.uniform(-1, 1, a.shape).astype(a.dtype)
for a in outputs_template])
return grad_outputs
def generate_grad_grad_inputs(self, inputs_template):
grad_grad_inputs = tuple([
numpy.random.uniform(-1, 1, a.shape).astype(a.dtype)
for a in inputs_template])
return grad_grad_inputs
def check_forward_outputs(self, outputs, expected_outputs):
assert isinstance(outputs, tuple)
assert isinstance(expected_outputs, tuple)
assert all(isinstance(a, chainer.get_array_types()) for a in outputs)
assert all(
isinstance(a, chainer.get_array_types()) for a in expected_outputs)
_check_arrays_equal(
outputs, expected_outputs, FunctionTestError,
**self.check_forward_options)
def _to_noncontiguous_as_needed(self, contig_arrays):
if self.contiguous is None:
# non-contiguous
return array_module._as_noncontiguous_array(contig_arrays)
if self.contiguous == 'C':
# C-contiguous
return contig_arrays
assert False, (
'Invalid value of `contiguous`: {}'.format(self.contiguous))
def _generate_inputs(self):
inputs = self.generate_inputs()
_check_array_types(inputs, backend.CpuDevice(), 'generate_inputs')
return inputs
def _generate_grad_outputs(self, outputs_template):
grad_outputs = self.generate_grad_outputs(outputs_template)
_check_array_types(
grad_outputs, backend.CpuDevice(), 'generate_grad_outputs')
return grad_outputs
def _generate_grad_grad_inputs(self, inputs_template):
grad_grad_inputs = self.generate_grad_grad_inputs(inputs_template)
_check_array_types(
grad_grad_inputs, backend.CpuDevice(), 'generate_grad_grad_inputs')
return grad_grad_inputs
def _forward_expected(self, inputs):
outputs = self.forward_expected(inputs)
_check_array_types(
outputs, backend.CpuDevice(), 'forward_expected')
return outputs
def _forward(self, inputs, backend_config):
assert all(isinstance(a, chainer.Variable) for a in inputs)
with backend_config:
outputs = self.forward(inputs, backend_config.device)
_check_variable_types(
outputs, backend_config.device, 'forward', FunctionTestError)
return outputs
def run_test_forward(self, backend_config):
# Runs the forward test.
if self.skip_forward_test:
raise unittest.SkipTest('skip_forward_test is set')
self.backend_config = backend_config
self.test_name = 'test_forward'
self.before_test(self.test_name)
cpu_inputs = self._generate_inputs()
cpu_inputs = self._to_noncontiguous_as_needed(cpu_inputs)
inputs_copied = [a.copy() for a in cpu_inputs]
# Compute expected outputs
cpu_expected = self._forward_expected(cpu_inputs)
# Compute actual outputs
inputs = backend_config.get_array(cpu_inputs)
inputs = self._to_noncontiguous_as_needed(inputs)
outputs = self._forward(
tuple([
chainer.Variable(a, requires_grad=a.dtype.kind == 'f')
for a in inputs]),
backend_config)
# Check inputs has not changed
indices = []
for i in range(len(inputs)):
try:
array_module.assert_allclose(
inputs_copied[i], inputs[i], atol=0, rtol=0)
except AssertionError:
indices.append(i)
if indices:
f = six.StringIO()
f.write(
'Input arrays have been modified during forward.\n'
'Indices of modified inputs: {}\n'
'Input array shapes and dtypes: {}\n'.format(
', '.join(str(i) for i in indices),
utils._format_array_props(inputs)))
for i in indices:
f.write('\n')
f.write('Input[{}]:\n'.format(i))
f.write('Original:\n')
f.write(str(inputs_copied[i]))
f.write('\n')
f.write('After forward:\n')
f.write(str(inputs[i]))
f.write('\n')
FunctionTestError.fail(f.getvalue())
self.check_forward_outputs(
tuple([var.array for var in outputs]),
cpu_expected)
def run_test_backward(self, backend_config):
# Runs the backward test.
if self.skip_backward_test:
raise unittest.SkipTest('skip_backward_test is set')
# avoid cyclic import
from chainer import gradient_check
self.backend_config = backend_config
self.test_name = 'test_backward'
self.before_test(self.test_name)
def f(*args):
return self._forward(args, backend_config)
def do_check():
inputs = self._generate_inputs()
outputs = self._forward_expected(inputs)
grad_outputs = self._generate_grad_outputs(outputs)
inputs = backend_config.get_array(inputs)
grad_outputs = backend_config.get_array(grad_outputs)
inputs = self._to_noncontiguous_as_needed(inputs)
grad_outputs = self._to_noncontiguous_as_needed(grad_outputs)
with FunctionTestError.raise_if_fail(
'backward is not implemented correctly'):
gradient_check.check_backward(
f, inputs, grad_outputs, dtype=self.numerical_grad_dtype,
detect_nondifferentiable=self.dodge_nondifferentiable,
**self.check_backward_options)
if self.dodge_nondifferentiable:
while True:
try:
do_check()
except gradient_check.NondifferentiableError:
continue
else:
break
else:
do_check()
def run_test_double_backward(self, backend_config):
# Runs the double-backward test.
if self.skip_double_backward_test:
raise unittest.SkipTest('skip_double_backward_test is set')
# avoid cyclic import
from chainer import gradient_check
self.backend_config = backend_config
self.test_name = 'test_double_backward'
self.before_test(self.test_name)
def f(*args):
return self._forward(args, backend_config)
def do_check():
inputs = self._generate_inputs()
outputs = self._forward_expected(inputs)
grad_outputs = self._generate_grad_outputs(outputs)
grad_grad_inputs = self._generate_grad_grad_inputs(inputs)
            # Drop ggx corresponding to non-differentiable (non-float) inputs.
            # `grad_grad_inputs`, the upstream gradients for the double
            # backward test, may contain `None` for omitted gradients; those
            # entries are kept and propagated to the gradient check.
grad_grad_inputs = [
ggx for ggx in grad_grad_inputs
if (ggx is None or ggx.dtype.kind == 'f')]
inputs = backend_config.get_array(inputs)
grad_outputs = backend_config.get_array(grad_outputs)
grad_grad_inputs = backend_config.get_array(grad_grad_inputs)
inputs = self._to_noncontiguous_as_needed(inputs)
grad_outputs = self._to_noncontiguous_as_needed(grad_outputs)
grad_grad_inputs = (
self._to_noncontiguous_as_needed(grad_grad_inputs))
with backend_config:
with FunctionTestError.raise_if_fail(
'double backward is not implemented correctly'):
gradient_check.check_double_backward(
f, inputs, grad_outputs, grad_grad_inputs,
dtype=self.numerical_grad_dtype,
detect_nondifferentiable=self.dodge_nondifferentiable,
**self.check_double_backward_options)
if self.dodge_nondifferentiable:
while True:
try:
do_check()
except gradient_check.NondifferentiableError:
continue
else:
break
else:
do_check()
class FunctionTestCase(FunctionTestBase, unittest.TestCase):
"""A base class for function test cases.
Function test cases can inherit from this class to define a set of function
tests.
.. rubric:: Required methods
Each concrete class must at least override the following three methods.
``forward(self, inputs, device)``
Implements the target forward function.
``inputs`` is a tuple of :class:`~chainer.Variable`\\ s.
This method is expected to return the output
:class:`~chainer.Variable`\\ s with the same array types as the inputs.
``device`` is the device corresponding to the input arrays.
``forward_expected(self, inputs)``
Implements the expectation of the target forward function.
``inputs`` is a tuple of :class:`numpy.ndarray`\\ s.
This method is expected to return the output
:class:`numpy.ndarray`\\ s.
``generate_inputs(self)``
Returns a tuple of input arrays of type :class:`numpy.ndarray`.
.. rubric:: Optional methods
Additionally the concrete class can override the following methods.
``before_test(self, test_name)``
A callback method called before each test.
Typically a skip logic is implemented by conditionally raising
:class:`unittest.SkipTest`.
``test_name`` is one of ``'test_forward'``, ``'test_backward'``, and
``'test_double_backward'``.
``generate_grad_outputs(self, outputs_template)``
Returns a tuple of output gradient arrays of type
        :class:`numpy.ndarray`, or ``None`` for omitted gradients.
``outputs_template`` is a tuple of template arrays. The returned arrays
are expected to have the same shapes and dtypes as the template arrays.
``generate_grad_grad_inputs(self, inputs_template)``
Returns a tuple of the second order input gradient arrays of type
:class:`numpy.ndarray` or ``None`` for omitted gradients.
        ``inputs_template`` is a tuple of template arrays. The returned arrays
are expected to have the same shapes and dtypes as the template arrays.
``check_forward_outputs(self, outputs, expected_outputs)``
        Implements the check logic for forward outputs. Typically, additional
        checks can be done after calling ``super().check_forward_outputs``.
``outputs`` and ``expected_outputs`` are tuples of arrays.
In case the check fails, ``FunctionTestError`` should be raised.
.. rubric:: Configurable attributes
The concrete class can override the following attributes to control the
behavior of the tests.
``skip_forward_test`` (bool):
Whether to skip forward computation test. ``False`` by default.
``skip_backward_test`` (bool):
Whether to skip backward computation test. ``False`` by default.
``skip_double_backward_test`` (bool):
Whether to skip double-backward computation test. ``False`` by default.
``dodge_nondifferentiable`` (bool):
Enable non-differentiable point detection in numerical gradient
        calculation. If the inputs returned by ``generate_inputs`` turn out
        to be a non-differentiable point, the test repeatedly resamples the
        inputs until a differentiable point is finally sampled.
``False`` by default.
``numerical_grad_dtype`` (dtype):
        Input arrays are cast to this dtype when calculating the numerical
gradients. It is ``float64`` by default, no matter what the original
input dtypes were, to maximize precision.
``contiguous`` (None or 'C'):
Specifies the contiguousness of incoming arrays (i.e. inputs, output
gradients, and the second order input gradients). If ``None``, the
arrays will be non-contiguous as long as possible. If ``'C'``, the
arrays will be C-contiguous. ``None`` by default.
.. rubric:: Passive attributes
These attributes are automatically set.
``test_name`` (str):
The name of the test being run. It is one of ``'test_forward'``,
``'test_backward'``, and ``'test_double_backward'``.
``backend_config`` (:class:`~chainer.testing.BackendConfig`):
The backend configuration.
.. note::
This class assumes :func:`chainer.testing.inject_backend_tests`
is used together. See the example below.
.. admonition:: Example
.. testcode::
@chainer.testing.inject_backend_tests(
None,
[
{}, # CPU
{'use_cuda': True}, # GPU
])
class TestReLU(chainer.testing.FunctionTestCase):
# ReLU function has a non-differentiable point around zero, so
# dodge_nondifferentiable should be set to True.
dodge_nondifferentiable = True
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
return x,
def forward(self, inputs, device):
x, = inputs
return F.relu(x),
def forward_expected(self, inputs):
x, = inputs
expected = x.copy()
expected[expected < 0] = 0
return expected,
.. seealso:: :class:`~chainer.testing.LinkTestCase`
"""
def test_forward(self, backend_config):
"""Tests forward computation."""
self.run_test_forward(backend_config)
def test_backward(self, backend_config):
"""Tests backward computation."""
self.run_test_backward(backend_config)
def test_double_backward(self, backend_config):
"""Tests double-backward computation."""
self.run_test_double_backward(backend_config)
class _LinkTestBase(object):
backend_config = None
contiguous = None
# List of parameter names represented as strings.
# I.e. ('gamma', 'beta') for BatchNormalization.
param_names = ()
def before_test(self, test_name):
pass
def generate_params(self):
raise NotImplementedError('generate_params is not implemented.')
def generate_inputs(self):
raise NotImplementedError('generate_inputs is not implemented.')
def create_link(self, initializers):
raise NotImplementedError('create_link is not implemented.')
def forward(self, link, inputs, device):
outputs = link(*inputs)
if not isinstance(outputs, tuple):
outputs = outputs,
return outputs
def check_forward_outputs(self, outputs, expected_outputs):
assert isinstance(outputs, tuple)
assert isinstance(expected_outputs, tuple)
assert all(isinstance(a, chainer.get_array_types()) for a in outputs)
assert all(
isinstance(a, chainer.get_array_types()) for a in expected_outputs)
_check_arrays_equal(
outputs, expected_outputs, LinkTestError,
**self.check_forward_options)
def _generate_params(self):
params_init = self.generate_params()
if not isinstance(params_init, (tuple, list)):
raise TypeError(
'`generate_params` must return a tuple or a list.')
for init in params_init:
_check_generated_initializer(init)
return params_init
def _generate_inputs(self):
inputs = self.generate_inputs()
_check_array_types(inputs, backend.CpuDevice(), 'generate_inputs')
return inputs
def _create_link(self, initializers, backend_config):
link = self.create_link(initializers)
if not isinstance(link, chainer.Link):
raise TypeError(
'`create_link` must return a chainer.Link object.')
link.to_device(backend_config.device)
return link
def _create_initialized_link(self, inits, backend_config):
inits = [_get_initializer_argument_value(i) for i in inits]
link = self._create_link(inits, backend_config)
# Generate inputs and compute a forward pass to initialize the
# parameters.
inputs_np = self._generate_inputs()
inputs_xp = backend_config.get_array(inputs_np)
inputs_xp = self._to_noncontiguous_as_needed(inputs_xp)
input_vars = [chainer.Variable(i) for i in inputs_xp]
output_vars = self._forward(link, input_vars, backend_config)
outputs_xp = [v.array for v in output_vars]
link.cleargrads()
return link, inputs_xp, outputs_xp
def _forward(self, link, inputs, backend_config):
assert all(isinstance(x, chainer.Variable) for x in inputs)
with backend_config:
outputs = self.forward(link, inputs, backend_config.device)
_check_variable_types(
outputs, backend_config.device, 'forward', LinkTestError)
return outputs
def _to_noncontiguous_as_needed(self, contig_arrays):
if self.contiguous is None:
# non-contiguous
return array_module._as_noncontiguous_array(contig_arrays)
if self.contiguous == 'C':
# C-contiguous
return contig_arrays
assert False, (
'Invalid value of `contiguous`: {}'.format(self.contiguous))
class LinkTestCase(_LinkTestBase, unittest.TestCase):
"""A base class for link forward and backward test cases.
Link test cases can inherit from this class to define a set of link tests
for forward and backward computations.
.. rubric:: Required methods
Each concrete class must at least override the following methods.
``generate_params(self)``
        Returns a tuple of initializer-likes. The tuple should contain an
initializer-like for each initializer-like argument, i.e. the
parameters to the link constructor. These will be passed to
``create_link``.
``create_link(self, initializers)``
Returns a link. The link should be initialized with the given
initializer-likes ``initializers``. ``initializers`` is a tuple of
same length as the number of parameters.
``generate_inputs(self)``
Returns a tuple of input arrays of type :class:`numpy.ndarray`.
``forward(self, link, inputs, device)``
Implements the target forward function.
``link`` is a link created by ``create_link`` and
``inputs`` is a tuple of :class:`~chainer.Variable`\\ s.
This method is expected to return the output
:class:`~chainer.Variable`\\ s with the same array types as the inputs.
``device`` is the device corresponding to the input arrays.
        A default implementation is provided for links that only take the
        inputs defined in ``generate_inputs`` (wrapped in
        :class:`~chainer.Variable`\\ s) and return nothing but output
        :class:`~chainer.Variable`\\ s in their forward computation.
.. rubric:: Optional methods
Each concrete class may override the following methods depending on the
skip flags ``skip_forward_test`` and ``skip_backward_test``.
``before_test(self, test_name)``
A callback method called before each test.
Typically a skip logic is implemented by conditionally raising
:class:`unittest.SkipTest`.
``test_name`` is one of ``'test_forward'`` and ``'test_backward'``.
``forward_expected(self, link, inputs)``
Implements the expectation of the target forward function.
``link`` is the initialized link that was used to compute the actual
forward which the results of this method will be compared against.
The link is guaranteed to reside on the CPU.
``inputs`` is a tuple of :class:`numpy.ndarray`\\ s.
This method is expected to return the output
:class:`numpy.ndarray`\\ s.
This method must be implemented if either ``skip_forward_test`` or
``skip_backward_test`` is ``False`` in which case forward or backward
tests are executed.
``generate_grad_outputs(self, outputs_template)``
Returns a tuple of output gradient arrays of type
:class:`numpy.ndarray`.
``outputs_template`` is a tuple of template arrays. The returned arrays
are expected to have the same shapes and dtypes as the template arrays.
``check_forward_outputs(self, outputs, expected_outputs)``
        Implements the check logic for forward outputs. Typically, additional
        checks can be done after calling ``super().check_forward_outputs``.
``outputs`` and ``expected_outputs`` are tuples of arrays.
In case the check fails, ``LinkTestError`` should be raised.
.. rubric:: Attributes
The concrete class can override the following attributes to control the
behavior of the tests.
``param_names`` (tuple of str):
A tuple of strings with all the names of the parameters that should be
tested. E.g. ``('gamma', 'beta')`` for the batch normalization link.
``()`` by default.
``skip_forward_test`` (bool):
Whether to skip forward computation test. ``False`` by default.
``skip_backward_test`` (bool):
Whether to skip backward computation test. ``False`` by default.
``dodge_nondifferentiable`` (bool):
Enable non-differentiable point detection in numerical gradient
calculation. If the data returned by
        ``generate_params``, ``create_link`` and ``generate_inputs`` turns out
        to be a non-differentiable point, the test repeatedly resamples them
        until a differentiable point is finally sampled. ``False``
        by default.
``numerical_grad_dtype`` (dtype):
        Input arrays are cast to this dtype when calculating the numerical
gradients. It is ``float64`` by default, no matter what the original
input dtypes were, to maximize precision.
``contiguous`` (None or 'C'):
Specifies the contiguousness of incoming arrays (i.e. inputs,
        parameters and gradients). If ``None``, the
arrays will be non-contiguous as long as possible. If ``'C'``, the
arrays will be C-contiguous. ``None`` by default.
.. note::
This class assumes :func:`chainer.testing.inject_backend_tests`
is used together. See the example below.
.. note::
When implementing :class:`~chainer.testing.LinkTestCase` and
:class:`~chainer.testing.LinkInitializersTestCase` to test both
forward/backward and initializers, it is often convenient to refactor
out common logic in a separate class.
.. admonition:: Example
.. testcode::
@chainer.testing.inject_backend_tests(
None,
[
{}, # CPU
{'use_cuda': True}, # GPU
])
class TestLinear(chainer.testing.LinkTestCase):
param_names = ('W', 'b')
def generate_params(self):
initialW = numpy.random.uniform(
-1, 1, (3, 2)).astype(numpy.float32)
initial_bias = numpy.random.uniform(
-1, 1, (3,)).astype(numpy.float32)
return initialW, initial_bias
def generate_inputs(self):
x = numpy.random.uniform(
-1, 1, (1, 2)).astype(numpy.float32)
return x,
def create_link(self, initializers):
initialW, initial_bias = initializers
link = chainer.links.Linear(
2, 3, initialW=initialW, initial_bias=initial_bias)
return link
def forward(self, link, inputs, device):
x, = inputs
return link(x),
def forward_expected(self, link, inputs):
W = link.W.array
b = link.b.array
x, = inputs
expected = x.dot(W.T) + b
return expected,
.. seealso::
:class:`~chainer.testing.LinkInitializersTestCase`
:class:`~chainer.testing.FunctionTestCase`
"""
check_forward_options = None
check_backward_options = None
skip_forward_test = False
skip_backward_test = False
dodge_nondifferentiable = False
numerical_grad_dtype = numpy.float64
def __init__(self, *args, **kwargs):
self.check_forward_options = {}
self.check_backward_options = {}
super(LinkTestCase, self).__init__(*args, **kwargs)
def forward_expected(self, link, inputs):
raise NotImplementedError('forward_expected() is not implemented.')
def generate_grad_outputs(self, outputs_template):
grad_outputs = tuple([
numpy.random.uniform(-1, 1, a.shape).astype(a.dtype)
for a in outputs_template])
return grad_outputs
def test_forward(self, backend_config):
"""Tests forward computation."""
if self.skip_forward_test:
raise unittest.SkipTest('skip_forward_test is set')
self.backend_config = backend_config
self.before_test('test_forward')
inits = self._generate_params()
link = self._create_link(inits, backend_config)
inputs_np = self._generate_inputs()
inputs_xp = backend_config.get_array(inputs_np)
inputs_xp = self._to_noncontiguous_as_needed(inputs_xp)
input_vars = tuple([chainer.Variable(i) for i in inputs_xp])
# Compute forward of the link and initialize its parameters.
output_vars = self._forward(link, input_vars, backend_config)
outputs_xp = [v.array for v in output_vars]
# Expected outputs are computed on the CPU so the link must be
# transferred.
link.to_device(backend.CpuDevice())
expected_outputs_np = self._forward_expected(link, inputs_np)
self.check_forward_outputs(
tuple(outputs_xp), expected_outputs_np)
def test_backward(self, backend_config):
"""Tests backward computation."""
if self.skip_backward_test:
raise unittest.SkipTest('skip_backward_test is set')
self.backend_config = backend_config
self.before_test('test_backward')
# avoid cyclic import
from chainer import gradient_check
def do_check():
# Generate an initialized temporary link that is already forward
# propagated. This link is only used to generate necessary data,
# i.e. inputs, outputs and parameters for the later gradient check
# and the link itself will be discarded.
inits = self._generate_params()
link, inputs, outputs = self._create_initialized_link(
inits, backend_config)
# Extract the parameter ndarrays from the initialized link.
params = _get_link_params(link, self.param_names)
params = [p.array for p in params]
# Prepare inputs, outputs and upstream gradients for the gradient
# check.
cpu_device = backend.CpuDevice()
outputs = [cpu_device.send(output) for output in outputs]
grad_outputs = self._generate_grad_outputs(outputs)
grad_outputs = backend_config.get_array(grad_outputs)
inputs = self._to_noncontiguous_as_needed(inputs)
params = self._to_noncontiguous_as_needed(params)
grad_outputs = self._to_noncontiguous_as_needed(grad_outputs)
# Create the link used for the actual forward propagation in the
# gradient check.
forward_link, _, _ = self._create_initialized_link(
inits, backend_config)
def forward(inputs, ps):
# Use generated parameters.
with forward_link.init_scope():
for param_name, p in zip(self.param_names, ps):
setattr(forward_link, param_name, p)
return self._forward(forward_link, inputs, backend_config)
with LinkTestError.raise_if_fail(
'backward is not implemented correctly'):
gradient_check._check_backward_with_params(
forward, inputs, grad_outputs, params=params,
dtype=self.numerical_grad_dtype,
detect_nondifferentiable=self.dodge_nondifferentiable,
**self.check_backward_options)
if self.dodge_nondifferentiable:
while True:
try:
do_check()
except gradient_check.NondifferentiableError:
continue
else:
break
else:
do_check()
def _forward_expected(self, link, inputs):
assert all(isinstance(x, numpy.ndarray) for x in inputs)
outputs = self.forward_expected(link, inputs)
        _check_array_types(outputs, backend.CpuDevice(), 'forward_expected')
return outputs
def _generate_grad_outputs(self, outputs_template):
assert all(isinstance(x, numpy.ndarray) for x in outputs_template)
grad_outputs = self.generate_grad_outputs(outputs_template)
_check_array_types(
grad_outputs, backend.CpuDevice(), 'generate_grad_outputs')
return grad_outputs
class LinkInitializersTestCase(_LinkTestBase, unittest.TestCase):
"""A base class for link parameter initializer test cases.
Link test cases can inherit from this class to define a set of link tests
for parameter initialization.
.. rubric:: Required methods
Each concrete class must at least override the following methods.
``generate_params(self)``
        Returns a tuple of initializer-likes. The tuple should contain an
initializer-like for each initializer-like argument, i.e. the
parameters to the link constructor. These will be passed to
``create_link``.
``create_link(self, initializers)``
Returns a link. The link should be initialized with the given
initializer-likes ``initializers``. ``initializers`` is a tuple of
same length as the number of parameters.
``generate_inputs(self)``
Returns a tuple of input arrays of type :class:`numpy.ndarray`.
``forward(self, link, inputs, device)``
Implements the target forward function.
``link`` is a link created by ``create_link`` and
``inputs`` is a tuple of :class:`~chainer.Variable`\\ s.
This method is expected to return the output
:class:`~chainer.Variable`\\ s with the same array types as the inputs.
``device`` is the device corresponding to the input arrays.
        A default implementation is provided for links that only take the
        inputs defined in ``generate_inputs`` (wrapped in
        :class:`~chainer.Variable`\\ s) and return nothing but output
        :class:`~chainer.Variable`\\ s in their forward computation.
``get_initializers(self)``
Returns a tuple with the same length as the number of initializers that
the constructor of the link accepts. Each element in the tuple is a
        container itself, listing all initializer-likes that should be tested.
Each initializer-like in the tuple is tested one at a time by being
passed to ``create_link``. When the length of the tuple is greater than
one (i.e. if the link accepts multiple initializers), the ones not
being tested are replaced by the ones returned by `generate_params`.
        Initializer-likes returned here should be deterministic since the test
        will invoke them multiple times to check correctness.
For testing initializer arguments that can be non-initializer values
such as ``None``, one can use the ``InitializerArgument``, defining a
pair of the link constructor argument and actual initializer-like used
by the link.
This method must be implemented if ``skip_initializers_test`` is
``False`` in which case the initializers test is executed.
.. rubric:: Optional methods
Each concrete class may override the following methods.
``before_test(self, test_name)``
A callback method called before each test.
Typically a skip logic is implemented by conditionally raising
:class:`unittest.SkipTest`.
        ``test_name`` is always ``'test_initializers'``.
.. rubric:: Attributes
The concrete class can override the following attributes to control the
behavior of the tests.
``param_names`` (list of str):
A list of strings with all the names of the parameters that should be
tested. E.g. ``['gamma', 'beta']`` for the batch normalization link.
``[]`` by default.
``contiguous`` (None or 'C'):
Specifies the contiguousness of incoming arrays (i.e. inputs,
        parameters and gradients). If ``None``, the
arrays will be non-contiguous as long as possible. If ``'C'``, the
arrays will be C-contiguous. ``None`` by default.
.. note::
This class assumes :func:`chainer.testing.inject_backend_tests`
is used together. See the example below.
.. note::
When implementing :class:`~chainer.testing.LinkTestCase` and
:class:`~chainer.testing.LinkInitializersTestCase` to test both
forward/backward and initializers, it is often convenient to refactor
out common logic in a separate class.
.. admonition:: Example
.. testcode::
@chainer.testing.inject_backend_tests(
None,
[
{}, # CPU
{'use_cuda': True}, # GPU
])
class TestLinear(chainer.testing.LinkInitializersTestCase):
param_names = ['W', 'b']
def generate_params(self):
initialW = numpy.random.uniform(
-1, 1, (3, 2)).astype(numpy.float32)
initial_bias = numpy.random.uniform(
-1, 1, (3,)).astype(numpy.float32)
return initialW, initial_bias
def generate_inputs(self):
x = numpy.random.uniform(
-1, 1, (1, 2)).astype(numpy.float32)
return x,
def create_link(self, initializers):
initialW, initial_bias = initializers
link = chainer.links.Linear(
2, 3, initialW=initialW, initial_bias=initial_bias)
return link
def forward(self, link, inputs, device):
x, = inputs
return link(x),
def get_initializers(self):
initialW = [initializers.Constant(1), 2]
initial_bias = [initializers.Constant(2), 3,
chainer.testing.link.InitializerArgument(None, 0)]
return initialW, initial_bias
.. seealso::
:class:`~chainer.testing.LinkTestCase`
:class:`~chainer.testing.FunctionTestCase`
"""
check_initializers_options = None
def __init__(self, *args, **kwargs):
self.check_initializers_options = {}
super(LinkInitializersTestCase, self).__init__(*args, **kwargs)
def get_initializers(self):
raise NotImplementedError('get_initializers is not implemented.')
def test_initializers(self, backend_config):
"""Tests that the parameters of a links are correctly initialized."""
self.backend_config = backend_config
self.before_test('test_initializers')
params_inits = self._get_initializers()
# TODO(hvy): Reduce the number of loop iterations by checking
# multiple parameters simultaneously.
for i_param, param_inits in enumerate(params_inits):
# When testing an initializer for a particular parameter, other
# initializers are picked from generate_params.
inits = self._generate_params()
inits = list(inits)
for init in param_inits:
inits[i_param] = init
self._test_single_initializer(i_param, inits, backend_config)
def _get_initializers(self):
params_inits = self.get_initializers()
if not isinstance(params_inits, (tuple, list)):
raise TypeError(
'`get_initializers` must return a tuple or a list.')
for param_inits in params_inits:
if not isinstance(param_inits, (tuple, list)):
raise TypeError(
'`get_initializers` must return a tuple or a list of '
'tuples or lists.')
for init in param_inits:
_check_generated_initializer(init)
return params_inits
def _test_single_initializer(self, i_param, inits, backend_config):
# Given a set of initializer constructor arguments for the link, create
# and initialize a link with those arguments. `i_param` holds the index
# of the argument that should be tested among these.
inits_orig = inits
inits = [_get_initializer_argument_value(i) for i in inits]
link, _, _ = self._create_initialized_link(inits, backend_config)
# Extract the parameters from the initialized link.
params = _get_link_params(link, self.param_names)
# Convert the parameter of interest into a NumPy ndarray.
cpu_device = backend.CpuDevice()
param = params[i_param]
param_xp = param.array
param_np = cpu_device.send(param_xp)
        # The expected values of the parameter are decided by the given
        # initializer. If the initializer is `None`, it should have been
        # wrapped in an InitializerArgument along with the expected
        # initializer that the link should default to in case of `None`.
#
# Note that for this to work, the expected parameter must be inferred
# deterministically.
expected_init = _get_expected_initializer(inits_orig[i_param])
expected_np = numpy.empty_like(param_np)
expected_init(expected_np)
# Compare the values of the expected and actual parameter.
_check_arrays_equal(
(expected_np,), (param_np,), LinkTestError,
**self.check_initializers_options)
def _check_generated_initializer(init):
if isinstance(init, InitializerArgument):
init = init.expected_initializer
elif init is None:
raise ValueError(
            'A None initializer must be wrapped in an InitializerArgument '
            'along with the expected initializer it falls back to.')
initializers._check_is_initializer_like(init)
def _get_initializer_argument_value(init):
# Returns the initializer that should be passed to the link constructor.
if isinstance(init, InitializerArgument):
return init.argument_value
return init
def _get_expected_initializer(init):
# Returns the expected initializer for the given initializer.
if isinstance(init, InitializerArgument):
init = init.expected_initializer
assert init is not None
if not isinstance(init, chainer.Initializer):
init = chainer.initializers._get_initializer(init)
return init
def _get_link_params(link, param_names):
params = []
for name in param_names:
param = getattr(link, name, None)
if param is None:
raise LinkTestError.fail(
'Link does not have a parameter named \'{}\'.'.format(name))
params.append(param)
return params
def _check_array_types(arrays, device, func_name):
if not isinstance(arrays, tuple):
raise TypeError(
'`{}()` must return a tuple, '
'not {}.'.format(func_name, type(arrays)))
if not all(
a is None or isinstance(a, device.supported_array_types)
for a in arrays):
raise TypeError(
'{}() must return a tuple of arrays supported by device {} or'
' None.\nActual: {}'.format(
func_name, device, tuple([type(a) for a in arrays])))
def _check_variable_types(vars, device, func_name, test_error_cls):
assert issubclass(test_error_cls, _TestError)
if not isinstance(vars, tuple):
test_error_cls.fail(
'`{}()` must return a tuple, '
'not {}.'.format(func_name, type(vars)))
if not all(isinstance(a, chainer.Variable) for a in vars):
test_error_cls.fail(
'{}() must return a tuple of Variables.\n'
'Actual: {}'.format(
func_name, ', '.join(str(type(a)) for a in vars)))
if not all(isinstance(a.array, device.supported_array_types)
for a in vars):
test_error_cls.fail(
'{}() must return a tuple of Variables of arrays supported by '
'device {}.\n'
'Actual: {}'.format(
func_name, device,
', '.join(str(type(a.array)) for a in vars)))
def _check_arrays_equal(
actual_arrays, expected_arrays, test_error_cls, **opts):
# `opts` is passed through to `testing.assert_all_close`.
# Check all outputs are equal to expected values
assert issubclass(test_error_cls, _TestError)
message = None
detail_message = None
while True:
# Check number of arrays
if len(actual_arrays) != len(expected_arrays):
message = (
'Number of outputs ({}, {}) does not match'.format(
len(actual_arrays), len(expected_arrays)))
break
# Check dtypes and shapes
dtypes_match = all([
y.dtype == ye.dtype
for y, ye in zip(actual_arrays, expected_arrays)])
shapes_match = all([
y.shape == ye.shape
for y, ye in zip(actual_arrays, expected_arrays)])
if not (shapes_match and dtypes_match):
message = 'Shapes and/or dtypes do not match'
break
# Check values
errors = []
for i, (actual, expected) in (
enumerate(zip(actual_arrays, expected_arrays))):
try:
array_module.assert_allclose(actual, expected, **opts)
except AssertionError as e:
errors.append((i, e))
if errors:
message = (
'Outputs do not match the expected values.\n'
'Indices of outputs that do not match: {}'.format(
', '.join(str(i) for i, e in errors)))
f = six.StringIO()
for i, e in errors:
f.write('Error details of output [{}]:\n'.format(i))
f.write(str(e))
f.write('\n')
detail_message = f.getvalue()
break
break
if message is not None:
msg = (
'{}\n'
'Expected shapes and dtypes: {}\n'
'Actual shapes and dtypes: {}\n'.format(
message,
utils._format_array_props(expected_arrays),
utils._format_array_props(actual_arrays)))
if detail_message is not None:
msg += '\n\n' + detail_message
test_error_cls.fail(msg)
| 46,984
| 36.860596
| 79
|
py
|
chainer
|
chainer-master/chainer/distributions/gumbel.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.math import exponential
from chainer.functions.math import lgamma
from chainer.utils import cache
EULER = 0.57721566490153286060651209008240243104215933593992
class Gumbel(distribution.Distribution):
"""Gumbel Distribution.
The probability density function of the distribution is expressed as
.. math::
f(x) = \\frac{1}{\\eta} \
\\exp\\left\\{ - \\frac{x - \\mu}{\\eta} \\right\\} \
\\exp\\left[-\\exp\\left\\{-\\frac{x - \\mu}{\\eta} \
\\right\\}\\right],
Args:
loc(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution :math:`\\mu`.
scale(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution :math:`\\eta`.
"""
def __init__(self, loc, scale):
super(Gumbel, self).__init__()
self.__loc = loc
self.__scale = scale
@cache.cached_property
def loc(self):
return chainer.as_variable(self.__loc)
@cache.cached_property
def scale(self):
return chainer.as_variable(self.__scale)
@cache.cached_property
def _log_scale(self):
return exponential.log(self.scale)
@property
def batch_shape(self):
return self.loc.shape
@cache.cached_property
def entropy(self):
return self._log_scale + (EULER + 1)
@property
def event_shape(self):
return ()
@property
def _is_gpu(self):
return isinstance(self.loc.data, cuda.ndarray)
def log_prob(self, x):
y = (x - self.loc) / self.scale
return - self._log_scale - y - exponential.exp(-y)
@cache.cached_property
def mean(self):
return self.loc + EULER * self.scale
@property
def params(self):
return {'loc': self.loc, 'scale': self.scale}
def sample_n(self, n):
xp = chainer.backend.get_array_module(self.loc)
if xp is cuda.cupy:
eps = xp.random.gumbel(
size=(n,)+self.batch_shape, dtype=self.loc.dtype)
else:
eps = xp.random.gumbel(
size=(n,)+self.batch_shape).astype(self.loc.dtype)
noise = self.scale * eps + self.loc
return noise
@property
def support(self):
return 'real'
@cache.cached_property
def variance(self):
return (numpy.pi ** 2 / 6) * self.scale ** 2
@distribution.register_kl(Gumbel, Gumbel)
def _kl_gumbel_gumbel(dist1, dist2):
scale_1d2 = dist1.scale / dist2.scale
return (
dist2._log_scale
- dist1._log_scale
+ EULER * (scale_1d2 - 1.)
+ exponential.exp((dist2.loc - dist1.loc) / dist2.scale
+ lgamma.lgamma(scale_1d2 + 1.))
- 1
+ (dist1.loc - dist2.loc) / dist2.scale)
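# A brief usage sketch (values are arbitrary): the distribution is built from
# plain arrays or Variables, and its statistics are differentiable Variables.
#
# loc = numpy.zeros((3,), dtype=numpy.float32)
# scale = numpy.ones((3,), dtype=numpy.float32)
# d = Gumbel(loc, scale)
# d.mean # loc + EULER * scale
# d.log_prob(loc) # log density at x = loc
# d.sample(sample_shape=(10,)) # shape (10, 3) samples via sample_n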
| 2,898
| 25.59633
| 73
|
py
|
chainer
|
chainer-master/chainer/distributions/chisquare.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.math import digamma
from chainer.functions.math import exponential
from chainer.functions.math import lgamma
from chainer.utils import cache
class Chisquare(distribution.Distribution):
"""Chi-Square Distribution.
The probability density function of the distribution is expressed as
.. math::
p(x;k) = \\frac{1}{2^{k/2}\\Gamma(k/2)}x^{k/2-1}e^{-x/2}
Args:
k(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution.
"""
def __init__(self, k):
super(Chisquare, self).__init__()
self.__k = k
@cache.cached_property
def k(self):
return chainer.as_variable(self.__k)
@cache.cached_property
def _half_k(self):
return 0.5 * self.k
@property
def batch_shape(self):
return self.k.shape
@cache.cached_property
def entropy(self):
return (
self._half_k
+ numpy.log(2.)
+ lgamma.lgamma(self._half_k)
+ (1 - self._half_k) * digamma.digamma(self._half_k))
@property
def event_shape(self):
return ()
def log_prob(self, x):
return (
- lgamma.lgamma(self._half_k)
- self._half_k * numpy.log(2.)
+ (self._half_k - 1) * exponential.log(x)
- 0.5 * x)
@cache.cached_property
def mean(self):
return self.k
@property
def params(self):
return {'k': self.k}
def sample_n(self, n):
xp = chainer.backend.get_array_module(self.k)
if xp is cuda.cupy:
eps = xp.random.chisquare(
self.k.data, (n,)+self.k.shape, dtype=self.k.dtype)
else:
eps = xp.random.chisquare(
self.k.data, (n,)+self.k.shape).astype(self.k.dtype)
noise = chainer.Variable(eps)
return noise
@property
def support(self):
return 'positive'
@cache.cached_property
def variance(self):
return 2 * self.k
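
A brief, hypothetical usage sketch, assuming Chisquare is exposed via chainer.distributions and sampled through the base-class sample() helper:

import numpy as np
import chainer.distributions as D

# Two chi-square distributions with k = 3 and k = 8 degrees of freedom.
dist = D.Chisquare(np.array([3., 8.], dtype=np.float32))
x = dist.sample(sample_shape=(4,))   # strictly positive samples, shape (4, 2)
lp = dist.log_prob(x)                # log-density at the sampled points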
file_length: 2,110 | avg_line_length: 23.264368 | max_line_length: 72 | extension_type: py

repo: chainer | file: chainer-master/chainer/distributions/cauchy.py
import warnings
import numpy
import chainer
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.math import exponential
from chainer.functions.math import trigonometric
from chainer.utils import cache
def _cauchy_icdf(x):
x = chainer.as_variable(x)
h = (x - 0.5) * numpy.pi
y = chainer.functions.tan(h)
return y
class Cauchy(distribution.Distribution):
"""Cauchy Distribution.
The probability density function of the distribution is expressed as
.. math::
p(x;x_0,\\gamma) = \\frac{1}{\\pi}\\frac{\\gamma}{(x-x_0)^2+\\gamma^2}
Args:
loc(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing the location :math:`x_0`.
scale(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing the scale :math:`\\gamma`.
"""
def __init__(self, loc, scale):
super(Cauchy, self).__init__()
self.__loc = loc
self.__scale = scale
@cache.cached_property
def loc(self):
return chainer.as_variable(self.__loc)
@cache.cached_property
def scale(self):
return chainer.as_variable(self.__scale)
@property
def batch_shape(self):
return self.loc.shape
def cdf(self, x):
return (
(1 / numpy.pi
* trigonometric.arctan((x - self.loc) / self.scale))
+ 0.5)
@cache.cached_property
def entropy(self):
return exponential.log(4 * numpy.pi * self.scale)
@property
def event_shape(self):
return ()
def icdf(self, x):
return self.loc + self.scale * _cauchy_icdf(x)
@property
def _is_gpu(self):
return isinstance(self.loc.data, cuda.ndarray)
def log_prob(self, x):
return (
- numpy.log(numpy.pi)
+ exponential.log(self.scale)
- exponential.log((x - self.loc)**2 + self.scale**2))
@cache.cached_property
def mean(self):
warnings.warn('Mean of the cauchy distribution is undefined.',
RuntimeWarning)
xp = chainer.backend.get_array_module(self.loc)
return chainer.as_variable(xp.full_like(self.loc.data, xp.nan))
@property
def params(self):
return {'loc': self.loc, 'scale': self.scale}
def sample_n(self, n):
xp = chainer.backend.get_array_module(self.loc)
if xp is cuda.cupy:
eps = xp.random.standard_cauchy(
(n,)+self.loc.shape, dtype=self.loc.dtype)
else:
eps = xp.random.standard_cauchy(
(n,)+self.loc.shape).astype(self.loc.dtype)
noise = self.scale * eps + self.loc
return noise
@property
def support(self):
return 'real'
@cache.cached_property
def variance(self):
warnings.warn('Variance of the cauchy distribution is undefined.',
RuntimeWarning)
xp = chainer.backend.get_array_module(self.loc)
return chainer.as_variable(xp.full_like(self.loc.data, xp.nan))
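
A small illustrative sketch (not from the original file) showing the cdf/icdf round trip, assuming the class is reachable as chainer.distributions.Cauchy:

import numpy as np
import chainer.distributions as D

dist = D.Cauchy(loc=np.zeros(2, np.float32), scale=np.ones(2, np.float32))
x = dist.sample(sample_shape=(3,))   # heavy-tailed samples, shape (3, 2)
u = dist.cdf(x)                      # values in (0, 1)
x_back = dist.icdf(u)                # recovers x up to numerical error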
file_length: 3,108 | avg_line_length: 26.27193 | max_line_length: 78 | extension_type: py

repo: chainer | file: chainer-master/chainer/distributions/pareto.py
import chainer
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.array import where
from chainer.functions.math import exponential
from chainer import utils
from chainer.utils import cache
class Pareto(distribution.Distribution):
"""Pareto Distribution.
.. math::
f(x) = \\alpha x_m^{\\alpha}(x)^{-(\\alpha+1)},
Args:
scale(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution :math:`x_m`.
alpha(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution :math:`\\alpha`.
"""
def __init__(self, scale, alpha):
super(Pareto, self).__init__()
self.__scale = scale
self.__alpha = alpha
@cache.cached_property
def scale(self):
return chainer.as_variable(self.__scale)
@cache.cached_property
def alpha(self):
return chainer.as_variable(self.__alpha)
@cache.cached_property
def _log_scale(self):
return exponential.log(self.scale)
@cache.cached_property
def _log_alpha(self):
return exponential.log(self.alpha)
@property
def batch_shape(self):
return self.scale.shape
@cache.cached_property
def entropy(self):
return (
- self._log_alpha
+ self._log_scale
+ 1. / self.alpha
+ 1.)
@property
def event_shape(self):
return ()
@property
def _is_gpu(self):
return isinstance(self.scale.data, cuda.ndarray)
def log_prob(self, x):
x = chainer.as_variable(x)
logp = (
self._log_alpha
+ self.alpha * self._log_scale
- (self.alpha + 1) * exponential.log(x))
xp = logp.xp
return where.where(
utils.force_array(x.data >= self.scale.data),
logp,
xp.array(-xp.inf, logp.dtype))
@cache.cached_property
def mean(self):
mean = (self.alpha * self.scale / (self.alpha - 1))
xp = mean.xp
return where.where(
self.alpha.data > 1,
mean,
xp.array(xp.inf, mean.dtype))
@property
def params(self):
return {'scale': self.scale, 'alpha': self.alpha}
def sample_n(self, n):
xp = chainer.backend.get_array_module(self.scale)
if xp is cuda.cupy:
eps = xp.random.pareto(
self.alpha.data, (n,)+self.batch_shape, dtype=self.alpha.dtype)
else:
eps = xp.random.pareto(
self.alpha.data, (n,)+self.batch_shape
).astype(self.alpha.dtype)
noise = self.scale * (eps + 1)
return noise
@property
def support(self):
return '[scale, inf]'
@cache.cached_property
def variance(self):
var = (
self.scale ** 2
* self.alpha
/ (self.alpha - 1) ** 2
/ (self.alpha - 2))
xp = var.xp
return where.where(
self.alpha.data > 2,
var,
xp.array(xp.inf, var.dtype))
@distribution.register_kl(Pareto, Pareto)
def _kl_pareto_pareto(dist1, dist2):
kl = (
dist2.alpha * (dist1._log_scale - dist2._log_scale)
+ dist1._log_alpha
- dist2._log_alpha
+ (dist2.alpha - dist1.alpha) / dist1.alpha)
xp = kl.xp
return where.where(
dist1.scale.data >= dist2.scale.data,
kl,
xp.array(xp.inf, kl.dtype))
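
A hedged usage sketch, assuming Pareto is exported from chainer.distributions; the parameter values below are arbitrary examples:

import numpy as np
import chainer.distributions as D

# Pareto with minimum value x_m = 1 and shape alpha = 3 (finite mean and variance).
dist = D.Pareto(scale=np.ones(2, np.float32), alpha=np.full(2, 3., np.float32))
x = dist.sample(sample_shape=(4,))   # samples are >= scale
lp = dist.log_prob(x)                # -inf is returned below the support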
file_length: 3,490 | avg_line_length: 25.24812 | max_line_length: 79 | extension_type: py

repo: chainer | file: chainer-master/chainer/distributions/dirichlet.py
import numpy
import chainer
from chainer import distribution
from chainer.functions.array import expand_dims
from chainer.functions.math import digamma
from chainer.functions.math import exponential
from chainer.functions.math import lgamma
from chainer.functions.math import sum as sum_mod
from chainer.utils import cache
def _lbeta(x):
return (
sum_mod.sum(lgamma.lgamma(x), axis=-1)
- lgamma.lgamma(sum_mod.sum(x, axis=-1)))
class Dirichlet(distribution.Distribution):
"""Dirichlet Distribution.
The probability density function of the distribution is expressed as
.. math::
p(x) = \\frac{\\Gamma(\\sum_{i=1}^{K} \\alpha_i)}
{\\prod_{i=1}^{K} \\Gamma (\\alpha_i)}
\\prod_{i=1}^{K} {x_i}^{\\alpha_i-1}
Args:
alpha(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution.
"""
def __init__(self, alpha):
self.__alpha = alpha
@cache.cached_property
def alpha(self):
return chainer.as_variable(self.__alpha)
@cache.cached_property
def alpha0(self):
return sum_mod.sum(self.alpha, axis=-1)
@property
def batch_shape(self):
return self.alpha.shape[:-1]
@cache.cached_property
def entropy(self):
return (
_lbeta(self.alpha)
+ ((self.alpha0 - self.event_shape[0])
* digamma.digamma(self.alpha0))
- sum_mod.sum(
(self.alpha - 1) * digamma.digamma(self.alpha),
axis=-1))
@property
def event_shape(self):
return self.alpha.shape[-1:]
def log_prob(self, x):
return (
- _lbeta(self.alpha)
+ sum_mod.sum(
(self.alpha - 1) * exponential.log(x),
axis=-1))
@cache.cached_property
def mean(self):
alpha0 = expand_dims.expand_dims(self.alpha0, axis=-1)
return self.alpha / alpha0
@property
def params(self):
return {'alpha': self.alpha}
def sample_n(self, n):
obo_alpha = self.alpha.data.reshape(-1, self.event_shape[0])
xp = chainer.backend.get_array_module(self.alpha)
if xp is numpy:
eps = [
xp.random.dirichlet(one_alpha, size=(n,)).astype(numpy.float32)
for one_alpha in obo_alpha]
else:
eps = [
xp.random.dirichlet(one_alpha, size=(n,)).astype(numpy.float32)
for one_alpha in obo_alpha]
eps = [xp.expand_dims(eps_, 0) for eps_ in eps]
eps = xp.swapaxes(xp.vstack(eps), 0, 1)
eps = eps.reshape((n,) + self.alpha.shape)
noise = chainer.Variable(eps)
return noise
@property
def support(self):
return '[0, 1]'
@cache.cached_property
def variance(self):
alpha0 = expand_dims.expand_dims(self.alpha0, axis=-1)
return (
self.alpha
* (alpha0 - self.alpha)
/ alpha0 ** 2
/ (alpha0 + 1))
@distribution.register_kl(Dirichlet, Dirichlet)
def _kl_dirichlet_dirichlet(dist1, dist2):
return (
- _lbeta(dist1.alpha)
+ _lbeta(dist2.alpha)
+ sum_mod.sum(
(dist1.alpha - dist2.alpha)
* (digamma.digamma(dist1.alpha)
- expand_dims.expand_dims(
digamma.digamma(dist1.alpha0),
axis=-1)),
axis=-1))
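
A minimal sketch (not part of the original file), assuming the class is exported as chainer.distributions.Dirichlet; the alpha values are illustrative only:

import numpy as np
import chainer.distributions as D

# One Dirichlet distribution over the 3-dimensional simplex.
alpha = np.array([[1., 2., 3.]], dtype=np.float32)   # batch_shape (1,), event_shape (3,)
dist = D.Dirichlet(alpha)
x = dist.sample(sample_shape=(5,))   # each event axis sums to 1, shape (5, 1, 3)
lp = dist.log_prob(x)                # one log-density per sample, shape (5, 1)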
| 3,449
| 27.04878
| 79
|
py
|
chainer
|
chainer-master/chainer/distributions/uniform.py
|
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.array import broadcast
from chainer.functions.array import where
from chainer.functions.math import clip
from chainer.functions.math import exponential
from chainer.functions.math import sqrt
from chainer import utils
from chainer.utils import argument
from chainer.utils import cache
class Uniform(distribution.Distribution):
"""Uniform Distribution.
The probability density function of the distribution is expressed as
.. math::
p(x; l, h) = \\begin{cases}
\\frac{1}{h - l} & \\text{if }l \\leq x \\leq h \\\\
0 & \\text{otherwise}
\\end{cases}
Args:
low(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing the lower bound :math:`l`.
high(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing the upper bound :math:`h`.
"""
def __init__(self, **kwargs):
low, high, loc, scale = None, None, None, None
if kwargs:
low, high, loc, scale = argument.parse_kwargs(
kwargs, ('low', low), ('high', high), ('loc', loc),
('scale', scale))
self._use_low_high = low is not None and high is not None
self._use_loc_scale = loc is not None and scale is not None
if not (self._use_low_high ^ self._use_loc_scale):
raise ValueError(
'Either `low, high` or `loc, scale` (not both) must have a '
'value.')
self.__low = low
self.__high = high
self.__loc = loc
self.__scale = scale
@cache.cached_property
def low(self):
if self._use_low_high:
return chainer.as_variable(self.__low)
else:
return self.loc
@cache.cached_property
def high(self):
if self._use_low_high:
return chainer.as_variable(self.__high)
else:
return self.loc + self.scale
@cache.cached_property
def loc(self):
if self._use_loc_scale:
return chainer.as_variable(self.__loc)
else:
return self.low
@cache.cached_property
def scale(self):
if self._use_loc_scale:
return chainer.as_variable(self.__scale)
else:
return self.high - self.low
@property
def batch_shape(self):
return self.low.shape
def cdf(self, x):
return clip.clip((x - self.loc) / self.scale, 0., 1.)
@cache.cached_property
def entropy(self):
return exponential.log(self.scale)
@property
def event_shape(self):
return ()
def icdf(self, x):
return x * self.scale + self.loc
def log_prob(self, x):
if not isinstance(x, chainer.Variable):
x = chainer.Variable(x)
xp = backend.get_array_module(x)
logp = broadcast.broadcast_to(
-exponential.log(self.scale), x.shape)
return where.where(
utils.force_array(
(x.data >= self.low.data) & (x.data <= self.high.data)),
logp,
xp.array(-xp.inf, logp.dtype))
@cache.cached_property
def mean(self):
return (self.high + self.low) / 2
@property
def params(self):
if self._use_low_high:
return {'low': self.low, 'high': self.high}
else:
return {'loc': self.loc, 'scale': self.scale}
def sample_n(self, n):
xp = backend.get_array_module(self.low)
if xp is cuda.cupy:
eps = xp.random.uniform(
0, 1, (n,) + self.low.shape, dtype=self.low.dtype)
else:
eps = (
xp.random.uniform(0, 1, (n,) + self.low.shape)
.astype(self.low.dtype))
noise = self.icdf(eps)
return noise
@cache.cached_property
def stddev(self):
return sqrt.sqrt(self.variance)
@property
def support(self):
return '[low, high]'
@cache.cached_property
def variance(self):
return self.scale ** 2 / 12
@distribution.register_kl(Uniform, Uniform)
def _kl_uniform_uniform(dist1, dist2):
xp = backend.get_array_module(dist1.low)
is_inf = xp.logical_or(dist1.high.data > dist2.high.data,
dist1.low.data < dist2.low.data)
kl = (- exponential.log(dist1.high - dist1.low)
+ exponential.log(dist2.high - dist2.low))
inf = xp.array(xp.inf, dist1.high.dtype)
return where.where(is_inf, inf, kl)
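
A short usage sketch under the assumption that Uniform is available as chainer.distributions.Uniform; note the constructor takes low/high (or loc/scale) as keyword arguments only:

import numpy as np
import chainer.distributions as D

# Uniform on [-1, 2].
dist = D.Uniform(low=np.full(3, -1., np.float32), high=np.full(3, 2., np.float32))
x = dist.sample(sample_shape=(4,))   # shape (4, 3), values in [-1, 2)
lp = dist.log_prob(x)                # -log(3) inside the support, -inf outside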
file_length: 4,642 | avg_line_length: 28.01875 | max_line_length: 76 | extension_type: py

repo: chainer | file: chainer-master/chainer/distributions/geometric.py
import chainer
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.math import exponential
from chainer.utils import cache
class Geometric(distribution.Distribution):
"""Geometric Distribution.
The probability mass function of the distribution is expressed as
.. math::
Pr(x = k) = p(1-p)^{k-1},
for k = 1, 2, 3, ...,
Args:
p(:class:`~chainer.Variable` or :ref:`ndarray`):
Parameter of distribution.
"""
def __init__(self, p):
super(Geometric, self).__init__()
self.__p = p
@cache.cached_property
def p(self):
return chainer.as_variable(self.__p)
@property
def batch_shape(self):
return self.p.shape
@property
def event_shape(self):
return ()
@property
def _is_gpu(self):
return isinstance(self.p.data, cuda.ndarray)
def log_prob(self, x):
return (x - 1) * exponential.log(1 - self.p) + exponential.log(self.p)
@cache.cached_property
def mean(self):
return 1 / self.p
@property
def params(self):
return {'p': self.p}
def sample_n(self, n):
xp = chainer.backend.get_array_module(self.p)
if xp is cuda.cupy:
eps = xp.random.geometric(
self.p.data,
size=(n,)+self.batch_shape, dtype=self.p.dtype)
else:
eps = xp.random.geometric(
self.p.data,
size=(n,)+self.batch_shape).astype(self.p.dtype)
return chainer.Variable(eps)
@property
def support(self):
return 'positive integer'
@cache.cached_property
def variance(self):
return (1 - self.p) / self.p ** 2
@distribution.register_kl(Geometric, Geometric)
def _kl_geometric_geometric(dist1, dist2):
return (
(1 / dist1.p - 1)
* (exponential.log(1 - dist1.p) - exponential.log(1 - dist2.p))
+ exponential.log(dist1.p)
- exponential.log(dist2.p))
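
An illustrative sketch (not from the original file), assuming chainer.distributions.Geometric re-exports this class:

import numpy as np
import chainer.distributions as D

# Geometric distribution over the trial index of the first success (k = 1, 2, ...).
dist = D.Geometric(np.array([0.25, 0.5], dtype=np.float32))
x = dist.sample(sample_shape=(6,))   # positive integer values, shape (6, 2)
lp = dist.log_prob(x)                # (k - 1) * log(1 - p) + log(p)
m = dist.mean                        # 1 / p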
file_length: 2,020 | avg_line_length: 23.646341 | max_line_length: 78 | extension_type: py

repo: chainer | file: chainer-master/chainer/distributions/normal.py
import math
import numpy
import chainer
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.math import exponential
from chainer.functions.math import log_ndtr
from chainer.functions.math import ndtr
from chainer.functions.math import ndtri
from chainer.utils import argument
from chainer.utils import cache
import chainerx
ENTROPYC = 0.5 * math.log(2 * math.pi * math.e)
LOGPROBC = - 0.5 * math.log(2 * math.pi)
PROBC = 1. / (2 * math.pi) ** 0.5
class Normal(distribution.Distribution):
"""Normal Distribution.
The probability density function of the distribution is expressed as
.. math::
p(x;\\mu,\\sigma) = \\frac{1}{\\sqrt{2\\pi\\sigma^2}}
\\exp\\left(-\\frac{(x-\\mu)^2}{2\\sigma^2}\\right)
Args:
loc(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing the location :math:`\\mu`. This is the
mean parameter.
scale(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing the scale :math:`\\sigma`. Either `scale`
or `log_scale` (not both) must have a value.
log_scale(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing the scale :math:`\\log(\\sigma)`. Either
`scale` or `log_scale` (not both) must have a value.
"""
def __init__(self, loc, scale=None, **kwargs):
super(Normal, self).__init__()
log_scale = None
if kwargs:
log_scale, = argument.parse_kwargs(
kwargs, ('log_scale', log_scale))
if not (scale is None) ^ (log_scale is None):
raise ValueError(
'Either `scale` or `log_scale` (not both) must have a value.')
self.__loc = loc
self.__scale = scale
self.__log_scale = log_scale
if isinstance(loc, chainer.Variable):
self.__device = loc.device
else:
self.__device = chainer.backend.get_device_from_array(loc)
@cache.cached_property
def loc(self):
return chainer.as_variable(self.__loc)
@cache.cached_property
def scale(self):
if self.__scale is not None:
return chainer.as_variable(self.__scale)
else:
return exponential.exp(self.log_scale)
@cache.cached_property
def log_scale(self):
if self.__log_scale is not None:
return chainer.as_variable(self.__log_scale)
else:
return exponential.log(self.scale)
@property
def batch_shape(self):
return self.loc.shape
def cdf(self, x):
return ndtr.ndtr((x - self.loc) / self.scale)
@cache.cached_property
def entropy(self):
return self.log_scale + ENTROPYC
@property
def event_shape(self):
return ()
def icdf(self, x):
return self.loc + self.scale * ndtri.ndtri(x)
def log_cdf(self, x):
return log_ndtr.log_ndtr((x - self.loc) / self.scale)
def log_prob(self, x):
return (
LOGPROBC
- self.log_scale
- 0.5 * (x - self.loc) ** 2 / self.variance)
def log_survival_function(self, x):
return log_ndtr.log_ndtr((self.loc - x) / self.scale)
@cache.cached_property
def mean(self):
return self.loc
@property
def params(self):
return {'loc': self.loc, 'scale': self.scale}
def prob(self, x):
return (
(PROBC / self.scale)
* exponential.exp(
-0.5 * (x - self.loc) ** 2 / self.variance))
def sample_n(self, n):
dtype = self.loc.dtype
shape = (n,) + self.loc.shape
device = self.__device
if device.xp is cuda.cupy:
if dtype == numpy.float16:
# cuRAND supports only FP32 and FP64
eps = (
cuda.cupy.random.standard_normal(
shape, dtype=numpy.float32)
.astype(numpy.float16))
else:
eps = cuda.cupy.random.standard_normal(shape, dtype=dtype)
elif device.xp is chainerx:
# TODO(niboshi): Support random in ChainerX
eps = device.send(
numpy.random.standard_normal(shape).astype(dtype))
else:
eps = numpy.random.standard_normal(shape).astype(dtype)
return self.loc + self.scale * eps
@cache.cached_property
def stddev(self):
return self.scale
@property
def support(self):
return 'real'
def survival_function(self, x):
return ndtr.ndtr((self.loc - x) / self.scale)
@cache.cached_property
def variance(self):
return self.scale ** 2
@distribution.register_kl(Normal, Normal)
def _kl_normal_normal(dist1, dist2):
return (
dist2.log_scale
- dist1.log_scale
+ 0.5 * (dist1.variance + (dist1.loc - dist2.loc)**2) / dist2.variance
- 0.5)
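
A minimal sketch, assuming Normal is exported from chainer.distributions and that chainer.kl_divergence dispatches to the registered _kl_normal_normal above:

import numpy as np
import chainer
import chainer.distributions as D

d1 = D.Normal(np.zeros(3, np.float32), scale=np.ones(3, np.float32))
d2 = D.Normal(np.ones(3, np.float32), scale=np.full(3, 2., np.float32))
x = d1.sample(sample_shape=(10,))    # reparameterized draw: loc + scale * eps
kl = chainer.kl_divergence(d1, d2)   # elementwise closed-form KL, shape (3,)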
file_length: 5,011 | avg_line_length: 28.656805 | max_line_length: 79 | extension_type: py

repo: chainer | file: chainer-master/chainer/distributions/beta.py
import chainer
from chainer import backend
from chainer import distribution
from chainer.functions.array import where
from chainer.functions.math import digamma
from chainer.functions.math import exponential
from chainer.functions.math import lgamma
from chainer import utils
from chainer.utils import cache
def _lbeta(a, b):
return lgamma.lgamma(a) + lgamma.lgamma(b) - lgamma.lgamma(a + b)
class Beta(distribution.Distribution):
"""Beta Distribution.
The probability density function of the distribution is expressed as
.. math::
f(x) = \\frac{x^{\\alpha-1}(1-x)^{\\beta-1}}{B(\\alpha,\\beta)},
for :math:`0 < x < 1`, :math:`\\alpha > 0`, :math:`\\beta > 0`.
Args:
a(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing :math:`\\alpha`.
b(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing :math:`\\beta`.
"""
def __init__(self, a, b):
super(Beta, self).__init__()
self.__a = a
self.__b = b
@cache.cached_property
def a(self):
return chainer.as_variable(self.__a)
@cache.cached_property
def b(self):
return chainer.as_variable(self.__b)
@cache.cached_property
def _a_plus_b(self):
return self.a + self.b
@property
def batch_shape(self):
return self.a.shape
@cache.cached_property
def entropy(self):
apb = self._a_plus_b
return (
_lbeta(self.a, self.b)
- (self.a - 1) * digamma.digamma(self.a)
- (self.b - 1) * digamma.digamma(self.b)
+ (apb - 2) * digamma.digamma(apb))
@property
def event_shape(self):
return ()
def log_prob(self, x):
x = chainer.as_variable(x)
logp = (
(self.a - 1) * exponential.log(x)
+ (self.b - 1) * exponential.log(1 - x)
- _lbeta(self.a, self.b))
xp = logp.xp
return where.where(
utils.force_array((x.array >= 0) & (x.array <= 1)),
logp,
xp.array(-xp.inf, logp.dtype))
@cache.cached_property
def mean(self):
return self.a / self._a_plus_b
@property
def params(self):
return {'a': self.a, 'b': self.b}
def sample_n(self, n):
xp = backend.get_array_module(self.a)
eps = xp.random.beta(self.a.data, self.b.data, size=(n,)+self.a.shape)
noise = chainer.Variable(eps.astype(self.a.dtype))
return noise
@property
def support(self):
return '[0, 1]'
@cache.cached_property
def variance(self):
apb = self._a_plus_b
return self.a * self.b / apb ** 2 / (apb + 1)
@distribution.register_kl(Beta, Beta)
def _kl_beta_beta(dist1, dist2):
dist1_apb = dist1._a_plus_b
dist2_apb = dist2._a_plus_b
return (
- _lbeta(dist1.a, dist1.b)
+ _lbeta(dist2.a, dist2.b)
+ (dist1.a - dist2.a) * digamma.digamma(dist1.a)
+ (dist1.b - dist2.b) * digamma.digamma(dist1.b)
+ (dist2_apb - dist1_apb) * digamma.digamma(dist1_apb))
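
A hypothetical usage sketch, assuming the class is reachable as chainer.distributions.Beta:

import numpy as np
import chainer.distributions as D

dist = D.Beta(np.array([2.], np.float32), np.array([5.], np.float32))
x = dist.sample(sample_shape=(4,))   # values in (0, 1), shape (4, 1)
lp = dist.log_prob(x)                # -inf is returned outside [0, 1]
m, v = dist.mean, dist.variance      # a/(a+b) and ab/((a+b)^2 (a+b+1))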
file_length: 3,139 | avg_line_length: 26.304348 | max_line_length: 78 | extension_type: py

repo: chainer | file: chainer-master/chainer/distributions/one_hot_categorical.py
import chainer
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.math import exponential
import chainer.functions.math.sum as sum_mod
from chainer.utils import cache
def _stack(xp, xs, axis):
try:
return xp.stack(xs, axis)
except AttributeError:
# in case numpy<1.10, which does not have numpy.stack
return xp.concatenate(
[xp.expand_dims(x, axis) for x in xs],
axis=axis)
def _random_choice(xp, a, size, p):
try:
return xp.random.choice(a, size, p=p)
except ValueError:
# Validate the sum of the probabilities as NumPy PR #6131 (numpy>=1.10)
tol = xp.finfo(p.dtype).eps ** 0.5
p = p.astype(xp.float64)
xp.testing.assert_allclose(p.sum(), 1, rtol=0, atol=tol)
# Normalize the probabilities as they satisfy the validation above, and
# generate samples again
p /= p.sum()
return xp.random.choice(a, size, p=p)
class OneHotCategorical(distribution.Distribution):
"""OneHotCategorical Distribution.
Args:
p(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution.
"""
def __init__(self, p):
super(OneHotCategorical, self).__init__()
self.__p = p
@cache.cached_property
def p(self):
return chainer.as_variable(self.__p)
@cache.cached_property
def log_p(self):
return exponential.log(self.p)
@property
def batch_shape(self):
return self.p.shape[:-1]
@property
def event_shape(self):
return self.p.shape[-1:]
@property
def _is_gpu(self):
return isinstance(self.p.data, cuda.ndarray)
def log_prob(self, x):
return sum_mod.sum(self.log_p * x, axis=-1)
@cache.cached_property
def mean(self):
return self.p
@property
def params(self):
return {'p': self.p}
def sample_n(self, n):
xp = chainer.backend.get_array_module(self.p)
obo_p = self.p.data.reshape((-1,) + self.event_shape)
eye = xp.eye(self.event_shape[0], dtype=self.p.dtype)
eps = [_random_choice(xp, one_p.shape[0], size=(n,), p=one_p)
for one_p in obo_p]
eps = _stack(xp, eps, axis=1).reshape((n,)+self.batch_shape)
eps = eye[eps]
noise = chainer.Variable(eps)
return noise
@cache.cached_property
def variance(self):
return self.p * (1. - self.p)
@distribution.register_kl(OneHotCategorical, OneHotCategorical)
def _kl_one_hot_categorical_one_hot_categorical(dist1, dist2):
return sum_mod.sum(dist1.p * (dist1.log_p - dist2.log_p), axis=-1)
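
A small sketch (not part of the original file), assuming chainer.distributions.OneHotCategorical exposes this class; the probabilities must sum to 1 along the last axis:

import numpy as np
import chainer.distributions as D

# Categorical over 3 classes, encoded as one-hot vectors.
p = np.array([[0.2, 0.3, 0.5]], dtype=np.float32)   # batch_shape (1,), event_shape (3,)
dist = D.OneHotCategorical(p)
x = dist.sample(sample_shape=(4,))   # one-hot rows, shape (4, 1, 3)
lp = dist.log_prob(x)                # summed over the event axis, shape (4, 1)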
file_length: 2,684 | avg_line_length: 26.680412 | max_line_length: 79 | extension_type: py

repo: chainer | file: chainer-master/chainer/distributions/log_normal.py
import math
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.math import exponential
from chainer.utils import cache
LOGPROBC = - 0.5 * math.log(2 * math.pi)
class LogNormal(distribution.Distribution):
"""Logatithm Normal Distribution.
The probability density function of the distribution is expressed as
.. math::
p(x;\\mu,\\sigma) = \\frac{1}{\\sqrt{2\\pi\\sigma^2}x}
\\exp\\left(-\\frac{(\\log{x}-\\mu)^2}{2\\sigma^2}\\right)
Args:
mu(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution :math:`\\mu`.
sigma(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution :math:`\\sigma`.
"""
def __init__(self, mu, sigma):
self.__mu = mu
self.__sigma = sigma
@cache.cached_property
def mu(self):
return chainer.as_variable(self.__mu)
@cache.cached_property
def sigma(self):
return chainer.as_variable(self.__sigma)
@cache.cached_property
def _log_sigma(self):
return exponential.log(self.sigma)
@property
def batch_shape(self):
return self.mu.shape
@cache.cached_property
def entropy(self):
return 0.5 - LOGPROBC + self._log_sigma + self.mu
@property
def event_shape(self):
return ()
def log_prob(self, x):
logx = exponential.log(x)
return LOGPROBC - self._log_sigma - logx \
- (0.5 * (logx - self.mu) ** 2 / self.sigma ** 2)
@cache.cached_property
def mean(self):
return exponential.exp(self.mu + 0.5 * self.sigma ** 2)
@property
def params(self):
return {'mu': self.mu, 'sigma': self.sigma}
def sample_n(self, n):
xp = backend.get_array_module(self.mu)
if xp is cuda.cupy:
eps = xp.random.standard_normal(
(n,)+self.mu.shape, dtype=self.mu.dtype)
else:
eps = xp.random.standard_normal(
(n,)+self.mu.shape).astype(self.mu.dtype)
noise = self.sigma * eps
noise += self.mu
return exponential.exp(noise)
@property
def support(self):
return 'positive'
@cache.cached_property
def variance(self):
return (
exponential.exp(2 * self.mu + self.sigma ** 2)
* (exponential.exp(self.sigma ** 2) - 1))
@distribution.register_kl(LogNormal, LogNormal)
def _kl_log_normal_log_normal(dist1, dist2):
return (
0.5 * ((dist1.mu - dist2.mu)**2 + dist1.sigma**2) / dist2.sigma**2
- 0.5
+ dist2._log_sigma
- dist1._log_sigma)
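
A brief usage sketch under the assumption that LogNormal is exported from chainer.distributions:

import numpy as np
import chainer.distributions as D

dist = D.LogNormal(mu=np.zeros(2, np.float32), sigma=np.full(2, 0.5, np.float32))
x = dist.sample(sample_shape=(3,))   # exp(mu + sigma * eps), strictly positive
lp = dist.log_prob(x)
m = dist.mean                        # exp(mu + sigma^2 / 2)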
file_length: 2,707 | avg_line_length: 25.038462 | max_line_length: 74 | extension_type: py

repo: chainer | file: chainer-master/chainer/distributions/utils.py
import warnings
import chainer
from chainer.functions.array import where
from chainer.functions.math import exponential
from chainer import utils
class ModifiedXLogX(chainer.function_node.FunctionNode):
def __init__(self, logx):
self._logx = logx
def forward(self, inputs):
x, = inputs
self.x_zero = utils.force_array(x == 0)
y = utils.force_array(x * self._logx.array)
y[self.x_zero] = 0.
return y,
def backward(self, indexes, grad_outputs):
if self.x_zero.any():
warnings.warn(
'cannot calculate gradient for zero input.',
RuntimeWarning)
gy, = grad_outputs
dx = (1 + self._logx) * (1 - self.x_zero)
return gy * dx,
def _modified_xlogx(x):
x = chainer.as_variable(x)
xp = x.xp
return ModifiedXLogX(
exponential.log(
where.where(
utils.force_array(x.array > 0),
x,
xp.ones_like(x.array)))).apply((x,))[0]
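
A tiny illustrative sketch (not from the original file); it assumes the private helper keeps its location at chainer.distributions.utils._modified_xlogx and shows the zero-safe x*log(x) behaviour:

import numpy as np
from chainer.distributions.utils import _modified_xlogx

x = np.array([0., 0.5, 2.], dtype=np.float32)
y = _modified_xlogx(x)   # Variable([0., 0.5*log(0.5), 2*log(2)]); x == 0 maps to 0, not NaN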
file_length: 1,029 | avg_line_length: 25.410256 | max_line_length: 60 | extension_type: py