| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
| chainer | chainer-master/chainer/functions/pooling/roi_average_align_2d.py |
# Modified work:
# -----------------------------------------------------------------------------
# Copyright (c) 2018 Preferred Infrastructure, Inc.
# Copyright (c) 2018 Preferred Networks, Inc.
# -----------------------------------------------------------------------------
# Original work:
# -----------------------------------------------------------------------------
# Copyright (c) 2015 by Contributors
# \file roi_pooling.cu
# \brief roi pooling operator
# \author Ross Girshick, Kye-Hyeon Kim, Jian Guo
# \changed to roi_align by Elaine Bao
# \file roi_align.cu
# \roi align operator described in Mask RCNN
# -----------------------------------------------------------------------------
import numbers
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function
from chainer import utils
from chainer.utils import type_check
def _pair(x):
if isinstance(x, chainer.utils.collections_abc.Iterable):
return x
return x, x
def _get_bounds(p, limit):
if p < -1 or p > limit:
# out of range, so it is empty
return None, None, None
low = int(numpy.floor(p))
if low == limit:
low = low - 1
high = low + 1
if low <= -1:
p = 0
elif high >= limit:
p = limit - 1
return p, low, high
def _get_bilinear_interp_params(y, x, y_low, x_low, y_high, x_high):
ly = y - y_low
lx = x - x_low
hy = y_high - y
hx = x_high - x
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
return w1, w2, w3, w4
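# Illustrative worked example (the numbers below are made up): for a sampling
# point (y, x) = (2.3, 4.6) on a 10x10 feature map,
#
#     _get_bounds(2.3, 10)  ->  (2.3, 2, 3)
#     _get_bounds(4.6, 10)  ->  (4.6, 4, 5)
#     _get_bilinear_interp_params(2.3, 4.6, 2, 4, 3, 5)
#         ->  w1 = 0.7 * 0.4 = 0.28   # weight of (y_low,  x_low)
#             w2 = 0.7 * 0.6 = 0.42   # weight of (y_low,  x_high)
#             w3 = 0.3 * 0.4 = 0.12   # weight of (y_high, x_low)
#             w4 = 0.3 * 0.6 = 0.18   # weight of (y_high, x_high)
#
# The four weights always sum to 1, so interpolating a constant feature map
# returns the same constant.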
_GET_BILINEAR_INTERP_KERNEL = '''
__device__
bool get_bounds(
T &p, const int limit, int &low, int &high) {
if (p < -1. || p > limit) {
// empty
return false;
}
low = (int)floor(p);
if (low == limit) {
low = low - 1;
}
high = low + 1;
if (low <= -1) {
p = (T) 0.0;
} else if (high >= limit) {
p = (T) (limit - 1);
}
return true;
}
__device__
void get_bilinear_interp_params(
T y, T x, int y_low, int x_low, int y_high, int x_high,
T &w1, T &w2, T &w3, T &w4) {
T ly = y - y_low;
T lx = x - x_low;
T hy = y_high - y;
T hx = x_high - x;
w1 = hy * hx;
w2 = hy * lx;
w3 = ly * hx;
w4 = ly * lx;
}
'''
class ROIAverageAlign2D(function.Function):
"""ROI average align over a set of 2d planes."""
def __init__(self, outsize, spatial_scale, sampling_ratio=None):
outh, outw = _pair(outsize)
if not (isinstance(outh, numbers.Integral) and outh > 0):
raise TypeError(
'outsize[0] must be positive integer: {}, {}'
.format(type(outh), outh))
if not (isinstance(outw, numbers.Integral) and outw > 0):
raise TypeError(
'outsize[1] must be positive integer: {}, {}'
.format(type(outw), outw))
if isinstance(spatial_scale, numbers.Integral):
spatial_scale = float(spatial_scale)
if not (isinstance(spatial_scale, numbers.Real) and
spatial_scale > 0):
raise TypeError(
'spatial_scale must be a positive float number: {}, {}'
.format(type(spatial_scale), spatial_scale))
sampling_ratio = _pair(sampling_ratio)
if not all((isinstance(s, numbers.Integral) and s >= 1) or
s is None for s in sampling_ratio):
raise TypeError(
'sampling_ratio must be integer >= 1 or a pair of it: {}'
.format(sampling_ratio))
self.outh, self.outw = outh, outw
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, roi_type, roi_index_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim == 4,
roi_type.dtype == numpy.float32,
roi_type.ndim == 2,
roi_type.shape[1] == 4,
roi_index_type.dtype == numpy.int32,
roi_index_type.ndim == 1,
roi_type.shape[0] == roi_index_type.shape[0],
)
def forward_cpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = numpy.empty((n_rois, channels, self.outh,
self.outw), dtype=bottom_data.dtype)
pooled_width, pooled_height = self.outw, self.outh
spatial_scale = self.spatial_scale
for i in six.moves.range(top_data.size):
pw = i % pooled_width
ph = int(i / pooled_width) % pooled_height
c = int(i / pooled_width / pooled_height) % channels
n = int(i / pooled_width / pooled_height / channels)
roi_batch_ind = int(bottom_roi_indices[n])
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 1.)
roi_width = max(roi_end_w - roi_start_w, 1.)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(numpy.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(numpy.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
count = roi_bin_grid_h * roi_bin_grid_w
output_val = 0.
for iy in six.moves.range(roi_bin_grid_h):
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
for ix in six.moves.range(roi_bin_grid_w):
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear interpolation {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
if w1 > 0 and y_low >= 0 and x_low >= 0:
v1 = bottom_data[roi_batch_ind, c, y_low, x_low]
output_val += w1 * v1
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
v2 = bottom_data[roi_batch_ind, c, y_low, x_high]
output_val += w2 * v2
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
v3 = bottom_data[roi_batch_ind, c, y_high, x_low]
output_val += w3 * v3
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
v4 = bottom_data[roi_batch_ind, c, y_high, x_high]
output_val += w4 * v4
# }}
output_val /= count
top_data[n, c, ph, pw] = output_val
return top_data,
def forward_gpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = cuda.cupy.empty((n_rois, channels, self.outh,
self.outw), dtype=bottom_data.dtype)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T bottom_data, T spatial_scale, int32 channels,
int32 height, int32 width, int32 pooled_height, int32 pooled_width,
int32 sampling_ratio_h, int32 sampling_ratio_w,
raw T bottom_rois, raw int32 bottom_roi_indices
''',
'T top_data',
'''
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T bin_size_h = static_cast<T>(roi_height)
/ static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width)
/ static_cast<T>(pooled_width);
int bottom_data_offset =
(roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g. iy = 0, 1
{
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
int y_low, y_high;
bool y_ret = get_bounds(y, height, y_low, y_high);
if (!y_ret) continue;
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
int x_low, x_high;
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret) continue;
// bilinear_interpolation_gradient {{
T w1, w2, w3, w4;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
if (w1 > 0 && y_low >= 0 && x_low >= 0) {
T v1 = bottom_data[
bottom_data_offset + y_low * width + x_low];
output_val += w1 * v1;
}
if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
T v2 = bottom_data[
bottom_data_offset + y_low * width + x_high];
output_val += w2 * v2;
}
if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
T v3 = bottom_data[
bottom_data_offset + y_high * width + x_low];
output_val += w3 * v3;
}
if (w4 > 0 && y_high <= height - 1 &&
x_high <= width - 1) {
T v4 = bottom_data[
bottom_data_offset + y_high * width + x_high];
output_val += w4 * v4;
}
// }}
}
}
output_val /= count;
top_data = output_val;
''',
'roi_average_align_2d_fwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(bottom_data, self.spatial_scale, channels, height, width,
self.outh, self.outw, sampling_ratio_h, sampling_ratio_w,
bottom_rois, bottom_roi_indices, top_data)
return top_data,
def backward_cpu(self, inputs, gy):
bottom_rois, bottom_roi_indices = inputs[1:]
channels, height, width = self._bottom_data_shape[1:]
bottom_diff = numpy.zeros(self._bottom_data_shape, gy[0].dtype)
spatial_scale = self.spatial_scale
pooled_height = self.outh
pooled_width = self.outw
top_diff = gy[0]
for i in six.moves.range(top_diff.size):
pw = i % pooled_width
ph = int(i / pooled_width) % pooled_height
c = int(i / pooled_width / pooled_height) % channels
n = int(i / pooled_width / pooled_height / channels)
roi_batch_ind = int(bottom_roi_indices[n])
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 1.)
roi_width = max(roi_end_w - roi_start_w, 1.)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
top_diff_this_bin = top_diff[n, c, ph, pw]
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(numpy.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(numpy.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
count = roi_bin_grid_h * roi_bin_grid_w
for iy in six.moves.range(roi_bin_grid_h):
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
for ix in six.moves.range(roi_bin_grid_w):
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear_interpolation_gradient {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
if w1 > 0 and y_low >= 0 and x_low >= 0:
g1 = top_diff_this_bin * w1 / count
bottom_diff[roi_batch_ind, c, y_low, x_low] += g1
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
g2 = top_diff_this_bin * w2 / count
bottom_diff[roi_batch_ind, c, y_low, x_high] += g2
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
g3 = top_diff_this_bin * w3 / count
bottom_diff[roi_batch_ind, c, y_high, x_low] += g3
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
g4 = top_diff_this_bin * w4 / count
bottom_diff[roi_batch_ind, c, y_high, x_high] += g4
# }}
return bottom_diff, None, None
def backward_gpu(self, inputs, gy):
utils.nondeterministic('atomicAdd')
bottom_rois, bottom_roi_indices = inputs[1:]
channels, height, width = self._bottom_data_shape[1:]
bottom_diff = cuda.cupy.zeros(self._bottom_data_shape, gy[0].dtype)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T top_diff,
int32 num_rois, T spatial_scale,
int32 channels, int32 height, int32 width,
int32 pooled_height, int32 pooled_width,
int32 sampling_ratio_h, int32 sampling_ratio_w,
raw T bottom_rois, raw int32 bottom_roi_indices
''',
'raw T bottom_diff',
'''
// (n, c, h, w) coords in bottom data
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
// Do not use rounding; this implementation detail is critical
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) /
static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) /
static_cast<T>(pooled_width);
int bottom_diff_offset =
(roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
T top_diff_this_bin =
top_diff[top_offset + ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
int y_low, y_high;
bool y_ret = get_bounds(y, height, y_low, y_high);
if (!y_ret) continue;
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
int x_low, x_high;
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret) continue;
// bilinear_interpolation_gradient {{
T w1, w2, w3, w4;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
if (w1 > 0 && y_low >= 0 && x_low >= 0) {
T g1 = top_diff_this_bin * w1 / count;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_low * width + x_low], g1);
}
if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
T g2 = top_diff_this_bin * w2 / count;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_low * width + x_high], g2);
}
if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
T g3 = top_diff_this_bin * w3 / count;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_high * width + x_low], g3);
}
if (w4 > 0 && y_high <= height - 1 &&
x_high <= width - 1) {
T g4 = top_diff_this_bin * w4 / count;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_high * width + x_high], g4);
}
// }}
}
}
''',
'roi_average_align_2d_bwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(gy[0], bottom_rois.shape[0],
self.spatial_scale, channels, height, width, self.outh, self.outw,
sampling_ratio_h, sampling_ratio_w, bottom_rois, bottom_roi_indices,
bottom_diff, size=gy[0].size)
return bottom_diff, None, None
def roi_average_align_2d(
x, rois, roi_indices, outsize, spatial_scale, sampling_ratio=None
):
"""Spatial Region of Interest (ROI) average align function.
This function acts similarly to
:func:`~chainer.functions.roi_average_pooling_2d`, but it computes the
average of the input spatial patch for each channel using bilinear
interpolation over the region of interest.
Args:
x (~chainer.Variable): Input variable. The shape is expected to be
4 dimensional: ``(n: batch, c: channel, h: height, w: width)``.
rois (~chainer.Variable): Input roi variable. The shape is expected to
be ``(n: data size, 4)``, and each datum is set as below:
``(y_min, x_min, y_max, x_max)``.
roi_indices (~chainer.Variable): Input roi index variable. The shape
is expected to be ``(n: data size, )``.
outsize ((int, int) or int): Expected output size after pooling,
as ``(height, width)``. ``outsize=o`` and ``outsize=(o, o)``
are equivalent.
spatial_scale (float): Scale by which the ROI is resized.
sampling_ratio ((int, int) or int): Sampling step for the alignment.
It must be an integer of at least :math:`1` or :obj:`None`; the
value is decided automatically when :obj:`None` is passed.
Different ratios for the height and width axes are supported by
passing a tuple of ints as ``(sampling_ratio_h, sampling_ratio_w)``.
``sampling_ratio=s`` and ``sampling_ratio=(s, s)`` are equivalent.
Returns:
~chainer.Variable: Output variable.
See the original paper proposing ROIAlign:
`Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.
"""
return ROIAverageAlign2D(outsize, spatial_scale, sampling_ratio)(
x, rois, roi_indices)
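# A minimal usage sketch (the array values, ROI coordinates and ROI indices
# below are made up solely to illustrate the call signature and output
# shape). With a constant input the ROI-aligned output is the same constant,
# since the bilinear weights sum to 1.
if __name__ == '__main__':  # pragma: no cover
    x = numpy.ones((1, 3, 8, 8), dtype=numpy.float32)
    # Each ROI is (y_min, x_min, y_max, x_max) in the scaled input coordinates.
    rois = numpy.array([[0., 0., 4., 5.]], dtype=numpy.float32)
    roi_indices = numpy.array([0], dtype=numpy.int32)  # ROI of batch item 0
    y = roi_average_align_2d(
        x, rois, roi_indices, outsize=(2, 2), spatial_scale=1.0,
        sampling_ratio=2)
    print(y.shape)  # (1, 3, 2, 2): (#rois, channels, outh, outw)
    print(y.array)  # all ones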
| 23,553 | 39.332192 | 79 | py |
| chainer | chainer-master/chainer/functions/pooling/average_pooling_2d.py |
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function_node
from chainer.functions.pooling import average_pooling_nd
from chainer.functions.pooling import pooling_2d
from chainer.utils import conv
import chainerx
class AveragePooling2D(pooling_2d.Pooling2D):
"""Average pooling over a set of 2d planes."""
# TODO(beam2d): Support cover_all mode.
def forward_cpu(self, x):
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(x)):
return self._forward_ideep(x)
self._in_shape = x[0].shape
self._in_dtype = x[0].dtype
col = conv.im2col_cpu(x[0], self.kh, self.kw, self.sy, self.sx,
self.ph, self.pw)
y = col.mean(axis=(2, 3))
return y,
def _forward_ideep(self, x):
self._in_shape = x[0].shape
self._in_dtype = x[0].dtype
self.retain_inputs((0,))
n, c, h, w = x[0].shape
y_h = conv.get_conv_outsize(
h, self.kh, self.sy, self.ph, self.cover_all)
assert y_h > 0, 'Height in the output should be positive.'
y_w = conv.get_conv_outsize(
w, self.kw, self.sx, self.pw, self.cover_all)
assert y_w > 0, 'Width in the output should be positive.'
pd = self.sy * (y_h - 1) + self.kh - h - self.ph
pr = self.sx * (y_w - 1) + self.kw - w - self.pw
pp = intel64.ideep.pooling2DParam(
(n, c, y_h, y_w),
self.kh, self.kw,
self.sy, self.sx,
self.ph, self.pw,
pd, pr,
intel64.ideep.pooling2DParam.pooling_avg_include_padding)
y, = intel64.ideep.pooling2D.Forward(intel64.ideep.array(x[0]), pp)
return y,
def forward_gpu(self, x):
if chainer.should_use_cudnn('>=auto'):
self.retain_inputs((0,))
return super(AveragePooling2D, self).forward_gpu(x)
self._in_shape = x[0].shape
self._in_dtype = x[0].dtype
n, c, h, w = x[0].shape
y_h = conv.get_conv_outsize(h, self.kh, self.sy, self.ph)
y_w = conv.get_conv_outsize(w, self.kw, self.sx, self.pw)
y = cuda.cupy.empty((n, c, y_h, y_w), dtype=x[0].dtype)
coeff = 1. / (self.kh * self.kw)
kern = cuda.elementwise(
'raw T in, int32 h, int32 w,'
'int32 out_h, int32 out_w, int32 kh, int32 kw,'
'int32 sy, int32 sx, int32 ph, int32 pw, T coeff',
'T out', '''
int c0 = i / (out_h * out_w);
int out_y = i / out_w % out_h;
int out_x = i % out_w;
int in_y_0 = max(0, out_y * sy - ph);
int in_y_1 = min(h, out_y * sy + kh - ph);
int in_x_0 = max(0, out_x * sx - pw);
int in_x_1 = min(w, out_x * sx + kw - pw);
T val = 0;
for (int y = in_y_0; y < in_y_1; ++y) {
int offset_y = w * (y + h * c0);
for (int x = in_x_0; x < in_x_1; ++x) {
val = val + in[x + offset_y];
}
}
out = val * coeff;
''', 'avg_pool_fwd')
kern(x[0].reduced_view(), h, w, y_h, y_w, self.kh, self.kw,
self.sy, self.sx, self.ph, self.pw, coeff, y)
return y,
def backward(self, indexes, gy):
return AveragePooling2DGrad(self).apply(gy)
def _get_pool_mode(self):
return cuda.cuda.cudnn.CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING
class AveragePooling2DGrad(function_node.FunctionNode):
def __init__(self, apool2d):
self.kh = apool2d.kh
self.kw = apool2d.kw
self.sy = apool2d.sy
self.sx = apool2d.sx
self.ph = apool2d.ph
self.pw = apool2d.pw
self._used_cudnn = apool2d._used_cudnn
if not self._used_cudnn:
self._in_shape = apool2d._in_shape
self._in_dtype = apool2d._in_dtype
self.apool2d = apool2d
def forward_cpu(self, gy):
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(gy)):
return self._forward_ideep(gy)
h, w = self._in_shape[2:]
gcol = numpy.tile(gy[0][:, :, None, None],
(1, 1, self.kh, self.kw, 1, 1))
gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)
gx /= self.kh * self.kw
return gx,
def _forward_ideep(self, gy):
n, c, h, w = self._in_shape
y_h, y_w = gy[0].shape[2:]
x, = self.apool2d.get_retained_inputs()
pd = self.sy * (y_h - 1) + self.kh - h - self.ph
pr = self.sx * (y_w - 1) + self.kw - w - self.pw
pp = intel64.ideep.pooling2DParam(
self._in_shape,
self.kh, self.kw,
self.sy, self.sx,
self.ph, self.pw,
pd, pr,
intel64.ideep.pooling2DParam.pooling_avg_include_padding)
gx = intel64.ideep.pooling2D.Backward(
intel64.ideep.array(x.data),
intel64.ideep.array(gy[0]), None, pp)
return gx,
def forward_gpu(self, gy):
if self._used_cudnn:
x, = self.apool2d.get_retained_inputs()
return self.apool2d.backward_gpu((x.data,), gy)
n, c, h, w = self._in_shape
y_h, y_w = gy[0].shape[2:]
gx = cuda.cupy.empty(self._in_shape, self._in_dtype)
coeff = 1. / (self.kh * self.kw)
cuda.elementwise(
'raw T gy, int32 h, int32 w,'
'int32 out_h, int32 out_w, int32 kh, int32 kw,'
'int32 sy, int32 sx, int32 ph, int32 pw, T coeff',
'T gx',
'''
int c0 = i / (h * w);
int y = i / w % h + ph;
int x = i % w + pw;
int out_y_0 = max(0, (y - kh + sy) / sy);
int out_y_1 = min(out_h, (y + sy) / sy);
int out_x_0 = max(0, (x - kw + sx) / sx);
int out_x_1 = min(out_w, (x + sx) / sx);
int hc0 = out_h * c0;
T val = 0;
for (int out_y = out_y_0; out_y < out_y_1; ++out_y) {
for (int out_x = out_x_0; out_x < out_x_1; ++out_x) {
val = val + gy[out_x + out_w * (out_y + hc0)];
}
}
gx = val * coeff;
''', 'avg_pool_bwd')(gy[0].reduced_view(),
h, w, y_h, y_w, self.kh, self.kw,
self.sy, self.sx, self.ph, self.pw, coeff,
gx)
return gx,
def backward(self, indexes, grad_outputs):
return AveragePooling2D(
(self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),
False).apply(grad_outputs)
def average_pooling_2d(x, ksize, stride=None, pad=0):
"""Spatial average pooling function.
This function acts similarly to :func:`~chainer.functions.convolution_2d`,
but it computes the average of the input spatial patch for each channel,
without any parameters, instead of computing the inner products.
Args:
x (~chainer.Variable): Input variable.
ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k)`` are equivalent.
stride (int or pair of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is
specified, then it uses the same stride as the pooling window size.
pad (int or pair of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p)`` are equivalent.
Returns:
~chainer.Variable: Output variable.
.. note::
This function currently does not support ``cover_all`` mode as
:func:`max_pooling_2d`. Average pooling runs in non-cover-all mode.
.. note::
The values in the padded region are treated as 0, which biases the averages
towards zero.
To obtain unbiased averages, use :func:`average_pooling_nd` with
``pad_value=None``.
"""
if backend.get_array_module(x) is chainerx:
return average_pooling_nd.average_pooling_nd(x, ksize, stride, pad)
return AveragePooling2D(ksize, stride, pad, False).apply((x,))[0]
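# A minimal usage sketch; the input values and pooling parameters are chosen
# arbitrarily to make the arithmetic easy to follow.
if __name__ == '__main__':  # pragma: no cover
    x = numpy.arange(16, dtype=numpy.float32).reshape(1, 1, 4, 4)
    y = average_pooling_2d(x, ksize=2, stride=2)
    # Each output element is the mean of a non-overlapping 2x2 window,
    # e.g. the top-left value is (0 + 1 + 4 + 5) / 4 = 2.5.
    print(y.array)  # [[[[ 2.5  4.5] [10.5 12.5]]]]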
| 8,383 | 35.77193 | 79 | py |
| chainer | chainer-master/chainer/functions/pooling/upsampling_2d.py |
import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.pooling import pooling_2d
from chainer.utils import conv
from chainer.utils import type_check
class Upsampling2D(pooling_2d.Pooling2D):
"""Upsampling over a set of 2d planes w/ indices used for max pooling."""
def __init__(self, indexes, ksize, stride=None, pad=0, outsize=None,
cover_all=True):
super(Upsampling2D, self).__init__(ksize, stride, pad, cover_all)
self.indexes = indexes
self.outh, self.outw = (None, None) if outsize is None else outsize
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(n_in == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 4,
x_type.shape == self.indexes.shape,
)
if self.outh is not None:
expected_h = conv.get_conv_outsize(
self.outh, self.kh, self.sy, self.ph, cover_all=self.cover_all)
type_check.expect(x_type.shape[2] == expected_h)
if self.outw is not None:
expected_w = conv.get_conv_outsize(
self.outw, self.kw, self.sx, self.pw, cover_all=self.cover_all)
type_check.expect(x_type.shape[3] == expected_w)
def forward_cpu(self, x):
self._in_dtype = x[0].dtype
n, c, h, w = x[0].shape
if self.outh is None:
self.outh = conv.get_deconv_outsize(
h, self.kh, self.sy, self.ph, cover_all=self.cover_all)
if self.outw is None:
self.outw = conv.get_deconv_outsize(
w, self.kw, self.sx, self.pw, cover_all=self.cover_all)
up_y = numpy.zeros((n, c, self.outh, self.outw), dtype=self._in_dtype)
up_y = conv.im2col_cpu(
up_y, self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all).transpose(0, 1, 4, 5, 2, 3)
colh, colw = up_y.shape[2:4]
up_y = up_y.reshape(-1, self.kh * self.kw)
indexes = self.indexes.ravel()
up_y[numpy.arange(len(indexes)), indexes] = x[0].ravel()
up_y = up_y.reshape(n, c, colh, colw, self.kh, self.kw)
up_y = conv.col2im_cpu(
up_y.transpose(0, 1, 4, 5, 2, 3), self.sy, self.sx, self.ph,
self.pw, self.outh, self.outw)
return up_y,
def forward_gpu(self, x):
self._in_dtype = x[0].dtype
xp = cuda.cupy
n, c, h, w = x[0].shape
if self.outh is None:
self.outh = conv.get_deconv_outsize(
h, self.kh, self.sy, self.ph, cover_all=self.cover_all)
if self.outw is None:
self.outw = conv.get_deconv_outsize(
w, self.kw, self.sx, self.pw, cover_all=self.cover_all)
up_y = xp.zeros((n, c, self.outh, self.outw), dtype=self._in_dtype)
up_y = conv.im2col_gpu(
up_y, self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all)
up_y = up_y.transpose(0, 1, 4, 5, 2, 3)
n, c, oy, ox, ky, kx = up_y.shape
indexes = xp.asarray(self.indexes, dtype=numpy.int32)
cuda.elementwise(
'int32 index, T x, int32 n, int32 c, int32 oy, int32 ox,'
'int32 ky, int32 kx', 'raw T up_y',
'''
int yn = i / c / oy / ox;
int yc = (i / oy / ox) % c;
int yoy = (i / ox) % oy;
int yox = i % ox;
up_y[yn * c * oy * ox * ky * kx +
yc * oy * ox * ky * kx +
yoy * ox * ky * kx +
yox * ky * kx +
index] = x;
''',
'upsampling_2d_fwd')(indexes, x[0], n, c, oy, ox, ky, kx, up_y)
up_y = up_y.transpose(0, 1, 4, 5, 2, 3)
up_y = conv.col2im_gpu(up_y, self.sy, self.sx, self.ph, self.pw,
self.outh, self.outw)
return up_y,
def backward(self, indexes, grad_outputs):
return Upsampling2DGrad(self).apply(grad_outputs)
class Upsampling2DGrad(function_node.FunctionNode):
def __init__(self, upsampling2d):
self.kh = upsampling2d.kh
self.kw = upsampling2d.kw
self.sy = upsampling2d.sy
self.sx = upsampling2d.sx
self.ph = upsampling2d.ph
self.pw = upsampling2d.pw
self.outh = upsampling2d.outh
self.outw = upsampling2d.outw
self.cover_all = upsampling2d.cover_all
self.indexes = upsampling2d.indexes
self._in_dtype = upsampling2d._in_dtype
def forward_cpu(self, gy):
gcol = conv.im2col_cpu(
gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all)
n, c, kh, kw, out_h, out_w = gcol.shape
gcol = gcol.transpose(0, 1, 4, 5, 2, 3).reshape(-1, kh * kw)
indexes = self.indexes.ravel()
gx = gcol[numpy.arange(len(indexes)), indexes]
return gx.reshape(n, c, out_h, out_w),
def forward_gpu(self, gy):
xp = cuda.cupy
gcol = conv.im2col_gpu(
gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all)
gcol = gcol.transpose(0, 1, 4, 5, 2, 3)
n, c, oy, ox, ky, kx = gcol.shape
gcol = gcol.reshape((n, c, oy, ox, ky * kx))
indexes = xp.asarray(self.indexes, dtype=numpy.int32)
gx = xp.empty((n, c, oy, ox), dtype=self._in_dtype)
cuda.elementwise(
'int32 indexes, raw T gcol, int32 n, int32 c, int32 oy,'
'int32 ox, int32 ky, int32 kx',
'raw T gx',
'''
int ind_n = i / c / oy / ox;
int ind_c = (i / oy / ox) % c;
int ind_oy = (i / ox) % oy;
int ind_ox = i % ox;
int gcol_ky = indexes / kx;
int gcol_kx = indexes % kx;
float top_gx = gcol[ind_n * c * oy * ox * ky * kx +
ind_c * oy * ox * ky * kx +
ind_oy * ox * ky * kx +
ind_ox * ky * kx +
gcol_ky * kx +
gcol_kx];
gx[ind_n * c * oy * ox +
ind_c * oy * ox +
ind_oy * ox +
ind_ox] = top_gx;
''',
'upsampling_2d_bwd')(indexes, gcol, n, c, oy, ox, ky, kx, gx)
return gx,
def backward(self, indexes, ggx):
return Upsampling2D(
self.indexes, (self.kh, self.kw), (self.sy, self.sx),
(self.ph, self.pw), (self.outh, self.outw),
self.cover_all).apply(ggx)
def upsampling_2d(
x, indexes, ksize, stride=None, pad=0, outsize=None, cover_all=True):
"""Upsampling using pooling indices.
This function produces an upsampled image using pooling indices.
.. admonition:: Example
>>> x = np.arange(1, 37).reshape(1, 1, 6, 6).astype(np.float32)
>>> x = chainer.Variable(x)
>>> x.array
array([[[[ 1., 2., 3., 4., 5., 6.],
[ 7., 8., 9., 10., 11., 12.],
[13., 14., 15., 16., 17., 18.],
[19., 20., 21., 22., 23., 24.],
[25., 26., 27., 28., 29., 30.],
[31., 32., 33., 34., 35., 36.]]]], dtype=float32)
This is the original ``x`` before max pooling.
>>> pooled_x, indexes = F.max_pooling_2d(
... x, ksize=2, stride=2, return_indices=True)
>>> pooled_x.array
array([[[[ 8., 10., 12.],
[20., 22., 24.],
[32., 34., 36.]]]], dtype=float32)
>>> indexes
array([[[[3, 3, 3],
[3, 3, 3],
[3, 3, 3]]]])
These are the outputs from the max pooling operation including the
resulting indices that will be used to upsample ``pooled_x``. Note
that the indices all point to the largest elements in each window,
which in this case are the last ones.
>>> upsampled_x = F.upsampling_2d(
... pooled_x, indexes, ksize=2, stride=2, outsize=x.shape[2:])
>>> upsampled_x.shape
(1, 1, 6, 6)
>>> upsampled_x.array
array([[[[ 0., 0., 0., 0., 0., 0.],
[ 0., 8., 0., 10., 0., 12.],
[ 0., 0., 0., 0., 0., 0.],
[ 0., 20., 0., 22., 0., 24.],
[ 0., 0., 0., 0., 0., 0.],
[ 0., 32., 0., 34., 0., 36.]]]], dtype=float32)
Args:
x (~chainer.Variable): Input variable.
indexes (:ref:`ndarray`): Index array returned from
preceding call to :meth:`~chainer.functions.max_pooling_2d`.
ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k)`` are equivalent.
stride (int or pair of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is
specified, then it uses the same stride as the pooling window size.
pad (int or pair of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p)`` are equivalent.
outsize ((int, int)): Expected output size (height, width).
cover_all (bool): Should be set to ``True`` if all spatial locations
were pooled into some output pixels during the preceding pooling
operation. ``False`` otherwise. See
:meth:`~chainer.functions.max_pooling_2d`.
Returns:
~chainer.Variable: Output variable.
"""
return Upsampling2D(
indexes, ksize, stride, pad, outsize, cover_all).apply((x,))[0]
| 9,757 | 38.666667 | 79 | py |
| chainer | chainer-master/chainer/functions/pooling/unpooling_nd.py |
import numpy
import six
from chainer import backend
from chainer import function_node
from chainer.functions.pooling import pooling_nd
from chainer.utils import conv
from chainer.utils import conv_nd
from chainer.utils import type_check
class UnpoolingND(pooling_nd._PoolingND):
"""Unpooling over a set of N-dimensional planes.
.. warning::
This feature is experimental. The interface can change in the future.
"""
def __init__(self, ndim, ksize, stride=None, pad=0, outsize=None,
cover_all=True):
super(UnpoolingND, self).__init__(ndim, ksize, stride, pad, cover_all)
self.outs = None if outsize is None else outsize
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(n_in == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 2 + self.ndim,
)
if self.outs is not None:
expected_dims = tuple(
conv.get_conv_outsize(out, k, s, p, cover_all=self.cover_all)
for (out, k, s, p)
in six.moves.zip(self.outs, self.ksize, self.stride, self.pad))
type_check.expect(x_type.shape[2:] == expected_dims)
def forward(self, x):
self.retain_inputs(())
dims = x[0].shape[2:]
ndim = self.ndim
ksize = self.ksize
stride = self.stride
pad = self.pad
if self.outs is None:
self.outs = tuple(
conv.get_deconv_outsize(d, k, s, p, cover_all=self.cover_all)
for (d, k, s, p) in six.moves.zip(dims, ksize, stride, pad))
xp = backend.get_array_module(*x)
colon = slice(None)
# (:, :, None, None, ..., None)
tile_index = (colon, colon) + (None,) * ndim
# (1, 1, k_1, k_2, ..., k_n, 1, 1, ..., 1)
tile_reps = (1, 1) + ksize + (1,) * ndim
col = xp.tile(x[0][tile_index], tile_reps)
if xp is numpy:
col2im_nd = conv_nd.col2im_nd_cpu
else:
col2im_nd = conv_nd.col2im_nd_gpu
y = col2im_nd(col, stride, pad, self.outs)
return y,
def backward(self, indexes, grad_outputs):
return UnpoolingNDGrad(self).apply(grad_outputs)
class UnpoolingNDGrad(function_node.FunctionNode):
def __init__(self, unpoolingnd):
self.ndim = unpoolingnd.ndim
self.ksize = unpoolingnd.ksize
self.stride = unpoolingnd.stride
self.pad = unpoolingnd.pad
self.outs = unpoolingnd.outs
self.cover_all = unpoolingnd.cover_all
def forward(self, gy):
xp = backend.get_array_module(*gy)
if xp is numpy:
im2col_nd = conv_nd.im2col_nd_cpu
else:
im2col_nd = conv_nd.im2col_nd_gpu
gcol = im2col_nd(
gy[0], self.ksize, self.stride, self.pad, cover_all=self.cover_all)
gcol_axis = tuple(six.moves.range(2, 2 + self.ndim))
gx = gcol.sum(axis=gcol_axis)
return gx,
def backward(self, indexes, ggx):
return UnpoolingND(
self.ndim, self.ksize, self.stride, self.pad, self.outs,
self.cover_all).apply(ggx)
def unpooling_nd(x, ksize, stride=None, pad=0, outsize=None, cover_all=True):
"""Inverse operation of N-dimensional spatial pooling.
.. warning::
This feature is experimental. The interface can change in the future.
This function acts similarly to
:class:`~functions.connection.deconvolution_nd.DeconvolutionND`, but
it spreads the input N-dimensional array's values without any parameters instead
of computing the inner products.
Args:
x (~chainer.Variable): Input variable.
ksize (int or pair of ints): Size of pooling window
:math:`(k_1, k_2, ..., k_N)`. ``ksize=k`` is equivalent to
``(k, k, ..., k)``.
stride (int, pair of ints or None): Stride of pooling applications
:math:`(s_1, s_2, ..., s_N)`. ``stride=s`` is equivalent to
``(s, s, ..., s)``. If ``None`` is specified, then it uses the same
stride as the pooling window size.
pad (int or pair of ints): Spatial padding width for the input array
:math:`(p_1, p_2, ..., p_N)`. ``pad=p`` is equivalent to
``(p, p, ..., p)``.
outsize (None or pair of ints): Expected output size of unpooling
operation :math:`(out_1, out_2, ..., out_N)`. If ``None``, the size
is estimated from input size, stride and padding.
cover_all (bool): If ``True``, the pooling window is assumed to cover
all of the output array; consequently, the output size may be smaller
than when ``cover_all`` is ``False``.
Returns:
~chainer.Variable: Output variable.
"""
ndim = len(x.shape[2:])
return UnpoolingND(
ndim, ksize, stride, pad, outsize, cover_all).apply((x,))[0]
def unpooling_1d(x, ksize, stride=None, pad=0, outsize=None, cover_all=True):
"""Inverse operation of 1-dimensional spatial pooling.
.. warning::
This feature is experimental. The interface can change in the future.
.. note::
This function calls :func:`~chainer.functions.unpooling_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.unpooling_nd`.
"""
if len(x.shape[2:]) != 1:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 1. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return unpooling_nd(x, ksize, stride, pad, outsize, cover_all)
def unpooling_3d(x, ksize, stride=None, pad=0, outsize=None, cover_all=True):
"""Inverse operation of 3-dimensional spatial pooling.
.. warning::
This feature is experimental. The interface can change in the future.
.. note::
This function calls :func:`~chainer.functions.unpooling_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.unpooling_nd`.
"""
if len(x.shape[2:]) != 3:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 3. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return unpooling_nd(x, ksize, stride, pad, outsize, cover_all)
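# A minimal usage sketch for unpooling_nd; shapes and values are illustrative
# only. With ``stride == ksize`` and no padding, every input value is simply
# spread over its own ``ksize``-sized block.
if __name__ == '__main__':  # pragma: no cover
    x = numpy.arange(4, dtype=numpy.float32).reshape(1, 1, 2, 2)
    y = unpooling_nd(x, ksize=2, stride=2, cover_all=False)
    print(y.shape)  # (1, 1, 4, 4)
    print(y.array)  # each of the four inputs fills one 2x2 block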
| 6,503 | 33.967742 | 79 | py |
| chainer | chainer-master/chainer/functions/pooling/__init__.py | | 0 | 0 | 0 | py |
| chainer | chainer-master/chainer/functions/pooling/max_pooling_nd.py |
import functools
from operator import mul
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import configuration
from chainer import function_node
from chainer.functions.pooling import max_pooling_nd_kernel
from chainer.functions.pooling import pooling_nd
from chainer.utils import conv_nd
import chainerx
if cuda.cudnn_enabled:
_cudnn_version = cuda.cuda.cudnn.getVersion()
class MaxPoolingND(pooling_nd._PoolingND):
"""Max pooling over a set of N-dimensional planes.
.. warning::
This feature is experimental. The interface can change in the future.
"""
def __init__(self, ndim, ksize, stride=None, pad=0, cover_all=True,
return_indices=False):
super(MaxPoolingND, self).__init__(
ndim, ksize, stride=stride, pad=pad, cover_all=cover_all,
return_indices=return_indices)
def forward_chainerx(self, x):
ndim = self.ndim
ksize = self.ksize
stride = self.stride
pad = self.pad
cover_all = self.cover_all
# TODO(sonots): Support return_indices in ChainerX
if self.return_indices:
return chainer.Fallback
if x[0].device.backend.name == 'cuda':
# TODO(sonots): Support more ndim in ChainerX
if ndim not in [2, 3]:
return chainer.Fallback
y = chainerx.max_pool(x[0], ksize, stride, pad, cover_all)
return y,
def forward_cpu(self, x):
if (self.ndim == 2
and intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(x)):
return self._forward_2d_ideep(x)
ksize = self.ksize
stride = self.stride
pad = self.pad
cover_all = self.cover_all
in_shape = x[0].shape
in_dtype = x[0].dtype
col = conv_nd.im2col_nd_cpu(
x[0], ksize, stride, pad,
pval=-float('inf'),
cover_all=cover_all)
n, c = col.shape[:2]
mid = (len(col.shape) - 2) // 2 + 2
ksize = col.shape[2:mid]
outs = col.shape[mid:]
# (n, c, k_1 * k_2 * ... * k_N, out_1, out_2, ..., out_N)
col_shape = (n, c) + (functools.reduce(mul, ksize),) + outs
col = col.reshape(col_shape)
# We take the maximum twice (max and argmax), since the implementation
# using numpy.choose hits a bug when kh * kw >= 32.
y = col.max(axis=2)
self._in_shape = in_shape
self._in_dtype = in_dtype
self.indexes = col.argmax(axis=2)
return y,
def _forward_2d_ideep(self, x):
assert self.ndim == 2
kh, kw = self.ksize
sy, sx = self.stride
ph, pw = self.pad
cover_all = self.cover_all
self._in_shape = x[0].shape
self._in_dtype = x[0].dtype
self.retain_inputs((0,))
n, c, h, w = x[0].shape
y_h = conv_nd.get_conv_outsize(h, kh, sy, ph, cover_all)
assert y_h > 0, 'Height in the output should be positive.'
y_w = conv_nd.get_conv_outsize(w, kw, sx, pw, cover_all)
assert y_w > 0, 'Width in the output should be positive.'
pd = sy * (y_h - 1) + kh - h - ph
pr = sx * (y_w - 1) + kw - w - pw
pp = intel64.ideep.pooling2DParam(
(n, c, y_h, y_w),
kh, kw,
sy, sx,
ph, pw,
pd, pr,
intel64.ideep.pooling2DParam.pooling_max)
y, indexes = intel64.ideep.pooling2D.Forward(
intel64.ideep.array(x[0]), pp)
self.indexes = indexes
return y,
def forward_gpu(self, x):
if chainer.should_use_cudnn('>=auto') and 2 <= self.ndim <= 3:
# With cuDNN v3 or greater, use cuDNN implementation for inputs
# with spatial dimensions of two or more.
return self.forward_cudnn(x)
ndim = self.ndim
ksize = self.ksize
stride = self.stride
pad = self.pad
cover_all = self.cover_all
in_shape = x[0].shape
in_dtype = x[0].dtype
n, c = in_shape[:2]
dims = in_shape[2:]
ys = tuple(conv_nd.get_conv_outsize(d, k, s, p, cover_all)
for (d, k, s, p) in six.moves.zip(dims, ksize, stride, pad))
# (n, c, y_1, y_2, ..., y_N)
y_shape = (n, c) + ys
y = cuda.cupy.empty(y_shape, dtype=x[0].dtype)
indexes = cuda.cupy.empty(y_shape, dtype=numpy.int32)
in_params, out_params, operation, name = \
max_pooling_nd_kernel.MaxPoolingNDKernelForward.generate(ndim)
cuda.elementwise(in_params, out_params, operation, name)(
x[0].reduced_view(),
*(dims + ys + ksize + stride + pad + (y, indexes)))
self._in_shape = in_shape
self._in_dtype = in_dtype
self.indexes = indexes
return y,
def backward(self, indexes, gy):
return MaxPoolingNDGrad(self).apply(gy)
def get_cudnn_pool_mode(self):
if _cudnn_version >= 6000 and configuration.config.cudnn_deterministic:
return cuda.cuda.cudnn.CUDNN_POOLING_MAX_DETERMINISTIC
else:
return cuda.cuda.cudnn.CUDNN_POOLING_MAX
class MaxPoolingNDGrad(function_node.FunctionNode):
def __init__(self, func):
self.func = func
def forward_cpu(self, gy):
func = self.func
if (func.ndim == 2
and intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(gy)):
return self._forward_2d_ideep(gy)
ndim = func.ndim
ksize = func.ksize
stride = func.stride
pad = func.pad
in_shape = func._in_shape
in_dtype = func._in_dtype
indexes = func.indexes
n, c = gy[0].shape[:2]
outs = gy[0].shape[2:]
dims = in_shape[2:]
prod_outs = functools.reduce(mul, outs)
prod_ksize = functools.reduce(mul, ksize)
gcol = numpy.zeros(n * c * prod_outs * prod_ksize, dtype=in_dtype)
indexes = (
indexes.flatten()
+ numpy.arange(0, indexes.size * prod_ksize, prod_ksize))
gcol[indexes] = gy[0].ravel()
gcol_shape = (n, c) + outs + ksize
gcol = gcol.reshape(gcol_shape)
for i in six.moves.range(ndim):
gcol = numpy.swapaxes(gcol, 2 + i, ndim + 2 + i)
gx = conv_nd.col2im_nd_cpu(gcol, stride, pad, dims)
return gx,
def _forward_2d_ideep(self, gy):
func = self.func
# FIXME
# Here we expect indexes to be returned from MKL-DNN;
# otherwise, there is a dtype mismatch in reorder (int64 --> uint8).
if not isinstance(func.indexes, intel64.ideep.mdarray):
return self.forward_cpu(gy)
kh, kw = func.ksize
sy, sx = func.stride
ph, pw = func.pad
indexes = func.indexes
in_shape = func._in_shape
n, c, h, w = in_shape
y_h, y_w = gy[0].shape[2:]
x = func.get_retained_inputs()[0].array
pd = sy * (y_h - 1) + kh - h - ph
pr = sx * (y_w - 1) + kw - w - pw
pp = intel64.ideep.pooling2DParam(
func._in_shape,
kh, kw,
sy, sx,
ph, pw,
pd, pr,
intel64.ideep.pooling2DParam.pooling_max)
indexes = intel64.ideep.array(indexes)
gx = intel64.ideep.pooling2D.Backward(
intel64.ideep.array(x),
intel64.ideep.array(gy[0]),
indexes, pp)
return gx,
def forward_gpu(self, gy):
func = self.func
if func.is_cudnn_used:
return func.backward_cudnn(gy)
ndim = func.ndim
ksize = func.ksize
stride = func.stride
pad = func.pad
in_shape = func._in_shape
in_dtype = func._in_dtype
indexes = backend.from_chx(func.indexes)
n, c = in_shape[:2]
dims = in_shape[2:]
ys = gy[0].shape[2:]
gx = cuda.cupy.empty(in_shape, in_dtype)
in_params, out_params, operation, name = \
max_pooling_nd_kernel.MaxPoolingNDKernelBackward.generate(ndim)
cuda.elementwise(in_params, out_params, operation, name)(
gy[0].reduced_view(), indexes.reduced_view(),
*(dims + ys + ksize + stride + pad + (gx,)))
return gx,
def backward(self, indexes, ggx):
return MaxPoolingNDWithIndexes(self.func).apply(ggx)
class MaxPoolingNDWithIndexes(function_node.FunctionNode):
def __init__(self, func):
self.func = func
def forward_cpu(self, x):
func = self.func
ndim = func.ndim
ksize = func.ksize
stride = func.stride
pad = func.pad
cover_all = func.cover_all
indexes = backend.from_chx(func.indexes)
col = conv_nd.im2col_nd_cpu(
x[0], ksize, stride, pad,
pval=-float('inf'),
cover_all=cover_all)
n, c = col.shape[:2]
mid = (len(col.shape) - 2) // 2 + 2
ksize = col.shape[2:mid]
outs = col.shape[mid:]
# (n, c, k_1 * k_2 * ... * k_N, out_1, out_2, ..., out_N)
ksize_total = functools.reduce(mul, ksize)
col_shape = (n, c) + (ksize_total,) + outs
col = col.reshape(col_shape)
# (n, c, out_1, ..., out_N, k_1 * .. * k_N)
col_indexes = (0, 1) + tuple(six.moves.range(3, 3 + ndim)) + (2,)
col = col.transpose(col_indexes)
col = col.reshape(-1, ksize_total)
indexes = indexes.ravel()
col = col[numpy.arange(len(indexes)), indexes]
return col.reshape((n, c) + outs),
def forward_gpu(self, inputs):
func = self.func
if func.is_cudnn_used:
x = func.get_retained_inputs()[0].array
return self._forward_gpu_compute_indexes_again((x, inputs[0]))
ndim = func.ndim
ksize = func.ksize
stride = func.stride
pad = func.pad
cover_all = func.cover_all
indexes = backend.from_chx(func.indexes)
x, = inputs
in_shape = x.shape
in_dtype = x.dtype
n, c = in_shape[:2]
dims = in_shape[2:]
ys = tuple(conv_nd.get_conv_outsize(d, k, s, p, cover_all)
for (d, k, s, p) in six.moves.zip(dims, ksize, stride, pad))
# (n, c, y_1, y_2, ..., y_N)
y_shape = (n, c) + ys
y = cuda.cupy.empty(y_shape, dtype=x.dtype)
cls = max_pooling_nd_kernel.MaxPoolingNDKernelForwardWithIndexes
in_params, out_params, operation, name = cls.generate(ndim)
cuda.elementwise(in_params, out_params, operation, name)(
x.reduced_view(),
*(dims + ys + ksize + stride + pad + (indexes.reduced_view(), y)))
self._in_shape = in_shape
self._in_dtype = in_dtype
return y,
def _forward_gpu_compute_indexes_again(self, inputs):
func = self.func
ndim = func.ndim
ksize = func.ksize
stride = func.stride
pad = func.pad
cover_all = func.cover_all
x, ggx = inputs
in_shape = x.shape
in_dtype = x.dtype
n, c = in_shape[:2]
dims = in_shape[2:]
ys = tuple(conv_nd.get_conv_outsize(d, k, s, p, cover_all)
for (d, k, s, p) in six.moves.zip(dims, ksize, stride, pad))
# (n, c, y_1, y_2, ..., y_N)
y_shape = (n, c) + ys
y = cuda.cupy.empty(y_shape, dtype=x.dtype)
cls = max_pooling_nd_kernel.MaxPoolingNDKernelForwardWithIndexes1
in_params, out_params, operation, name = cls.generate(ndim)
cuda.elementwise(in_params, out_params, operation, name)(
x.reduced_view(),
*(dims + ys + ksize + stride + pad + (ggx.reduced_view(), y)))
self._in_shape = in_shape
self._in_dtype = in_dtype
return y,
def max_pooling_nd(x, ksize, stride=None, pad=0, cover_all=True,
return_indices=False):
"""N-dimensionally spatial max pooling function.
.. warning::
This feature is experimental. The interface can change in the future.
This function provides an N-dimensionally generalized version of
:func:`~chainer.functions.max_pooling_2d`. This acts similarly to
:func:`~chainer.functions.convolution_nd`, but it computes the maximum of
the input spatial patch for each channel, without any parameters, instead
of computing the inner products.
Args:
x (~chainer.Variable): Input variable.
ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k, ..., k)`` are equivalent.
stride (int or tuple of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent. If
``None`` is specified, then it uses the same stride as the pooling
window size.
pad (int or tuple of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
cover_all (bool): If ``True``, all spatial locations are pooled into
some output pixels. It may make the output size larger.
return_indices (bool): If ``True``, pooling indices array is returned
together with the output variable. The returned indices are
expected for use by :func:`chainer.functions.upsampling_nd`.
Note that cuDNN will not be used for this function if
``return_indices`` is set to ``True``, as cuDNN does not return
indices information.
Returns:
~chainer.Variable or tuple:
When ``return_indices`` is ``False`` (default), returns the output
variable.
When ``True``, returns the tuple of the output variable and
pooling indices (:ref:`ndarray`). Pooling indices will be on the
same device as the input.
"""
ndim = len(x.shape[2:])
func = MaxPoolingND(ndim, ksize, stride, pad, cover_all, return_indices)
if return_indices:
with chainer.using_config('use_cudnn', 'never'):
out = func.apply((x,))[0]
return out, func.indexes
return func.apply((x,))[0]
def max_pooling_1d(x, ksize, stride=None, pad=0, cover_all=True,
return_indices=False):
"""1-dimensional spatial max pooling function.
.. warning::
This feature is experimental. The interface can change in the future.
.. note::
This function calls :func:`~chainer.functions.max_pooling_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.max_pooling_nd`.
"""
if len(x.shape[2:]) != 1:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 1. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return max_pooling_nd(x, ksize, stride, pad, cover_all, return_indices)
def max_pooling_2d(x, ksize, stride=None, pad=0, cover_all=True,
return_indices=False):
"""Spatial max pooling function.
This function acts similarly to :func:`~chainer.functions.convolution_2d`,
but it computes the maximum of the input spatial patch for each channel,
without any parameters, instead of computing the inner products.
Args:
x (~chainer.Variable): Input variable.
ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k)`` are equivalent.
stride (int or pair of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is
specified, then it uses the same stride as the pooling window size.
pad (int or pair of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p)`` are equivalent.
cover_all (bool): If ``True``, all spatial locations are pooled into
some output pixels. It may make the output size larger.
return_indices (bool): If ``True``, pooling indices array is returned
together with the output variable. The returned indices are
expected for use by :func:`chainer.functions.upsampling_2d`.
Note that cuDNN will not be used for this function if
``return_indices`` is set to ``True``, as cuDNN does not return
indices information.
Returns:
~chainer.Variable or tuple:
When ``return_indices`` is ``False`` (default), returns the output
variable.
When ``True``, returns the tuple of the output variable and
pooling indices (:ref:`ndarray`). Pooling indices will be on the
same device as the input.
"""
if len(x.shape[2:]) != 2:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 2. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return max_pooling_nd(x, ksize, stride, pad, cover_all, return_indices)
def max_pooling_3d(x, ksize, stride=None, pad=0, cover_all=True,
return_indices=False):
"""3-dimensional spatial max pooling function.
.. warning::
This feature is experimental. The interface can change in the future.
.. note::
This function calls :func:`~chainer.functions.max_pooling_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.max_pooling_nd`.
"""
if len(x.shape[2:]) != 3:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 3. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return max_pooling_nd(x, ksize, stride, pad, cover_all, return_indices)
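# A minimal usage sketch for max_pooling_nd with ``return_indices``; the
# 3-dimensional input shape is arbitrary and used only for illustration.
if __name__ == '__main__':  # pragma: no cover
    x = numpy.random.randn(2, 3, 4, 4, 4).astype(numpy.float32)
    y, indexes = max_pooling_nd(x, ksize=2, stride=2, return_indices=True)
    print(y.shape)        # (2, 3, 2, 2, 2)
    print(indexes.shape)  # (2, 3, 2, 2, 2); argmax position inside each window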
| 17,994 | 33.605769 | 79 | py |
| chainer | chainer-master/chainer/functions/pooling/unpooling_2d.py |
import numpy
import numpy.lib.stride_tricks
try:
import cupy.lib.stride_tricks # NOQA
except Exception:
pass
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.pooling import pooling_2d
from chainer.utils import conv
from chainer.utils import type_check
class Unpooling2D(pooling_2d.Pooling2D):
"""Unpooling over a set of 2d planes."""
def __init__(self, ksize, stride=None, pad=0,
outsize=None, cover_all=True):
super(Unpooling2D, self).__init__(ksize, stride, pad, cover_all)
self.outh, self.outw = (None, None) if outsize is None else outsize
self._use_int_scale_forward = False
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(n_in == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 4,
)
if self.outh is not None:
expected_h = conv.get_conv_outsize(
self.outh, self.kh, self.sy, self.ph, cover_all=self.cover_all)
type_check.expect(x_type.shape[2] == expected_h)
if self.outw is not None:
expected_w = conv.get_conv_outsize(
self.outw, self.kw, self.sx, self.pw, cover_all=self.cover_all)
type_check.expect(x_type.shape[3] == expected_w)
def _integer_scale_forward(self, x):
xp = backend.get_array_module(x)
b, c, h, w = x.shape
bs, cs, hs, ws = x.strides
if self.ph > 0 or self.pw > 0:
x = x[:, :, self.ph // 2:-self.ph // 2, self.pw // 2:-self.pw // 2]
y = xp.lib.stride_tricks.as_strided(
x,
(b, c, h - self.ph, self.kh, w - self.pw, self.kw),
(bs, cs, hs, 0, ws, 0))
y = y.reshape((b, c, self.kh * (h - self.ph), self.kw * (w - self.pw)))
return y,
def forward(self, x):
h, w = x[0].shape[2:]
if self.outh is None:
self.outh = conv.get_deconv_outsize(
h, self.kh, self.sy, self.ph, cover_all=self.cover_all)
if self.outw is None:
self.outw = conv.get_deconv_outsize(
w, self.kw, self.sx, self.pw, cover_all=self.cover_all)
if (self.outh % (h - self.ph) == 0 and
self.outw % (w - self.pw) == 0 and
self.outh // (h - self.ph) == self.kh and
self.outw // (w - self.pw) == self.kw and
self.ph % 2 == 0 and self.pw % 2 == 0 and
self.sx == self.kh and self.sy == self.kw):
self._use_int_scale_forward = True
return self._integer_scale_forward(x[0])
xp = backend.get_array_module(*x)
col = xp.tile(x[0][:, :, None, None],
(1, 1, self.kh, self.kw, 1, 1))
if xp is numpy:
y = conv.col2im_cpu(col, self.sy, self.sx, self.ph, self.pw,
self.outh, self.outw)
else:
y = conv.col2im_gpu(col, self.sy, self.sx, self.ph, self.pw,
self.outh, self.outw)
return y,
def backward(self, indexes, grad_outputs):
return Unpooling2DGrad(self).apply(grad_outputs)
class Unpooling2DGrad(function_node.FunctionNode):
def __init__(self, unpooling2d):
self.kh = unpooling2d.kh
self.kw = unpooling2d.kw
self.sy = unpooling2d.sy
self.sx = unpooling2d.sx
self.ph = unpooling2d.ph
self.pw = unpooling2d.pw
self.outh = unpooling2d.outh
self.outw = unpooling2d.outw
self.cover_all = unpooling2d.cover_all
self._use_int_scale_forward = unpooling2d._use_int_scale_forward
def _integer_scale_forward(self, gy):
xp = backend.get_array_module(gy)
b, c, h, w = gy.shape
gx = gy.reshape((b, c, h // self.kh, self.kh, w // self.kw, self.kw))
gx = xp.rollaxis(gx, 3, 5).sum((4, 5))
if self.ph > 0 or self.pw > 0:
tmp = xp.zeros((b, c, h // 2 + self.ph, w //
2 + self.pw), dtype=gx.dtype)
tmp[:, :, self.ph // 2:-self.ph // 2,
self.pw // 2:-self.pw // 2] = gx
gx = tmp
return gx,
def forward(self, gy):
if self._use_int_scale_forward:
return self._integer_scale_forward(gy[0])
if isinstance(gy[0], cuda.ndarray):
gcol = conv.im2col_gpu(
gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all)
else:
gcol = conv.im2col_cpu(
gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all)
gx = gcol.sum(axis=(2, 3))
return gx,
def backward(self, indexes, ggx):
return Unpooling2D(
(self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),
(self.outh, self.outw), self.cover_all).apply(ggx)
def unpooling_2d(x, ksize, stride=None, pad=0, outsize=None, cover_all=True):
"""Inverse operation of pooling for 2d array.
This function acts similarly to
:class:`~functions.connection.deconvolution_2d.Deconvolution2DFunction`,
but it spreads the input 2d array's values without any parameters instead of
computing the inner products.
Args:
x (~chainer.Variable): Input variable.
ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k)`` are equivalent.
stride (int, pair of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is
specified, then it uses the same stride as the pooling window size.
pad (int or pair of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p)`` are equivalent.
outsize (None or pair of ints): Expected output size (height, width)
of array after the operation. If ``None``, the size
(height or width) is estimated from the size of input array
in first batch with
:func:`~chainer.utils.conv.get_deconv_outsize`.
If outsize is not ``None``, the result of outsize applied to
:func:`~chainer.utils.conv.get_conv_outsize` must be equal to
the shape of the 2d array in the input batch ``x``.
cover_all (bool): If ``True``, the output size may be smaller than
            it would be when ``cover_all`` is ``False``. This flag serves to
align behavior to the pooling functions which can cover all
input locations, see :func:`~chainer.functions.max_pooling_2d`
and :func:`~chainer.functions.convolution_2d`.
Returns:
~chainer.Variable: Output variable.
"""
return Unpooling2D(ksize, stride, pad, outsize, cover_all).apply((x,))[0]
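# Minimal usage sketch, assuming ``chainer`` and ``numpy`` are installed.
# With ``ksize=2`` and ``cover_all=False`` every input value is simply tiled
# into a 2x2 block, so a (1, 1, 4, 4) input becomes a (1, 1, 8, 8) output.
if __name__ == '__main__':
    import numpy as np
    import chainer.functions as F
    x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
    y = F.unpooling_2d(x, ksize=2, cover_all=False)
    print(y.shape)  # (1, 1, 8, 8)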
| 6,940
| 39.121387
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/theano/theano_function.py
|
import six
from chainer import backend
from chainer.backends import cuda
from chainer import function
from chainer.utils import type_check
class TheanoFunction(function.Function):
def __init__(self, forward_func, backward_func):
self.forward_func = forward_func
self.backward_func = backward_func
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == len(self.forward_func.indices))
for actual_type, input_info in six.moves.zip(
in_types, self.forward_func.indices):
expect_type = input_info[0].variable.type
# Theano cannot check shapes of variables
type_check.expect(
actual_type.ndim == expect_type.ndim,
actual_type.dtype == expect_type.numpy_dtype,
)
def forward(self, inputs):
gpu = backend.get_array_module(*inputs) is cuda.cupy
inputs = [cuda.to_cpu(x) for x in inputs]
outputs = self.forward_func(*inputs)
if gpu:
# TODO(unno): We can remove redundant gpu-cpu copy using
# theano.sandbox.cuda.CudaNdarray.gpudata
device = cuda.get_device_from_array(inputs)
outputs = [cuda.to_gpu(x, device) for x in outputs]
return tuple(outputs)
def backward(self, inputs, grads):
gpu = backend.get_array_module(*inputs) is cuda.cupy
# TODO(unno): We can remove redundant gpu-cpu copy using
# theano.sandbox.cuda.basic_ops.gpu_from_host
args = [cuda.to_cpu(x) for x in inputs + grads]
outputs = self.backward_func(*args)
assert len(outputs) == len(inputs)
if gpu:
# TODO(unno): We can remove redundant gpu-cpu copy using
# theano.sandbox.cuda.CudaNdarray.gpudata
device = cuda.get_device_from_array(inputs)
outputs = [cuda.to_gpu(x, device) for x in outputs]
results = []
for o, i in zip(outputs, inputs):
if i.dtype.kind != 'f':
o = None
elif o.dtype != i.dtype:
o = o.astype(i.dtype)
results.append(o)
return tuple(results)
def theano_function(forward_func, backward_func, *inputs):
return TheanoFunction(forward_func, backward_func)(*inputs)
| 2,311
| 32.507246
| 76
|
py
|
chainer
|
chainer-master/chainer/functions/theano/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainer/functions/normalization/group_normalization.py
|
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import configuration
from chainer import function_node
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cuda.cuda.cudnn
class GroupNormalization(function_node.FunctionNode):
def __init__(self, groups, eps=1e-5):
if not isinstance(groups, int):
raise TypeError('Argument: \'groups\' type must be (int).')
self.groups = groups
self.eps = eps
self.mean = None
self.inv_std = None
self.dummy_gamma = None
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, gamma_type, beta_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim >= 2,
gamma_type.ndim == 1,
beta_type.ndim == 1,
gamma_type.dtype.kind == 'f',
gamma_type.dtype == beta_type.dtype,
x_type.shape[1] == gamma_type.shape[0],
gamma_type.shape == beta_type.shape,
)
def forward(self, inputs):
if inputs[0].shape[1] % self.groups != 0:
raise ValueError('The number of channels {} is not divisible by '
'\'groups\' argument {}.'
.format(inputs[0].shape[1], self.groups))
xp = backend.get_array_module(*inputs)
if xp is cuda.cupy and chainer.should_use_cudnn('>=auto', 5000):
return self.forward_cudnn(inputs)
self.retain_inputs((0, 1))
x, gamma, beta = inputs
interm_dtype = numpy.promote_types(x.dtype, gamma.dtype)
gamma = gamma.astype(interm_dtype, copy=False)
beta = beta.astype(interm_dtype, copy=False)
orig_shape = x.shape
batch_size, channels = orig_shape[:2]
groups = self.groups
reduced_shape = (batch_size * groups, -1)
x = x.reshape(reduced_shape)
self.mean = x.mean(axis=1, dtype=interm_dtype)
x_hat = x - self.mean[:, None]
var = (x_hat * x_hat).mean(axis=1)
var += self.eps
self.inv_std = var
del var
xp.sqrt(self.inv_std, out=self.inv_std)
xp.reciprocal(self.inv_std, out=self.inv_std)
x_hat *= self.inv_std[:, None]
y = x_hat.reshape((batch_size, channels, -1))
y *= gamma[:, None]
y += beta[:, None]
y = y.reshape(orig_shape)
return y.astype(x.dtype, copy=False),
def forward_cudnn(self, inputs):
if self.eps < libcudnn.CUDNN_BN_MIN_EPSILON:
raise RuntimeError(
'cuDNN does not allow an eps value '
'less than {}.'.format(libcudnn.CUDNN_BN_MIN_EPSILON))
self.retain_inputs((0, 1))
x, gamma, beta = inputs
xp = cuda.cupy
interm_dtype = numpy.promote_types(x.dtype, gamma.dtype)
gamma = gamma.astype(interm_dtype, copy=False)
beta = beta.astype(interm_dtype, copy=False)
orig_shape = x.shape
batch_size, channels = orig_shape[:2]
groups = self.groups
cudnn_shape = (1, batch_size * groups, -1, 1)
x = x.reshape(cudnn_shape)
with x.device:
dummy_beta = xp.zeros(batch_size * groups, dtype=beta.dtype)
self.dummy_gamma = xp.ones_like(dummy_beta)
x_hat, self.mean, self.inv_std = \
cudnn.batch_normalization_forward_training(
x, self.dummy_gamma, dummy_beta, dummy_beta, dummy_beta, None,
None, self.eps, 1.0, True, libcudnn.CUDNN_BATCHNORM_SPATIAL,
configuration.config.debug)
y = x_hat.reshape((batch_size, channels, -1))
cuda.elementwise(
'T gamma, T beta', 'U y',
'y = y * gamma + beta',
'groupnorm_y')(gamma[:, None], beta[:, None], y)
y = y.reshape(orig_shape)
return y,
def backward(self, indexes, grad_outputs):
x, gamma = self.get_retained_inputs()
gy, = grad_outputs
interm_dtype = numpy.promote_types(x.dtype, gamma.dtype)
gamma = chainer.functions.cast(gamma, interm_dtype)
orig_shape = x.shape
batch_size = orig_shape[0]
groups = self.groups
reduced_shape = (batch_size * groups, -1)
x = x.reshape(reduced_shape)
x_ = chainer.functions.cast(x, interm_dtype)
x_hat, = _XHat(
self.eps, self.mean, self.inv_std,
self.dummy_gamma).apply((x_,))
gx_hat, ggamma, gbeta = _ScaleShiftGrad().apply((
x_hat, gamma, chainer.functions.cast(gy, interm_dtype)))
gx, = _XHatGrad(
self.eps, self.mean, self.inv_std,
self.dummy_gamma, x_hat.array).apply(
(x_, gx_hat))
gx = gx.reshape(orig_shape)
return chainer.functions.cast(gx, x.dtype), ggamma, gbeta
class _ScaleShiftGrad(function_node.FunctionNode):
def forward(self, inputs):
self.retain_inputs((0, 1, 2))
x_hat, gamma, gy = inputs
batch_size, channels = gy.shape[:2]
gy = gy.reshape((batch_size, channels, -1))
reduced_shape = x_hat.shape
x_hat = x_hat.reshape((batch_size, channels, -1))
gx_hat = gy * gamma[:, None]
gbeta = gy.sum(axis=(0, 2))
if backend.get_array_module(x_hat) is cuda.cupy:
ggamma = cuda.reduce(
'T gy, T x_hat', 'T ggamma',
'gy * x_hat', 'a + b', 'ggamma = a', '0',
'groupnorm_ggamma')(gy, x_hat, axis=(0, 2))
else:
ggamma = (gy * x_hat).sum(axis=(0, 2))
gx_hat = gx_hat.reshape(reduced_shape)
return gx_hat, ggamma, gbeta
def backward(self, indexes, grad_outputs):
x_hat, gamma, gy = self.get_retained_inputs()
ggx_hat, gggamma, ggbeta = grad_outputs
orig_shape = gy.shape
batch_size, channels = gy.shape[:2]
gy = gy.reshape((batch_size, channels, -1))
reduced_shape = x_hat.shape
x_hat = x_hat.reshape((batch_size, channels, -1))
ggx_hat = ggx_hat.reshape((batch_size, channels, -1))
gx_hat2 = gggamma[:, None] * gy
ggamma2 = chainer.functions.sum(ggx_hat * gy, axis=(0, 2))
ggy = (ggx_hat * gamma[:, None] + gggamma[:, None] * x_hat +
ggbeta[:, None])
gx_hat2 = gx_hat2.reshape(reduced_shape)
ggy = ggy.reshape(orig_shape)
return gx_hat2, ggamma2, ggy
class _XHat(function_node.FunctionNode):
def __init__(self, eps, mean, inv_std, dummy_gamma):
self.eps = eps
self.mean = mean
self.inv_std = inv_std
self.dummy_gamma = dummy_gamma
def forward_cpu(self, inputs):
self.retain_inputs((0,))
x, = inputs
x_hat = x - self.mean[:, None]
x_hat *= self.inv_std[:, None]
self.retain_outputs((0,))
return x_hat,
def forward_gpu(self, inputs):
self.retain_inputs((0,))
x, = inputs
x_hat = cuda.elementwise(
'T x, U mean, U inv_std', 'T x_hat',
'x_hat = (x - mean) * inv_std',
'groupnorm_x_hat')(x, self.mean[:, None], self.inv_std[:, None])
self.retain_outputs((0,))
return x_hat,
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
x_hat, = self.get_retained_outputs()
gx_hat, = grad_outputs
return _XHatGrad(
self.eps, self.mean, self.inv_std,
self.dummy_gamma, x_hat.array).apply((x, gx_hat))
class _XHatGrad(function_node.FunctionNode):
def __init__(self, eps, mean, inv_std, dummy_gamma, x_hat):
self.eps = eps
self.mean = mean
self.inv_std = inv_std
self.dummy_gamma = dummy_gamma
self.x_hat = x_hat
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
if xp is cuda.cupy and chainer.should_use_cudnn('>=auto', 5000) and \
self.dummy_gamma is not None:
return self.forward_cudnn(inputs)
self.retain_inputs((0, 1))
_, gx_hat = inputs
x_hat = self.x_hat
self.x_hat = None
gx_hat_avg = gx_hat.mean(axis=1, keepdims=True)
gx_hat_x_hat_avg = (gx_hat * x_hat).mean(axis=1, keepdims=True)
gx_std = gx_hat - gx_hat_avg - x_hat * gx_hat_x_hat_avg
gx = self.inv_std[:, None] * gx_std
self.retain_outputs((0,))
return gx,
def forward_cudnn(self, inputs):
if self.eps < libcudnn.CUDNN_BN_MIN_EPSILON:
raise RuntimeError(
'cuDNN does not allow an eps value '
'less than {}.'.format(libcudnn.CUDNN_BN_MIN_EPSILON))
self.retain_inputs((0, 1))
x, gx_hat = inputs
self.x_hat = None
# `x[None, :, :, None]` is slower because it results in a different
# strides and cuDNN doesn't recognize it as a contiguous array.
reduced_shape = x.shape
cudnn_shape = (1,) + reduced_shape + (1,)
x = x.reshape(cudnn_shape)
gx_hat = gx_hat.reshape(cudnn_shape)
gx, _, _ = cudnn.batch_normalization_backward(
x, self.dummy_gamma, gx_hat,
self.mean, self.inv_std, self.eps,
True, libcudnn.CUDNN_BATCHNORM_SPATIAL,
configuration.config.debug)
gx = gx.reshape(reduced_shape)
self.retain_outputs((0,))
return gx,
def backward(self, indexes, grad_outputs):
F = chainer.functions
x, gx_hat = self.get_retained_inputs()
gx, = self.get_retained_outputs()
ggx, = grad_outputs
x_hat, = _XHat(
self.eps, self.mean, self.inv_std,
self.dummy_gamma).apply((x,))
ret = []
if 0 in indexes:
# -- sketch of gx2, which is grad of x through gx
# gx = inv_std * gx_std
# dgx = dinv_std * gx_std + inv_std * dgx_std
#
# -gx2l = (ggx * dinv_std * gx_std) / dx
# = sum(ggx * gx_std) * (dinv_std / dx)
# = -sum(ggx * gx_std) * inv_std^2 * x_hat / N
# = -inv_std * x_hat * mean(ggx * gx)
#
# By `gx_std = gx_hat - gx_hat_avg - x_hat * gx_hat_x_hat_avg`,
# -gx_hat2r = (ggx * inv_std * dgx_std) / dx_hat
# = -inv_std * (ggx * mean(gx_hat * x_hat) +
# gx_hat * mean(ggx * x_hat))
gx2l_std = x_hat * F.mean(ggx * gx, axis=1, keepdims=True)
gx2l, = _MulInvStd(
self.eps, self.mean, self.inv_std,
self.dummy_gamma).apply((x, gx2l_std))
gx_hat2r_std = (
ggx * F.mean(gx_hat * x_hat, axis=1, keepdims=True) +
gx_hat * F.mean(ggx * x_hat, axis=1, keepdims=True))
gx_hat2r, = _MulInvStd(
self.eps, self.mean, self.inv_std,
self.dummy_gamma).apply((x, gx_hat2r_std))
gx2r, = _XHatGrad(
self.eps, self.mean, self.inv_std,
self.dummy_gamma, x_hat.array).apply((x, gx_hat2r))
gx2 = -(gx2l + gx2r)
ret.append(gx2)
if 1 in indexes:
ggx_hat, = _XHatGrad(
self.eps, self.mean, self.inv_std,
self.dummy_gamma, x_hat.array).apply((x, ggx))
ret.append(ggx_hat)
return ret
class _MulInvStd(function_node.FunctionNode):
def __init__(self, eps, mean, inv_std, dummy_gamma):
self.eps = eps
self.mean = mean
self.inv_std = inv_std
self.dummy_gamma = dummy_gamma
def forward(self, inputs):
self.retain_inputs((0,))
_, y = inputs
z = self.inv_std[:, None] * y
self.retain_outputs((0,))
return z,
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
z, = self.get_retained_outputs()
gz, = grad_outputs
x_hat, = _XHat(
self.eps, self.mean, self.inv_std,
self.dummy_gamma).apply((x,))
gx_std = x_hat * chainer.functions.mean(gz * z, axis=1, keepdims=True)
gx, = _MulInvStd(
self.eps, self.mean, self.inv_std,
self.dummy_gamma).apply((x, gx_std))
gy, = _MulInvStd(
self.eps, self.mean, self.inv_std,
self.dummy_gamma).apply((x, gz))
return -gx, gy
def group_normalization(x, groups, gamma, beta, eps=1e-5):
"""Group normalization function.
    This function implements a "group normalization"
    which divides the channels into groups, computes the mean and variance
    within each group, normalizes by these statistics, and then scales and
    shifts the result.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Batch tensors.
First dimension of this value must be the size of minibatch and
second dimension must be the number of channels.
Moreover, this value must have one or more following dimensions,
such as height and width.
groups (int):
The number of channel groups.
This value must be a divisor of the number of channels.
gamma (:class:`~chainer.Variable` or :ref:`ndarray`):
Scaling parameter.
beta (:class:`~chainer.Variable` or :ref:`ndarray`):
Shifting parameter.
eps (float): Epsilon value for numerical stability of normalization.
Returns:
~chainer.Variable: The output variable which has the same shape
as :math:`x`.
See: `Group Normalization <https://arxiv.org/abs/1803.08494>`_
.. seealso::
:class:`~chainer.links.GroupNormalization` to manage the model
parameters ``gamma`` and ``beta``.
"""
return GroupNormalization(groups, eps).apply((x, gamma, beta))[0]
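# Minimal usage sketch, assuming ``chainer`` and ``numpy`` are installed.
# The 6 channels are split into 3 groups of 2 channels, and each group is
# normalized by its own mean and variance before the per-channel ``gamma``
# and ``beta`` are applied.
if __name__ == '__main__':
    import numpy as np
    import chainer.functions as F
    x = np.random.randn(2, 6, 4, 4).astype(np.float32)
    gamma = np.ones(6, dtype=np.float32)
    beta = np.zeros(6, dtype=np.float32)
    y = F.group_normalization(x, groups=3, gamma=gamma, beta=beta)
    print(y.shape)  # (2, 6, 4, 4)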
| 13,988
| 32.953883
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/normalization/l2_normalization.py
|
import six
from chainer import backend
from chainer import function_node
import chainer.functions
from chainer import utils
from chainer.utils import type_check
class _SetItemZero(function_node.FunctionNode):
    """Write values into the masked positions of a zero-initialized array."""
def __init__(self, mask):
self.mask = mask
def forward(self, inputs):
x, = inputs
xp = backend.get_array_module(x)
y = xp.zeros(self.mask.shape, x.dtype)
y[self.mask] = x
return y,
def backward(self, indices, grad_outputs):
g, = grad_outputs
return g[self.mask],
class NormalizeL2(function_node.FunctionNode):
"""L2 normalization"""
def __init__(self, eps=1e-5, axis=1):
self.eps = eps
if isinstance(axis, six.integer_types):
axis = axis,
self.axis = axis
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type, = in_types
type_check.expect(x_type.dtype.kind == 'f')
def forward(self, inputs):
self.retain_inputs((0,))
x, = inputs
xp = backend.get_array_module(x)
        # Note: The dtype argument is passed to numpy.sqrt() because NumPy on
        # Python 2 appears to return a value cast to float32 when it is given
        # a float16 value.
norm = (xp.sqrt(xp.sum(xp.square(x), axis=self.axis, keepdims=True),
dtype=x.dtype)
+ x.dtype.type(self.eps))
return utils.force_array(x / norm),
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
gy, = grad_outputs
F = chainer.functions
norm_noeps = F.sqrt(F.sum(F.square(x), axis=self.axis, keepdims=True))
norm = norm_noeps + self.eps
x_gy_reduced = F.sum((x * gy), axis=self.axis, keepdims=True)
# L2 normalize with eps has continuous backward. However,
# the backward is not differentiable for the indices of zero vectors.
# To avoid nan in double backward, do not compute outside of mask.
mask = norm_noeps.array != 0
x_gy_reduced, = _SetItemZero(mask).apply((
x_gy_reduced[mask] / norm_noeps[mask],))
gx = gy * norm - x_gy_reduced * x
gx = gx / norm ** 2
return gx,
def normalize(x, eps=1e-5, axis=1):
"""Normalize input by L2 norm.
This function implements L2 normalization on a sample along the given
axis/axes. No reduction is done along the normalization axis.
In the case when :obj:`axis=1` and :math:`\\mathbf{x}` is a matrix of
dimension :math:`(N, K)`, where :math:`N` and :math:`K` denote mini-batch
size and the dimension of the input vectors, this function computes an
output matrix :math:`\\mathbf{y}` of dimension :math:`(N, K)` by the
following equation:
.. math::
\\mathbf{y}_i =
{\\mathbf{x}_i \\over \\| \\mathbf{x}_i \\|_2 + \\epsilon}
:obj:`eps` is used to avoid division by zero when norm of
:math:`\\mathbf{x}` along the given axis is zero.
The default value of :obj:`axis` is determined for backward compatibility.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Multi-dimensional input variable. The first
dimension is assumed to be the mini-batch dimension.
eps (float): Epsilon value for numerical stability.
axis (int or tuple of ints): Axis along which to normalize.
Returns:
~chainer.Variable: The output variable which has the same shape
as :math:`x`.
"""
return NormalizeL2(eps, axis).apply((x,))[0]
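# Minimal usage sketch, assuming ``chainer`` and ``numpy`` are installed.
# The row [3, 4] has L2 norm 5 and maps to roughly [0.6, 0.8]; the all-zero
# row stays (near) zero thanks to ``eps``.
if __name__ == '__main__':
    import numpy as np
    import chainer.functions as F
    x = np.array([[3., 4.], [0., 0.]], dtype=np.float32)
    y = F.normalize(x, axis=1)
    print(y.array)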
| 3,642
| 30.95614
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/normalization/layer_normalization.py
|
from chainer import backend
from chainer import function_node
import chainer.functions
from chainer.utils import type_check
class LayerNormalization(function_node.FunctionNode):
"""Layer normalization"""
def __init__(self, eps=1e-5):
self.eps = eps
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, gamma_type, beta_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 2,
gamma_type.ndim == 1,
beta_type.ndim == 1,
gamma_type.dtype == x_type.dtype,
beta_type.dtype == x_type.dtype,
gamma_type.shape == beta_type.shape,
)
def _compute(self, xp, x):
# xp: numpy, cupy, or chainer.functions
mu = xp.mean(x, axis=1, keepdims=True)
x_mu = x - mu
squ_x_mu = xp.square(x_mu)
var = xp.mean(squ_x_mu, axis=1, keepdims=True)
std = xp.sqrt(var + self.eps)
inv_std = 1. / std
x_hat = x_mu * inv_std
return x_mu, var, inv_std, x_hat
def forward(self, inputs):
self.retain_inputs((0, 1))
xp = backend.get_array_module(*inputs)
x, gamma, beta = inputs
x_mu, var, inv_std, x_hat = self._compute(xp, x)
scaled_x = x_hat * gamma[None, ]
shifted_x = scaled_x + beta[None, ]
return shifted_x,
def backward(self, indexes, grad_outputs):
F = chainer.functions
x, gamma = self.get_retained_inputs()
gy, = grad_outputs
x_mu, var, inv_std, x_hat = self._compute(F, x)
g_beta = F.sum(gy, axis=0)
g_scaled_x = gy
g_gamma = F.sum(g_scaled_x * x_hat, axis=0)
g_x_hat = g_scaled_x * gamma
g_inv_std = F.sum(g_x_hat * x_mu, axis=1, keepdims=True)
g_x_mu_1 = g_x_hat * inv_std
g_std = g_inv_std * (- 1. / (var + self.eps))
g_var = g_std * 0.5 * inv_std
n_units = x.shape[1]
g_squ_x_mu = g_var * (1. / n_units)
g_x_mu_2 = g_squ_x_mu * 2 * x_mu
g_x_1 = g_x_mu_1 + g_x_mu_2
g_mu = F.sum(g_x_1, axis=1, keepdims=True) * (- 1.)
g_x_2 = g_mu * (1. / n_units)
g_x = g_x_1 + g_x_2
return g_x, g_gamma, g_beta,
def layer_normalization(x, gamma, beta, eps=1e-5):
"""Layer normalization.
    This function implements a "layer normalization"
    which normalizes the input units by statistics
    that are computed along the second axis, and then
    scales and shifts them.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Batch vectors.
Shape of this value must be `(batch_size, unit_size)`,
e.g., the output of :func:`~chainer.functions.linear`.
gamma (:class:`~chainer.Variable` or :ref:`ndarray`): Scaling vectors.
        beta (:class:`~chainer.Variable` or :ref:`ndarray`): Shifting vectors.
        eps (float): Epsilon value for numerical stability.
Returns:
~chainer.Variable: The output variable which has the same shape
as :math:`x`.
See: `Layer Normalization <https://arxiv.org/abs/1607.06450>`_
.. seealso::
:class:`~chainer.links.LayerNormalization` to manage the model
parameters ``gamma`` and ``beta``.
"""
return LayerNormalization(eps).apply((x, gamma, beta))[0]
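# Minimal usage sketch, assuming ``chainer`` and ``numpy`` are installed.
# Each row of the output has approximately zero mean and unit variance
# before the per-unit scale ``gamma`` and shift ``beta`` are applied.
if __name__ == '__main__':
    import numpy as np
    import chainer.functions as F
    x = np.random.randn(4, 10).astype(np.float32)
    gamma = np.ones(10, dtype=np.float32)
    beta = np.zeros(10, dtype=np.float32)
    y = F.layer_normalization(x, gamma, beta)
    print(y.shape)  # (4, 10)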
| 3,301
| 29.293578
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/normalization/batch_renormalization.py
|
import warnings
import numpy
from chainer import backend
from chainer.backends import cuda
from chainer import configuration
from chainer import function
from chainer.functions.normalization import batch_normalization
from chainer.utils import type_check
def _xhat(x, mean, std, expander):
x_mu = x - mean[expander]
x_mu /= std[expander]
return x_mu
class BatchRenormalizationFunction(function.Function):
def __init__(self, eps=2e-5, mean=None, var=None, decay=0.9,
rmax=1, dmax=0, update_statistics=True):
self._running_mean = mean
self._running_var = var
self.rmax = rmax
self.dmax = dmax
self.r = None
self.update_statistics = update_statistics
self.eps = eps
self.decay = decay
def _warn_accessing_property(self):
warnings.warn(
'The attributes of BatchRenormalizationFunction '
'are deprecated. '
            'Consider passing update_statistics=True to '
'batch_renormalization to update running statistics.',
DeprecationWarning)
@property
def running_mean(self):
self._warn_accessing_property()
return self._running_mean
@property
def running_var(self):
self._warn_accessing_property()
return self._running_var
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, gamma_type, beta_type = in_types
M = type_check.eval(gamma_type.ndim)
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim >= gamma_type.ndim + 1,
x_type.shape[1:1 + M] == gamma_type.shape,
# TODO(tkerola): Check shape
gamma_type.dtype.kind == 'f',
gamma_type.dtype == beta_type.dtype,
gamma_type.shape == beta_type.shape,
)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
x, gamma, beta = inputs
# Note: we must be in train mode.
assert configuration.config.train
head_ndim = gamma.ndim + 1
expander = (None, Ellipsis) + (None,) * (x.ndim - head_ndim)
# NOTE(tommi): cuDNN is not used since it does not support
# batch renormalization
axis = (0,) + tuple(range(head_ndim, x.ndim))
mean = x.mean(axis=axis, dtype=gamma.dtype)
var = x.var(axis=axis, dtype=gamma.dtype)
self.std = xp.sqrt(var + self.eps, dtype=var.dtype)
running_sigma = xp.sqrt(self._running_var + self.eps,
dtype=self._running_mean.dtype)
self.r = xp.clip(self.std / running_sigma,
1.0 / self.rmax, self.rmax)
d = xp.clip(
(mean - self._running_mean) / running_sigma,
-self.dmax, self.dmax)
gamma = gamma[expander]
beta = beta[expander]
if xp is numpy:
self.x_hat = _xhat(x, mean, self.std, expander)
self.x_hat_renorm = self.x_hat * self.r[expander] + d[expander]
y = gamma * self.x_hat_renorm
y += beta
y = y.astype(dtype=x.dtype)
else:
self.x_hat, self.x_hat_renorm, y = cuda.elementwise(
'T x, U mean, U std, U gamma, U beta, U r, U d',
'U x_hat, U x_hat_renorm, T y',
'''
x_hat = (x - mean) / std;
x_hat_renorm = x_hat * r + d;
y = gamma * x_hat_renorm + beta;
''',
'brn_fwd')(
x, mean[expander], self.std[expander], gamma, beta,
self.r[expander], d[expander])
if self.update_statistics:
m = x.size // gamma[expander].size
self._running_mean *= self.decay
adjust = m / max(m - 1., 1.) # unbiased estimation
temp_ar = xp.array(mean)
temp_ar *= (1 - self.decay)
self._running_mean += temp_ar
del temp_ar
self._running_var *= self.decay
temp_ar = xp.array(var)
temp_ar *= (1 - self.decay) * adjust
self._running_var += temp_ar
del temp_ar
return y,
def backward(self, inputs, grad_outputs):
x, gamma, _ = inputs
gy = grad_outputs[0]
head_ndim = gamma.ndim + 1
expander = (None, Ellipsis) + (None,) * (x.ndim - head_ndim)
m = gamma.dtype.type(x.size // gamma.size)
axis = (0,) + tuple(range(head_ndim, x.ndim))
xp = backend.get_array_module(x)
# Note: we must be in train mode.
assert configuration.config.train
# NOTE(tommi): cuDNN is not used since it does not support
# batch renormalization
gbeta = gy.sum(axis=axis, dtype=gamma.dtype)
ggamma = (gy * self.x_hat_renorm).sum(axis=axis)
gsigma_batch = (gy * self.x_hat).sum(axis=axis)
if xp is numpy:
scale = (self.r * gamma / self.std)[expander]
gx = scale * (gy - (self.x_hat * gsigma_batch[expander] +
gbeta[expander]) / m)
gx = gx.astype(dtype=x.dtype)
else:
inv_m = numpy.float32(1) / m
gx = cuda.elementwise(
'T gy, U x_hat, U gamma, U std, U gsigma_batch, U gbeta, \
U inv_m, U r',
'T gx',
'gx = (r * gamma / std) * (gy - (x_hat * gsigma_batch + gbeta) * \
inv_m)',
'brn_bwd')(
gy, self.x_hat, gamma[expander],
self.std[expander], gsigma_batch[expander],
gbeta[expander], inv_m, self.r[expander])
return gx, ggamma, gbeta
def batch_renormalization(x, gamma, beta, rmax, dmax, eps=2e-5,
running_mean=None, running_var=None, decay=0.9,
update_statistics=False):
"""Batch renormalization function.
This is an extension of batch normalization, which ensures that the
training and inference models generate the same outputs that depend on
individual examples rather than the entire minibatch.
.. note::
This function does not perform in-place update to
``running_mean`` and ``running_var`` by default, contrary to
:func:`~chainer.functions.batch_normalization`.
If the function is called, it will not be possible to access the
updated running mean and variance statistics, because they are members
of the function object, which cannot be accessed by the caller.
If it is desired to update the running statistics, call the function
with ``update_statistics=True`` option.
.. note::
        For consistency with Batch Normalization, this function
intentionally ignores some of the theoretical flaws in Algorithm 1 of
the Batch Renormalization paper:
- ``F.batch_renormalization`` maintains the moving average of variances
:math:`\\sigma^2`, while the original paper maintains the moving
average of standard deviations :math:`\\sigma`.
- ``F.batch_renormalization`` applies Bessel's correction to update the
moving average of variances.
See: `Batch Renormalization: Towards Reducing Minibatch Dependence in
Batch-Normalized Models <https://arxiv.org/abs/1702.03275>`_
.. seealso::
:class:`~chainer.links.BatchRenormalization` to manage the model
parameters (``gamma``, ``beta``) and the statistics (``running_mean``,
``running_var``).
"""
if running_mean is None:
raise TypeError('running_mean is required')
if running_var is None:
raise TypeError('running_var is required')
return BatchRenormalizationFunction(
eps, running_mean, running_var, decay, rmax, dmax, update_statistics
)(x, gamma, beta)
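# Minimal usage sketch, assuming ``chainer`` and ``numpy`` are installed and
# that training mode is active (``chainer.config.train`` defaults to True).
# ``running_mean`` and ``running_var`` are required; they are updated in
# place only when ``update_statistics=True``.
if __name__ == '__main__':
    import numpy as np
    import chainer.functions as F
    x = np.random.randn(8, 3).astype(np.float32)
    gamma = np.ones(3, dtype=np.float32)
    beta = np.zeros(3, dtype=np.float32)
    running_mean = np.zeros(3, dtype=np.float32)
    running_var = np.ones(3, dtype=np.float32)
    y = F.batch_renormalization(
        x, gamma, beta, rmax=3.0, dmax=5.0,
        running_mean=running_mean, running_var=running_var,
        update_statistics=True)
    print(y.shape)  # (8, 3)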
def fixed_batch_renormalization(x, gamma, beta, mean, var, eps=2e-5):
warnings.warn(
'fixed_batch_renormalization is deprecated. '
'Use fixed_batch_normalization instead.',
DeprecationWarning)
with configuration.using_config('train', False):
return batch_normalization.fixed_batch_normalization(
x, gamma, beta, mean, var, eps
)
| 8,316
| 36.129464
| 82
|
py
|
chainer
|
chainer-master/chainer/functions/normalization/decorrelated_batch_normalization.py
|
import numpy
from chainer import backend
from chainer import function_node
from chainer.utils import argument
from chainer.utils import type_check
# {numpy: True, cupy: False}
_xp_supports_batch_eigh = {}
# routines for batched matrices
def _eigh(a, xp):
if xp not in _xp_supports_batch_eigh:
try:
xp.linalg.eigh(xp.ones((2, 2, 2), xp.float32))
except ValueError:
_xp_supports_batch_eigh[xp] = False
else:
_xp_supports_batch_eigh[xp] = True
if _xp_supports_batch_eigh[xp]:
return xp.linalg.eigh(a)
ws = []
vs = []
for ai in a:
w, v = xp.linalg.eigh(ai)
ws.append(w)
vs.append(v)
return xp.stack(ws), xp.stack(vs)
def _matmul(a, b, xp):
if hasattr(xp, 'matmul'): # numpy.matmul is supported from version 1.10.0
return xp.matmul(a, b)
else:
return xp.einsum('bij,bjk->bik', a, b)
def _diag(a, xp):
s0, s1 = a.shape
ret = xp.zeros((s0, s1, s1), a.dtype)
arange_s1 = numpy.arange(s1)
ret[:, arange_s1, arange_s1] = a
return ret
def _calc_axis_and_m(x_shape, batch_size):
m = batch_size
spatial_ndim = len(x_shape) - 2
spatial_axis = tuple(range(2, 2 + spatial_ndim))
for i in spatial_axis:
m *= x_shape[i]
return spatial_axis, m
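# Illustration: for ``x_shape == (N, C, H, W)`` and ``batch_size == N``,
# ``_calc_axis_and_m`` returns ``spatial_axis == (2, 3)`` and
# ``m == N * H * W`` (the number of elements aggregated per channel of each
# group).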
class DecorrelatedBatchNormalization(function_node.FunctionNode):
def __init__(self, groups=16, eps=2e-5, mean=None, projection=None,
decay=0.9):
self.groups = groups
self.running_mean = mean
self.running_projection = projection
self.eps = eps
self.decay = decay
self.axis = None
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.shape[1] % self.groups == 0,
)
type_check.expect(
x_type.ndim >= 2,
)
def forward(self, inputs):
self.retain_inputs(())
x = inputs[0]
xp = backend.get_array_module(x)
x_shape = x.shape
b, c = x_shape[:2]
g = self.groups
C = c // g
spatial_axis, m = _calc_axis_and_m(x_shape, b)
# (g, C, m)
x_hat = x.transpose((1, 0) + spatial_axis).reshape(g, C, m)
mean = x_hat.mean(axis=2, keepdims=True)
x_hat = x_hat - mean
self.eps = x.dtype.type(self.eps)
eps_matrix = self.eps * xp.eye(C, dtype=x.dtype)
cov = _matmul(
x_hat, x_hat.transpose(0, 2, 1),
xp) / x.dtype.type(m) + eps_matrix
# (g, C), (g, C, C)
self.eigvals, self.eigvectors = _eigh(cov, xp)
U = _matmul(
_diag(self.eigvals ** -0.5, xp),
self.eigvectors.transpose(0, 2, 1),
xp)
self.y_hat_pca = _matmul(U, x_hat, xp) # PCA whitening
# ZCA whitening
y_hat = _matmul(self.eigvectors, self.y_hat_pca, xp)
y = y_hat.reshape((c, b) + x_shape[2:]).transpose(
(1, 0) + spatial_axis)
# Update running statistics
if self.running_mean is not None:
mean = mean.squeeze(axis=2)
self.running_mean *= self.decay
self.running_mean += (1 - self.decay) * mean
if self.running_projection is not None:
adjust = m / max(m - 1., 1.) # unbiased estimation
self.running_projection *= self.decay
projection = _matmul(self.eigvectors, U, xp)
self.running_projection += (1 - self.decay) * adjust * projection
return y,
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
f = DecorrelatedBatchNormalizationGrad(
self.groups, self.eigvals, self.eigvectors, self.y_hat_pca)
return f.apply((gy,))
class DecorrelatedBatchNormalizationGrad(function_node.FunctionNode):
def __init__(self, groups, eigvals, eigvectors, y_hat_pca):
self.groups = groups
self.eigvals = eigvals
self.eigvectors = eigvectors
self.y_hat_pca = y_hat_pca
def forward(self, inputs):
self.retain_inputs(())
gy = inputs[0]
xp = backend.get_array_module(gy)
gy_shape = gy.shape
b, c = gy_shape[:2]
g = self.groups
C = c // g
spatial_axis, m = _calc_axis_and_m(gy_shape, b)
arange_C = numpy.arange(C)
diag_indices = slice(None), arange_C, arange_C
gy_hat = gy.transpose((1, 0) + spatial_axis).reshape(g, C, m)
eigvectors = self.eigvectors
eigvals = self.eigvals
y_hat_pca = self.y_hat_pca
gy_hat_pca = _matmul(eigvectors.transpose(0, 2, 1), gy_hat, xp)
f = gy_hat_pca.mean(axis=2, keepdims=True)
K = eigvals[:, :, None] - eigvals[:, None, :]
valid = K != 0 # to avoid nan, use eig_i != eig_j instead of i != j
K[valid] = xp.reciprocal(K[valid])
V = _diag(eigvals, xp)
V_sqrt = _diag(eigvals ** 0.5, xp)
V_invsqrt = _diag(eigvals ** -0.5, xp)
F_c = _matmul(
gy_hat_pca, y_hat_pca.transpose(0, 2, 1),
xp) / gy.dtype.type(m)
M = xp.zeros_like(F_c)
M[diag_indices] = F_c[diag_indices]
mat = K.transpose(0, 2, 1) * (
_matmul(V, F_c.transpose(0, 2, 1), xp)
+ _matmul(_matmul(V_sqrt, F_c, xp), V_sqrt, xp)
)
S = mat + mat.transpose(0, 2, 1)
R = gy_hat_pca - f + _matmul(
(S - M).transpose(0, 2, 1), y_hat_pca, xp)
gx_hat = _matmul(
_matmul(R.transpose(0, 2, 1), V_invsqrt, xp),
eigvectors.transpose(0, 2, 1), xp
).transpose(0, 2, 1)
gx = gx_hat.reshape((c, b) + gy_shape[2:]).transpose(
(1, 0) + spatial_axis)
self.retain_outputs(())
return gx,
def backward(self, inputs, grad_outputs):
# TODO(crcrpar): Implement this.
raise NotImplementedError('Double backward is not implemented for'
' decorrelated batch normalization.')
class FixedDecorrelatedBatchNormalization(function_node.FunctionNode):
def __init__(self, groups):
self.groups = groups
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, mean_type, var_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
mean_type.dtype == x_type.dtype,
var_type.dtype == x_type.dtype,
)
type_check.expect(
x_type.ndim >= 2,
)
def forward(self, inputs):
self.retain_inputs((0, 1, 2))
x, mean, projection = inputs
xp = backend.get_array_module(x)
x_shape = x.shape
b, c = x_shape[:2]
g = self.groups
C = c // g
spatial_axis, m = _calc_axis_and_m(x_shape, b)
x_hat = x.transpose((1, 0) + spatial_axis).reshape(g, C, m)
x_hat = x_hat - xp.expand_dims(mean, axis=2)
y_hat = _matmul(projection, x_hat, xp)
y = y_hat.reshape((c, b) + x_shape[2:]).transpose(
(1, 0) + spatial_axis)
return y,
def backward(self, indexes, grad_outputs):
x, mean, projection = self.get_retained_inputs()
gy, = grad_outputs
f = FixedDecorrelatedBatchNormalizationGrad(self.groups)
return f.apply((x, mean, projection, gy))
class FixedDecorrelatedBatchNormalizationGrad(function_node.FunctionNode):
def __init__(self, groups):
self.groups = groups
def forward(self, inputs):
self.retain_inputs(())
x, mean, projection, gy = inputs
xp = backend.get_array_module(x)
gy_shape = gy.shape
b, c = gy_shape[:2]
g = self.groups
C = c // g
spatial_axis, m = _calc_axis_and_m(gy_shape, b)
gy_hat = gy.transpose((1, 0) + spatial_axis).reshape(g, C, m)
x_hat = x.transpose((1, 0) + spatial_axis).reshape(g, C, m)
gy_hat_pca = _matmul(projection.transpose(0, 2, 1), gy_hat, xp)
gx = gy_hat_pca.reshape((c, b) + gy_shape[2:]).transpose(
(1, 0) + spatial_axis)
rhs = x_hat - xp.expand_dims(mean, axis=2)
gprojection = _matmul((x_hat - rhs).transpose(0, 2, 1), gy_hat, xp)
gmean = -gy_hat_pca[..., 0]
self.retain_outputs(())
return gx, gmean, gprojection
def backward(self, inputs, grad_outputs):
# TODO(crcrpar): Implement this.
raise NotImplementedError('Double backward is not implemented for'
' fixed decorrelated batch normalization.')
def decorrelated_batch_normalization(x, **kwargs):
"""decorrelated_batch_normalization(x, *, groups=16, eps=2e-5, \
running_mean=None, running_projection=None, decay=0.9)
Decorrelated batch normalization function.
It takes the input variable ``x`` and normalizes it using
batch statistics to make the output zero-mean and decorrelated.
Args:
x (:class:`~chainer.Variable`): Input variable.
groups (int): Number of groups to use for group whitening.
eps (float): Epsilon value for numerical stability.
running_mean (:ref:`ndarray`): Expected value of the mean. This is a
running average of the mean over several mini-batches using
the decay parameter. If ``None``, the expected mean is initialized
to zero.
running_projection (:ref:`ndarray`):
            Expected value of the projection matrix. This is a
            running average of the projection over several mini-batches using
            the decay parameter. If ``None``, the expected projection is
            initialized to the identity matrix.
decay (float): Decay rate of moving average. It is used during
training.
Returns:
~chainer.Variable: The output variable which has the same shape as
:math:`x`.
See: `Decorrelated Batch Normalization <https://arxiv.org/abs/1804.08450>`_
.. seealso:: :class:`~chainer.links.DecorrelatedBatchNormalization`
"""
groups, eps, running_mean, running_projection, decay = \
argument.parse_kwargs(
kwargs, ('groups', 16), ('eps', 2e-5), ('running_mean', None),
('running_projection', None), ('decay', 0.9))
f = DecorrelatedBatchNormalization(
groups, eps, running_mean, running_projection, decay)
return f.apply((x,))[0]
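# Minimal usage sketch, assuming ``numpy`` and a ``chainer`` version that
# exports ``decorrelated_batch_normalization`` from ``chainer.functions``.
# The 8 channels are whitened in 4 groups of 2 channels each.
if __name__ == '__main__':
    import numpy as np
    import chainer.functions as F
    x = np.random.randn(5, 8, 4, 4).astype(np.float32)
    y = F.decorrelated_batch_normalization(x, groups=4)
    print(y.shape)  # (5, 8, 4, 4)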
def fixed_decorrelated_batch_normalization(x, mean, projection, groups=16):
"""Decorrelated batch normalization function with fixed statistics.
This is a variant of decorrelated batch normalization, where the mean and
projection statistics are given by the caller as fixed variables. This is
used in testing mode of the decorrelated batch normalization layer, where
batch statistics cannot be used for prediction consistency.
Args:
x (:class:`~chainer.Variable`): Input variable.
mean (:class:`~chainer.Variable` or :ref:`ndarray`):
Shifting parameter of input.
projection (:class:`~chainer.Variable` or :ref:`ndarray`):
Projection matrix for decorrelation of input.
groups (int): Number of groups to use for group whitening.
Returns:
~chainer.Variable: The output variable which has the same shape as
:math:`x`.
.. seealso::
:func:`~chainer.functions.decorrelated_batch_normalization`,
:class:`~chainer.links.DecorrelatedBatchNormalization`
"""
f = FixedDecorrelatedBatchNormalization(groups)
return f.apply((x, mean, projection))[0]
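# Minimal usage sketch, assuming ``numpy`` and a ``chainer`` version that
# exports ``fixed_decorrelated_batch_normalization`` from
# ``chainer.functions``. ``mean`` has shape (groups, channels_per_group) and
# ``projection`` has shape (groups, channels_per_group, channels_per_group);
# a zero mean with identity projections leaves the input unchanged.
if __name__ == '__main__':
    import numpy as np
    import chainer.functions as F
    x = np.random.randn(5, 8, 4, 4).astype(np.float32)
    mean = np.zeros((4, 2), dtype=np.float32)
    projection = np.tile(np.eye(2, dtype=np.float32), (4, 1, 1))
    y = F.fixed_decorrelated_batch_normalization(
        x, mean, projection, groups=4)
    print(y.shape)  # (5, 8, 4, 4)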
| 11,689
| 32.591954
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/normalization/local_response_normalization.py
|
import numpy
import six
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function_node
from chainer.utils import type_check
def _cu_conv_sum(y, x, n):
# Convolutional sum
# TODO(beam2d): Use scan computation
rdim = x.size // (x.shape[0] * x.shape[1])
cuda.elementwise(
'raw T x, int32 rdim, int32 N, int32 n_', 'raw T y',
'''
int half_n = n_ / 2;
int offset = i / rdim * N * rdim + i % rdim;
float sum_part = 0;
for (int j = 0; j < N + half_n; ++j) {
if (j < N) {
sum_part += x[offset + j * rdim];
}
if (j >= n_) {
sum_part -= x[offset + (j - n_) * rdim];
}
if (j >= half_n) {
y[offset + (j - half_n) * rdim] = sum_part;
}
}
''', 'lrn_conv_sum')(x, rdim, x.shape[1], n, y,
size=x.shape[0] * rdim)
class LocalResponseNormalization(function_node.FunctionNode):
"""Cross-channel normalization function used in AlexNet."""
_use_ideep = False
def __init__(self, n=5, k=2, alpha=1e-4, beta=.75):
self.n = n
self.k = k
self.alpha = alpha
self.beta = beta
self.scale = None
self.indexes = None
self.unit_scale = None
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type, = in_types
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim >= 2,
)
def forward_cpu(self, inputs):
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs, (4,))):
self._use_ideep = True
return self.forward_ideep(inputs)
x, = inputs
self.retain_inputs((0,))
self.retain_outputs((0,))
half_n = self.n // 2
x2 = numpy.square(x)
sum_part = x2.copy()
for i in six.moves.range(1, half_n + 1):
sum_part[:, i:] += x2[:, :-i]
sum_part[:, :-i] += x2[:, i:]
self.unit_scale = self.k + self.alpha * sum_part
self.scale = self.unit_scale ** -self.beta
y = x * self.scale
return y,
def forward_ideep(self, inputs):
x, = inputs
self.retain_inputs((0,))
self.retain_outputs((0,))
param = intel64.ideep.localResponseNormalizationParam(
self.n, self.k, self.n * self.alpha, self.beta,
intel64.ideep.localResponseNormalizationParam.lrn_across_channels)
y, indexes = intel64.ideep.localResponseNormalization.Forward(
intel64.ideep.array(x), param)
self.indexes = indexes
return y,
def forward_gpu(self, inputs):
x, = inputs
self.retain_inputs((0,))
self.retain_outputs((0,))
self.y = cuda.cupy.square(x) # temporary
self.scale = cuda.cupy.empty_like(self.y)
_cu_conv_sum(self.scale, self.y, self.n)
cuda.elementwise(
'T x, T k, T alpha, T beta',
'T y, T scale',
'''scale = k + alpha * scale;
y = x * pow(scale, -beta);''',
'lrn_fwd')(x, self.k, self.alpha, self.beta,
self.y, self.scale)
return self.y,
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
y, = self.get_retained_outputs()
gy, = grad_outputs
f = LocalResponseNormalizationGrad(
self.n, self.k, self.alpha, self.beta, self._use_ideep,
self.scale, self.indexes, self.unit_scale,)
return f.apply((x, y, gy))
class LocalResponseNormalizationGrad(function_node.FunctionNode):
def __init__(self, n, k, alpha, beta, use_ideep,
scale=None, indexes=None, unit_scale=None):
self.n = n
self.k = k
self.alpha = alpha
self.beta = beta
self._use_ideep = use_ideep
self.scale = scale
self.indexes = indexes
self.unit_scale = unit_scale
def forward_cpu(self, inputs):
if self._use_ideep:
return self._backward_ideep(inputs)
x, y, gy = inputs
half_n = self.n // 2
summand = y * gy / self.unit_scale
sum_part = summand.copy()
for i in six.moves.range(1, half_n + 1):
sum_part[:, i:] += summand[:, :-i]
sum_part[:, :-i] += summand[:, i:]
gx = gy * self.scale - 2 * self.alpha * self.beta * x * sum_part
return gx,
def _backward_ideep(self, inputs):
x, y, gy = inputs
param = intel64.ideep.localResponseNormalizationParam(
self.n, self.k, self.n * self.alpha, self.beta,
intel64.ideep.localResponseNormalizationParam.lrn_across_channels
)
gx = intel64.ideep.localResponseNormalization.Backward(
intel64.ideep.array(x),
intel64.ideep.array(gy),
self.indexes,
param)
return gx,
def forward_gpu(self, inputs):
x, y, gy = inputs
summand = cuda.elementwise(
'T scale, T y, T gy', 'T summand',
'summand = y * gy / scale',
'lrn_bwd_summand')(self.scale, y, gy)
gx = cuda.cupy.empty_like(x)
_cu_conv_sum(gx, summand, self.n)
cuda.elementwise(
' T x, T gy, T scale, T beta, T coeff', 'T gx',
'gx = pow(scale, -beta) * gy - coeff * x * gx',
'lrn_bwd')(x, gy, self.scale,
self.beta, 2 * self.alpha * self.beta, gx)
return gx,
def backward(self, indexes, grad_outputs):
# No trivial way to implement double-backward for this function.
raise NotImplementedError
def local_response_normalization(x, n=5, k=2, alpha=1e-4, beta=.75):
"""Local response normalization across neighboring channels.
    This function implements normalization across channels. Let :math:`x` be an
    input image with :math:`N` channels. Then, this function computes an output
    image :math:`y` by the following formula:
.. math::
y_i = {x_i \\over \\left( k + \\
\\alpha \\sum_{j=\\max{1, i - n/2}}^{\\min{N, i + n/2}} \\
x_j^2 \\right)^\\beta}.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
n (int): Normalization window width.
k (float): Smoothing parameter.
alpha (float): Normalizer scaling parameter.
beta (float): Normalizer power parameter.
Returns:
~chainer.Variable: Output variable.
See: Section 3.3 of `ImageNet Classification with Deep Convolutional
Neural Networks <https://www.cs.toronto.edu/~fritz/absps/imagenet.pdf>`_
"""
return LocalResponseNormalization(n, k, alpha, beta).apply((x,))[0]
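# Minimal usage sketch, assuming ``chainer`` and ``numpy`` are installed.
# Each activation is divided by a power of the sum of squares over its
# ``n`` neighboring channels, as in AlexNet.
if __name__ == '__main__':
    import numpy as np
    import chainer.functions as F
    x = np.random.randn(2, 16, 8, 8).astype(np.float32)
    y = F.local_response_normalization(x, n=5)
    print(y.shape)  # (2, 16, 8, 8)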
| 6,877
| 31.29108
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/normalization/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainer/functions/normalization/batch_normalization.py
|
import warnings
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import configuration
from chainer import function_node
from chainer import memory_layouts
from chainer.utils import argument
from chainer.utils import collections_abc
from chainer.utils import type_check
import chainerx
class _BatchNormalizationImpl:
def forward(self, axis, gamma, x, x_layout, xp, expander,
beta, eps, decay, running_mean, running_var):
raise NotImplementedError()
def backward(self, axis, gamma, gy, x, x_layout, xp,
expander, mean, inv_std, eps, var, forward_data):
raise NotImplementedError()
class GeneralBatchNormalizationImpl(_BatchNormalizationImpl):
def forward(self, axis, gamma, x, x_layout, xp, expander,
beta, eps, decay, running_mean, running_var):
interm_dtype = numpy.promote_types(x.dtype, gamma.dtype)
gamma = gamma[expander].astype(interm_dtype, copy=False)
beta = beta[expander].astype(interm_dtype, copy=False)
mean, var = self.get_mean_and_var(axis, gamma,
x, xp, interm_dtype)
if xp is numpy:
inv_std = numpy.reciprocal(numpy.sqrt(
var + eps, dtype=interm_dtype))
else:
inv_std = cuda.cupyx.rsqrt(
var + eps, dtype=interm_dtype)
y = _apply_bn_fwd(xp, x, mean[expander],
inv_std[expander], gamma, beta)
# Update running statistics if given
if running_mean is not None:
m = x.size // gamma.size
adjust = m / max(m - 1., 1.) # unbiased estimation
xp = backend.get_array_module(
running_mean, running_var)
if xp is chainerx:
running_mean, running_var = backend.from_chx(
(running_mean, running_var))
if xp is numpy:
running_mean *= decay
running_mean += (1 - decay) * mean
running_var *= decay
running_var += (1 - decay) * adjust * var
else:
                # running_mean and running_var have the same dtype as x,
                # while mean and var have interm_dtype, which is promoted
                # from x.
cuda.elementwise(
'T mean, T var, U decay, U adjust',
'U r_mean, U r_var',
'''
r_mean = r_mean * decay + mean * (1 - decay);
r_var = r_var * decay + var * (1 - decay) * adjust;
''',
'update_mean_var')(mean, var, decay, adjust,
running_mean, running_var)
if xp is chainerx:
running_mean = backend.to_chx(running_mean)
running_var = backend.to_chx(running_var)
y_layout = x_layout
return y, y_layout, running_mean, running_var, mean, var, inv_std, None
def get_mean_and_var(self, axis, gamma, x, xp, interm_dtype):
mean = x.mean(axis=axis, dtype=interm_dtype)
var = x.var(axis=axis, dtype=interm_dtype)
return mean, var
def get_ggamma_and_gbeta(self, axis, gamma, gy, x_hat, xp):
gbeta = gy.sum(axis=axis, dtype=gamma.dtype)
ggamma = (gy * x_hat).sum(axis=axis, dtype=gamma.dtype)
return gbeta, ggamma
def backward(self, axis, gamma, gy, x, x_layout, xp,
expander, mean, inv_std, eps, var, forward_data):
interm_dtype = numpy.promote_types(x.dtype, gamma.dtype)
if isinstance(gy, intel64.mdarray):
# intel64.mdarray does not support dtype option in sum, so we
# convert it to numpy here.
gy = numpy.asarray(gy)
x_hat = _x_hat(x, mean[expander],
inv_std[expander])
assert x_hat.dtype == interm_dtype
gbeta, ggamma = self.get_ggamma_and_gbeta(axis, gamma, gy, x_hat, xp)
inv_m = gamma.dtype.type(1. / (x.size // gamma.size))
if xp is numpy:
if (isinstance(gamma, intel64.mdarray)
and interm_dtype != numpy.float32):
# Convert to numpy to avoid an error of "mkldnn::error"
gamma = numpy.asarray(gamma)
gx = (gamma * inv_std)[expander] * (
gy - (x_hat * ggamma[expander] + gbeta[expander]) * inv_m)
gx = gx.astype(dtype=x.dtype, copy=False)
else:
# x_hat and inv_std have a promoted type of x and gamma
gx = cuda.elementwise(
'''
T gy, X x_hat, U gamma, X inv_std, U ggamma, U gbeta,
U inv_m
''',
'T gx',
'''
gx = (gamma * inv_std) * (
gy - (x_hat * ggamma + gbeta) * inv_m)
''', 'bn_bwd')(gy, x_hat, gamma[expander],
inv_std[expander], ggamma[expander],
gbeta[expander],
inv_m).astype(x.dtype, copy=False)
return gx, None, ggamma, gbeta
class _IDeepBatchNormalizationImpl(_BatchNormalizationImpl):
def forward(self, axis, gamma, x, x_layout, xp, expander,
beta, eps, decay, running_mean, running_var):
expand_dim = False
if x.ndim == 2:
expand_dim = True
x = x[:, :, None, None]
y, mean, var, inv_std = (
intel64.ideep.batchNormalization.Forward(
intel64.ideep.array(x.astype(gamma.dtype, copy=False)),
intel64.ideep.array(gamma),
intel64.ideep.array(beta),
None,
None,
eps
))
y = y.astype(x.dtype, copy=False)
if expand_dim:
y = numpy.squeeze(y, axis=(2, 3))
# Update running statistics if given
if running_mean is not None:
m = x.size // gamma.size
adjust = m / max(m - 1., 1.)
# Update running_mean
if isinstance(running_mean, intel64.ideep.mdarray):
running_mean.inplace_axpby(
decay, (1 - decay), mean)
else:
running_mean *= decay
running_mean += mean * (1 - decay)
# Update running_var
if isinstance(running_var, intel64.ideep.mdarray):
running_var.inplace_axpby(
decay, (1 - decay), var * adjust)
else:
running_var *= decay
running_var += var * adjust * (1 - decay)
return y, None, running_mean, running_var, mean, var, inv_std, None
def backward(self, axis, gamma, gy, x, x_layout, xp,
expander, mean, inv_std, eps, var, forward_data):
expand_dim = False
if x.ndim == 2:
expand_dim = True
x = x[:, :, None, None]
gy = gy[:, :, None, None]
gx, gW = intel64.ideep.batchNormalization.Backward(
intel64.ideep.array(x.astype(gamma.dtype, copy=False)),
intel64.ideep.array(gy.astype(gamma.dtype, copy=False)),
mean,
var,
intel64.ideep.array(gamma),
eps)
ggamma, gbeta = gW[:2]
if expand_dim:
gx = numpy.squeeze(gx, axis=(2, 3))
gx = gx.astype(x.dtype, copy=False)
ggamma = ggamma.astype(gamma.dtype, copy=False)
gbeta = gbeta.astype(gamma.dtype, copy=False)
return gx, None, ggamma, gbeta
class _CudnnBatchNormalizationImpl(_BatchNormalizationImpl):
def __init__(self, is_for_conv2d, cudnn_mode):
self.is_for_conv2d = is_for_conv2d
self.cudnn_mode = cudnn_mode
def forward(self, axis, gamma, x, x_layout, xp, expander,
beta, eps, decay, running_mean, running_var):
if running_mean is not None:
mean = running_mean
var = running_var
else:
# Create dummies.
mean = xp.zeros_like(gamma, dtype=x.dtype)
var = xp.zeros_like(gamma, dtype=x.dtype)
# mean and inv_std are used as buffers to save
# intermediate results computed during forward pass. These buffers
        # are used to speed up the backward pass.
cudnn_x_layout = cuda._get_cudnn_tensor_layout_x(x_layout)
reserve_space, y, mean, inv_std = (
cudnn.batch_normalization_forward_training_ex(
x, gamma, beta, mean, var, None, None,
eps, decay, self.is_for_conv2d,
self.cudnn_mode, chainer.is_debug(),
d_layout=cudnn_x_layout))
y_layout = x_layout
return (
y, y_layout, running_mean, running_var, mean, var, inv_std,
reserve_space)
def backward(self, axis, gamma, gy, x, x_layout, xp,
expander, mean, inv_std, eps, var, forward_data):
cudnn_x_layout = cuda._get_cudnn_tensor_layout_x(x_layout)
gx, ggamma, gbeta = cudnn.batch_normalization_backward(
x, gamma, gy, mean, inv_std, eps,
self.is_for_conv2d, self.cudnn_mode,
chainer.is_debug(),
d_layout=cudnn_x_layout,
reserve_space=forward_data)
gx = gx.astype(x.dtype, copy=False)
ggamma = ggamma.astype(gamma.dtype, copy=False)
gbeta = gbeta.astype(gamma.dtype, copy=False)
gx_layout = x_layout
return gx, gx_layout, ggamma, gbeta
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cuda.libcudnn
_cudnn_version = cuda.cuda.cudnn.getVersion()
def _compute_axis(x_ndim, gamma_ndim, axis):
if axis is not None:
return axis
return (0,) + tuple(range(gamma_ndim + 1, x_ndim))
# Computes the complementary set of axes
def _compute_key_axis(x_ndim, gamma_ndim, axis):
axis = _compute_axis(x_ndim, gamma_ndim, axis)
key_axis = tuple([i for i in range(x_ndim) if i not in axis])
return key_axis
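# Illustration: for an NCHW input of shape (N, C, H, W) and a per-channel
# ``gamma`` of shape (C,), ``axis=None`` gives
# ``_compute_axis(4, 1, None) == (0, 2, 3)`` (the axes reduced over) and
# ``_compute_key_axis(4, 1, None) == (1,)`` (the axis gamma/beta align with).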
def _impl_selector(batch_norm_func, inputs):
x, gamma, _ = inputs
xp = backend.get_array_module(x)
mode = _BNMode(x, gamma, batch_norm_func.key_axis)
use_cudnn = mode.can_use_cudnn(xp)
use_ideep = mode.can_use_ideep()
if use_ideep:
return _IDeepBatchNormalizationImpl()
elif use_cudnn:
return _CudnnBatchNormalizationImpl(mode.is_for_conv2d,
mode.get_cudnn_mode())
else:
return GeneralBatchNormalizationImpl()
class BatchNormalization(function_node.FunctionNode):
mean = None
inv_std = None
forward_data = None # Arbitrary data passed from forward to backward
def __init__(self, eps=2e-5, mean=None, var=None, decay=0.9, axis=None,
impl_selector=_impl_selector):
self.running_mean = mean
self.running_var = var
        # Note: cuDNN requires that eps be greater than or equal to
# CUDNN_BN_MIN_EPSILON. Otherwise, an error will occur.
# See CUDNN_BN_MIN_EPSILON value in cudnn.h to verify minimum allowable
# value.
self.eps = eps
if chainer.should_use_cudnn('>=auto'):
if eps < libcudnn.CUDNN_BN_MIN_EPSILON:
raise RuntimeError(
'cuDNN does not allow an eps value '
'less than {}.'.format(libcudnn.CUDNN_BN_MIN_EPSILON))
self.decay = decay
if isinstance(axis, collections_abc.Sequence):
for i in range(1, len(axis)):
if axis[i - 1] >= axis[i]:
msg = 'numbers in axis must be sorted in ascending order'
raise RuntimeError(msg)
elif isinstance(axis, six.integer_types):
axis = axis,
elif axis is not None:
raise RuntimeError('axis must be int, tuple of int or None')
self.axis = axis
self._impl_selector = impl_selector
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, gamma_type, beta_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
gamma_type.dtype.kind == 'f',
gamma_type.dtype == beta_type.dtype,
gamma_type.shape == beta_type.shape,
)
_x_ndim = type_check.eval(x_type.ndim)
_gamma_ndim = type_check.eval(gamma_type.ndim)
_axis = _compute_axis(_x_ndim, _gamma_ndim, self.axis)
type_check.expect(
x_type.ndim >= len(_axis),
)
_key_axis = _compute_key_axis(_x_ndim, _gamma_ndim, _axis)
type_check.expect(
gamma_type.ndim == len(_key_axis),
)
for i in range(len(_key_axis)):
type_check.expect(
x_type.shape[_key_axis[i]] == gamma_type.shape[i],
)
def check_layout_forward(self, inputs):
# TODO(niboshi): Write input layout check
pass
def forward_chainerx(self, inputs):
# TODO(niboshi): Support conditions implemented as fallback
# Running statistics are required.
if self.running_mean is None or self.running_var is None:
return chainer.Fallback
# Fall back if the running statistics are non-contiguous CUDA arrays
# since they are not supported by cuDNN.
# Assert that both running statistics belong to the same backend.
if self.running_mean.device.backend.name == 'cuda' and not (
self.running_mean.is_contiguous
and self.running_var.is_contiguous):
return chainer.Fallback
x, gamma, beta = inputs
axis_chx = _chainerx_compute_axis(x.ndim, gamma.ndim, self.axis)
if not _chainerx_is_supported(x, axis_chx):
return chainer.Fallback
y = chainerx.batch_norm(
x, gamma, beta, self.running_mean, self.running_var,
self.eps, self.decay, axis_chx)
return y,
def forward(self, inputs):
self.retain_inputs((0, 1))
x, gamma, beta = inputs
x_layout, _, _ = self.input_layouts
self.output_layouts = (x_layout,)
self.axis = _compute_axis(x.ndim, gamma.ndim, self.axis)
self.key_axis = _compute_key_axis(x.ndim, gamma.ndim, self.axis)
x_shape = memory_layouts._transpose_shape(x.shape, x_layout, None)
if all(x_shape[i] == 1 for i in self.axis):
if 0 in self.axis:
warnings.warn(
'A batch with no more than one sample has been given'
' to F.batch_normalization. F.batch_normalization'
' will always output a zero tensor for such batches.'
' This could be caused by incorrect configuration in'
' your code (such as running evaluation while'
' chainer.config.train=True),'
' but could also happen in the last batch of training'
                    ' if a non-repeating iterator is used.',
UserWarning)
else:
warnings.warn(
'F.batch_normalization received a batch with single'
' dimensions along all axes that are used for aggregating'
' statistics. F.batch_normalization'
' will always output a zero tensor for such batches.',
UserWarning)
# TODO(niboshi): Refactor calculation of expander and axis into a
# function and call it just before they are used.
# expander inserts singleton dimensions to gamma and beta so that they
# can be broadcasted with x.
expander = [None for _ in range(x.ndim)]
for i in self.key_axis:
expander[i] = slice(None)
expander = tuple(expander)
expander = memory_layouts._transpose_shape(expander, None, x_layout)
self.expander = expander
xp = backend.get_array_module(x)
self._impl = self._impl_selector(self, inputs)
raw_axis = self.axis
t = chainer.memory_layouts._get_layout_transpose_axes(
x.ndim, None, x_layout, True)
if t is not None:
raw_axis = tuple(t[i] for i in self.axis)
(
y, y_layout, self.running_mean, self.running_var,
self.mean, self.var, self.inv_std,
self.forward_data) = (
self._impl.forward(
axis=raw_axis, gamma=gamma, x=x, x_layout=x_layout,
xp=xp, expander=expander, beta=beta, eps=self.eps,
decay=self.decay,
running_mean=self.running_mean,
running_var=self.running_var))
self.output_layouts = (y_layout,)
return y,
def backward(self, indexes, grad_outputs):
x, gamma = self.get_retained_inputs()
gy, = grad_outputs
if isinstance(self._impl, _IDeepBatchNormalizationImpl):
assert self.var is not None
var = self.var
else:
var = None
f = BatchNormalizationGrad(
self.eps, self.expander,
self.axis,
self.mean, var,
self.inv_std, self.key_axis,
self._impl,
self.forward_data,
)
return f.apply((x, gamma, gy))
class BatchNormalizationGrad(function_node.FunctionNode):
def __init__(self, eps, expander, axis, mean, var,
inv_std, key_axis, impl, forward_data):
self.eps = eps
self.expander = expander
self.axis = axis
self.mean = mean
self.var = var # Only used in iDeep implementation
self.inv_std = inv_std
self.key_axis = key_axis
self._impl = impl
self.forward_data = forward_data
def check_layout_forward(self, inputs):
pass
def forward(self, inputs):
self.retain_inputs((0, 1, 2))
x, gamma, gy = inputs
x_layout, _, _ = self.input_layouts
self.output_layouts = (x_layout, None, None)
expander = self.expander
xp = backend.get_array_module(x)
gx, gx_layout, ggamma, gbeta = self._impl.backward(
self.axis, gamma, gy, x, x_layout, xp, expander,
self.mean, self.inv_std, self.eps, self.var, self.forward_data)
self.retain_inputs((0, 1, 2))
self.retain_outputs((0, 1))
self.output_layouts = (gx_layout, None, None)
return gx, ggamma, gbeta
def backward(self, indexes, grad_outputs):
F = chainer.functions
expander = self.expander
x, gamma, gy = self.get_retained_inputs()
gx1, ggamma1 = self.get_retained_outputs()
ggx1, gggamma1, ggbeta1 = grad_outputs
xp = backend.get_array_module(x)
original_gamma_dtype = gamma.dtype
if gamma.dtype != x.dtype:
gamma = F.cast(gamma, x.dtype)
ggamma1 = F.cast(ggamma1, x.dtype)
gggamma1 = F.cast(gggamma1, x.dtype)
ggbeta1 = F.cast(ggbeta1, x.dtype)
# auxiliary values
inv_m = gamma.dtype.type(1. / (x.size // gamma.size))
r = 0 if ggx1 is None else F.sum(gx1 * ggx1, axis=self.axis)
coeff = gamma * self.inv_std
coeff_m = coeff * inv_m
x_hat = _x_hat(x, self.mean[expander], self.inv_std[expander])
# handle None in output gradients
ggx1 = _zero_if_none(xp, ggx1, x.shape, x.dtype)
gggamma1 = _zero_if_none(xp, gggamma1, gamma.shape, gamma.dtype)
ggbeta1 = _zero_if_none(xp, ggbeta1, gamma.shape, gamma.dtype)
gggamma2 = gggamma1 - coeff_m * F.sum(x_hat * ggx1, axis=self.axis)
ggbeta2 = ggbeta1 - coeff_m * F.sum(ggx1, axis=self.axis)
ggamma2 = r / gamma
gx_hat2 = (gggamma2[expander] * gy -
(coeff_m * ggamma1)[expander] * ggx1)
gstd2 = -self.inv_std * (r + F.sum(x_hat * gx_hat2, axis=self.axis))
gmean2 = -self.inv_std * F.sum(gx_hat2, axis=self.axis)
gx2 = self.inv_std[expander] * gx_hat2 + inv_m * (
gmean2[expander] + x_hat * gstd2[expander])
ggy2 = (gggamma2[expander] * x_hat + ggbeta2[expander]
+ coeff[expander] * ggx1)
gx2 = chainer.functions.cast(gx2, x.dtype)
ggy2 = chainer.functions.cast(ggy2, gy.dtype)
ggamma2 = chainer.functions.cast(ggamma2, original_gamma_dtype)
return gx2, ggamma2, ggy2
class FixedBatchNormalization(function_node.FunctionNode):
inv_std = None
inv_var = None
def __init__(self, eps=2e-5, axis=None):
        # Note: cuDNN requires that eps be greater than or equal to
# CUDNN_BN_MIN_EPSILON. Otherwise, an error will occur.
# See CUDNN_BN_MIN_EPSILON value in cudnn.h to verify minimum allowable
# value.
self.eps = eps
if chainer.should_use_cudnn('>=auto'):
if eps < libcudnn.CUDNN_BN_MIN_EPSILON:
raise RuntimeError(
'cuDNN does not allow an eps value '
'less than {}.'.format(libcudnn.CUDNN_BN_MIN_EPSILON))
if isinstance(axis, collections_abc.Sequence):
for i in range(1, len(axis)):
if axis[i - 1] >= axis[i]:
msg = 'numbers in axis must be sorted in ascending order'
raise RuntimeError(msg)
elif isinstance(axis, six.integer_types):
axis = axis,
elif axis is not None:
raise RuntimeError('axis must be int, tuple of int or None')
self.axis = axis
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 5)
x_type, gamma_type, beta_type, mean_type, var_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
# TODO(beam2d): Check shape
gamma_type.dtype.kind == 'f',
beta_type.dtype == gamma_type.dtype,
mean_type.dtype == gamma_type.dtype,
var_type.dtype == gamma_type.dtype,
beta_type.shape == gamma_type.shape,
mean_type.shape == gamma_type.shape,
var_type.shape == gamma_type.shape,
)
_x_ndim = type_check.eval(x_type.ndim)
_gamma_ndim = type_check.eval(gamma_type.ndim)
_axis = _compute_axis(_x_ndim, _gamma_ndim, self.axis)
type_check.expect(
x_type.ndim >= len(_axis),
)
_key_axis = _compute_key_axis(_x_ndim, _gamma_ndim, _axis)
type_check.expect(
gamma_type.ndim == len(_key_axis),
)
for i in range(len(_key_axis)):
type_check.expect(
x_type.shape[_key_axis[i]] == gamma_type.shape[i],
)
def forward_chainerx(self, inputs):
# TODO(niboshi): Support conditions implemented as fallback
# TODO(niboshi): chainerx.fixed_batch_norm does not support backward
if chainer.config.enable_backprop:
return chainer.Fallback
x, gamma, beta, mean, var = inputs
axis_chx = _chainerx_compute_axis(x.ndim, gamma.ndim, self.axis)
if not _chainerx_is_supported(x, axis_chx):
return chainer.Fallback
y = chainerx.fixed_batch_norm(
x, gamma, beta, mean, var, self.eps, axis_chx)
return y,
def forward(self, inputs):
self.retain_inputs((0, 1, 3, 4))
x, gamma, beta, mean, var = inputs
xp = backend.get_array_module(x)
self.axis = _compute_axis(x.ndim, gamma.ndim, self.axis)
self.key_axis = _compute_key_axis(x.ndim, gamma.ndim, self.axis)
# expander inserts singleton dimensions to gamma and beta so that they
# can be broadcasted with x.
expander = [None for _ in range(x.ndim)]
for i in self.key_axis:
expander[i] = slice(None)
expander = tuple(expander)
self.expander = expander
mode = _BNMode(x, gamma, self.key_axis, inference=True)
if mode.can_use_ideep():
# TODO(niboshi): Refactor iDeep part into a separate method
expand_dim = False
if x.ndim == 2:
expand_dim = True
x = x[:, :, None, None]
y, = intel64.ideep.batchNormalization.Forward(
intel64.ideep.array(x.astype(gamma.dtype, copy=False)),
intel64.ideep.array(gamma),
intel64.ideep.array(beta),
intel64.ideep.array(mean),
intel64.ideep.array(var),
self.eps
)
y = y.astype(x.dtype, copy=False)
if expand_dim:
y = numpy.squeeze(y, axis=(2, 3))
# lazy
self.inv_var = None
self.inv_std = None
elif mode.can_use_cudnn(xp):
y = cudnn.batch_normalization_forward_inference(
x, gamma, beta, mean, var, self.eps,
mode.is_for_conv2d, mode.get_cudnn_mode())
else:
# Generic CPU and GPU implementation
gamma = gamma[expander]
beta = beta[expander]
var = var + self.eps
self.inv_var = xp.reciprocal(var)
self.inv_std = xp.sqrt(self.inv_var, dtype=self.inv_var.dtype)
y = _apply_bn_fwd(xp, x, mean[expander], self.inv_std[expander],
gamma, beta)
return y,
def backward(self, indexes, grad_outputs):
x, gamma, mean, var = self.get_retained_inputs()
gy, = grad_outputs
f = FixedBatchNormalizationGrad(
self.eps, self.expander, self.axis, self.inv_std, self.inv_var)
return f.apply((x, gamma, mean, var, gy))
class FixedBatchNormalizationGrad(function_node.FunctionNode):
def __init__(self, eps, expander, axis, inv_std, inv_var):
self.eps = eps
self.expander = expander
self.axis = axis
self.inv_std = inv_std # may be None
self.inv_var = inv_var # may be None
def forward(self, inputs):
self.retain_inputs((0, 1, 2, 4))
x, gamma, mean, var, gy = inputs
expander = self.expander
xp = backend.get_array_module(x)
if self.inv_std is None or self.inv_var is None:
self.inv_var = xp.reciprocal(var + self.eps)
self.inv_std = xp.sqrt(self.inv_var, dtype=self.inv_var.dtype)
self.gamma_over_std = gamma * self.inv_std
x_hat = _x_hat(x, mean[expander], self.inv_std[expander])
gx = self.gamma_over_std[expander] * gy
gbeta = gy.sum(axis=self.axis, dtype=gamma.dtype)
ggamma = (x_hat * gy).sum(axis=self.axis)
gmean = -self.gamma_over_std * gbeta
gvar = - 0.5 * self.inv_var * (
gamma * ggamma).astype(var.dtype, copy=False)
gx = gx.astype(dtype=x.dtype)
self.retain_outputs((0, 1, 2, 3, 4))
return gx, ggamma, gbeta, gmean, gvar
def backward(self, indexes, grad_outputs):
F = chainer.functions
x, gamma, mean, gy = self.get_retained_inputs()
ggx1, gggamma1, ggbeta1, ggmean1, ggvar1 = grad_outputs
gx1, ggamma1, gbeta1, gmean1, gvar1 = self.get_retained_outputs()
# Handle None in output gradients.
xp = backend.get_array_module(x)
ggx1 = _zero_if_none(xp, ggx1, x.shape, x.dtype)
gggamma1 = _zero_if_none(xp, gggamma1, gamma.shape, gamma.dtype)
ggbeta1 = _zero_if_none(xp, ggbeta1, gamma.shape, gamma.dtype)
ggmean1 = _zero_if_none(xp, ggmean1, mean.shape, mean.dtype)
ggvar1 = _zero_if_none(xp, ggvar1, mean.shape, mean.dtype)
if x.dtype != gamma.dtype:
gamma = F.cast(gamma, x.dtype)
ggamma1 = F.cast(ggamma1, x.dtype)
gggamma1 = F.cast(gggamma1, x.dtype)
gbeta1 = F.cast(gbeta1, x.dtype)
ggbeta1 = F.cast(ggbeta1, x.dtype)
mean = F.cast(mean, x.dtype)
gmean1 = F.cast(gmean1, x.dtype)
ggmean1 = F.cast(ggmean1, x.dtype)
gvar1 = F.cast(gvar1, x.dtype)
ggvar1 = F.cast(ggvar1, x.dtype)
expander = self.expander
x_hat = _x_hat(x, mean[expander], self.inv_std[expander])
tmp = -0.5 * ggvar1
gamma_over_var = gamma * self.inv_var
g_gamma_over_var = tmp * ggamma1
gggamma2 = gggamma1 + tmp * gamma_over_var
gx_hat = gy * gggamma2[expander]
gx2 = self.inv_std[expander] * gx_hat
gmean2 = -self.inv_std * F.sum(gx_hat, axis=self.axis)
g_gamma_over_std = F.sum(ggx1 * gy, axis=self.axis) - ggmean1 * gbeta1
ggbeta2 = ggbeta1 - ggmean1 * self.gamma_over_std
ggy2 = (gggamma2[expander] * x_hat + ggbeta2[expander]
+ self.gamma_over_std[expander] * ggx1)
ggamma2 = (self.inv_var * g_gamma_over_var
+ self.inv_std * g_gamma_over_std)
gvar2 = -(ggamma2 * gamma_over_var + 0.5 * self.inv_var * (
F.sum(x_hat * gx_hat, axis=self.axis)
- self.gamma_over_std * g_gamma_over_std))
gx2 = chainer.functions.cast(gx2, x.dtype)
ggy2 = chainer.functions.cast(ggy2, gy.dtype)
return gx2, ggamma2, gmean2, gvar2, ggy2
class _BNMode(object):
def __init__(self, x, gamma, key_axis, inference=False):
is_gamma_1d = gamma.ndim == 1
# cuDNN only supports these tensor dimensions because they are
# the most commonly used. If there is a need to support other
# dimensions with cuDNN, we could consider reshaping the input
# into a 2-dim array with channels as second dim and m=<product
# of all dimensions except the 2nd dimension> as the first
# dimension.
        # iDeep is only supported when x.dtype == gamma.dtype
self.is_for_conv2d = is_gamma_1d and x.ndim == 4 and key_axis[0] == 1
self.is_for_linear = is_gamma_1d and key_axis[0] == x.ndim - 1
self.cudnn_dim_ok = self.is_for_conv2d or self.is_for_linear
self.cudnn_dtype_ok = self.is_for_conv2d or (x.dtype != numpy.float16)
self.ideep_ok = (
x.dtype == gamma.dtype
and is_gamma_1d
and intel64.inputs_all_ready((x,)))
self.inference = inference
def get_cudnn_mode(self):
assert self.cudnn_dim_ok
if self.is_for_linear:
return libcudnn.CUDNN_BATCHNORM_PER_ACTIVATION
if (not self.inference and _cudnn_version >= 7000 and
configuration.config.cudnn_fast_batch_normalization):
return libcudnn.CUDNN_BATCHNORM_SPATIAL_PERSISTENT
return libcudnn.CUDNN_BATCHNORM_SPATIAL
def can_use_ideep(self):
return self.ideep_ok and intel64.should_use_ideep('>=auto')
def can_use_cudnn(self, xp):
# TODO(bkvogel): Check for float16 support again in next cuDNN version.
# cuDNN v5 batch normalization does not seem to support float16.
return (xp is cuda.cupy and
chainer.should_use_cudnn('>=auto', 5000) and
self.cudnn_dim_ok and
self.cudnn_dtype_ok)
def _x_hat(x, mean, inv_std):
x_mu = x - mean
x_mu *= inv_std
return x_mu
def _chainerx_compute_axis(x_ndim, gamma_ndim, axis):
# Returns processed axis for ChainerX.
axis_chx = (
None if axis is None
else axis if isinstance(axis, tuple)
else (axis,))
axis_chx = _compute_axis(x_ndim, gamma_ndim, axis_chx)
assert isinstance(axis_chx, tuple)
return axis_chx
def _chainerx_is_supported(x, axis_chx):
# Checks if the input configuration is supported in ChainerX
device = x.device
if device.backend.name == 'cuda':
# cuDNN batch norm restriction
return (x.ndim, axis_chx) in [
(4, (0, 2, 3)),
(5, (0, 2, 3, 4)),
]
return True
def _apply_bn_fwd(xp, x, mean, inv_std, gamma, beta):
# NOTE: all arguments should be broadcasted to x.shape
# (mean, inv_std, gamma, and beta have to already be expanded)
if xp is numpy:
x_hat = _x_hat(x, mean, inv_std)
y = x_hat * gamma
y += beta
y = y.astype(x.dtype)
else:
y = cuda.elementwise(
'T x, U mean, U inv_std, U gamma, U beta', 'T y',
'y = gamma * (x - mean) * inv_std + beta', 'bn_fwd'
)(x, mean, inv_std, gamma, beta)
return y
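# Illustrative sketch, for exposition only: it is not part of the original
# module. It restates what ``_apply_bn_fwd`` computes on the generic path,
# y = gamma * (x - mean) * inv_std + beta, with every argument already
# broadcast to ``x.shape``. The helper name and the shapes are assumptions
# chosen for the example; ``numpy`` is imported at the top of this module.
def _example_apply_bn_fwd():
    x = numpy.random.randn(4, 3).astype(numpy.float32)
    mean = x.mean(axis=0, keepdims=True)
    inv_std = 1.0 / numpy.sqrt(x.var(axis=0, keepdims=True) + 2e-5)
    gamma = numpy.ones((1, 3), numpy.float32)
    beta = numpy.zeros((1, 3), numpy.float32)
    y = _apply_bn_fwd(numpy, x, mean, inv_std, gamma, beta)
    expected = gamma * (x - mean) * inv_std + beta
    assert numpy.allclose(y, expected, atol=1e-5)
    return y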
def _zero_if_none(xp, x, shape, dtype):
# TODO(Tokui): Return broadcasted 0 instead of a zeroed array.
if x is None:
return xp.zeros(shape, dtype=dtype)
return x
def batch_normalization(x, gamma, beta, **kwargs):
"""batch_normalization(x, gamma, beta, eps=2e-5, running_mean=None, \
running_var=None, decay=0.9, axis=None)
Batch normalization function.
It takes the input variable ``x`` and two parameter variables ``gamma`` and
``beta``. The parameter variables must both have the same dimensionality,
which is referred to as the channel shape. This channel shape corresponds
to the dimensions in the input which are not averaged over. Since the
first dimension of the input corresponds to the batch size, the second
dimension of ``x`` will correspond to the first dimension of the channel
shape, the third dimension of ``x`` will correspond to the second channel
dimension (if it exists) and so on. Therefore, the dimensionality of the
input must be at least one plus the number of channel dimensions. The
total effective "batch size" will then be considered to be the product of
all dimensions in ``x`` except for the channel dimensions.
As an example, if the input is four dimensional and the parameter
variables are one dimensional, then it is assumed that the first
dimension of the input is the batch size, the second dimension is the
channel size, and the remaining two dimensions are considered
to be spatial dimensions that will be averaged over along with the
batch size in the batch normalization computations. That is,
the total batch size will be considered to be the product of all
input dimensions except the second dimension.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
gamma (:class:`~chainer.Variable` or :ref:`ndarray`): Scaling parameter
of normalized data.
beta (:class:`~chainer.Variable` or :ref:`ndarray`): Shifting parameter
of scaled normalized data.
eps (float): Epsilon value for numerical stability.
running_mean (:ref:`ndarray`):
Running average of the mean. This is a running average of
the mean over several mini-batches using the decay parameter.
The function takes a previous running average, and updates
the array in-place by the new running average.
If ``None``, the running average is not computed. If this is
            ``None``, then ``running_var`` must also be ``None``.
running_var (:ref:`ndarray`):
Running average of the variance. This is a running average of
the variance over several mini-batches using the decay parameter.
The function takes a previous running average, and updates
the array in-place by the new running average.
If ``None``, the running average is not computed. If this is
``None``, then ``running_mean`` must also be ``None``.
decay (float): Decay rate of moving average. It is used during
training.
axis (int, tuple of int or None): Axis over which normalization is
performed. When axis is ``None``, it is determined from input
dimensions. For example, if ``x.ndim`` is 4, axis becomes (0, 2, 3)
and normalization is performed over 0th, 2nd and 3rd axis of input.
If it is 2, axis becomes (0) and normalization is performed
over 0th axis of input. When a tuple of int is given to this
            option, the numbers in the tuple must be sorted in ascending
order. For example, (0, 2) is OK, but (2, 0) is not.
See: `Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_
.. seealso::
:class:`~chainer.links.BatchNormalization` to manage the model
parameters (``gamma``, ``beta``) and the statistics (``running_mean``,
``running_var``).
"""
eps, running_mean, running_var, decay, axis = argument.parse_kwargs(
kwargs, ('eps', 2e-5), ('running_mean', None),
('running_var', None), ('decay', 0.9), ('axis', None),
train='train argument is not supported anymore. '
'Use chainer.using_config')
return BatchNormalization(eps, running_mean, running_var, decay,
axis).apply((x, gamma, beta))[0]
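# Illustrative sketch, for exposition only: it is not part of the original
# module. It shows how ``axis=None`` is resolved for a 4-d input: statistics
# are aggregated over axes (0, 2, 3), i.e. per channel. The helper name and
# the shapes are assumptions chosen for the example.
def _example_batch_normalization():
    import numpy as np
    import chainer.functions as F
    x = np.random.randn(8, 3, 32, 32).astype(np.float32)  # NCHW input
    gamma = np.ones(3, np.float32)  # one scale per channel
    beta = np.zeros(3, np.float32)  # one shift per channel
    y = F.batch_normalization(x, gamma, beta, eps=2e-5)
    # Each channel is normalized over the batch and spatial axes, so the
    # per-channel mean of the output is ~0 and its variance ~1.
    assert y.shape == x.shape
    return y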
def fixed_batch_normalization(x, gamma, beta, mean, var, eps=2e-5, axis=None):
"""Batch normalization function with fixed statistics.
This is a variant of batch normalization, where the mean and variance
statistics are given by the caller as fixed variables. This is
    used in testing mode of the batch normalization layer, where batch
    statistics cannot be used if predictions are to be consistent.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
gamma (:class:`~chainer.Variable` or :ref:`ndarray`): Scaling parameter
of normalized data.
beta (:class:`~chainer.Variable` or :ref:`ndarray`): Shifting parameter
of scaled normalized data.
mean (:class:`~chainer.Variable` or :ref:`ndarray`): Shifting parameter
of input.
var (:class:`~chainer.Variable` or :ref:`ndarray`): Square of scaling
parameter of input.
eps (float): Epsilon value for numerical stability.
axis (int, tuple of int or None): Axis over which normalization is
performed. When axis is ``None``, it is determined from input
            dimensions. For example, if ``x.ndim`` is 4, axis becomes (0, 2, 3)
and normalization is performed over 0th, 2nd and 3rd axis of input.
If it is 2, axis becomes (0) and normalization is performed
over 0th axis of input. When a tuple of int is given to this
            option, the numbers in the tuple must be sorted in ascending
order. For example, (0, 2) is OK, but (2, 0) is not.
.. seealso::
:func:`~chainer.functions.batch_normalization`,
:class:`~chainer.links.BatchNormalization`
"""
return FixedBatchNormalization(eps, axis).apply((x, gamma, beta, mean,
var))[0]
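# Illustrative sketch, for exposition only: it is not part of the original
# module. ``fixed_batch_normalization`` normalizes with caller-supplied
# statistics (e.g. running averages accumulated at training time) instead of
# batch statistics. The helper name, shapes and values are assumptions.
def _example_fixed_batch_normalization():
    import numpy as np
    import chainer.functions as F
    x = np.random.randn(2, 3).astype(np.float32)
    gamma = np.ones(3, np.float32)
    beta = np.zeros(3, np.float32)
    mean = np.zeros(3, np.float32)  # e.g. a stored running mean
    var = np.ones(3, np.float32)    # e.g. a stored running variance
    y = F.fixed_batch_normalization(x, gamma, beta, mean, var, eps=2e-5)
    # With zero mean, unit variance, gamma=1 and beta=0 the output is the
    # input itself, scaled by 1 / sqrt(1 + eps).
    assert np.allclose(y.array, x / np.sqrt(1.0 + 2e-5), atol=1e-5)
    return y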
| 38,911 | 38.186304 | 79 | py |
| chainer | chainer-master/chainer/functions/rnn/lstm.py |
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function
from chainer import function_node
from chainer.utils import type_check
import chainerx
def _extract_gates(x):
r = x.reshape((len(x), x.shape[1] // 4, 4) + x.shape[2:])
return [r[:, :, i] for i in six.moves.range(4)]
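# Illustrative sketch, for exposition only: it is not part of the original
# module. ``_extract_gates`` slices the input thinly: for a cell size of N
# the columns of ``x`` are laid out as [a0, i0, f0, o0, a1, i1, ...], so
# gate ``k`` is ``x[:, k::4]``. The helper name is an assumption.
def _example_extract_gates():
    x = numpy.arange(8, dtype=numpy.float32).reshape(1, 8)  # batch 1, 2 units
    a, i, f, o = _extract_gates(x)
    assert numpy.array_equal(a, x[:, 0::4])  # columns 0 and 4
    assert numpy.array_equal(o, x[:, 3::4])  # columns 3 and 7
    return a, i, f, o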
def _sigmoid(x, xp=numpy):
half = x.dtype.type(0.5)
return xp.tanh(x * half) * half + half
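# Illustrative sketch, for exposition only: it is not part of the original
# module. ``_sigmoid`` relies on the identity
# sigmoid(x) = 0.5 * tanh(0.5 * x) + 0.5, which avoids evaluating exp() on
# large inputs. The helper name is an assumption.
def _example_sigmoid_identity():
    x = numpy.linspace(-5, 5, 11).astype(numpy.float32)
    direct = 1.0 / (1.0 + numpy.exp(-x))
    assert numpy.allclose(_sigmoid(x), direct, atol=1e-5)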
def _grad_sigmoid(x):
return x * (1 - x)
def _grad_grad_sigmoid(x):
return x * (1 - x) * (1 - 2 * x)
def _grad_tanh(x):
return 1 - x * x
def _grad_grad_tanh(x, gx):
return -2 * x * gx
_preamble = '''
template <typename T> __device__ T sigmoid(T x) {
const T half = 0.5;
return tanh(x * half) * half + half;
}
template <typename T> __device__ T grad_sigmoid(T y) { return y * (1 - y); }
template <typename T> __device__ T grad_tanh(T y) { return 1 - y * y; }
#define COMMON_ROUTINE \
T aa = tanh(a); \
T ai = sigmoid(i_); \
T af = sigmoid(f); \
T ao = sigmoid(o);
'''
class LSTM(function_node.FunctionNode):
"""Long short-term memory unit with forget gate.
It has two inputs (c, x) and two outputs (c, h), where c indicates the cell
    state. x must have four times as many channels as the number of units.
"""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('c', 'x'))
c_type, x_type = in_types
type_check.expect(
c_type.dtype.kind == 'f',
x_type.dtype == c_type.dtype,
c_type.ndim >= 2,
x_type.ndim >= 2,
c_type.ndim == x_type.ndim,
x_type.shape[0] <= c_type.shape[0],
x_type.shape[1] == 4 * c_type.shape[1],
)
for i in six.moves.range(2, type_check.eval(c_type.ndim)):
type_check.expect(x_type.shape[i] == c_type.shape[i])
def forward_chainerx(self, inputs):
c, x = inputs
c_next, h = chainerx.lstm(c, x)
return c_next, h
def forward(self, inputs):
self.retain_inputs((0, 1))
c_prev, x = inputs
a, i, f, o = _extract_gates(x)
batch = len(x)
if isinstance(x, chainer.get_cpu_array_types()):
if intel64.should_use_ideep('>=auto'):
xp = intel64.ideep.get_array_module(x)
else:
xp = numpy
a = xp.tanh(a)
i = _sigmoid(i, xp)
f = _sigmoid(f, xp)
o = _sigmoid(o, xp)
c_next = numpy.empty_like(c_prev)
c_next[:batch] = a * i + f * c_prev[:batch]
h = o * xp.tanh(c_next[:batch])
else:
c_next = cuda.cupy.empty_like(c_prev)
h = cuda.cupy.empty_like(c_next[:batch])
cuda.elementwise(
'T c_prev, T a, T i_, T f, T o', 'T c, T h',
'''
COMMON_ROUTINE;
c = aa * ai + af * c_prev;
h = ao * tanh(c);
''',
'lstm_fwd', preamble=_preamble)(
c_prev[:batch], a, i, f, o, c_next[:batch], h)
c_next[batch:] = c_prev[batch:]
self.retain_outputs((0,))
return c_next, h
def backward(self, indexes, grads):
grad_inputs = (
self.get_retained_inputs() + self.get_retained_outputs() + grads)
return LSTMGrad()(*grad_inputs)
class LSTMGrad(function.Function):
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
c_prev, x, c_next, gc, gh = inputs
batch = len(x)
gx = xp.empty_like(x)
ga, gi, gf, go = _extract_gates(gx)
# Consider the case that either gradient is not given
if gc is None:
gc_update = 0
gc_rest = 0
else:
gc_update = gc[:batch]
gc_rest = gc[batch:]
if gh is None:
gh = 0
a, i, f, o = _extract_gates(x)
if xp is numpy:
if intel64.should_use_ideep('>=auto'):
xp = intel64.ideep.get_array_module(x)
tanh_a = xp.tanh(a)
sig_i = _sigmoid(i, xp)
sig_f = _sigmoid(f, xp)
sig_o = _sigmoid(o, xp)
co = xp.tanh(c_next[:batch])
gc_prev = numpy.empty_like(c_prev)
# multiply f later
gc_prev[:batch] = gh * sig_o * _grad_tanh(co) + gc_update
gc = gc_prev[:batch]
ga[:] = gc * sig_i * _grad_tanh(tanh_a)
gi[:] = gc * tanh_a * _grad_sigmoid(sig_i)
gf[:] = gc * c_prev[:batch] * _grad_sigmoid(sig_f)
go[:] = gh * co * _grad_sigmoid(sig_o)
gc_prev[:batch] *= sig_f # multiply f here
gc_prev[batch:] = gc_rest
else:
gc_prev = xp.empty_like(c_prev)
cuda.elementwise(
'T c_prev, T c, T gc, T gh, T a, T i_, T f, T o',
'T gc_prev, T ga, T gi, T gf, T go',
'''
COMMON_ROUTINE;
T co = tanh(c);
T temp = gh * ao * grad_tanh(co) + gc;
ga = temp * ai * grad_tanh(aa);
gi = temp * aa * grad_sigmoid(ai);
gf = temp * c_prev * grad_sigmoid(af);
go = gh * co * grad_sigmoid(ao);
gc_prev = temp * af;
''',
'lstm_bwd', preamble=_preamble)(
c_prev[:batch], c_next[:batch], gc_update, gh, a, i, f, o,
gc_prev[:batch], ga, gi, gf, go)
gc_prev[batch:] = gc_rest
return gc_prev, gx
def backward(self, inputs, grads):
xp = backend.get_array_module(*inputs)
c_prev, x, c, gc, gh = inputs
ggc_prev, ggx = grads
batch = len(x)
gc_is_none = gc is None
gh_is_none = gh is None
ggc_prev_is_none = ggc_prev is None
ggx_is_none = ggx is None
if gc_is_none:
gc = 0
if gh_is_none:
gh = 0
if ggc_prev_is_none:
ggc_prev = 0
if ggx_is_none:
ggx = 0
gc_prev = xp.empty_like(c_prev)
gx = xp.empty_like(x)
gc_next = xp.empty_like(c)
ggc = xp.empty_like(c_prev)
ggh = xp.empty_like(c[:batch])
gc_prev[batch:] = 0
gc_next[batch:] = 0
ggc[batch:] = 0 if ggc_prev_is_none else ggc_prev[batch:]
ggh[batch:] = 0
c_prev = c_prev[:batch]
c = c[:batch]
if not gc_is_none:
gc = gc[:batch]
if not ggc_prev_is_none:
ggc_prev = ggc_prev[:batch]
if not ggx_is_none:
ggx = ggx[:batch]
a, i, f, o = _extract_gates(x)
if not ggx_is_none:
gga, ggi, ggf, ggo = _extract_gates(ggx)
else:
gga = 0
ggi = 0
ggf = 0
ggo = 0
ga, gi, gf, go = _extract_gates(gx)
lstm_grad_grad(
c_prev, a, i, f, o, c, gc, gh, ggc_prev, gga, ggi, ggf, ggo,
gc_prev[:batch], ga[:], gi[:], gf[:], go[:], gc_next[:batch],
ggc[:batch], ggh[:batch])
if gc_is_none:
ggc = None
if gh_is_none:
ggh = None
return gc_prev, gx, gc_next, ggc, ggh
@cuda.fuse()
def lstm_grad_grad(
c_prev, a, i, f, o, c, gc, gh, ggc_prev, gga, ggi, ggf, ggo,
gc_prev, ga, gi, gf, go, gc_next, ggc, ggh):
xp = backend.get_array_module(a)
sig_o = _sigmoid(o, xp)
gsig_o = _grad_sigmoid(sig_o)
ggsig_o = _grad_grad_sigmoid(sig_o)
sig_i = _sigmoid(i, xp)
gsig_i = _grad_sigmoid(sig_i)
ggsig_i = _grad_grad_sigmoid(sig_i)
sig_f = _sigmoid(f, xp)
gsig_f = _grad_sigmoid(sig_f)
ggsig_f = _grad_grad_sigmoid(sig_f)
tanh_a = xp.tanh(a)
gtanh_a = _grad_tanh(tanh_a)
ggtanh_a = _grad_grad_tanh(tanh_a, gtanh_a)
tanh_c = xp.tanh(c)
gtanh_c = _grad_tanh(tanh_c)
ggtanh_c = _grad_grad_tanh(tanh_c, gtanh_c)
gc_bar = gh * sig_o * gtanh_c + gc
gc_prev[:] = ggf * gc_bar * gsig_f
ga[:] = (gga * sig_i * ggtanh_a + ggi * gtanh_a * gsig_i) * gc_bar
gi[:] = (gga * gtanh_a * gsig_i + ggi * tanh_a * ggsig_i) * gc_bar
gf[:] = (ggc_prev * (gh * sig_o * gtanh_c + gc) * gsig_f +
ggf * gc_bar * c_prev * ggsig_f)
ggc[:] = (ggc_prev * sig_f +
gga * sig_i * gtanh_a +
ggi * tanh_a * gsig_i +
ggf * c_prev * gsig_f)
dgc_do = gh * gsig_o * gtanh_c
go[:] = ggc * dgc_do + ggo * gh * tanh_c * ggsig_o
dgc_dc = gh * sig_o * ggtanh_c
gc_next[:] = ggc * dgc_dc + ggo * gh * gtanh_c * gsig_o
ggh[:] = ggc * sig_o * gtanh_c + ggo * tanh_c * gsig_o
return gc_prev, ga, gi, gf, go, gc_next, ggc, ggh
def lstm(c_prev, x):
"""Long Short-Term Memory units as an activation function.
    This function implements LSTM units with forget gates. Let ``c_prev`` be
    the previous cell state and ``x`` the input array.
    First, the input array ``x`` is split into four arrays
    :math:`a, i, f, o` of the same shapes along the second axis. It means that
    the second axis of ``x`` must be four times as long as that of ``c_prev``.
    The split input arrays correspond to:
- :math:`a` : sources of cell input
- :math:`i` : sources of input gate
- :math:`f` : sources of forget gate
- :math:`o` : sources of output gate
Second, it computes the updated cell state ``c`` and the outgoing signal
``h`` as:
.. math::
c &= \\tanh(a) \\sigma(i)
+ c_{\\text{prev}} \\sigma(f), \\\\
h &= \\tanh(c) \\sigma(o),
where :math:`\\sigma` is the elementwise sigmoid function.
These are returned as a tuple of two variables.
This function supports variable length inputs. The mini-batch size of
the current input must be equal to or smaller than that of the previous
one. When mini-batch size of ``x`` is smaller than that of ``c``, this
function only updates ``c[0:len(x)]`` and doesn't change the rest of ``c``,
``c[len(x):]``.
So, please sort input sequences in descending order of lengths before
applying the function.
Args:
c_prev (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable that holds the previous cell state. The cell state
should be a zero array or the output of the previous call of LSTM.
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable that holds the sources of cell input, input gate, forget
gate and output gate. It must have the second dimension whose size
is four times of that of the cell state.
Returns:
tuple: Two :class:`~chainer.Variable` objects ``c`` and ``h``.
``c`` is the updated cell state. ``h`` indicates the outgoing signal.
See the original paper proposing LSTM with forget gates:
`Long Short-Term Memory in Recurrent Neural Networks
<http://www.felixgers.de/papers/phd.pdf>`_.
.. seealso::
:class:`~chainer.links.LSTM`
.. admonition:: Example
Assuming ``y`` is the current incoming signal, ``c`` is the previous
cell state, and ``h`` is the previous outgoing signal from an ``lstm``
function. Each of ``y``, ``c`` and ``h`` has ``n_units`` channels.
Most typical preparation of ``x`` is:
>>> n_units = 100
>>> y = chainer.Variable(np.zeros((1, n_units), np.float32))
>>> h = chainer.Variable(np.zeros((1, n_units), np.float32))
>>> c = chainer.Variable(np.zeros((1, n_units), np.float32))
>>> model = chainer.Chain()
>>> with model.init_scope():
... model.w = L.Linear(n_units, 4 * n_units)
... model.v = L.Linear(n_units, 4 * n_units)
>>> x = model.w(y) + model.v(h)
>>> c, h = F.lstm(c, x)
        It corresponds to calculating the input array ``x``, or the input
        sources :math:`a, i, f, o`, from the current incoming signal ``y`` and
        the previous outgoing signal ``h``. Different parameters are used for
        different kinds of input sources.
.. note::
We use the naming rule below.
- incoming signal
The formal input of the formulation of LSTM (e.g. in NLP, word
vector or output of lower RNN layer). The input of
:class:`chainer.links.LSTM` is the *incoming signal*.
- input array
The array which is linear transformed from *incoming signal* and
the previous outgoing signal. The *input array* contains four
sources, the sources of cell input, input gate, forget gate and
output gate. The input of
:class:`chainer.functions.activation.lstm.LSTM` is the
*input array*.
"""
return LSTM().apply((c_prev, x))
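# Illustrative sketch, for exposition only: it is not part of the original
# module. It demonstrates the variable-length behaviour documented above:
# when the mini-batch of ``x`` is smaller than that of ``c``, only the first
# ``len(x)`` rows of the cell state are updated. The helper name and shapes
# are assumptions chosen for the example.
def _example_lstm_variable_length():
    import numpy as np
    import chainer.functions as F
    n_units = 3
    c_prev = np.random.randn(4, n_units).astype(np.float32)  # batch of 4
    x = np.random.randn(2, 4 * n_units).astype(np.float32)   # batch of 2
    c, h = F.lstm(c_prev, x)
    assert h.shape == (2, n_units)
    # Rows 2 and 3 of the cell state are carried over unchanged.
    assert np.array_equal(c.array[2:], c_prev[2:])
    return c, h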
| 12,992 | 31.810606 | 79 | py |
| chainer | chainer-master/chainer/functions/rnn/n_step_gru.py |
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.functions.activation import sigmoid
from chainer.functions.activation import tanh
from chainer.functions.array import concat
from chainer.functions.array import split_axis
from chainer.functions.connection import linear
from chainer.functions.rnn import n_step_rnn
from chainer.utils import argument
from chainer import variable
import chainerx
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
def _extract_apply_in_data(inputs):
if not inputs:
return False, ()
if chainerx.is_available():
has_chainerx_array = False
# Unwrap arrays
arrays = []
for x in inputs:
if isinstance(x, variable.Variable):
if x._has_chainerx_array:
arrays.append(x._data[0])
has_chainerx_array = True
else:
arrays.append(x.array)
else: # x is ndarray
arrays.append(x)
if not has_chainerx_array:
if isinstance(x, chainerx.ndarray):
has_chainerx_array = True
return has_chainerx_array, tuple(arrays)
else:
return False, tuple([
x.array if isinstance(x, variable.Variable) else x
for x in inputs])
def _combine_inputs(hx, ws, bs, xs, num_layers, directions):
combined = []
combined.append(hx)
for x in xs:
combined.append(x)
for n in range(num_layers):
for direction in range(directions):
idx = directions * n + direction
for i in range(6):
combined.append(ws[idx][i])
for i in range(6):
combined.append(bs[idx][i])
return combined
def _seperate_inputs(combined, num_layers, seq_length, directions):
hx = combined[0]
xs = combined[1: 1 + seq_length]
ws = []
bs = []
index = 1 + seq_length
for n in range(num_layers):
ws.append(combined[index: index + 6])
bs.append(combined[index + 6: index + 12])
index += 12
if directions == 2:
ws.append(combined[index: index + 6])
bs.append(combined[index + 6: index + 12])
index += 12
return hx, ws, bs, xs
class NStepGRU(n_step_rnn.BaseNStepRNN):
def __init__(self, n_layers, states, lengths, **kwargs):
n_step_rnn.BaseNStepRNN.__init__(
self, n_layers, states, lengths,
rnn_dir='uni', rnn_mode='gru', **kwargs)
class NStepBiGRU(n_step_rnn.BaseNStepRNN):
def __init__(self, n_layers, states, lengths, **kwargs):
n_step_rnn.BaseNStepRNN.__init__(
self, n_layers, states, lengths,
rnn_dir='bi', rnn_mode='gru', **kwargs)
def n_step_gru(
n_layers, dropout_ratio, hx, ws, bs, xs, **kwargs):
"""n_step_gru(n_layers, dropout_ratio, hx, ws, bs, xs)
Stacked Uni-directional Gated Recurrent Unit function.
This function calculates stacked Uni-directional GRU with sequences.
This function gets an initial hidden state :math:`h_0`, an input
sequence :math:`x`, weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` for each time :math:`t`
from input :math:`x_t`.
.. math::
r_t &= \\sigma(W_0 x_t + W_3 h_{t-1} + b_0 + b_3) \\\\
z_t &= \\sigma(W_1 x_t + W_4 h_{t-1} + b_1 + b_4) \\\\
h'_t &= \\tanh(W_2 x_t + b_2 + r_t \\cdot (W_5 h_{t-1} + b_5)) \\\\
h_t &= (1 - z_t) \\cdot h'_t + z_t \\cdot h_{t-1}
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Six weight matrices and six bias vectors are
    required for each layer. So, when :math:`S` layers exist, you need to
    prepare :math:`6S` weight matrices and :math:`6S` bias vectors.
    If the number of layers ``n_layers`` is greater than :math:`1`, the input
    of the ``k``-th layer is the hidden state ``h_t`` of the ``(k-1)``-th
    layer. Note that the input variables of layers other than the first
    layer may have a different shape from those of the first layer.
Args:
n_layers(int): Number of layers.
dropout_ratio(float): Dropout ratio.
hx (~chainer.Variable):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
ws (list of list of :class:`~chainer.Variable`): Weight matrices.
``ws[i]`` represents weights for i-th layer.
Each ``ws[i]`` is a list containing six matrices.
            ``ws[i][j]`` corresponds to ``W_j`` in the equation.
            Only ``ws[0][j]`` where ``0 <= j < 3`` are of shape ``(N, I)`` as
            they are multiplied with input variables. All other matrices are
            of shape ``(N, N)``.
        bs (list of list of :class:`~chainer.Variable`): Bias vectors.
            ``bs[i]`` represents biases for i-th layer.
            Each ``bs[i]`` is a list containing six vectors.
            ``bs[i][j]`` corresponds to ``b_j`` in the equation.
            The shape of each vector is ``(N,)`` where ``N`` is the dimension
            of hidden units.
xs (list of :class:`~chainer.Variable`):
A list of :class:`~chainer.Variable`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
            When sequences have different lengths, sort the sequences in
            descending order by length, and transpose the sorted sequences.
            :func:`~chainer.functions.transpose_sequence` transposes a list
            of :class:`~chainer.Variable` objects holding sequences.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing two elements,
``hy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is same as ``hx``.
- ``ys`` is a list of :class:`~chainer.Variable` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
mini-batch size for time ``t``, and ``N`` is size of hidden
          units. Note that ``B_t`` is the same value as ``xs[t].shape[0]``.
"""
return n_step_gru_base(n_layers, dropout_ratio, hx, ws, bs, xs,
use_bi_direction=False, **kwargs)
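# Illustrative sketch, for exposition only: it is not part of the original
# module. It prepares the weight and bias lists in the layout the docstring
# describes: six matrices and six vectors per layer, where ws[0][0:3] take
# the input and every other matrix is hidden-to-hidden. The helper name and
# sizes are assumptions chosen for the example.
def _example_n_step_gru():
    import numpy as np
    import chainer.functions as F
    n_layers, in_size, n_units = 1, 4, 3
    hx = np.zeros((n_layers, 2, n_units), np.float32)  # two sequences
    w_shapes = [(n_units, in_size)] * 3 + [(n_units, n_units)] * 3
    ws = [[np.random.randn(*s).astype(np.float32) for s in w_shapes]]
    bs = [[np.zeros(n_units, np.float32) for _ in range(6)]]
    # Two sequences of lengths 3 and 2, already sorted and transposed.
    xs = [np.random.randn(2, in_size).astype(np.float32),
          np.random.randn(2, in_size).astype(np.float32),
          np.random.randn(1, in_size).astype(np.float32)]
    hy, ys = F.n_step_gru(n_layers, 0.0, hx, ws, bs, xs)
    assert hy.shape == hx.shape and ys[0].shape == (2, n_units)
    return hy, ys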
def n_step_bigru(
n_layers, dropout_ratio, hx, ws, bs, xs, **kwargs):
"""n_step_bigru(n_layers, dropout_ratio, hx, ws, bs, xs)
Stacked Bi-directional Gated Recurrent Unit function.
This function calculates stacked Bi-directional GRU with sequences.
This function gets an initial hidden state :math:`h_0`, an input
sequence :math:`x`, weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` for each time :math:`t`
from input :math:`x_t`.
.. math::
r^{f}_t &= \\sigma(W^{f}_0 x_t + W^{f}_3 h_{t-1} + b^{f}_0 + b^{f}_3)
\\\\
z^{f}_t &= \\sigma(W^{f}_1 x_t + W^{f}_4 h_{t-1} + b^{f}_1 + b^{f}_4)
\\\\
h^{f'}_t &= \\tanh(W^{f}_2 x_t + b^{f}_2 + r^{f}_t \\cdot (W^{f}_5
h_{t-1} + b^{f}_5)) \\\\
h^{f}_t &= (1 - z^{f}_t) \\cdot h^{f'}_t + z^{f}_t \\cdot h_{t-1}
\\\\
r^{b}_t &= \\sigma(W^{b}_0 x_t + W^{b}_3 h_{t-1} + b^{b}_0 + b^{b}_3)
\\\\
z^{b}_t &= \\sigma(W^{b}_1 x_t + W^{b}_4 h_{t-1} + b^{b}_1 + b^{b}_4)
\\\\
h^{b'}_t &= \\tanh(W^{b}_2 x_t + b^{b}_2 + r^{b}_t \\cdot (W^{b}_5
h_{t-1} + b^{b}_5)) \\\\
h^{b}_t &= (1 - z^{b}_t) \\cdot h^{b'}_t + z^{b}_t \\cdot h_{t-1}
\\\\
h_t &= [h^{f}_t; h^{b}_t] \\\\
where :math:`W^{f}` is weight matrices for forward-GRU, :math:`W^{b}` is
weight matrices for backward-GRU.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Six weight matrices and six bias vectors are
    required for each layer. So, when :math:`S` layers exist, you need to
    prepare :math:`6S` weight matrices and :math:`6S` bias vectors.
    If the number of layers ``n_layers`` is greater than :math:`1`, the input
    of the ``k``-th layer is the hidden state ``h_t`` of the ``(k-1)``-th
    layer. Note that the input variables of layers other than the first
    layer may have a different shape from those of the first layer.
Args:
n_layers(int): Number of layers.
dropout_ratio(float): Dropout ratio.
hx (:class:`~chainer.Variable`):
Variable holding stacked hidden states.
Its shape is ``(2S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
ws (list of list of :class:`~chainer.Variable`): Weight matrices.
``ws[i]`` represents weights for i-th layer.
Each ``ws[i]`` is a list containing six matrices.
            ``ws[i][j]`` corresponds to ``W_j`` in the equation.
            Only ``ws[0][j]`` where ``0 <= j < 3`` are of shape ``(N, I)`` as
            they are multiplied with input variables. All other matrices are
            of shape ``(N, N)``.
        bs (list of list of :class:`~chainer.Variable`): Bias vectors.
            ``bs[i]`` represents biases for i-th layer.
            Each ``bs[i]`` is a list containing six vectors.
            ``bs[i][j]`` corresponds to ``b_j`` in the equation.
            The shape of each vector is ``(N,)`` where ``N`` is the dimension
            of hidden units.
xs (list of :class:`~chainer.Variable`):
A list of :class:`~chainer.Variable` holding input values.
Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
            When sequences have different lengths, sort the sequences in
            descending order by length, and transpose the sorted sequences.
            :func:`~chainer.functions.transpose_sequence` transposes a list
            of :class:`~chainer.Variable` objects holding sequences.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
use_bi_direction (bool): If ``True``, this function uses
Bi-direction GRU.
Returns:
        tuple: This function returns a tuple containing two elements,
``hy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is same as ``hx``.
- ``ys`` is a list of :class:`~chainer.Variable` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
mini-batch size for time ``t``, and ``N`` is size of hidden
          units. Note that ``B_t`` is the same value as ``xs[t].shape[0]``.
"""
return n_step_gru_base(n_layers, dropout_ratio, hx, ws, bs, xs,
use_bi_direction=True, **kwargs)
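# Illustrative sketch, for exposition only: it is not part of the original
# module. For the bi-directional variant the first axis of ``hx`` has length
# ``2 * n_layers`` and each output ``ys[t]`` has ``2 * N`` units, the
# concatenation of the forward and backward directions. The helper name and
# sizes are assumptions chosen for the example.
def _example_n_step_bigru():
    import numpy as np
    import chainer.functions as F
    n_layers, in_size, n_units = 1, 4, 3
    hx = np.zeros((2 * n_layers, 1, n_units), np.float32)
    w_shapes = [(n_units, in_size)] * 3 + [(n_units, n_units)] * 3
    ws = [[np.random.randn(*s).astype(np.float32) for s in w_shapes]
          for _ in range(2)]
    bs = [[np.zeros(n_units, np.float32) for _ in range(6)]
          for _ in range(2)]
    xs = [np.random.randn(1, in_size).astype(np.float32) for _ in range(3)]
    hy, ys = F.n_step_bigru(n_layers, 0.0, hx, ws, bs, xs)
    assert hy.shape == hx.shape and ys[0].shape == (1, 2 * n_units)
    return hy, ys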
def n_step_gru_base(n_layers, dropout_ratio, hx, ws, bs, xs,
use_bi_direction, **kwargs):
"""n_step_gru_base(n_layers, dropout_ratio, hx, ws, bs, xs, \
use_bi_direction)
Base function for Stack GRU/BiGRU functions.
This function is used at :func:`chainer.functions.n_step_bigru` and
:func:`chainer.functions.n_step_gru`.
This function's behavior depends on argument ``use_bi_direction``.
Args:
n_layers(int): Number of layers.
dropout_ratio(float): Dropout ratio.
hx (:class:`~chainer.Variable`):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
            dimension of hidden units. When ``use_bi_direction`` is ``True``,
            the first dimension length is ``2S`` because of the two directions.
ws (list of list of :class:`~chainer.Variable`): Weight matrices.
``ws[i]`` represents weights for i-th layer.
Each ``ws[i]`` is a list containing six matrices.
            ``ws[i][j]`` corresponds to ``W_j`` in the equation.
            Only ``ws[0][j]`` where ``0 <= j < 3`` are of shape ``(N, I)`` as
            they are multiplied with input variables. All other matrices are
            of shape ``(N, N)``.
        bs (list of list of :class:`~chainer.Variable`): Bias vectors.
            ``bs[i]`` represents biases for i-th layer.
            Each ``bs[i]`` is a list containing six vectors.
            ``bs[i][j]`` corresponds to ``b_j`` in the equation.
            The shape of each vector is ``(N,)`` where ``N`` is the dimension
            of hidden units.
xs (list of :class:`~chainer.Variable`):
A list of :class:`~chainer.Variable` holding input values.
Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
            When sequences have different lengths, sort the sequences in
            descending order by length, and transpose the sorted sequences.
            :func:`~chainer.functions.transpose_sequence` transposes a list
            of :class:`~chainer.Variable` objects holding sequences.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
activation (str): Activation function name.
Please select ``tanh`` or ``relu``.
use_bi_direction (bool): If ``True``, this function uses
Bi-direction GRU.
.. seealso::
:func:`chainer.functions.n_step_rnn`
:func:`chainer.functions.n_step_birnn`
"""
if kwargs:
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config',
use_cudnn='use_cudnn argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
xp = backend.get_array_module(hx, hx.data)
directions = 1
if use_bi_direction:
directions = 2
combined = _combine_inputs(hx, ws, bs, xs, n_layers, directions)
has_chainerx_array, combined = _extract_apply_in_data(combined)
hx_chx, ws_chx, bs_chx, xs_chx = _seperate_inputs(
combined, n_layers, len(xs), directions)
if has_chainerx_array and xp is chainerx and dropout_ratio == 0:
if use_bi_direction:
hy, ys = chainerx.n_step_bigru(
n_layers, hx_chx, ws_chx, bs_chx, xs_chx)
else:
hy, ys = chainerx.n_step_gru(
n_layers, hx_chx, ws_chx, bs_chx, xs_chx)
hy = variable.Variable._init_unchecked(
hy, requires_grad=hy.is_backprop_required(),
is_chainerx_array=True)
ys = [variable.Variable._init_unchecked(
y, requires_grad=y.is_backprop_required(),
is_chainerx_array=True)
for y in ys]
return hy, ys
if xp is cuda.cupy and chainer.should_use_cudnn('>=auto', 5000):
lengths = [len(x) for x in xs]
xs = chainer.functions.concat(xs, axis=0)
with chainer.using_device(xs.device):
states = cuda.get_cudnn_dropout_states()
states.set_dropout_ratio(dropout_ratio)
w = n_step_rnn.cudnn_rnn_weight_concat(
n_layers, states, use_bi_direction, 'gru', ws, bs)
if use_bi_direction:
rnn = NStepBiGRU
else:
rnn = NStepGRU
hy, ys = rnn(n_layers, states, lengths)(hx, w, xs)
sections = numpy.cumsum(lengths[:-1])
ys = chainer.functions.split_axis(ys, sections, 0)
return hy, ys
else:
hy, _, ys = n_step_rnn.n_step_rnn_impl(
_gru, n_layers, dropout_ratio, hx, None, ws, bs, xs,
use_bi_direction)
return hy, ys
def _gru(x, h, c, w, b):
xw = concat.concat([w[0], w[1], w[2]], axis=0)
hw = concat.concat([w[3], w[4], w[5]], axis=0)
xb = concat.concat([b[0], b[1], b[2]], axis=0)
hb = concat.concat([b[3], b[4], b[5]], axis=0)
gru_x = linear.linear(x, xw, xb)
gru_h = linear.linear(h, hw, hb)
W_r_x, W_z_x, W_x = split_axis.split_axis(gru_x, 3, axis=1)
U_r_h, U_z_h, U_x = split_axis.split_axis(gru_h, 3, axis=1)
r = sigmoid.sigmoid(W_r_x + U_r_h)
z = sigmoid.sigmoid(W_z_x + U_z_h)
h_bar = tanh.tanh(W_x + r * U_x)
return (1 - z) * h_bar + z * h, None
| 17,052 | 40.796569 | 78 | py |
| chainer | chainer-master/chainer/functions/rnn/tree_lstm.py |
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function
from chainer.utils import type_check
import chainerx
def _extract_gates(x, n_split=5):
"""Extract gates by split.
This is different from ``_extract_gates`` in lstm.py,
which is as follows::
r = x.reshape((x.shape[0], x.shape[1] // 4, 4) + x.shape[2:])
return (r[:, :, i] for i in six.moves.range(4))
    In other words, it thinly slices ``x`` and merges them,
while this thickly slices ``x``.
"""
r = x.reshape(
(x.shape[0], n_split, x.shape[1] // n_split) + x.shape[2:])
return (r[:, i, :] for i in six.moves.range(n_split))
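# Illustrative sketch, for exposition only: it is not part of the original
# module. Unlike the thin slicing in lstm.py, this helper slices ``x``
# thickly: with ``n_split`` blocks each of width ``x.shape[1] // n_split``,
# block ``k`` is the contiguous column range ``x[:, k * w:(k + 1) * w]``.
# The helper name is an assumption.
def _example_extract_gates_thick():
    x = numpy.arange(10, dtype=numpy.float32).reshape(1, 10)
    a, i, o, f1, f2 = _extract_gates(x, 5)  # five blocks of width 2
    assert numpy.array_equal(a, x[:, 0:2])
    assert numpy.array_equal(f2, x[:, 8:10])
    return a, i, o, f1, f2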
def _sigmoid(x):
half = x.dtype.type(0.5)
return numpy.tanh(x * half) * half + half
def _grad_sigmoid(x):
return x * (1 - x)
def _grad_tanh(x):
return 1 - x * x
_preamble = '''
template <typename T> __device__ T sigmoid(T x) {
const T half = 0.5;
return tanh(x * half) * half + half;
}
template <typename T> __device__ T grad_sigmoid(T y) { return y * (1 - y); }
template <typename T> __device__ T grad_tanh(T y) { return 1 - y * y; }
#define COMMON_ROUTINE \
T aa = tanh(a); \
T ai = sigmoid(i_); \
T ao = sigmoid(o); \
'''
class TreeLSTM(function.Function):
"""TreeLSTM unit with N forget gates.
    This unit has variable inputs ``(c1, c2, ..., cN, x)``
    where ``x`` is (3 + N) times larger than each cell.
    Forget gates ``(f1, f2, ..., fN)`` can depend on
    different partitions of ``x[:, 3 * cell_units:]``.
There are two outputs ``(c, h)``.
"""
def check_type_forward(self, in_types):
type_check.expect(in_types.size() >= 2)
c_types = in_types[:-1]
x_type = in_types[-1]
n_ary = len(c_types)
type_check.expect(x_type.ndim >= 2)
for i in six.moves.range(len(c_types)):
type_check.expect(
c_types[i].dtype.kind == 'f',
x_type.dtype == c_types[i].dtype,
c_types[i].ndim >= 2,
c_types[i].ndim == x_type.ndim,
x_type.shape[0] == c_types[i].shape[0],
x_type.shape[1] == (3 + n_ary) * c_types[i].shape[1],
)
for j in six.moves.range(2, type_check.eval(c_types[i].ndim)):
                type_check.expect(x_type.shape[j] == c_types[i].shape[j])
def forward_chainerx(self, inputs):
return chainerx.tree_lstm(*inputs)
def forward(self, inputs):
cs, x = inputs[:-1], inputs[-1]
n_ary = len(cs)
gates = list(_extract_gates(x, 3 + n_ary))
a, i, o = gates[:3]
fs = gates[3:]
if isinstance(x, chainer.get_cpu_array_types()):
self.a = numpy.tanh(a)
self.i = _sigmoid(i)
self.o = _sigmoid(o)
self.fs = [_sigmoid(f) for f in fs]
self.c = self.a * self.i + sum(f * c for f, c in zip(self.fs, cs))
h = self.o * numpy.tanh(self.c)
else:
preamble = _preamble + \
' '.join('T af{} = sigmoid(f{});'.format(j, j)
for j in six.moves.range(1, n_ary + 1))
cells_str = ', '.join('T c{}'.format(j)
for j in six.moves.range(1, n_ary + 1))
fgates_str = ', '.join('T f{}'.format(j)
for j in six.moves.range(1, n_ary + 1))
fc_calc_str = ' + '.join('af{} * c{}'.format(j, j)
for j in six.moves.range(1, n_ary + 1))
self.c, h = cuda.elementwise(
'T a, T i_, T o, {}, {}'.format(cells_str, fgates_str),
'T c, T h',
'''
COMMON_ROUTINE;
c = aa * ai + {};
h = ao * tanh(c);
'''.format(fc_calc_str),
'treelstm_fwd', preamble=preamble)(
a, i, o, *(list(cs) + fs))
return self.c, h
def backward(self, inputs, grad_outputs):
xp = backend.get_array_module(*inputs)
cs, x = inputs[:-1], inputs[-1]
n_ary = len(cs)
gc, gh = grad_outputs
gx = xp.empty_like(x)
gates = list(_extract_gates(gx, 3 + n_ary))
ga, gi, go = gates[:3]
gfs = gates[3:]
# Consider the case that either gradient is not given
if gc is None:
gc = 0
if gh is None:
gh = 0
if xp is numpy:
co = numpy.tanh(self.c)
tmp = gh * self.o * _grad_tanh(co) + gc
ga[:] = tmp * self.i * _grad_tanh(self.a)
gi[:] = tmp * self.a * _grad_sigmoid(self.i)
go[:] = gh * co * _grad_sigmoid(self.o)
gcs = []
for j in six.moves.range(0, n_ary):
gfs[j][:] = tmp * cs[j] * _grad_sigmoid(self.fs[j])
gcs.append(tmp * self.fs[j])
else:
gates = list(_extract_gates(x, 3 + n_ary))
a, i, o = gates[:3]
fs = gates[3:]
gcs = [xp.empty_like(c) for c in cs]
preamble = _preamble + \
' '.join('T af{} = sigmoid(f{});'.format(j, j)
for j in six.moves.range(1, n_ary + 1))
cells_str = ', '.join('T c{}'.format(j)
for j in six.moves.range(1, n_ary + 1))
fgates_str = ', '.join('T f{}'.format(j)
for j in six.moves.range(1, n_ary + 1))
g_cells_str = ', '.join('T gc{}'.format(j)
for j in six.moves.range(1, n_ary + 1))
g_fgates_str = ', '.join('T gf{}'.format(j)
for j in six.moves.range(1, n_ary + 1))
gf_calc_str = '\n '.join(
'gf{} = temp * c{} * grad_sigmoid(af{});'.format(j, j, j)
for j in six.moves.range(1, n_ary + 1))
gc_calc_str = '\n '.join(
'gc{} = temp * af{};'.format(j, j)
for j in six.moves.range(1, n_ary + 1))
cuda.elementwise(
'T c, T gc, T gh, T a, T i_, T o, ' +
'{}, {}'.format(cells_str, fgates_str),
'T ga, T gi, T go, {}, {}'.format(g_cells_str, g_fgates_str),
'''
COMMON_ROUTINE;
T co = tanh(c);
T temp = gh * ao * grad_tanh(co) + gc;
ga = temp * ai * grad_tanh(aa);
gi = temp * aa * grad_sigmoid(ai);
go = gh * co * grad_sigmoid(ao);
{}
{}
'''.format(gf_calc_str, gc_calc_str),
'treelstm_bwd', preamble=preamble)(
self.c, gc, gh, a, i, o,
*(list(cs) + fs + [ga, gi, go] + gcs + gfs))
return list(gcs) + [gx]
def tree_lstm(*inputs):
"""TreeLSTM unit as an activation function.
This function implements TreeLSTM units both for
N-ary TreeLSTM and Child-Sum TreeLSTM.
Let the children cell states
:math:`c_{\\text{1}}, c_{\\text{2}}, \\dots, c_{\\text{N}}`,
and the incoming signal :math:`x`.
First, the incoming signal :math:`x` is split into (3 + N) arrays
:math:`a, i, o, f_{\\text{1}}, f_{\\text{2}}, ..., f_{\\text{N}}`
of the same shapes along the second axis.
    It means that the second axis of :math:`x` must be (3 + N) times
    as long as that of each :math:`c_{n}`.
    The split input signals correspond to:
- :math:`a` : sources of cell input
- :math:`i` : sources of input gate
- :math:`o` : sources of output gate
- :math:`f_{n}` : sources of forget gate for n-th ary
Second, it computes outputs as:
.. math::
c &= \\tanh(a) \\text{sigmoid}(i) \\\\
& + c_{\\text{1}} \\text{sigmoid}(f_{\\text{1}}), \\\\
& + c_{\\text{2}} \\text{sigmoid}(f_{\\text{2}}), \\\\
& + ..., \\\\
& + c_{\\text{N}} \\text{sigmoid}(f_{\\text{N}}), \\\\
h &= \\tanh(c) \\text{sigmoid}(o).
These are returned as a tuple of (N + 1) variables.
Args:
inputs (list of :class:`~chainer.Variable`): Variable arguments which
include all cell vectors from child-nodes, and an input vector.
Each of the cell vectors and the input vector is
:class:`~chainer.Variable` or :ref:`ndarray`.
The input vector must have the second dimension whose size
is (N + 3) times of that of each cell,
where N denotes the total number of cells.
Returns:
tuple: Two :class:`~chainer.Variable` objects ``c`` and ``h``. ``c`` is
the updated cell state. ``h`` indicates the outgoing signal.
See the papers for details: `Improved Semantic Representations From
Tree-Structured Long Short-Term Memory Networks
<https://www.aclweb.org/anthology/P15-1150>`_ and
`A Fast Unified Model for Parsing and Sentence Understanding
<https://arxiv.org/pdf/1603.06021.pdf>`_.
    Tai et al.'s N-Ary TreeLSTM is slightly extended in
    Bowman et al., and this function is based on
    the variant by Bowman et al.
Specifically, eq. 10 in Tai et al. only has one :math:`W` matrix
to be applied to :math:`x`, consistently for all children.
On the other hand, Bowman et al.'s model has multiple matrices,
each of which affects the forget gate for each child's cell individually.
.. admonition:: Example
Assuming ``y`` is the current input signal, ``c`` is the previous cell
state, and ``h`` is the previous output signal from an
:meth:`~chainer.functions.tree_lstm` function.
Each of ``y``, ``c`` and ``h`` has ``n_units`` channels.
Using 2-ary (binary) TreeLSTM,
most typical preparation of ``x`` is:
>>> model = chainer.Chain()
>>> with model.init_scope():
... model.w = L.Linear(10, 5 * 10)
... model.v1 = L.Linear(10, 5 * 10)
... model.v2 = L.Linear(10, 5 * 10)
>>> y = np.random.uniform(-1, 1, (4, 10)).astype(np.float32)
>>> h1 = np.random.uniform(-1, 1, (4, 10)).astype(np.float32)
>>> h2 = np.random.uniform(-1, 1, (4, 10)).astype(np.float32)
>>> c1 = np.random.uniform(-1, 1, (4, 10)).astype(np.float32)
>>> c2 = np.random.uniform(-1, 1, (4, 10)).astype(np.float32)
>>> x = model.w(y) + model.v1(h1) + model.v2(h2)
>>> c, h = F.tree_lstm(c1, c2, x)
It corresponds to calculate the input sources
:math:`a, i, o, f_{\\text{1}}, f_{\\text{2}}`
from the current input ``y`` and the children's outputs
``h1`` and ``h2``. Different parameters are used for different kind of
input sources.
"""
return TreeLSTM()(*inputs)
| 10,927 | 36.296928 | 79 | py |
| chainer | chainer-master/chainer/functions/rnn/slstm.py |
import numpy
import six
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function
from chainer import function_node
from chainer.utils import type_check
import chainerx
def _extract_gates(x):
r = x.reshape((x.shape[0], x.shape[1] // 4, 4) + x.shape[2:])
return (r[:, :, i] for i in six.moves.range(4))
def _sigmoid(x, xp=numpy):
half = x.dtype.type(0.5)
return xp.tanh(x * half) * half + half
def _grad_sigmoid(x):
return x * (1 - x)
def _grad_grad_sigmoid(x):
return x * (1 - x) * (1 - 2 * x)
def _grad_tanh(x):
return 1 - x * x
def _grad_grad_tanh(x, gx):
return -2 * x * gx
_preamble = '''
template <typename T> __device__ T sigmoid(T x) {
const T half = 0.5;
return tanh(x * half) * half + half;
}
template <typename T> __device__ T grad_sigmoid(T y) { return y * (1 - y); }
template <typename T> __device__ T grad_tanh(T y) { return 1 - y * y; }
#define COMMON_ROUTINE \
T aa1 = tanh(a1); \
T ai1 = sigmoid(i1); \
T af1 = sigmoid(f1); \
T aa2 = tanh(a2); \
T ai2 = sigmoid(i2); \
T af2 = sigmoid(f2); \
T ao = sigmoid(o1 + o2);
'''
class SLSTM(function_node.FunctionNode):
"""S-LSTM unit.
It has four inputs (c1, c2, x1, x2) and two outputs (c, h), where c
    indicates the cell state. x1 and x2 must have four times as many channels
    as the number of units.
"""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('c_prev1', 'c_prev2', 'x1', 'x2'))
c1_type, c2_type, x1_type, x2_type = in_types
type_check.expect(
c1_type.dtype.kind == 'f',
c2_type.dtype == c1_type.dtype,
x1_type.dtype == c1_type.dtype,
x2_type.dtype == c1_type.dtype,
c1_type.ndim >= 2,
c2_type.ndim >= 2,
x1_type.ndim >= 2,
x2_type.ndim >= 2,
c1_type.ndim == x1_type.ndim,
c1_type.ndim == c2_type.ndim,
c1_type.ndim == x2_type.ndim,
c1_type.shape[0] == x1_type.shape[0],
c1_type.shape[0] == c2_type.shape[0],
c1_type.shape[0] == x2_type.shape[0],
x1_type.shape[1] == 4 * c1_type.shape[1],
x2_type.shape[1] == 4 * c2_type.shape[1],
)
for i in range(2, type_check.eval(c1_type.ndim)):
type_check.expect(x1_type.shape[i] == c1_type.shape[i])
type_check.expect(x2_type.shape[i] == c2_type.shape[i])
type_check.expect(x1_type.shape[i] == x2_type.shape[i])
def forward_chainerx(self, inputs):
c_prev1, c_prev2, x1, x2 = inputs
c, h = chainerx.slstm(c_prev1, c_prev2, x1, x2)
return c, h
def forward(self, inputs):
self.retain_inputs((0, 1, 2, 3))
c_prev1, c_prev2, x1, x2 = inputs
a1, i1, f1, o1 = _extract_gates(x1)
a2, i2, f2, o2 = _extract_gates(x2)
if isinstance(x1, numpy.ndarray):
a1 = numpy.tanh(a1)
i1 = _sigmoid(i1)
f1 = _sigmoid(f1)
a2 = numpy.tanh(a2)
i2 = _sigmoid(i2)
f2 = _sigmoid(f2)
o = _sigmoid(o1 + o2)
c = a1 * i1 + a2 * i2 + \
f1 * c_prev1 + f2 * c_prev2
h = o * numpy.tanh(c)
else:
c, h = cuda.elementwise(
'''T c_prev1, T a1, T i1, T f1, T o1,
T c_prev2, T a2, T i2, T f2, T o2''',
'T c, T h',
'''
COMMON_ROUTINE;
c = aa1 * ai1 + af1 * c_prev1 + aa2 * ai2 + af2 * c_prev2;
h = ao * tanh(c);
''',
'slstm_fwd', preamble=_preamble)(
c_prev1, a1, i1, f1, o1, c_prev2, a2, i2, f2, o2)
self.retain_outputs((0,))
return c, h
def backward(self, indexes, grads):
grad_inputs = (
self.get_retained_inputs() + self.get_retained_outputs() + grads)
return SLSTMGrad()(*grad_inputs)
class SLSTMGrad(function.Function):
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
c_prev1, c_prev2, x1, x2, c_next, gc, gh = inputs
gx1 = xp.empty_like(x1)
gx2 = xp.empty_like(x2)
ga1, gi1, gf1, go1 = _extract_gates(gx1)
ga2, gi2, gf2, go2 = _extract_gates(gx2)
if gc is None:
gc = 0
if gh is None:
gh = 0
a1, i1, f1, o1 = _extract_gates(x1)
a2, i2, f2, o2 = _extract_gates(x2)
if xp is numpy:
if intel64.should_use_ideep('>=auto'):
xp = intel64.ideep.get_array_module(x1)
tanh_a1 = xp.tanh(a1)
sig_i1 = _sigmoid(i1, xp)
sig_f1 = _sigmoid(f1, xp)
tanh_a2 = xp.tanh(a2)
sig_i2 = _sigmoid(i2, xp)
sig_f2 = _sigmoid(f2, xp)
sig_o = _sigmoid(o1 + o2, xp)
co = xp.tanh(c_next)
# multiply f later
gc_prev = gh * sig_o * _grad_tanh(co) + gc
ga1[:] = gc_prev * sig_i1 * _grad_tanh(tanh_a1)
gi1[:] = gc_prev * tanh_a1 * _grad_sigmoid(sig_i1)
gf1[:] = gc_prev * c_prev1 * _grad_sigmoid(sig_f1)
go1[:] = gh * co * _grad_sigmoid(sig_o)
ga2[:] = gc_prev * sig_i2 * _grad_tanh(tanh_a2)
gi2[:] = gc_prev * tanh_a2 * _grad_sigmoid(sig_i2)
gf2[:] = gc_prev * c_prev2 * _grad_sigmoid(sig_f2)
go2[:] = gh * co * _grad_sigmoid(sig_o)
# multiply f here
gc_prev1 = gc_prev * sig_f1
gc_prev2 = gc_prev * sig_f2
else:
a1, i1, f1, o1 = _extract_gates(x1)
a2, i2, f2, o2 = _extract_gates(x2)
gc_prev1 = xp.empty_like(c_prev1)
gc_prev2 = xp.empty_like(c_prev2)
cuda.elementwise(
'''T c_prev1, T a1, T i1, T f1, T o1,
T c_prev2, T a2, T i2, T f2, T o2,
T c, T gc, T gh''',
'''T gc_prev1, T ga1, T gi1, T gf1, T go1,
T gc_prev2, T ga2, T gi2, T gf2, T go2''',
'''
COMMON_ROUTINE;
T co = tanh(c);
T temp = gh * ao * grad_tanh(co) + gc;
ga1 = temp * ai1 * grad_tanh(aa1);
gi1 = temp * aa1 * grad_sigmoid(ai1);
gf1 = temp * c_prev1 * grad_sigmoid(af1);
go1 = gh * co * grad_sigmoid(ao);
gc_prev1 = temp * af1;
ga2 = temp * ai2 * grad_tanh(aa2);
gi2 = temp * aa2 * grad_sigmoid(ai2);
gf2 = temp * c_prev2 * grad_sigmoid(af2);
go2 = gh * co * grad_sigmoid(ao);
gc_prev2 = temp * af2;
''',
'lstm_bwd', preamble=_preamble)(
c_prev1, a1, i1, f1, o1,
c_prev2, a2, i2, f2, o2,
c_next, gc, gh,
gc_prev1, ga1, gi1, gf1, go1,
gc_prev2, ga2, gi2, gf2, go2)
return gc_prev1, gc_prev2, gx1, gx2
def backward(self, inputs, grads):
xp = backend.get_array_module(*inputs)
c_prev1, c_prev2, x1, x2, c, gc, gh = inputs
ggc_prev1, ggc_prev2, ggx1, ggx2 = grads
gc_is_none = gc is None
gh_is_none = gh is None
if gc_is_none:
gc = 0
if gh_is_none:
gh = 0
if ggc_prev1 is None:
ggc_prev1 = 0
if ggc_prev2 is None:
ggc_prev2 = 0
gc_prev1 = xp.empty_like(c_prev1)
gc_prev2 = xp.empty_like(c_prev2)
gx1 = xp.empty_like(x1)
gx2 = xp.empty_like(x2)
gc_next = xp.empty_like(c)
ggc = xp.empty_like(c_prev1)
ggh = xp.empty_like(c)
a1, i1, f1, o1 = _extract_gates(x1)
a2, i2, f2, o2 = _extract_gates(x2)
gga1, ggi1, ggf1, ggo1 = _extract_gates(ggx1)
gga2, ggi2, ggf2, ggo2 = _extract_gates(ggx2)
ga1, gi1, gf1, go1 = _extract_gates(gx1)
ga2, gi2, gf2, go2 = _extract_gates(gx2)
o = o1 + o2
gc_prev1[:], ga1[:], gi1[:], gf1[:], go1[:], \
gc_prev2[:], ga2[:], gi2[:], gf2[:], go2[:], \
gc_next[:], ggc[:], ggh[:] \
= slstm_grad_grad(c_prev1, a1, i1, f1,
c_prev2, a2, i2, f2, o, c, gc, gh,
ggc_prev1, gga1, ggi1, ggf1, ggo1,
ggc_prev2, gga2, ggi2, ggf2, ggo2)
# If inputs were omitted, omit their gradients.
if gc_is_none:
ggc = None
if gh_is_none:
ggh = None
return gc_prev1, gc_prev2, gx1, gx2, gc_next, ggc, ggh
@cuda.fuse()
def slstm_grad_grad(c_prev1, a1, i1, f1,
c_prev2, a2, i2, f2,
o, c, gc, gh,
ggc_prev1, gga1, ggi1, ggf1, ggo1,
ggc_prev2, gga2, ggi2, ggf2, ggo2):
xp = backend.get_array_module(a1)
sig_o = _sigmoid(o, xp)
gsig_o = _grad_sigmoid(sig_o)
ggsig_o = _grad_grad_sigmoid(sig_o)
sig_i1 = _sigmoid(i1, xp)
gsig_i1 = _grad_sigmoid(sig_i1)
ggsig_i1 = _grad_grad_sigmoid(sig_i1)
sig_i2 = _sigmoid(i2, xp)
gsig_i2 = _grad_sigmoid(sig_i2)
ggsig_i2 = _grad_grad_sigmoid(sig_i2)
sig_f1 = _sigmoid(f1, xp)
gsig_f1 = _grad_sigmoid(sig_f1)
ggsig_f1 = _grad_grad_sigmoid(sig_f1)
sig_f2 = _sigmoid(f2, xp)
gsig_f2 = _grad_sigmoid(sig_f2)
ggsig_f2 = _grad_grad_sigmoid(sig_f2)
tanh_a1 = xp.tanh(a1)
gtanh_a1 = _grad_tanh(tanh_a1)
ggtanh_a1 = _grad_grad_tanh(tanh_a1, gtanh_a1)
tanh_a2 = xp.tanh(a2)
gtanh_a2 = _grad_tanh(tanh_a2)
ggtanh_a2 = _grad_grad_tanh(tanh_a2, gtanh_a2)
tanh_c = xp.tanh(c)
gtanh_c = _grad_tanh(tanh_c)
ggtanh_c = _grad_grad_tanh(tanh_c, gtanh_c)
gc_bar = gh * sig_o * gtanh_c + gc
gc_prev1 = ggf1 * gc_bar * gsig_f1
gc_prev2 = ggf2 * gc_bar * gsig_f2
ga1 = (gga1 * sig_i1 * ggtanh_a1 +
ggi1 * gtanh_a1 * gsig_i1) * gc_bar
ga2 = (gga2 * sig_i2 * ggtanh_a2 +
ggi2 * gtanh_a2 * gsig_i2) * gc_bar
gi1 = (gga1 * gtanh_a1 * gsig_i1 +
ggi1 * tanh_a1 * ggsig_i1) * gc_bar
gi2 = (gga2 * gtanh_a2 * gsig_i2 +
ggi2 * tanh_a2 * ggsig_i2) * gc_bar
gf1 = (ggc_prev1 * (gh * sig_o * gtanh_c + gc) * gsig_f1 +
ggf1 * gc_bar * c_prev1 * ggsig_f1)
gf2 = (ggc_prev2 * (gh * sig_o * gtanh_c + gc) * gsig_f2 +
ggf2 * gc_bar * c_prev2 * ggsig_f2)
ggc = (
ggc_prev1 * sig_f1 +
gga1 * sig_i1 * gtanh_a1 +
ggi1 * tanh_a1 * gsig_i1 +
ggf1 * c_prev1 * gsig_f1 +
ggc_prev2 * sig_f2 +
gga2 * sig_i2 * gtanh_a2 +
ggi2 * tanh_a2 * gsig_i2 +
ggf2 * c_prev2 * gsig_f2)
dgc_do = gh * gsig_o * gtanh_c
go1 = go2 = ggc * dgc_do + (ggo1 + ggo2) * gh * tanh_c * ggsig_o
dgc_dc = gh * sig_o * ggtanh_c
gc_next = ggc * dgc_dc + (ggo1 + ggo2) * gh * gtanh_c * gsig_o
ggh = ggc * sig_o * gtanh_c + (ggo1 + ggo2) * tanh_c * gsig_o
return gc_prev1, ga1, gi1, gf1, go1, gc_prev2, ga2, gi2, gf2, go2, \
gc_next, ggc, ggh
def slstm(c_prev1, c_prev2, x1, x2):
"""S-LSTM units as an activation function.
This function implements S-LSTM unit. It is an extension of LSTM unit
applied to tree structures.
The function is applied to binary trees. Each node has two child nodes.
It gets four arguments, previous cell states ``c_prev1`` and ``c_prev2``,
and input arrays ``x1`` and ``x2``.
First both input arrays ``x1`` and ``x2`` are split into eight arrays
:math:`a_1, i_1, f_1, o_1`, and :math:`a_2, i_2, f_2, o_2`. They have the
same shape along the second axis.
It means that ``x1`` and ``x2`` 's second axis must have 4 times
the length of ``c_prev1`` and ``c_prev2``.
The split input arrays are corresponding to:
- :math:`a_i` : sources of cell input
- :math:`i_i` : sources of input gate
- :math:`f_i` : sources of forget gate
- :math:`o_i` : sources of output gate
It computes the updated cell state ``c`` and the outgoing signal
``h`` as:
.. math::
c &= \\tanh(a_1 + a_2) \\sigma(i_1 + i_2)
+ c_{\\text{prev}1} \\sigma(f_1)
+ c_{\\text{prev}2} \\sigma(f_2), \\\\
h &= \\tanh(c) \\sigma(o_1 + o_2),
where :math:`\\sigma` is the elementwise sigmoid function.
The function returns ``c`` and ``h`` as a tuple.
Args:
c_prev1 (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable that holds the previous cell state of the first child
node. The cell state should be a zero array or the output of
the previous call of LSTM.
c_prev2 (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable that holds the previous cell state of the second child
node.
x1 (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable that holds the sources of cell input, input gate, forget
gate and output gate from the first child node. It must have the
second dimension whose size is four times of that of the cell
state.
x2 (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable that holds the input sources from the second child node.
Returns:
tuple: Two :class:`~chainer.Variable` objects ``c`` and ``h``. ``c`` is
the cell state. ``h`` indicates the outgoing signal.
See detail in paper: `Long Short-Term Memory Over Tree Structures
<https://arxiv.org/abs/1503.04881>`_.
.. admonition:: Example
Assuming ``c1``, ``c2`` is the previous cell state of children,
and ``h1``, ``h2`` is the previous outgoing signal from children.
Each of ``c1``, ``c2``, ``h1`` and ``h2`` has ``n_units`` channels.
Most typical preparation of ``x1``, ``x2`` is:
>>> n_units = 100
>>> h1 = chainer.Variable(np.zeros((1, n_units), np.float32))
>>> h2 = chainer.Variable(np.zeros((1, n_units), np.float32))
>>> c1 = chainer.Variable(np.zeros((1, n_units), np.float32))
>>> c2 = chainer.Variable(np.zeros((1, n_units), np.float32))
>>> model1 = chainer.Chain()
>>> with model1.init_scope():
... model1.w = L.Linear(n_units, 4 * n_units)
... model1.v = L.Linear(n_units, 4 * n_units)
>>> model2 = chainer.Chain()
>>> with model2.init_scope():
... model2.w = L.Linear(n_units, 4 * n_units)
... model2.v = L.Linear(n_units, 4 * n_units)
>>> x1 = model1.w(c1) + model1.v(h1)
>>> x2 = model2.w(c2) + model2.v(h2)
>>> c, h = F.slstm(c1, c2, x1, x2)
        This corresponds to computing the input array ``x1``, that is, the
        input sources :math:`a_1, i_1, f_1, o_1`, from the previous cell
        state ``c1`` and the previous outgoing signal ``h1`` of the first
        child node. Different parameters are used for different kinds of
        input sources.
"""
return SLSTM().apply((c_prev1, c_prev2, x1, x2))
chainer-master/chainer/functions/rnn/n_step_lstm.py
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.functions.array import reshape
from chainer.functions.array import stack
from chainer.functions.connection import linear
from chainer.functions.rnn import lstm
from chainer.functions.rnn import n_step_rnn
from chainer.utils import argument
from chainer import variable
import chainerx
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
def _extract_apply_in_data(inputs):
if not inputs:
return False, ()
if chainerx.is_available():
has_chainerx_array = False
# Unwrap arrays
arrays = []
for x in inputs:
if isinstance(x, variable.Variable):
if x._has_chainerx_array:
arrays.append(x._data[0])
has_chainerx_array = True
else:
arrays.append(x.array)
else: # x is ndarray
arrays.append(x)
if not has_chainerx_array:
if isinstance(x, chainerx.ndarray):
has_chainerx_array = True
return has_chainerx_array, tuple(arrays)
else:
return False, tuple([
x.array if isinstance(x, variable.Variable) else x
for x in inputs])
def _combine_inputs(hx, cx, ws, bs, xs, num_layers, directions):
combined = []
combined.append(hx)
combined.append(cx)
for x in xs:
combined.append(x)
for n in range(num_layers):
for direction in range(directions):
idx = directions * n + direction
for i in range(8):
combined.append(ws[idx][i])
for i in range(8):
combined.append(bs[idx][i])
return combined
def _seperate_inputs(combined, num_layers, seq_length, directions):
hx = combined[0]
cx = combined[1]
xs = combined[2: 2 + seq_length]
ws = []
bs = []
index = 2 + seq_length
for n in range(num_layers):
ws.append(combined[index: index + 8])
bs.append(combined[index + 8: index + 16])
index += 16
if directions == 2:
ws.append(combined[index: index + 8])
bs.append(combined[index + 8: index + 16])
index += 16
return hx, cx, ws, bs, xs
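# Illustrative sketch (not part of the original module): ``_combine_inputs``
# flattens the nested (hx, cx, ws, bs, xs) structure into one flat list so
# that every array can be processed by ``_extract_apply_in_data`` at once,
# and ``_seperate_inputs`` restores the original structure.  The dummy shapes
# below are assumptions chosen only to demonstrate the round trip.
def _example_combine_separate_roundtrip():
    n_layers, directions, seq_length, n_units = 1, 1, 2, 3
    hx = numpy.zeros((n_layers, 1, n_units), numpy.float32)
    cx = numpy.zeros_like(hx)
    xs = [numpy.zeros((1, n_units), numpy.float32) for _ in range(seq_length)]
    ws = [[numpy.zeros((n_units, n_units), numpy.float32) for _ in range(8)]]
    bs = [[numpy.zeros((n_units,), numpy.float32) for _ in range(8)]]
    combined = _combine_inputs(hx, cx, ws, bs, xs, n_layers, directions)
    hx2, cx2, ws2, bs2, xs2 = _seperate_inputs(
        combined, n_layers, seq_length, directions)
    assert hx2 is hx and cx2 is cx
    assert all(x2 is x for x2, x in zip(xs2, xs))
    assert all(w2 is w for w2, w in zip(ws2[0], ws[0]))
    assert all(b2 is b for b2, b in zip(bs2[0], bs[0]))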
def _stack_weight(ws):
# TODO(unno): Input of the current LSTM implementation is shuffled
w = stack.stack(ws, axis=1)
shape = w.shape
return reshape.reshape(w, (shape[0] * shape[1],) + shape[2:])
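# Added note (not in the original source): ``_stack_weight`` interleaves the
# given matrices per output unit.  Stacking four ``(N, I)`` matrices along
# axis 1 gives ``(N, 4, I)``, and the reshape yields a ``(4 * N, I)`` matrix
# whose row ``4 * n + j`` is row ``n`` of the j-th input matrix, i.e. the
# per-unit gate interleaving that ``F.lstm`` expects from its input.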
class NStepLSTM(n_step_rnn.BaseNStepRNN):
def __init__(self, n_layers, states, lengths):
n_step_rnn.BaseNStepRNN.__init__(
self, n_layers, states, lengths,
rnn_dir='uni', rnn_mode='lstm')
class NStepBiLSTM(n_step_rnn.BaseNStepRNN):
def __init__(self, n_layers, states, lengths):
n_step_rnn.BaseNStepRNN.__init__(
self, n_layers, states, lengths,
rnn_dir='bi', rnn_mode='lstm')
def n_step_lstm(
n_layers, dropout_ratio, hx, cx, ws, bs, xs, **kwargs):
"""n_step_lstm(n_layers, dropout_ratio, hx, cx, ws, bs, xs)
Stacked Uni-directional Long Short-Term Memory function.
This function calculates stacked Uni-directional LSTM with sequences.
This function gets an initial hidden state :math:`h_0`, an initial cell
state :math:`c_0`, an input sequence :math:`x`, weight matrices :math:`W`,
and bias vectors :math:`b`.
    This function calculates the hidden state :math:`h_t` and the cell state
    :math:`c_t` for each time :math:`t` from the input :math:`x_t`.
.. math::
i_t &= \\sigma(W_0 x_t + W_4 h_{t-1} + b_0 + b_4) \\\\
f_t &= \\sigma(W_1 x_t + W_5 h_{t-1} + b_1 + b_5) \\\\
o_t &= \\sigma(W_2 x_t + W_6 h_{t-1} + b_2 + b_6) \\\\
a_t &= \\tanh(W_3 x_t + W_7 h_{t-1} + b_3 + b_7) \\\\
c_t &= f_t \\cdot c_{t-1} + i_t \\cdot a_t \\\\
h_t &= o_t \\cdot \\tanh(c_t)
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Eight weight matrices and eight bias vectors are
required for each layer. So, when :math:`S` layers exist, you need to
prepare :math:`8S` weight matrices and :math:`8S` bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, the input
of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
    Note that all input variables except those of the first layer may have
    a different shape from those of the first layer.
Args:
n_layers(int): The number of layers.
dropout_ratio(float): Dropout ratio.
hx (:class:`~chainer.Variable`):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is the number of layers and
is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
is the dimension of the hidden units.
cx (:class:`~chainer.Variable`): Variable holding stacked cell states.
It has the same shape as ``hx``.
ws (list of list of :class:`~chainer.Variable`): Weight matrices.
``ws[i]`` represents the weights for the i-th layer.
Each ``ws[i]`` is a list containing eight matrices.
``ws[i][j]`` corresponds to :math:`W_j` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 4`` are ``(N, I)``-shaped as
they are multiplied with input variables, where ``I`` is the size
of the input and ``N`` is the dimension of the hidden units. All
other matrices are ``(N, N)``-shaped.
bs (list of list of :class:`~chainer.Variable`): Bias vectors.
``bs[i]`` represents the biases for the i-th layer.
Each ``bs[i]`` is a list containing eight vectors.
``bs[i][j]`` corresponds to :math:`b_j` in the equation.
The shape of each matrix is ``(N,)`` where ``N`` is the dimension
of the hidden units.
xs (list of :class:`~chainer.Variable`):
A list of :class:`~chainer.Variable`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
mini-batch size for time ``t``. The sequences must be transposed.
:func:`~chainer.functions.transpose_sequence` can be used to
transpose a list of :class:`~chainer.Variable`\\ s each
representing a sequence.
            When sequences have different lengths, they must be
sorted in descending order of their lengths before transposing.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing three elements,
``hy``, ``cy`` and ``ys``.
        - ``hy`` is the updated hidden states whose shape is the same as
          ``hx``.
        - ``cy`` is the updated cell states whose shape is the same as
          ``cx``.
- ``ys`` is a list of :class:`~chainer.Variable` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
the mini-batch size for time ``t``, and ``N`` is size of hidden
          units. Note that ``B_t`` is the same value as ``xs[t].shape[0]``.
.. note::
The dimension of hidden units is limited to only one size ``N``. If you
want to use variable dimension of hidden units, please use
:class:`chainer.functions.lstm`.
.. seealso::
:func:`chainer.functions.lstm`
.. admonition:: Example
>>> batchs = [3, 2, 1] # support variable length sequences
>>> in_size, out_size, n_layers = 3, 2, 2
>>> dropout_ratio = 0.0
>>> xs = [np.ones((b, in_size)).astype(np.float32) for b in batchs]
>>> [x.shape for x in xs]
[(3, 3), (2, 3), (1, 3)]
>>> h_shape = (n_layers, batchs[0], out_size)
>>> hx = np.ones(h_shape).astype(np.float32)
>>> cx = np.ones(h_shape).astype(np.float32)
>>> w_in = lambda i, j: in_size if i == 0 and j < 4 else out_size
>>> ws = []
>>> bs = []
>>> for n in range(n_layers):
... ws.append([np.ones((out_size, w_in(n, i))).astype(np.float32) \
for i in range(8)])
... bs.append([np.ones((out_size,)).astype(np.float32) \
for _ in range(8)])
...
>>> ws[0][0].shape # ws[0][:4].shape are (out_size, in_size)
(2, 3)
>>> ws[1][0].shape # others are (out_size, out_size)
(2, 2)
>>> bs[0][0].shape
(2,)
>>> hy, cy, ys = F.n_step_lstm(
... n_layers, dropout_ratio, hx, cx, ws, bs, xs)
>>> hy.shape
(2, 3, 2)
>>> cy.shape
(2, 3, 2)
>>> [y.shape for y in ys]
[(3, 2), (2, 2), (1, 2)]
"""
return n_step_lstm_base(n_layers, dropout_ratio, hx, cx, ws, bs, xs,
use_bi_direction=False, **kwargs)
def n_step_bilstm(
n_layers, dropout_ratio, hx, cx, ws, bs, xs, **kwargs):
"""n_step_bilstm(n_layers, dropout_ratio, hx, cx, ws, bs, xs)
Stacked Bi-directional Long Short-Term Memory function.
This function calculates stacked Bi-directional LSTM with sequences.
This function gets an initial hidden state :math:`h_0`, an initial cell
state :math:`c_0`, an input sequence :math:`x`, weight matrices :math:`W`,
and bias vectors :math:`b`.
    This function calculates the hidden state :math:`h_t` and the cell state
    :math:`c_t` for each time :math:`t` from the input :math:`x_t`.
.. math::
i^{f}_t &=& \\sigma(W^{f}_0 x_t + W^{f}_4 h_{t-1} + b^{f}_0 + b^{f}_4),
\\\\
f^{f}_t &=& \\sigma(W^{f}_1 x_t + W^{f}_5 h_{t-1} + b^{f}_1 + b^{f}_5),
\\\\
o^{f}_t &=& \\sigma(W^{f}_2 x_t + W^{f}_6 h_{t-1} + b^{f}_2 + b^{f}_6),
\\\\
a^{f}_t &=& \\tanh(W^{f}_3 x_t + W^{f}_7 h_{t-1} + b^{f}_3 + b^{f}_7),
\\\\
c^{f}_t &=& f^{f}_t \\cdot c^{f}_{t-1} + i^{f}_t \\cdot a^{f}_t,
\\\\
h^{f}_t &=& o^{f}_t \\cdot \\tanh(c^{f}_t),
\\\\
i^{b}_t &=& \\sigma(W^{b}_0 x_t + W^{b}_4 h_{t-1} + b^{b}_0 + b^{b}_4),
\\\\
f^{b}_t &=& \\sigma(W^{b}_1 x_t + W^{b}_5 h_{t-1} + b^{b}_1 + b^{b}_5),
\\\\
o^{b}_t &=& \\sigma(W^{b}_2 x_t + W^{b}_6 h_{t-1} + b^{b}_2 + b^{b}_6),
\\\\
a^{b}_t &=& \\tanh(W^{b}_3 x_t + W^{b}_7 h_{t-1} + b^{b}_3 + b^{b}_7),
\\\\
c^{b}_t &=& f^{b}_t \\cdot c^{b}_{t-1} + i^{b}_t \\cdot a^{b}_t, \\\\
h^{b}_t &=& o^{b}_t \\cdot \\tanh(c^{b}_t), \\\\
h_t &=& [h^{f}_t; h^{b}_t]
where :math:`W^{f}` is the weight matrices for forward-LSTM, :math:`W^{b}`
is weight matrices for backward-LSTM.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Eight weight matrices and eight bias vectors are
required for each layer of each direction. So, when :math:`S` layers
exist, you need to prepare :math:`16S` weight matrices and :math:`16S`
bias vectors.
If the number of layers ``n_layers`` is greater than :math:`1`, the input
of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
    Note that all input variables except those of the first layer may have
    a different shape from those of the first layer.
Args:
n_layers(int): The number of layers.
dropout_ratio(float): Dropout ratio.
hx (:class:`~chainer.Variable`):
Variable holding stacked hidden states.
Its shape is ``(2S, B, N)`` where ``S`` is the number of layers and
is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
is the dimension of the hidden units. Because of bi-direction, the
first dimension length is ``2S``.
cx (:class:`~chainer.Variable`): Variable holding stacked cell states.
It has the same shape as ``hx``.
ws (list of list of :class:`~chainer.Variable`): Weight matrices.
``ws[2 * l + m]`` represents the weights for the l-th layer of
the m-th direction. (``m == 0`` means the forward direction and
``m == 1`` means the backward direction.) Each ``ws[i]`` is a
list containing eight matrices. ``ws[i][j]`` corresponds to
:math:`W_j` in the equation. ``ws[0][j]`` and ``ws[1][j]`` where
``0 <= j < 4`` are ``(N, I)``-shaped because they are multiplied
with input variables, where ``I`` is the size of the input.
``ws[i][j]`` where ``2 <= i`` and ``0 <= j < 4`` are
``(N, 2N)``-shaped because they are multiplied with two hidden
layers :math:`h_t = [h^{f}_t; h^{b}_t]`. All other matrices are
``(N, N)``-shaped.
bs (list of list of :class:`~chainer.Variable`): Bias vectors.
``bs[2 * l + m]`` represents the weights for the l-th layer of
m-th direction. (``m == 0`` means the forward direction and
``m == 1`` means the backward direction.)
Each ``bs[i]`` is a list containing eight vectors.
``bs[i][j]`` corresponds to :math:`b_j` in the equation.
The shape of each matrix is ``(N,)``.
xs (list of :class:`~chainer.Variable`):
A list of :class:`~chainer.Variable`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
mini-batch size for time ``t``. The sequences must be transposed.
:func:`~chainer.functions.transpose_sequence` can be used to
transpose a list of :class:`~chainer.Variable`\\ s each
representing a sequence.
            When sequences have different lengths, they must be
sorted in descending order of their lengths before transposing.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing three elements,
``hy``, ``cy`` and ``ys``.
        - ``hy`` is the updated hidden states whose shape is the same as
          ``hx``.
        - ``cy`` is the updated cell states whose shape is the same as
          ``cx``.
- ``ys`` is a list of :class:`~chainer.Variable` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, 2N)`` where ``B_t``
is the mini-batch size for time ``t``, and ``N`` is size of
          hidden units. Note that ``B_t`` is the same value as
          ``xs[t].shape[0]``.
.. admonition:: Example
>>> batchs = [3, 2, 1] # support variable length sequences
>>> in_size, out_size, n_layers = 3, 2, 2
>>> dropout_ratio = 0.0
>>> xs = [np.ones((b, in_size)).astype(np.float32) for b in batchs]
>>> [x.shape for x in xs]
[(3, 3), (2, 3), (1, 3)]
>>> h_shape = (n_layers * 2, batchs[0], out_size)
>>> hx = np.ones(h_shape).astype(np.float32)
>>> cx = np.ones(h_shape).astype(np.float32)
>>> def w_in(i, j):
... if i == 0 and j < 4:
... return in_size
... elif i > 0 and j < 4:
... return out_size * 2
... else:
... return out_size
...
>>> ws = []
>>> bs = []
>>> for n in range(n_layers):
... for direction in (0, 1):
... ws.append([np.ones((out_size, w_in(n, i))).\
astype(np.float32) for i in range(8)])
... bs.append([np.ones((out_size,)).astype(np.float32) \
for _ in range(8)])
...
>>> ws[0][0].shape # ws[0:2][:4].shape are (out_size, in_size)
(2, 3)
>>> ws[2][0].shape # ws[2:][:4].shape are (out_size, 2 * out_size)
(2, 4)
>>> ws[0][4].shape # others are (out_size, out_size)
(2, 2)
>>> bs[0][0].shape
(2,)
>>> hy, cy, ys = F.n_step_bilstm(
... n_layers, dropout_ratio, hx, cx, ws, bs, xs)
>>> hy.shape
(4, 3, 2)
>>> cy.shape
(4, 3, 2)
>>> [y.shape for y in ys]
[(3, 4), (2, 4), (1, 4)]
"""
return n_step_lstm_base(n_layers, dropout_ratio, hx, cx, ws, bs, xs,
use_bi_direction=True, **kwargs)
def n_step_lstm_base(
n_layers, dropout_ratio, hx, cx, ws, bs, xs, use_bi_direction,
**kwargs):
"""Base function for Stack LSTM/BiLSTM functions.
This function is used at :func:`chainer.functions.n_step_lstm` and
:func:`chainer.functions.n_step_bilstm`.
    This function's behavior depends on the argument ``use_bi_direction``.
Args:
n_layers(int): The number of layers.
dropout_ratio(float): Dropout ratio.
hx (:class:`~chainer.Variable`):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is the number of layers and
is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
is the dimension of the hidden units.
cx (:class:`~chainer.Variable`): Variable holding stacked cell states.
It has the same shape as ``hx``.
ws (list of list of :class:`~chainer.Variable`): Weight matrices.
``ws[i]`` represents the weights for the i-th layer.
Each ``ws[i]`` is a list containing eight matrices.
``ws[i][j]`` corresponds to :math:`W_j` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 4`` are ``(N, I)``-shape as they
are multiplied with input variables, where ``I`` is the size of
the input and ``N`` is the dimension of the hidden units. All
other matrices are ``(N, N)``-shaped.
bs (list of list of :class:`~chainer.Variable`): Bias vectors.
``bs[i]`` represents the biases for the i-th layer.
Each ``bs[i]`` is a list containing eight vectors.
``bs[i][j]`` corresponds to :math:`b_j` in the equation.
The shape of each matrix is ``(N,)``.
xs (list of :class:`~chainer.Variable`):
A list of :class:`~chainer.Variable`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
mini-batch size for time ``t``. The sequences must be transposed.
:func:`~chainer.functions.transpose_sequence` can be used to
transpose a list of :class:`~chainer.Variable`\\ s each
representing a sequence.
            When sequences have different lengths, they must be
sorted in descending order of their lengths before transposing.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
use_bi_direction (bool): If ``True``, this function uses Bi-directional
LSTM.
Returns:
tuple: This function returns a tuple containing three elements,
``hy``, ``cy`` and ``ys``.
        - ``hy`` is the updated hidden states whose shape is the same as
          ``hx``.
        - ``cy`` is the updated cell states whose shape is the same as
          ``cx``.
- ``ys`` is a list of :class:`~chainer.Variable` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
the mini-batch size for time ``t``. Note that ``B_t`` is the same
          value as ``xs[t].shape[0]``.
.. seealso::
:func:`chainer.functions.n_step_lstm`
:func:`chainer.functions.n_step_bilstm`
"""
if kwargs:
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config',
use_cudnn='use_cudnn argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
# Check input size consistency with xs and ws here.
x_in = xs[0].shape[1]
w_in = ws[0][0].shape[1]
if x_in != w_in:
raise ValueError('Inconsistent input size in input values and weight '
'parameters: {} != {}'.format(x_in, w_in))
xp = backend.get_array_module(hx, hx.data)
use_cuda = xp is cuda.cupy or (
xp is chainerx and hx.device.device.backend.name == 'cuda')
directions = 1
if use_bi_direction:
directions = 2
combined = _combine_inputs(hx, cx, ws, bs, xs, n_layers, directions)
has_chainerx_array, combined = _extract_apply_in_data(combined)
hx_chx, cx_chx, ws_chx, bs_chx, xs_chx = _seperate_inputs(
combined, n_layers, len(xs), directions)
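    # Dispatch (summary added for clarity): (1) ChainerX's fused n-step LSTM
    # when every input is backed by a ChainerX array and no dropout is
    # requested, (2) cuDNN's RNN kernels on CUDA devices, and otherwise
    # (3) a pure Chainer fallback built from per-step F.lstm calls in
    # ``n_step_rnn.n_step_rnn_impl``.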
if has_chainerx_array and xp is chainerx and dropout_ratio == 0:
if use_bi_direction:
hy, cy, ys = chainerx.n_step_bilstm(
n_layers, hx_chx, cx_chx, ws_chx, bs_chx, xs_chx)
else:
hy, cy, ys = chainerx.n_step_lstm(
n_layers, hx_chx, cx_chx, ws_chx, bs_chx, xs_chx)
hy = variable.Variable._init_unchecked(
hy, requires_grad=hy.is_backprop_required(),
is_chainerx_array=True)
cy = variable.Variable._init_unchecked(
cy, requires_grad=cy.is_backprop_required(),
is_chainerx_array=True)
ys = [variable.Variable._init_unchecked(
y, requires_grad=y.is_backprop_required(),
is_chainerx_array=True)
for y in ys]
return hy, cy, ys
elif use_cuda and chainer.should_use_cudnn('>=auto', 5000):
lengths = [len(x) for x in xs]
xs = chainer.functions.concat(xs, axis=0)
with chainer.using_device(xs.device):
states = cuda.get_cudnn_dropout_states()
states.set_dropout_ratio(dropout_ratio)
w = n_step_rnn.cudnn_rnn_weight_concat(
n_layers, states, use_bi_direction, 'lstm', ws, bs)
if use_bi_direction:
rnn = NStepBiLSTM
else:
rnn = NStepLSTM
hy, cy, ys = rnn(n_layers, states, lengths)(hx, cx, w, xs)
sections = numpy.cumsum(lengths[:-1])
ys = chainer.functions.split_axis(ys, sections, 0)
return hy, cy, ys
else:
return n_step_rnn.n_step_rnn_impl(
_lstm, n_layers, dropout_ratio, hx, cx, ws, bs, xs,
use_bi_direction)
def _lstm(x, h, c, w, b):
xw = _stack_weight([w[2], w[0], w[1], w[3]])
hw = _stack_weight([w[6], w[4], w[5], w[7]])
xb = _stack_weight([b[2], b[0], b[1], b[3]])
hb = _stack_weight([b[6], b[4], b[5], b[7]])
lstm_in = linear.linear(x, xw, xb) + linear.linear(h, hw, hb)
c_bar, h_bar = lstm.lstm(c, lstm_in)
return h_bar, c_bar
chainer-master/chainer/functions/rnn/__init__.py
chainer-master/chainer/functions/rnn/n_step_rnn.py
import itertools
import numpy
import six
import chainer
import chainerx
from chainer import backend
from chainer import variable
from chainer.backends import cuda
from chainer import configuration
from chainer import function
from chainer.functions.activation import relu
from chainer.functions.activation import tanh
from chainer.functions.array import concat
from chainer.functions.array import split_axis
from chainer.functions.array import stack
from chainer.functions.connection import linear
from chainer.functions.noise import dropout
from chainer.utils import argument
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cuda.libcudnn
_cudnn_version = libcudnn.getVersion()
if cuda.cudnn_enabled and _cudnn_version >= 5000:
# Define RNN parameters using dict.
_rnn_dirs = {
'uni': libcudnn.CUDNN_UNIDIRECTIONAL,
'bi': libcudnn.CUDNN_BIDIRECTIONAL,
}
_rnn_modes = {
'rnn_relu': libcudnn.CUDNN_RNN_RELU,
'rnn_tanh': libcudnn.CUDNN_RNN_TANH,
'gru': libcudnn.CUDNN_GRU,
'lstm': libcudnn.CUDNN_LSTM,
}
_rnn_n_params = {
libcudnn.CUDNN_RNN_RELU: 2,
libcudnn.CUDNN_RNN_TANH: 2,
libcudnn.CUDNN_GRU: 6,
libcudnn.CUDNN_LSTM: 8,
}
_rnn_params_direction = {
libcudnn.CUDNN_UNIDIRECTIONAL: 1,
libcudnn.CUDNN_BIDIRECTIONAL: 2,
}
_rnn_params_use_cell = {
libcudnn.CUDNN_RNN_RELU: False,
libcudnn.CUDNN_RNN_TANH: False,
libcudnn.CUDNN_GRU: False,
libcudnn.CUDNN_LSTM: True,
}
def _extract_apply_in_data(inputs):
if not inputs:
return False, ()
if chainerx.is_available():
has_chainerx_array = False
# Unwrap arrays
arrays = []
for x in inputs:
if isinstance(x, variable.Variable):
if x._has_chainerx_array:
arrays.append(x._data[0])
has_chainerx_array = True
else:
arrays.append(x.array)
else: # x is ndarray
arrays.append(x)
if not has_chainerx_array:
if isinstance(x, chainerx.ndarray):
has_chainerx_array = True
return has_chainerx_array, tuple(arrays)
else:
return False, tuple([
x.array if isinstance(x, variable.Variable) else x
for x in inputs])
def _combine_inputs(hx, ws, bs, xs, num_layers, directions):
combined = []
combined.append(hx)
for x in xs:
combined.append(x)
for n in range(num_layers):
for direction in range(directions):
idx = directions * n + direction
for i in range(2):
combined.append(ws[idx][i])
for i in range(2):
combined.append(bs[idx][i])
return combined
def _seperate_inputs(combined, num_layers, seq_length, directions):
hx = combined[0]
xs = combined[1: 1 + seq_length]
ws = []
bs = []
index = 1 + seq_length
for n in range(num_layers):
ws.append(combined[index: index + 2])
bs.append(combined[index + 2: index + 4])
index += 4
if directions == 2:
ws.append(combined[index: index + 2])
bs.append(combined[index + 2: index + 4])
index += 4
return hx, ws, bs, xs
class CudnnRNNWeightConcat(function.Function):
"""Concatenates weight matrices for cuDNN's RNN.
This function concatenates weight matrices for RNNs into one large array.
Its format is defined in cuDNN's API.
"""
def __init__(self, n_layers, states, rnn_dir, rnn_mode):
self.n_layers = n_layers
self.states = states
self.rnn_dir = _rnn_dirs[rnn_dir]
self.rnn_mode = _rnn_modes[rnn_mode]
self.rnn_direction = _rnn_params_direction[self.rnn_dir]
self.n_W = _rnn_n_params[self.rnn_mode]
def check_type_forward(self, in_types):
n_params = self.n_layers * self.rnn_direction * self.n_W
type_check.expect(
in_types.size() == n_params * 2)
w_types = in_types[:n_params]
b_types = in_types[n_params:]
in_size = w_types[0].shape[1]
out_size = w_types[0].shape[0]
dtype = w_types[0].dtype
type_check.expect(dtype.kind == 'f')
for layer in six.moves.range(self.n_layers):
for di in six.moves.range(self.rnn_direction):
for i in six.moves.range(self.n_W):
ind = (layer * self.rnn_direction + di) * self.n_W + i
w_type = w_types[ind]
b_type = b_types[ind]
if self.rnn_direction == 1:
# Uni-direction
if layer == 0 and i < (self.n_W // 2):
w_in = in_size
else:
w_in = out_size
else:
# Bi-direction
if layer == 0 and i < (self.n_W // 2):
w_in = in_size
elif layer > 0 and i < (self.n_W // 2):
w_in = out_size * self.rnn_direction
else:
w_in = out_size
type_check.expect(
w_type.dtype == dtype,
w_type.ndim == 2,
w_type.shape[0] == out_size,
w_type.shape[1] == w_in,
b_type.dtype == dtype,
b_type.ndim == 1,
b_type.shape[0] == out_size,
)
def forward_gpu(self, inputs):
handle = cudnn.get_handle()
ws_size = self.n_layers * self.rnn_direction * self.n_W
ws = inputs[:ws_size]
bs = inputs[ws_size:]
out_size = ws[0].shape[0]
in_size = ws[0].shape[1]
dtype = ws[0].dtype
cudnn_data_type = cudnn.get_data_type(dtype)
# TODO(unno): Make a wrapper method to avoid access _desc directly
rnn_desc = cudnn.create_rnn_descriptor(
out_size, self.n_layers, self.states._desc,
libcudnn.CUDNN_LINEAR_INPUT, self.rnn_dir,
self.rnn_mode, cudnn_data_type)
self.rnn_desc = rnn_desc
dummy_x = cuda.cupy.empty((1, in_size, 1), dtype=dtype)
x_desc = cudnn.create_tensor_nd_descriptor(dummy_x)
weights_size = libcudnn.getRNNParamsSize(
handle, rnn_desc.value, x_desc.value, cudnn_data_type)
byte_size = dtype.itemsize
w = cuda.cupy.empty((weights_size // byte_size, 1, 1), dtype=dtype)
w_desc = cudnn.create_filter_descriptor(w)
for layer in six.moves.range(self.n_layers):
for di in six.moves.range(self.rnn_direction):
mat_index = layer * self.rnn_direction + di
# di = 0: forward, 1: backward
for lin_layer_id in six.moves.range(self.n_W):
mat = cudnn.get_rnn_lin_layer_matrix_params(
handle, rnn_desc, mat_index,
x_desc, w_desc, w, lin_layer_id)
W_index = mat_index * self.n_W + lin_layer_id
m = mat.reshape(mat.size)
m[...] = ws[W_index].ravel()
bias = cudnn.get_rnn_lin_layer_bias_params(
handle, rnn_desc, mat_index,
x_desc, w_desc, w, lin_layer_id)
b = bias.reshape(bias.size)
b[...] = bs[W_index]
self.w_desc = w_desc
self.x_desc = x_desc
return w,
def backward(self, inputs, grads):
handle = cudnn.get_handle()
ws_size = self.n_layers * self.rnn_direction * self.n_W
ws = inputs[0:ws_size]
bs = inputs[ws_size:]
rnn_desc = self.rnn_desc
dw = grads[0]
dw_desc = cudnn.create_filter_descriptor(dw)
dx_desc = self.x_desc
dws = []
dbs = []
for layer in six.moves.range(self.n_layers):
for di in six.moves.range(self.rnn_direction):
mat_index = layer * self.rnn_direction + di
for lin_layer_id in six.moves.range(self.n_W):
mat = cudnn.get_rnn_lin_layer_matrix_params(
handle, rnn_desc, mat_index,
dx_desc, dw_desc, dw, lin_layer_id)
W_index = mat_index * self.n_W + lin_layer_id
dws.append(mat.reshape(ws[W_index].shape))
bias = cudnn.get_rnn_lin_layer_bias_params(
handle, rnn_desc, mat_index,
dx_desc, dw_desc, dw, lin_layer_id)
dbs.append(bias.reshape(bs[W_index].shape))
return tuple(dws + dbs)
def cudnn_rnn_weight_concat(
n_layers, states, use_bi_direction, rnn_mode, ws, bs):
rnn_dir = 'bi' if use_bi_direction else 'uni'
inputs = itertools.chain(
itertools.chain.from_iterable(ws),
itertools.chain.from_iterable(bs),
)
return CudnnRNNWeightConcat(n_layers, states, rnn_dir, rnn_mode)(*inputs)
class BaseNStepRNN(function.Function):
def __init__(self, n_layers, states, lengths, rnn_dir, rnn_mode, **kwargs):
if kwargs:
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
if rnn_dir not in _rnn_dirs:
candidate_list = ','.join(_rnn_dirs.keys())
raise ValueError('Invalid rnn_dir: "%s". Please select from [%s]'
% (rnn_dir, candidate_list))
if rnn_mode not in _rnn_modes:
candidate_list = ','.join(_rnn_modes.keys())
raise ValueError('Invalid rnn_mode: "%s". Please select from [%s]'
% (rnn_mode, candidate_list))
self.rnn_dir = _rnn_dirs[rnn_dir]
self.rnn_mode = _rnn_modes[rnn_mode]
self.rnn_direction = _rnn_params_direction[self.rnn_dir]
self.n_layers = n_layers
self.states = states
self.use_cell = _rnn_params_use_cell[self.rnn_mode]
self.lengths = lengths
self.sections = numpy.cumsum(lengths)
def check_type_forward(self, in_types):
if self.use_cell:
type_check.expect(in_types.size() == 4)
h_type, c_type, w_type, x_type = in_types
h_size = self.n_layers * self.rnn_direction
type_check.expect(
h_type.dtype == x_type.dtype,
c_type.dtype == x_type.dtype,
h_type.ndim == 3,
h_type.shape[0] == h_size,
c_type.ndim == 3,
c_type.shape[0] == h_size,
# mini-batch size
h_type.shape[1] == c_type.shape[1],
# hidden size
h_type.shape[2] == c_type.shape[2],
)
else:
type_check.expect(in_types.size() == 3)
h_type, w_type, x_type = in_types
h_size = self.n_layers * self.rnn_direction
type_check.expect(
h_type.dtype == x_type.dtype,
h_type.ndim == 3,
h_type.shape[0] == h_size,
)
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 2,
x_type.shape[0] == self.sections[-1],
)
def forward_gpu(self, inputs):
if self.use_cell:
# LSTM
hx, cx, w, xs = inputs
else:
# RNN, GRU
hx, w, xs = inputs
cx = None
if not configuration.config.train:
hy, cy, ys = cudnn.rnn_forward_inference(
self.states, self.rnn_dir, self.rnn_mode,
hx, cx, w, xs, self.lengths)
else:
self.reserve_space, hy, cy, ys = cudnn.rnn_forward_training(
self.states, self.rnn_dir, self.rnn_mode,
hx, cx, w, xs, self.lengths)
if self.use_cell:
# LSTM
self.retain_outputs((2,))
return hy, cy, ys
else:
# GRU, RNN
self.retain_outputs((1,))
return hy, ys
def backward(self, inputs, grads):
if not configuration.config.train:
raise RuntimeError('cuDNN does not support backward computation '
'of RNN in testing mode')
if self.use_cell:
# LSTM
hx, cx, w, xs = inputs
dhy, dcy, dys = grads
if dcy is None:
dcy = cuda.cupy.zeros_like(cx)
else:
# GRU, RNN
hx, w, xs = inputs
dhy, dys = grads
dcy = cx = None
ys = self.output_data[-1]
if dhy is None:
dhy = cuda.cupy.zeros_like(hx)
if dys is None:
dys = cuda.cupy.zeros_like(ys)
dhx, dcx, dxs = cudnn.rnn_backward_data(
self.states, self.rnn_dir, self.rnn_mode,
hx, cx, w, xs, ys, self.reserve_space,
dhy, dcy, dys, self.lengths)
dw = cudnn.rnn_backward_weights(
self.states, self.rnn_dir, self.rnn_mode,
xs, hx, ys, w, self.reserve_space, self.lengths)
if self.use_cell:
# LSTM
return dhx, dcx, dw, dxs
else:
# GRU, RNN
return dhx, dw, dxs
class NStepRNNTanh(BaseNStepRNN):
def __init__(self, n_layers, states, lengths, **kwargs):
BaseNStepRNN.__init__(
self, n_layers, states, lengths,
rnn_dir='uni', rnn_mode='rnn_tanh', **kwargs)
class NStepRNNReLU(BaseNStepRNN):
def __init__(self, n_layers, states, lengths, **kwargs):
BaseNStepRNN.__init__(
self, n_layers, states, lengths,
rnn_dir='uni', rnn_mode='rnn_relu', **kwargs)
class NStepBiRNNTanh(BaseNStepRNN):
def __init__(self, n_layers, states, lengths, **kwargs):
BaseNStepRNN.__init__(
self, n_layers, states, lengths,
rnn_dir='bi', rnn_mode='rnn_tanh', **kwargs)
class NStepBiRNNReLU(BaseNStepRNN):
def __init__(self, n_layers, states, lengths, **kwargs):
BaseNStepRNN.__init__(
self, n_layers, states, lengths,
rnn_dir='bi', rnn_mode='rnn_relu', **kwargs)
def n_step_rnn(
n_layers, dropout_ratio, hx, ws, bs, xs, activation='tanh', **kwargs):
"""n_step_rnn(n_layers, dropout_ratio, hx, ws, bs, xs, activation='tanh')
Stacked Uni-directional RNN function for sequence inputs.
This function calculates stacked Uni-directional RNN with sequences.
    This function gets an initial hidden state :math:`h_0`, an input
    sequence :math:`x`, weight matrices :math:`W`, and bias vectors
    :math:`b`.
    This function calculates the hidden state :math:`h_t` for each time
    :math:`t` from the input :math:`x_t`.
.. math::
h_t = f(W_0 x_t + W_1 h_{t-1} + b_0 + b_1)
where :math:`f` is an activation function.
    Weight matrices :math:`W` contain two matrices :math:`W_0` and
    :math:`W_1`. :math:`W_0` is a parameter for an input sequence.
    :math:`W_1` is a parameter for a hidden state.
    Bias vectors :math:`b` contain two vectors :math:`b_0` and :math:`b_1`.
    :math:`b_0` is a parameter for an input sequence.
    :math:`b_1` is a parameter for a hidden state.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Two weight matrices and two bias vectors are
required for each layer. So, when :math:`S` layers exist, you need to
prepare :math:`2S` weight matrices and :math:`2S` bias vectors.
    If the number of layers ``n_layers`` is greater than :math:`1`, the
    input of the ``k``-th layer is the hidden state ``h_t`` of the
    ``k-1``-th layer. Note that all input variables except those of the
    first layer may have a different shape from those of the first layer.
Args:
n_layers(int): Number of layers.
dropout_ratio(float): Dropout ratio.
hx (:class:`~chainer.Variable`):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
        ws (list of list of :class:`~chainer.Variable`): Weight matrices.
            ``ws[i]`` represents the weights for the i-th layer.
            Each ``ws[i]`` is a list containing two matrices.
            ``ws[i][j]`` corresponds to ``W_j`` in the equation.
            Only ``ws[0][0]`` is ``(N, I)``-shaped as it is multiplied with
            the input variables. All other matrices are ``(N, N)``-shaped.
        bs (list of list of :class:`~chainer.Variable`): Bias vectors.
            ``bs[i]`` represents the biases for the i-th layer.
            Each ``bs[i]`` is a list containing two vectors.
            ``bs[i][j]`` corresponds to ``b_j`` in the equation.
            The shape of each vector is ``(N,)`` where ``N`` is the
            dimension of the hidden units.
xs (list of :class:`~chainer.Variable`):
A list of :class:`~chainer.Variable` holding input values.
Each element ``xs[t]`` holds input value for time ``t``.
Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
            Note that this function supports variable length sequences.
            When sequences have different lengths, sort them in descending
            order of length and transpose the sorted sequences.
            :func:`~chainer.functions.transpose_sequence` transposes a list
            of :class:`~chainer.Variable`\\ s each holding a sequence.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
activation (str): Activation function name.
Please select ``tanh`` or ``relu``.
Returns:
tuple: This function returns a tuple containing two elements,
``hy`` and ``ys``.
        - ``hy`` is the updated hidden states whose shape is the same as
          ``hx``.
- ``ys`` is a list of :class:`~chainer.Variable` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
mini-batch size for time ``t``, and ``N`` is size of hidden
          units. Note that ``B_t`` is the same value as ``xs[t].shape[0]``.
"""
return n_step_rnn_base(n_layers, dropout_ratio, hx, ws, bs, xs,
activation, use_bi_direction=False, **kwargs)
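# Illustrative usage sketch (not part of the original module); the shapes and
# values below are assumptions chosen only for demonstration.  With two
# layers and three variable-length sequences sorted by length, each ``ws[i]``
# holds the two matrices W_0, W_1 and each ``bs[i]`` the two vectors b_0, b_1
# from the equation above.
def _example_n_step_rnn():
    batches = [3, 2, 1]
    in_size, out_size, n_layers = 3, 2, 2
    xs = [numpy.ones((b, in_size), numpy.float32) for b in batches]
    hx = numpy.zeros((n_layers, batches[0], out_size), numpy.float32)
    def w_in(layer, j):
        # Only the first layer's input weight W_0 sees the raw input.
        return in_size if layer == 0 and j == 0 else out_size
    ws = [[numpy.ones((out_size, w_in(layer, j)), numpy.float32)
           for j in range(2)] for layer in range(n_layers)]
    bs = [[numpy.zeros((out_size,), numpy.float32)
           for _ in range(2)] for layer in range(n_layers)]
    hy, ys = n_step_rnn(n_layers, 0.0, hx, ws, bs, xs, activation='tanh')
    assert hy.shape == (n_layers, batches[0], out_size)
    assert [y.shape for y in ys] == [(3, 2), (2, 2), (1, 2)]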
def n_step_birnn(
n_layers, dropout_ratio, hx, ws, bs, xs, activation='tanh', **kwargs):
"""n_step_birnn(n_layers, dropout_ratio, hx, ws, bs, xs, activation='tanh')
Stacked Bi-directional RNN function for sequence inputs.
This function calculates stacked Bi-directional RNN with sequences.
    This function gets an initial hidden state :math:`h_0`, an input
    sequence :math:`x`, weight matrices :math:`W`, and bias vectors
    :math:`b`.
    This function calculates the hidden state :math:`h_t` for each time
    :math:`t` from the input :math:`x_t`.
.. math::
h^{f}_t &=& f(W^{f}_0 x_t + W^{f}_1 h_{t-1} + b^{f}_0 + b^{f}_1), \\\\
h^{b}_t &=& f(W^{b}_0 x_t + W^{b}_1 h_{t-1} + b^{b}_0 + b^{b}_1), \\\\
        h_t &=& [h^{f}_t; h^{b}_t],
where :math:`f` is an activation function.
    Weight matrices :math:`W` contain two sets :math:`W^{f}` and
    :math:`W^{b}`. :math:`W^{f}` are the weight matrices for the forward
    RNN and :math:`W^{b}` are the weight matrices for the backward RNN.
    :math:`W^{f}` contains :math:`W^{f}_0` for an input sequence and
    :math:`W^{f}_1` for a hidden state.
    :math:`W^{b}` contains :math:`W^{b}_0` for an input sequence and
    :math:`W^{b}_1` for a hidden state.
    Bias vectors :math:`b` contain two sets :math:`b^{f}` and :math:`b^{b}`.
    :math:`b^{f}` contains :math:`b^{f}_0` for an input sequence
    and :math:`b^{f}_1` for a hidden state.
:math:`b^{b}` contains :math:`b^{b}_0` for an input sequence and
:math:`b^{b}_1` for a hidden state.
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Two weight matrices and two bias vectors are
required for each layer. So, when :math:`S` layers exist, you need to
prepare :math:`2S` weight matrices and :math:`2S` bias vectors.
    If the number of layers ``n_layers`` is greater than :math:`1`, the
    input of the ``k``-th layer is the hidden state ``h_t`` of the
    ``k-1``-th layer. Note that all input variables except those of the
    first layer may have a different shape from those of the first layer.
Args:
n_layers(int): Number of layers.
dropout_ratio(float): Dropout ratio.
hx (:class:`~chainer.Variable`):
Variable holding stacked hidden states.
Its shape is ``(2S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units. Because of bi-direction, the
first dimension length is ``2S``.
ws (list of list of :class:`~chainer.Variable`): Weight matrices.
            ``ws[2 * i + di]`` represents the weights for the i-th layer of
            the di-th direction. (``di = 0`` means the forward direction and
            ``di = 1`` means the backward direction.)
            Each ``ws[2 * i + di]`` is a list containing two matrices.
            ``ws[2 * i + di][j]`` corresponds to ``W^{f}_j`` if ``di = 0``
            and to ``W^{b}_j`` if ``di = 1`` in the equation.
            Only ``ws[0][0]`` and ``ws[1][0]`` are ``(N, I)``-shaped as they
            are multiplied with the input variables. ``ws[i][0]`` for
            ``i >= 2`` is ``(N, 2N)``-shaped because it is multiplied with
            the concatenated bidirectional hidden state. All other matrices
            are ``(N, N)``-shaped.
bs (list of list of :class:`~chainer.Variable`): Bias vectors.
            ``bs[2 * i + di]`` represents the biases for the i-th layer of
            the di-th direction. (``di = 0`` means the forward direction and
            ``di = 1`` means the backward direction.)
            Each ``bs[2 * i + di]`` is a list containing two vectors.
            ``bs[2 * i + di][j]`` corresponds to ``b^{f}_j`` if ``di = 0``
            and to ``b^{b}_j`` if ``di = 1`` in the equation.
            The shape of each vector is ``(N,)`` where ``N`` is the
            dimension of the hidden units.
xs (list of :class:`~chainer.Variable`):
A list of :class:`~chainer.Variable` holding input values.
Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
            Note that this function supports variable length sequences.
            When sequences have different lengths, sort them in descending
            order of length and transpose the sorted sequences.
            :func:`~chainer.functions.transpose_sequence` transposes a list
            of :class:`~chainer.Variable`\\ s each holding a sequence.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
activation (str): Activation function name.
Please select ``tanh`` or ``relu``.
Returns:
        tuple: This function returns a tuple containing two elements,
        ``hy`` and ``ys``.
        - ``hy`` is the updated hidden states whose shape is the same as
          ``hx``.
- ``ys`` is a list of :class:`~chainer.Variable` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t``
is mini-batch size for time ``t``, and ``N`` is size of hidden
          units. Note that ``B_t`` is the same value as ``xs[t].shape[0]``.
"""
return n_step_rnn_base(n_layers, dropout_ratio, hx, ws, bs, xs,
activation, use_bi_direction=True)
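# Illustrative usage sketch (not part of the original module); the shapes
# below are assumptions chosen only for demonstration.  For a bidirectional
# stack, ``hx`` has ``2 * n_layers`` rows and the input weight of every layer
# above the first is ``(N, 2N)``-shaped because it consumes the concatenated
# forward/backward hidden state.
def _example_n_step_birnn():
    batches = [3, 2, 1]
    in_size, out_size, n_layers = 3, 2, 2
    xs = [numpy.ones((b, in_size), numpy.float32) for b in batches]
    hx = numpy.zeros((2 * n_layers, batches[0], out_size), numpy.float32)
    def w_in(layer, j):
        if j == 1:  # hidden-to-hidden weight W_1
            return out_size
        if layer == 0:  # the first layer sees the raw input
            return in_size
        return 2 * out_size  # upper layers see the concatenated [h_f; h_b]
    ws, bs = [], []
    for layer in range(n_layers):
        for _direction in range(2):
            ws.append([numpy.ones((out_size, w_in(layer, j)), numpy.float32)
                       for j in range(2)])
            bs.append([numpy.zeros((out_size,), numpy.float32)
                       for _ in range(2)])
    hy, ys = n_step_birnn(n_layers, 0.0, hx, ws, bs, xs)
    assert hy.shape == (2 * n_layers, batches[0], out_size)
    assert [y.shape for y in ys] == [(3, 4), (2, 4), (1, 4)]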
def n_step_rnn_base(n_layers, dropout_ratio, hx, ws, bs, xs,
activation, use_bi_direction, **kwargs):
"""n_step_rnn_base(n_layers, dropout_ratio, hx, ws, bs, xs, activation, \
use_bi_direction)
Base function for Stack RNN/BiRNN functions.
This function is used at :func:`chainer.functions.n_step_birnn` and
:func:`chainer.functions.n_step_rnn`.
    This function's behavior depends on the following arguments:
    ``activation`` and ``use_bi_direction``.
Args:
n_layers(int): Number of layers.
dropout_ratio(float): Dropout ratio.
hx (:class:`~chainer.Variable`):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
        ws (list of list of :class:`~chainer.Variable`): Weight matrices.
            ``ws[i]`` represents the weights for the i-th layer.
            Each ``ws[i]`` is a list containing two matrices.
            ``ws[i][j]`` corresponds to ``W_j`` in the equation.
            Only ``ws[0][0]`` is ``(N, I)``-shaped as it is multiplied with
            the input variables. All other matrices are ``(N, N)``-shaped.
        bs (list of list of :class:`~chainer.Variable`): Bias vectors.
            ``bs[i]`` represents the biases for the i-th layer.
            Each ``bs[i]`` is a list containing two vectors.
            ``bs[i][j]`` corresponds to ``b_j`` in the equation.
            The shape of each vector is ``(N,)`` where ``N`` is the
            dimension of the hidden units.
xs (list of :class:`~chainer.Variable`):
A list of :class:`~chainer.Variable` holding input values.
Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
            Note that this function supports variable length sequences.
            When sequences have different lengths, sort them in descending
            order of length and transpose the sorted sequences.
            :func:`~chainer.functions.transpose_sequence` transposes a list
            of :class:`~chainer.Variable`\\ s each holding a sequence.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
activation (str): Activation function name.
Please select ``tanh`` or ``relu``.
use_bi_direction (bool): If ``True``, this function uses
Bi-directional RNN.
Returns:
        tuple: This function returns a tuple containing two elements,
        ``hy`` and ``ys``.
        - ``hy`` is the updated hidden states whose shape is the same as
          ``hx``.
- ``ys`` is a list of :class:`~chainer.Variable` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t``
is mini-batch size for time ``t``, and ``N`` is size of hidden
          units. Note that ``B_t`` is the same value as ``xs[t].shape[0]``.
.. seealso::
:func:`chainer.functions.n_step_rnn`
:func:`chainer.functions.n_step_birnn`
"""
if kwargs:
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config',
use_cudnn='use_cudnn argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
activation_list = ['tanh', 'relu']
if activation not in activation_list:
candidate = ','.join(activation_list)
raise ValueError('Invalid activation: "%s". Please select from [%s]'
% (activation, candidate))
# Check input size consistency with xs and ws.
x_in = xs[0].shape[1]
w_in = ws[0][0].shape[1]
if x_in != w_in:
raise ValueError('Inconsistent input size in input values and weight '
'parameters: {} != {}'.format(x_in, w_in))
xp = backend.get_array_module(hx)
directions = 1
if use_bi_direction:
directions = 2
combined = _combine_inputs(hx, ws, bs, xs, n_layers, directions)
has_chainerx_array, combined = _extract_apply_in_data(combined)
hx_chx, ws_chx, bs_chx, xs_chx = _seperate_inputs(
combined, n_layers, len(xs), directions)
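    # Dispatch (summary added for clarity): ChainerX's fused kernel when all
    # inputs are ChainerX arrays and dropout is disabled, cuDNN's RNN kernels
    # on CUDA devices, and a per-step pure Chainer loop otherwise.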
if has_chainerx_array and xp is chainerx and dropout_ratio == 0:
if use_bi_direction:
hy, ys = chainerx.n_step_birnn(
n_layers, hx_chx, ws_chx, bs_chx, xs_chx, activation)
else:
hy, ys = chainerx.n_step_rnn(
n_layers, hx_chx, ws_chx, bs_chx, xs_chx, activation)
hy = variable.Variable._init_unchecked(
hy, requires_grad=hy.is_backprop_required(),
is_chainerx_array=True)
ys = [variable.Variable._init_unchecked(
y, requires_grad=y.is_backprop_required(),
is_chainerx_array=True)
for y in ys]
return hy, ys
if xp is cuda.cupy and chainer.should_use_cudnn('>=auto', 5000):
lengths = [len(x) for x in xs]
xs = chainer.functions.concat(xs, axis=0)
with chainer.using_device(xs.device):
states = cuda.get_cudnn_dropout_states()
states.set_dropout_ratio(dropout_ratio)
rnn_mode = 'rnn_%s' % activation
w = cudnn_rnn_weight_concat(
n_layers, states, use_bi_direction, rnn_mode, ws, bs)
if use_bi_direction:
# Bi-directional RNN
if activation == 'tanh':
rnn = NStepBiRNNTanh
elif activation == 'relu':
rnn = NStepBiRNNReLU
else:
# Uni-directional RNN
if activation == 'tanh':
rnn = NStepRNNTanh
elif activation == 'relu':
rnn = NStepRNNReLU
hy, ys = rnn(n_layers, states, lengths)(hx, w, xs)
sections = numpy.cumsum(lengths[:-1])
ys = chainer.functions.split_axis(ys, sections, 0)
return hy, ys
else:
def f(x, h, c, w, b):
xw, hw = w
xb, hb = b
rnn_in = linear.linear(x, xw, xb) + linear.linear(h, hw, hb)
if activation == 'tanh':
return tanh.tanh(rnn_in), None
elif activation == 'relu':
return relu.relu(rnn_in), None
hy, _, ys = n_step_rnn_impl(
f, n_layers, dropout_ratio, hx, None, ws, bs, xs, use_bi_direction)
return hy, ys
def n_step_rnn_impl(
f, n_layers, dropout_ratio, hx, cx, ws, bs, xs, use_bi_direction):
direction = 2 if use_bi_direction else 1
hx = chainer.functions.separate(hx)
use_cell = cx is not None
if use_cell:
cx = chainer.functions.separate(cx)
else:
cx = [None] * len(hx)
xs_next = xs
hy = []
cy = []
for layer in six.moves.range(n_layers):
# Forward RNN
if layer == 0:
xs = xs_next
else:
xs = _dropout_sequence(xs_next, dropout_ratio)
idx = direction * layer
h, c, h_forward = _one_directional_loop(
f, xs, hx[idx], cx[idx], ws[idx], bs[idx])
hy.append(h)
cy.append(c)
if use_bi_direction:
# Backward RNN
idx = direction * layer + 1
if layer == 0:
xs = xs_next
else:
xs = _dropout_sequence(xs_next, dropout_ratio)
h, c, h_backward = _one_directional_loop(
f, reversed(xs), hx[idx], cx[idx], ws[idx], bs[idx])
h_backward.reverse()
# Concat
xs_next = [concat.concat([hfi, hbi], axis=1) for hfi, hbi in
six.moves.zip(h_forward, h_backward)]
hy.append(h)
cy.append(c)
else:
# Uni-directional RNN
xs_next = h_forward
ys = xs_next
hy = stack.stack(hy)
if use_cell:
cy = stack.stack(cy)
else:
cy = None
return hy, cy, tuple(ys)
def _one_directional_loop(f, xs, h, c, w, b):
h_list = []
for x in xs:
batch = len(x)
need_split = len(h) > batch
if need_split:
h, h_rest = split_axis.split_axis(h, [batch], axis=0)
if c is not None:
c, c_rest = split_axis.split_axis(c, [batch], axis=0)
h, c = f(x, h, c, w, b)
h_list.append(h)
if need_split:
h = concat.concat([h, h_rest], axis=0)
if c is not None:
c = concat.concat([c, c_rest], axis=0)
return h, c, h_list
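# Added note (not in the original source): sequences in ``xs`` are sorted by
# length in descending order, so the mini-batch size changes monotonically
# over time.  ``_one_directional_loop`` splits off the rows of ``h`` (and
# ``c``) that are not part of the current step's mini-batch before applying
# ``f`` and concatenates them back afterwards, so the returned state keeps
# the full batch dimension.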
def _dropout_sequence(xs, dropout_ratio):
return [dropout.dropout(x, ratio=dropout_ratio) for x in xs]
chainer-master/chainer/functions/util/forget.py
import chainer
from chainer import function
from chainer import function_node
from chainer import variable
def _call_func(func, xs):
outs = func(*xs)
if isinstance(outs, tuple):
for i, out in enumerate(outs):
if isinstance(out, variable.Variable):
continue
n = i + 1
suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(
n if n < 20 else n % 10, 'th')
msg = ('{}{} element of a returned tuple is not Variable, '
'but is {}').format(n, suffix, type(out))
raise RuntimeError(msg)
elif isinstance(outs, variable.Variable):
outs = (outs,)
else:
        msg = ('A tuple of Variables or a Variable is expected, but {} '
               'is returned.'.format(type(outs)))
raise RuntimeError(msg)
return outs
class Forget(function_node.FunctionNode):
def __init__(self, func):
if not callable(func):
raise TypeError('func must be callable')
self.func = func
def forward(self, inputs):
self.retain_inputs(tuple(range(len(inputs))))
with function.no_backprop_mode(),\
chainer.using_config('_will_recompute', True):
xs = [variable.Variable(x) for x in inputs]
outs = _call_func(self.func, xs)
return tuple(out.data for out in outs)
def backward(self, indexes, grad_outputs):
# Double backprop is not allowed
if chainer.config.enable_backprop:
raise RuntimeError('double backpropagation in functions.forget is '
'not allowed.')
inputs = self.get_retained_inputs()
# Create new variables that have no creators
dummy_inputs = tuple([variable.Variable(inp.array) for inp in inputs])
with function.force_backprop_mode(),\
chainer.using_config('in_recomputing', True):
outs = _call_func(self.func, dummy_inputs)
assert len(outs) == len(grad_outputs)
output_tuples = []
for out, grad_output in zip(outs, grad_outputs):
if grad_output is not None:
output_tuples.append((out.node, grad_output))
# TODO(kataoka): use outer backward's `retain_grad` and `loss_scale`
chainer._backprop._backprop_to_all(output_tuples, False, None)
return tuple([inp.grad_var for inp in dummy_inputs])
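# Added note (not in the original source): ``Forget.forward`` runs ``func``
# under ``no_backprop_mode`` so that no computational graph (and hence no
# intermediate result) is kept, while ``Forget.backward`` re-runs ``func`` on
# the retained raw inputs under ``force_backprop_mode`` and backpropagates
# the output gradients through that freshly built graph to obtain the input
# gradients.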
def forget(func, *xs):
"""Calls a function without storing intermediate results.
On a forward propagation, Chainer normally stores all intermediate results
of :class:`~chainer.variable.VariableNode`\\ s on a computational graph as
they are required on backward propagation.
Sometimes these results consume too much memory.
``F.forget`` *forgets* such intermediate results on forward propagation,
and still supports backpropagation with recalculation.
On a forward propagation, ``F.forget`` calls a given function with given
variables without creating a computational graph. That means, no
intermediate results are stored.
On a backward propagation, ``F.forget`` calls the given function again to
create a computational graph for backpropagation.
``F.forget`` reduces internal memory usage, whereas it requires more
calculation time as it calls the function twice.
.. admonition:: Example
Let ``f`` be a function defined as:
>>> def f(a, b):
... return (a + b) * a
and, ``x`` and ``y`` be :class:`~chainer.Variable`\\ s:
>>> x = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))
>>> y = chainer.Variable(np.random.uniform(-1, 1, 5).astype(np.float32))
When ``z`` is calculated as ``z = f(x, y)``, its intermediate result
``x + y`` is stored in memory. Instead, if you call ``f`` with
``F.forget``:
>>> z = F.forget(f, x, y)
intermediate ``x + y`` is forgotten.
.. note::
``F.forget`` does not support functions which behave differently in
multiple calls with the same inputs, such as
:meth:`F.dropout() <chainer.functions.dropout>` and
:meth:`F.negative_sampling() <chainer.functions.negative_sampling>`.
.. note::
In case input argument variables are of :ref:`ndarray` objects,
arguments will automatically be
converted to :class:`~chainer.Variable`\\ s.
This conversion takes place to ensure that this function is included
in the computational graph to enable backward computations.
.. note::
``F.forget`` does not support double backpropagation.
.. note::
        If you want to apply ``F.forget`` to a link which updates the link's
internal information every time the forward computation is called,
please ensure that the information is updated just once in a single
iteration. You may use the ``chainer.config.in_recomputing`` flag to
check if the forward computation is the first call in an iteration.
Please see the implementation of
:class:`~chainer.links.BatchNormalization` for detail.
Args:
func (callable): A function to call. It needs to be called with
:class:`~chainer.Variable` object(s) and to return a
:class:`~chainer.Variable` object or a tuple of
:class:`~chainer.Variable` objects.
xs (:class:`tuple` of :class:`~chainer.Variable` or :ref:`ndarray`):
Argument variables of the function.
Returns:
~chainer.Variable: A variable ``func`` returns. If it returns a tuple,
the method returns a tuple too.
"""
xs = tuple(x if isinstance(x, variable.Variable) else
variable.Variable(x, requires_grad=True) for x in xs)
y = Forget(func).apply(xs)
if len(y) == 1:
y, = y
return y
chainer-master/chainer/functions/util/__init__.py
chainer-master/chainer/functions/array/squeeze.py
import six
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
def argone(iterable):
result = []
for i, x in enumerate(iterable):
if not isinstance(x, six.integer_types):
raise ValueError('elements in iterable must be int')
if x == 1:
result.append(i)
return result
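# Added example (not in the original source):
# ``argone((2, 1, 1, 3))`` returns ``[1, 2]``, the indices of size-1 entries.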
class Squeeze(function_node.FunctionNode):
"""Remove dimensions of size one from the shape of a ndarray."""
def __init__(self, axis=None):
if axis is None:
self.axis = None
elif isinstance(axis, six.integer_types):
self.axis = (axis,)
elif isinstance(axis, tuple) and all(
isinstance(x, six.integer_types) for x in axis):
self.axis = axis
else:
raise TypeError('axis must be None, int or tuple of ints')
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type = in_types[0]
if self.axis is not None:
for x in self.axis:
if x >= 0:
type_check.expect(x < x_type.ndim)
else:
type_check.expect(-x_type.ndim <= x)
def forward_chainerx(self, inputs):
x, = inputs
return x.squeeze(self.axis),
def forward(self, inputs):
x, = inputs
xp = backend.get_array_module(x)
return xp.squeeze(x, self.axis),
def backward(self, indexes, grad_outputs):
if self.axis is None:
axis = tuple(argone(self.inputs[0].shape))
else:
axis = self.axis
ndim = len(self.inputs[0].shape)
axis = [x + ndim if x < 0 else x for x in axis]
axis.sort()
gx, = grad_outputs
shape = list(gx.shape)
for x in axis: # axis needs to be sorted
shape.insert(x, 1)
return gx.reshape(shape),
def squeeze(x, axis=None):
"""Remove dimensions of size one from the shape of a ndarray.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
axis (None or int or tuple of ints): A subset of the single-dimensional
entries in the shape to remove. If ``None`` is supplied, all of
them are removed. The dimension index starts at zero. If an axis
with dimension greater than one is selected, an error is raised.
Returns:
~chainer.Variable: Variable whose dimensions of size 1 are removed.
.. admonition:: Example
>>> x = np.array([[[[0, 1, 2]]], [[[3, 4, 5]]]], np.float32)
>>> x.shape
(2, 1, 1, 3)
>>> y = F.squeeze(x)
>>> y.shape
(2, 3)
>>> y.array
array([[0., 1., 2.],
[3., 4., 5.]], dtype=float32)
>>> y = F.squeeze(x, axis=1)
>>> y.shape
(2, 1, 3)
>>> y.array
array([[[0., 1., 2.]],
<BLANKLINE>
[[3., 4., 5.]]], dtype=float32)
>>> y = F.squeeze(x, axis=(1, 2))
>>> y.shape
(2, 3)
>>> y.array
array([[0., 1., 2.],
[3., 4., 5.]], dtype=float32)
"""
y, = Squeeze(axis).apply((x,))
return y
| 3,313
| 28.589286
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/array/rollaxis.py
|
import six
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class Rollaxis(function_node.FunctionNode):
"""Roll axis of an array."""
def __init__(self, axis, start):
if not isinstance(axis, six.integer_types):
raise TypeError('axis must be int')
if not isinstance(start, six.integer_types):
raise TypeError('start must be int')
self.axis = axis
self.start = start
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type = in_types[0]
if self.axis >= 0:
type_check.expect(x_type.ndim > self.axis)
else:
type_check.expect(x_type.ndim > -self.axis - 1)
if self.start >= 0:
type_check.expect(x_type.ndim >= self.start)
else:
type_check.expect(x_type.ndim > -self.start - 1)
def forward(self, inputs):
self.retain_inputs(())
self._in_ndim = inputs[0].ndim
xp = backend.get_array_module(*inputs)
return xp.rollaxis(inputs[0], self.axis, self.start),
def backward(self, indexes, gy):
axis = self.axis
if axis < 0:
axis += self._in_ndim
start = self.start
if start < 0:
start += self._in_ndim
if axis > start:
axis += 1
elif axis < start:
start -= 1
return Rollaxis(start, axis).apply(gy)
def rollaxis(x, axis, start=0):
"""Roll the axis backwards to the given position.
This function continues to be supported for backward compatibility,
but you should prefer
``chainer.functions.moveaxis(x, source, destination)``.
See :func:`chainer.functions.moveaxis`.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
axis (int): The axis to roll backwards.
start (int): The place to which the axis is moved.
Returns:
~chainer.Variable: Variable whose axis is rolled.
"""
return Rollaxis(axis, start).apply((x,))[0]
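# Hedged usage sketch (added commentary, not part of the original source):
# it only illustrates that ``rollaxis`` and the recommended ``moveaxis``
# agree for a typical case; the array contents and shapes are arbitrary.
def _rollaxis_usage_example():
    import numpy as np
    import chainer.functions as F
    x = np.zeros((2, 3, 4, 5), dtype=np.float32)
    y = F.rollaxis(x, 3, start=1)  # roll axis 3 back to position 1
    z = F.moveaxis(x, 3, 1)        # the preferred equivalent
    assert y.shape == z.shape == (2, 5, 3, 4)
    return y.shape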
| 2,092
| 27.283784
| 73
|
py
|
chainer
|
chainer-master/chainer/functions/array/concat.py
|
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import intel64
from chainer import function_node
from chainer.utils import type_check
import chainerx
class Concat(function_node.FunctionNode):
"""Concatenate multiple tensors towards specified axis."""
# concat along the channel dimension by default
def __init__(self, axis=1):
if not isinstance(axis, six.integer_types):
raise TypeError('axis must be int')
self.axis = axis
def check_type_forward(self, in_types):
type_check.expect(in_types.size() > 0)
type_check.expect(in_types[0].ndim >
type_check.make_variable(self.axis, 'axis'))
type_check.expect(
-in_types[0].ndim <= self.axis,
self.axis < in_types[0].ndim
)
ndim = type_check.eval(in_types[0].ndim)
axis = self.axis % ndim
for i in six.moves.range(1, type_check.eval(in_types.size())):
type_check.expect(
in_types[0].dtype == in_types[i].dtype,
in_types[0].ndim == in_types[i].ndim,
)
for d in six.moves.range(0, ndim):
if d == axis:
continue
type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])
def forward(self, xs):
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(xs, (4,))):
# iDeep implementation
return self._forward_ideep(xs)
# Generic implementation
xp = backend.get_array_module(*xs)
return xp.concatenate(xs, self.axis),
def forward_chainerx(self, xs):
return chainerx.concatenate(xs, self.axis),
def _forward_ideep(self, xs):
xs_mdarray = intel64.ideep.mdarrayVector()
for x in xs:
xs_mdarray.push_back(intel64.ideep.array(x))
ndim = xs[0].ndim
axis = self.axis % ndim
return intel64.ideep.concat.Forward(xs_mdarray, axis),
def backward(self, indexes, grad_outputs):
if len(self.inputs) == 1:
return grad_outputs
sizes = numpy.array(
[v.shape[self.axis] for v in self.inputs[:-1]]
).cumsum()
gx, = grad_outputs
return chainer.functions.split_axis(gx, sizes, self.axis)
def concat(xs, axis=1):
"""Concatenates given variables along an axis.
Args:
xs (tuple of :class:`~chainer.Variable` or :ref:`ndarray`):
Input variables to be concatenated. The variables must have the \
same shape, except in the dimension corresponding to axis.
axis (int): The axis along which the arrays will be joined. Default \
is 1.
Returns:
~chainer.Variable: The concatenated variable.
.. admonition:: Example
>>> x = np.arange(0, 12).reshape(3, 4)
>>> x
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> y = np.arange(0, 3).reshape(3, 1)
>>> y
array([[0],
[1],
[2]])
>>> z = F.concat((x, y), axis=1)
>>> z.array
array([[ 0, 1, 2, 3, 0],
[ 4, 5, 6, 7, 1],
[ 8, 9, 10, 11, 2]])
"""
y, = Concat(axis).apply(xs)
return y
| 3,365
| 29.6
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/array/depth2space.py
|
import numpy
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class Depth2Space(function_node.FunctionNode):
"""Depth to space transformation."""
def __init__(self, r):
self.r = r
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].ndim == 4
)
def forward(self, inputs):
X, = inputs
xp = backend.get_array_module(X)
bsize, c, a, b = X.shape
c //= self.r ** 2
if xp is numpy:
# These codes run faster on CPU than below `else` block codes.
X = xp.transpose(X, (0, 2, 3, 1))
X = xp.reshape(X, (bsize, a, b, self.r, self.r, c))
X = xp.transpose(X, (0, 1, 3, 2, 4, 5))
X = xp.reshape(X, (bsize, a * self.r, b * self.r, c))
X = xp.transpose(X, (0, 3, 1, 2))
else:
X = xp.reshape(X, (bsize, self.r, self.r, c, a, b))
X = xp.transpose(X, (0, 3, 4, 1, 5, 2))
X = xp.reshape(X, (bsize, c, a * self.r, b * self.r))
return X,
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
gy = chainer.functions.space2depth(gy, self.r)
return gy,
def depth2space(X, r):
"""Computes the depth2space transformation for subpixel calculations.
Args:
X (:class:`~chainer.Variable` or :ref:`ndarray`): Variable holding a
4d array of shape ``(batch, channel * r * r, dim1, dim2)``.
r (int): the upscaling factor.
Returns:
~chainer.Variable:
A variable holding the upscaled array from
interspersed depth layers. The shape is
``(batch, channel, dim1 * r, dim2 * r)``.
.. note::
This can be used to compute super-resolution transformations.
See https://arxiv.org/abs/1609.05158 for details.
.. seealso:: :func:`space2depth`
.. admonition:: Example
>>> X = np.arange(24).reshape(1, 4, 2, 3).astype(np.float32)
>>> X.shape
(1, 4, 2, 3)
>>> X
array([[[[ 0., 1., 2.],
[ 3., 4., 5.]],
<BLANKLINE>
[[ 6., 7., 8.],
[ 9., 10., 11.]],
<BLANKLINE>
[[12., 13., 14.],
[15., 16., 17.]],
<BLANKLINE>
[[18., 19., 20.],
[21., 22., 23.]]]], dtype=float32)
>>> y = F.depth2space(X, 2)
>>> y.shape
(1, 1, 4, 6)
>>> y.array
array([[[[ 0., 6., 1., 7., 2., 8.],
[12., 18., 13., 19., 14., 20.],
[ 3., 9., 4., 10., 5., 11.],
[15., 21., 16., 22., 17., 23.]]]], dtype=float32)
"""
return Depth2Space(r).apply((X,))[0]
| 2,907
| 29.291667
| 76
|
py
|
chainer
|
chainer-master/chainer/functions/array/permutate.py
|
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
def _check_indices(indices):
if len(indices) == 0:
return
# TODO(unno): Check indices without cpu
indices = cuda.to_cpu(indices)
for i in indices:
if 0 <= i < len(indices):
continue
raise ValueError('Out of bounds index: {}'.format(i))
sort = numpy.sort(indices)
for s, t in six.moves.zip(sort, sort[1:]):
if s == t:
raise ValueError('indices contains duplicate value: {}'.format(s))
def _inverse_indices(indices):
xp = backend.get_array_module(indices)
r = xp.empty_like(indices)
if xp is numpy:
r[indices] = numpy.arange(len(indices))
else:
cuda.elementwise(
'S ind', 'raw S r',
'r[ind] = i',
'inverse_indices'
)(indices, r)
return r
class Permutate(function_node.FunctionNode):
"""Permutate function."""
def __init__(self, indices, axis, inv):
self.indices = indices
self.axis = axis
self.inv = inv
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type, = in_types
if self.axis < 0:
type_check.expect(x_type.ndim >= -self.axis)
else:
type_check.expect(x_type.ndim > self.axis)
def _permutate(self, x, indices, inv):
if inv:
indices = _inverse_indices(indices)
return x[((slice(None),) * self.axis) + (indices,)]
def forward(self, inputs):
x, = inputs
inds = self.indices
if chainer.is_debug():
_check_indices(inds)
return self._permutate(x, inds, self.inv),
def backward(self, indexes, grad_outputs):
g, = grad_outputs
inds = self.indices
gx, = Permutate(inds, self.axis, not self.inv).apply((g,))
return gx,
def permutate(x, indices, axis=0, inv=False):
"""Permutates a given variable along an axis.
    This function permutates ``x`` with the given ``indices``.
    That means ``y[i] = x[indices[i]]`` for all ``i``.
    Note that this result is the same as ``y = x.take(indices, axis=axis)``.
``indices`` must be a permutation of ``[0, 1, ..., len(x) - 1]``.
When ``inv`` is ``True``, ``indices`` is treated as its inverse.
That means ``y[indices[i]] = x[i]``.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable to permutate.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
indices (:class:`~chainer.Variable` or :ref:`ndarray`):
Indices to extract from the variable. A one-dimensional int array.
        axis (int): Axis along which the input array is permuted.
inv (bool): If ``True``, ``indices`` is treated as its inverse.
Returns:
~chainer.Variable: Output variable.
.. admonition:: Example
>>> x = np.arange(6).reshape((3, 2)).astype(np.float32)
>>> x
array([[0., 1.],
[2., 3.],
[4., 5.]], dtype=float32)
>>> indices = np.array([2, 0, 1], np.int32)
>>> y = F.permutate(x, indices)
>>> y.array
array([[4., 5.],
[0., 1.],
[2., 3.]], dtype=float32)
>>> y = F.permutate(x, indices, inv=True)
>>> y.array
array([[2., 3.],
[4., 5.],
[0., 1.]], dtype=float32)
>>> indices = np.array([1, 0], np.int32)
>>> y = F.permutate(x, indices, axis=1)
>>> y.array
array([[1., 0.],
[3., 2.],
[5., 4.]], dtype=float32)
"""
if indices.dtype.kind != 'i' or indices.ndim != 1:
raise ValueError(
'indices should be a one-dimensional int array')
if isinstance(indices, chainer.Variable):
indices = indices.array
y, = Permutate(indices, axis, inv).apply((x,))
return y
| 4,025
| 28.822222
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/array/separate.py
|
from chainer import backend
from chainer import function_node
from chainer.functions.array import stack
from chainer.utils import type_check
class Separate(function_node.FunctionNode):
"""Function that separates a given array."""
def __init__(self, axis):
self.axis = axis
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type = in_types[0]
if self.axis >= 0:
type_check.expect(self.axis < x_type.ndim)
else:
type_check.expect(-self.axis <= x_type.ndim)
def forward(self, inputs):
x, = inputs
self._xp = backend.get_array_module(x)
xs = self._xp.split(x, x.shape[self.axis], self.axis)
ys = [self._xp.squeeze(y, self.axis) for y in xs]
self._shape = ys[0].shape
self._dtype = x.dtype
return tuple(ys)
def backward(self, indexes, grad_outputs):
grad_outputs = [
self._xp.zeros(self._shape, dtype=self._dtype)
if g is None else g for g in grad_outputs]
return stack.stack(grad_outputs, self.axis),
def separate(x, axis=0):
"""Separates an array along a given axis.
    This function separates an array along a given axis. For example, if the
    shape of an array is ``(2, 3, 4)`` and it is separated along ``axis=1``,
    the result is three ``(2, 4)`` arrays.
This function is an inverse of :func:`chainer.functions.stack`.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable to be separated.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
axis (int): Axis along which variables are separated.
Returns:
tuple of chainer.Variable: Output variables.
.. seealso:: :func:`chainer.functions.stack`
.. admonition:: Example
>>> x = np.arange(6).reshape((2, 3)).astype(np.float32)
>>> x
array([[0., 1., 2.],
[3., 4., 5.]], dtype=float32)
>>> x.shape
(2, 3)
>>> y = F.separate(x) # split along axis=0
>>> isinstance(y, tuple)
True
>>> len(y)
2
>>> y[0].shape
(3,)
>>> y[0].array
array([0., 1., 2.], dtype=float32)
>>> y = F.separate(x, axis=1)
>>> len(y)
3
>>> y[0].shape
(2,)
>>> y[0].array
array([0., 3.], dtype=float32)
"""
return Separate(axis).apply((x,))
| 2,459
| 27.941176
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/array/copy.py
|
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
import chainerx
class Copy(function_node.FunctionNode):
"""Copies the input variable onto the specified device."""
def __init__(self, in_device, out_device):
self._in_device = in_device
self.out_device = out_device
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
def forward(self, inputs):
x, = inputs
return self.out_device.send(x),
def forward_chainerx(self, inputs):
x, = inputs
return x.to_device(self.out_device.device),
def backward(self, indexes, grad_outputs):
f = Copy(self.out_device, self._in_device)
return f.apply(grad_outputs)
# TODO(niboshi): Link from `dst` to an appropriate device specifier docs.
def copy(x, dst):
"""Copies the input variable onto the specified device.
If the input ``x`` already resides on the device specified by ``dst``, no
copy will actually take place and the returned variable will hold a view
of the input. In other cases, the input will be copied to ``dst``.
When ``dst == -1``, the array is copied to the host memory.
This function supports copies from host to host, from host to device,
from device to device and from device to host.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable to be copied.
dst: Target device specifier.
Returns:
~chainer.Variable: Output variable.
.. admonition:: Example
>>> import chainer.backends.cuda as cuda
>>> x_arr = np.random.uniform(-1, 1, (5, 10))
>>> x = chainer.Variable(x_arr)
>>> x.device
<CpuDevice (numpy)>
>>> y = F.copy(x, '@cupy:0') # from CPU (NumPy) to GPU 0 (CuPy)
>>> y.device
<GpuDevice (cupy):0>
.. note::
Copies between non-ChainerX devices and ChainerX devices are not
supported.
"""
# For backward compatibility
if dst is cuda.DummyDevice:
dst = chainer.get_device('@numpy')
x_is_var = isinstance(x, chainer.Variable)
in_device = backend.get_device_from_array(x.array if x_is_var else x)
out_device = chainer.get_device(dst)
if in_device.xp is chainerx:
x_arr = x.chx_array if x_is_var else x
if out_device.xp is not chainerx:
# ChainerX to non-ChainerX
if x_arr.is_backprop_required():
raise RuntimeError(
'F.copy does not support copy from a ChainerX array with '
'backprop required to a non-ChainerX device.\n'
'From: {}\n'
'To: {}'.format(in_device, out_device))
return chainer.Variable(
out_device.send(x_arr), requires_grad=False)
# ChainerX to ChainerX
return chainer.Variable(
out_device.send(x_arr), requires_grad=x_arr.is_backprop_required())
if out_device.xp is chainerx:
# Non-ChainerX to ChainerX
if x_is_var and x.requires_grad:
raise RuntimeError(
'F.copy does not support copy from a non-ChainerX array with '
'backprop required to a ChainerX device.\n'
'From: {}\n'
'To: {}'.format(in_device, out_device))
x_arr = x.array if x_is_var else x
return chainer.Variable(out_device.send(x_arr), requires_grad=False)
# Non-ChainerX to non-ChainerX
y, = Copy(in_device, out_device).apply((x,))
return y
| 3,635
| 32.981308
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/array/get_item.py
|
import numpy
import chainer
from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
from chainer import variable
import chainerx
_numpy_supports_0d_bool_index = \
numpy.lib.NumpyVersion(numpy.__version__) >= '1.13.0'
class GetItem(function_node.FunctionNode):
"""Function that slices array and extract elements."""
def __init__(self, slices):
if isinstance(slices, list):
if all([isinstance(s, int) for s in slices]):
slices = slices,
slices = tuple(slices)
elif not isinstance(slices, tuple):
slices = slices,
if chainer.is_debug():
n_ellipses = 0
for s in slices:
if s is Ellipsis:
n_ellipses += 1
if n_ellipses > 1:
raise ValueError('Only one Ellipsis is allowed')
self.slices = slices
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
def forward(self, xs):
slices = tuple([
backend.from_chx(s) if isinstance(s, chainerx.ndarray) else s
for s in self.slices])
return utils.force_array(xs[0][slices]),
def backward(self, indexes, gy):
return GetItemGrad(
self.slices, self.inputs[0].shape).apply(gy)
class GetItemGrad(function_node.FunctionNode):
def __init__(self, slices, in_shape):
self.slices = slices
self._in_shape = in_shape
def forward(self, inputs):
slices = tuple([
backend.from_chx(s) if isinstance(s, chainerx.ndarray) else s
for s in self.slices])
gy, = inputs
xp = backend.get_array_module(*inputs)
gx = xp.zeros(self._in_shape, gy.dtype)
if xp is numpy:
try:
numpy.add.at(gx, slices, gy)
except IndexError:
done = False
# In numpy<1.13, 0-dim boolean index is not supported in
# numpy.add.at and it's supported for 0-dim arr in
# arr.__getitem__.
if not _numpy_supports_0d_bool_index and len(slices) == 1:
idx = numpy.asanyarray(slices[0])
if idx.dtype == numpy.dtype(bool):
# Convert the array and the mask to 1-dim.
# numpy.add.at with them is supported in older numpy.
numpy.add.at(gx[None], idx[None], gy)
done = True
if not done:
msg = '''
GetItem does not support backward for this slices. The slices argument is not
supported by numpy.add.at, while it is supported by numpy.ndarray.__getitem__.
Please report this error to the issue tracker with the stack trace,
the information of your environment, and your script:
https://github.com/chainer/chainer/issues/new.
'''
raise IndexError(msg)
else:
gx.scatter_add(slices, inputs[0])
return gx,
def backward(self, indexes, ggx):
return GetItem(self.slices).apply(ggx)
def get_item(x, slices):
"""Extract elements from array with specified shape, axes and offsets.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
A variable to be sliced.
slices (int, slice, Ellipsis, None, integer array-like, boolean\
array-like or tuple of them):
An object to specify the selection of elements.
Returns:
A :class:`~chainer.Variable` object which contains sliced array of
``x``.
.. note::
It only supports types that are supported by CUDA's atomicAdd when
an integer array is included in ``slices``.
The supported types are ``numpy.float32``, ``numpy.int32``,
``numpy.uint32``, ``numpy.uint64`` and ``numpy.ulonglong``.
.. note::
It does not support ``slices`` that contains multiple boolean arrays.
.. note::
See NumPy documentation for details of `indexing
<https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html>`_.
.. admonition:: Example
>>> x = np.arange(12).reshape((2, 2, 3))
>>> x
array([[[ 0, 1, 2],
[ 3, 4, 5]],
<BLANKLINE>
[[ 6, 7, 8],
[ 9, 10, 11]]])
>>> F.get_item(x, 0)
variable([[0, 1, 2],
[3, 4, 5]])
>>> F.get_item(x, (0, 0, slice(0, 2, 1))) # equals x[0, 0, 0:2:1]
variable([0, 1])
>>> F.get_item(x, (Ellipsis, 2)) # equals x[..., 2]
variable([[ 2, 5],
[ 8, 11]])
>>> F.get_item(x, (1, np.newaxis, 1, 0)) # equals x[1, None, 1, 0]
variable([9])
"""
return GetItem(slices).apply((x,))[0]
def install_variable_get_item():
variable.Variable.__getitem__ = get_item
| 4,935
| 30.641026
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/array/flipud.py
|
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class FlipUD(function_node.FunctionNode):
"""Flip array in the up/down direction."""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('a',))
a_type = in_types[0]
type_check.expect(
a_type.dtype.kind == 'f',
a_type.ndim >= 1
)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
return xp.flipud(inputs[0]),
def backward(self, indexes, grad_outputs):
return FlipUD().apply(grad_outputs)
def flipud(a):
"""Flip array in the up/down direction.
Args:
a (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return FlipUD().apply((a,))[0]
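# Hedged usage sketch (added commentary, not part of the original source).
def _flipud_usage_example():
    import numpy as np
    import chainer.functions as F
    a = np.arange(6, dtype=np.float32).reshape(3, 2)
    y = F.flipud(a)  # reverses the order of rows (axis 0)
    # y.array is [[4., 5.], [2., 3.], [0., 1.]]
    return y.array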
| 872
| 21.973684
| 73
|
py
|
chainer
|
chainer-master/chainer/functions/array/spatial_transformer_sampler.py
|
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function
from chainer.utils import argument
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cuda.libcudnn
_sampler_type = cuda.libcudnn.CUDNN_SAMPLER_BILINEAR
class SpatialTransformerSampler(function.Function):
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 == n_in)
x_type = in_types[0]
grid_type = in_types[1]
type_check.expect(
x_type.dtype.kind == 'f',
grid_type.dtype == x_type.dtype,
x_type.ndim == 4,
grid_type.ndim == 4,
grid_type.shape[1] == 2,
x_type.shape[0] == grid_type.shape[0],
)
def forward_cpu(self, inputs):
return self._forward(inputs)
def forward_gpu(self, inputs):
if not chainer.should_use_cudnn('>=auto', 5000):
return self._forward(inputs)
x, grid = inputs
out_shape = x.shape[:2] + grid.shape[2:]
y = cuda.cupy.empty(out_shape, dtype=x.dtype)
shape = numpy.array(out_shape, dtype=numpy.int32)
x = cuda.cupy.ascontiguousarray(x)
grid_t = cuda.cupy.transpose(grid, (0, 2, 3, 1))
grid_t = cuda.cupy.ascontiguousarray(grid_t)
handle = cudnn.get_handle()
x_desc = cudnn.create_tensor_descriptor(x)
y_desc = cudnn.create_tensor_descriptor(y)
self.st_desc =\
cuda.cupy.cudnn.create_spatial_transformer_descriptor(
_sampler_type, grid.dtype, len(shape), shape.ctypes.data)
dtype = numpy.float64 if x.dtype == numpy.float64 else numpy.float32
one = numpy.array(1, dtype=dtype).ctypes
zero = numpy.array(0, dtype=dtype).ctypes
libcudnn.spatialTfSamplerForward(
handle, self.st_desc.value, one.data,
x_desc.value, x.data.ptr, grid_t.data.ptr, zero.data,
y_desc.value, y.data.ptr)
return y,
def _forward(self, inputs):
x, grid = inputs
xp = backend.get_array_module(x)
B, C, H, W = x.shape
_, _, out_H, out_W = grid.shape
grid = grid.reshape(grid.shape[:2] + (-1,))
u = grid[:, 0]
v = grid[:, 1]
# Pad the image so that pixels locating outside of the original
# image's size can be sampled.
x_pad = xp.pad(x, ((0, 0), (0, 0), (1, 1), (1, 1)), mode='constant')
# Rescale coordinates from [-1, 1] to [0, width or height - 1],
# and adjust them to the padded image.
u = (u + 1) * (W - 1) / 2 + 1
v = (v + 1) * (H - 1) / 2 + 1
u_clipped = u.clip(0, W + 1)
v_clipped = v.clip(0, H + 1)
# indices of the 2x2 pixel neighborhood surrounding the coordinates
u0 = xp.floor(u_clipped).astype(numpy.int32)
u0 = u0.clip(0, W)
u1 = u0 + 1
v0 = xp.floor(v_clipped).astype(numpy.int32)
v0 = v0.clip(0, H)
v1 = v0 + 1
# weights
w1 = (u1 - u_clipped) * (v1 - v_clipped)
w2 = (u_clipped - u0) * (v1 - v_clipped)
w3 = (u1 - u_clipped) * (v_clipped - v0)
w4 = (u_clipped - u0) * (v_clipped - v0)
w1 = w1.astype(x_pad.dtype, copy=False)
w2 = w2.astype(x_pad.dtype, copy=False)
w3 = w3.astype(x_pad.dtype, copy=False)
w4 = w4.astype(x_pad.dtype, copy=False)
x_indexed_1 = xp.concatenate([xp.expand_dims(
x_pad[b, :, v0[b], u0[b]], axis=0) for b in range(B)], axis=0)
x_indexed_2 = xp.concatenate([xp.expand_dims(
x_pad[b, :, v0[b], u1[b]], axis=0) for b in range(B)], axis=0)
x_indexed_3 = xp.concatenate([xp.expand_dims(
x_pad[b, :, v1[b], u0[b]], axis=0) for b in range(B)], axis=0)
x_indexed_4 = xp.concatenate([xp.expand_dims(
x_pad[b, :, v1[b], u1[b]], axis=0) for b in range(B)], axis=0)
y = w1[:, :, None] * x_indexed_1
y += w2[:, :, None] * x_indexed_2
y += w3[:, :, None] * x_indexed_3
y += w4[:, :, None] * x_indexed_4
y = y.reshape(B, out_H, out_W, C).transpose(0, 3, 1, 2)
return y,
def backward_cpu(self, inputs, grad_outputs):
return self._backward(inputs, grad_outputs)
def backward_gpu(self, inputs, grad_outputs):
if not chainer.should_use_cudnn('>=auto', 5000):
return self._backward(inputs, grad_outputs)
x, grid = inputs
gy, = grad_outputs
grid_t = cuda.cupy.transpose(grid, (0, 2, 3, 1))
grid_t = cuda.cupy.ascontiguousarray(grid_t)
x = cuda.cupy.ascontiguousarray(x)
gy = cuda.cupy.ascontiguousarray(gy)
gx = cuda.cupy.empty_like(x)
ggrid_t = cuda.cupy.empty_like(grid_t)
handle = cudnn.get_handle()
x_desc = cudnn.create_tensor_descriptor(x)
dx_desc = cudnn.create_tensor_descriptor(gx)
dy_desc = cudnn.create_tensor_descriptor(gy)
dtype = numpy.float64 if x.dtype == numpy.float64 else numpy.float32
one = numpy.array(1, dtype=dtype).ctypes
zero = numpy.array(0, dtype=dtype).ctypes
libcudnn.spatialTfSamplerBackward(
handle, self.st_desc.value,
one.data,
x_desc.value, x.data.ptr,
zero.data,
dx_desc.value, gx.data.ptr,
one.data,
dy_desc.value, gy.data.ptr,
grid_t.data.ptr, zero.data, ggrid_t.data.ptr)
ggrid = cuda.cupy.transpose(ggrid_t, axes=(0, 3, 1, 2))
return gx, ggrid
def _backward(self, inputs, grad_outputs):
x, grid = inputs
xp = backend.get_array_module(x)
gy, = grad_outputs
B, C, H, W = x.shape
_, _, out_H, out_W = grid.shape
grid = grid.reshape(grid.shape[:2] + (-1,))
u = grid[:, 0]
v = grid[:, 1]
# Pad the image so that points locating outside of the original
# image's size can be sampled.
x_pad = xp.pad(x, ((0, 0), (0, 0), (1, 1), (1, 1)), mode='constant')
# Rescale coordinates from [-1, 1] to [0, width or height - 1],
# and adjust them to the padded image.
u = (u + 1) * (W - 1) / 2 + 1
v = (v + 1) * (H - 1) / 2 + 1
u_clipped = u.clip(0, W + 1)
v_clipped = v.clip(0, H + 1)
# indices of the 2x2 pixel neighborhood surrounding the coordinates
u0 = xp.floor(u_clipped).astype(numpy.int32)
u0 = u0.clip(0, W)
u1 = u0 + 1
v0 = xp.floor(v_clipped).astype(numpy.int32)
v0 = v0.clip(0, H)
v1 = v0 + 1
# weights
wu0 = u_clipped - u0
wu1 = u1 - u_clipped
wv0 = v_clipped - v0
wv1 = v1 - v_clipped
wu0 = wu0.astype(gy.dtype, copy=False)
wu1 = wu1.astype(gy.dtype, copy=False)
wv0 = wv0.astype(gy.dtype, copy=False)
wv1 = wv1.astype(gy.dtype, copy=False)
# --- gu, gv
x_indexed_1 = xp.concatenate([xp.expand_dims(
x_pad[b, :, v0[b], u0[b]], axis=0) for b in range(B)], axis=0)
x_indexed_2 = xp.concatenate([xp.expand_dims(
x_pad[b, :, v0[b], u1[b]], axis=0) for b in range(B)], axis=0)
x_indexed_3 = xp.concatenate([xp.expand_dims(
x_pad[b, :, v1[b], u0[b]], axis=0) for b in range(B)], axis=0)
x_indexed_4 = xp.concatenate([xp.expand_dims(
x_pad[b, :, v1[b], u1[b]], axis=0) for b in range(B)], axis=0)
gu = -wv1[:, :, None] * x_indexed_1
gu += wv1[:, :, None] * x_indexed_2
gu -= wv0[:, :, None] * x_indexed_3
gu += wv0[:, :, None] * x_indexed_4
gv = -wu1[:, :, None] * x_indexed_1
gv -= wu0[:, :, None] * x_indexed_2
gv += wu1[:, :, None] * x_indexed_3
gv += wu0[:, :, None] * x_indexed_4
gu = gu.reshape(B, out_H, out_W, C).transpose(0, 3, 1, 2)
gv = gv.reshape(B, out_H, out_W, C).transpose(0, 3, 1, 2)
gu *= gy
gv *= gy
gu = xp.sum(gu, axis=1)
gv = xp.sum(gv, axis=1)
# Offsets scaling of the coordinates and clip gradients.
u_reshaped = u.reshape(gu.shape)
v_reshaped = v.reshape(gv.shape)
gu = gu / 2. * (W - 1) * (u_reshaped > 0) * (u_reshaped < (W + 1))
gv = gv / 2. * (H - 1) * (v_reshaped > 0) * (v_reshaped < (H + 1))
ggrid = xp.concatenate((gu[:, None], gv[:, None]), axis=1)
# --- gx
if xp is numpy:
scatter_add = numpy.add.at
else:
scatter_add = cuda.cupyx.scatter_add
gx = xp.zeros_like(x_pad)
gy = gy.reshape(B, C, -1)
for b in range(B):
scatter_add(gx[b], (slice(None), v0[b], u0[b]),
gy[b] * wu1[b] * wv1[b])
scatter_add(gx[b], (slice(None), v0[b], u1[b]),
gy[b] * wu0[b] * wv1[b])
scatter_add(gx[b], (slice(None), v1[b], u0[b]),
gy[b] * wu1[b] * wv0[b])
scatter_add(gx[b], (slice(None), v1[b], u1[b]),
gy[b] * wu0[b] * wv0[b])
gx = gx[:, :, 1:-1, 1:-1]
return gx, ggrid
def spatial_transformer_sampler(x, grid, **kwargs):
"""2D Spatial Transformer sampler.
This is a differentiable image sampler. With a set of sampling points
``grid`` and an input feature map ``x``, this produces a sampled output
feature map.
This function currently only supports bilinear interpolation as a sampling
kernel.
    When coordinates in ``grid`` are outside the range :math:`[-1, 1]`, values
    are sampled from a zero-padded input image.
Notation: here is a notation for dimensionalities.
- :math:`n` is the batch size.
- :math:`c_I` is the number of the input channels.
- :math:`h` and :math:`w` are the height and width of the input image,
respectively.
- :math:`h_O` and :math:`w_O` are the height and width of the output
image.
See detail in the following paper: `Spatial Transformer Networks
<https://arxiv.org/abs/1506.02025>`_.
.. note::
cuDNN supports SpatialTransformerSampler from version 5.0.0.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable of shape :math:`(n, c_I, h, w)`.
grid (~chainer.Variable): Coordinate variable of shape
:math:`(n, 2, h_O, w_O)`. Each coordinate defines the spatial
location in the input where a sampling kernel is applied to get
the value at a particular pixel in the output.
``grid[idx, :, i, j]`` corresponds to the coordinate that is used
to sample the values for an output pixel at location
:math:`(i, j)`.
In the second dimension, the first coordinate corresponds to the
location along the horizontal axis, and the second coordinate
corresponds to the location along the vertical axis.
The coordinate :math:`(-1, -1)` corresponds to the upper-left
corner of the input image.
Returns:
~chainer.Variable: Output feature map of shape \
:math:`(n, c_I, h_O, w_O)`.
"""
if kwargs:
argument.check_unexpected_kwargs(
kwargs, use_cudnn='The argument "use_cudnn" is not '
'supported anymore. '
'Use chainer.using_config(\'use_cudnn\', value) '
'context where value can be `always`, `never`, or `auto`.')
argument.assert_kwargs_empty(kwargs)
return SpatialTransformerSampler()(x, grid)
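# Hedged usage sketch (added commentary, not part of the original source):
# an identity sampling grid, with coordinates normalized to [-1, 1] as
# described in the docstring above, reproduces the input feature map up to
# floating-point error.
def _spatial_transformer_sampler_example():
    import numpy as np
    import chainer.functions as F
    B, C, H, W = 1, 1, 4, 4
    x = np.random.rand(B, C, H, W).astype(np.float32)
    # Channel 0 of the grid holds horizontal coordinates, channel 1 holds
    # vertical coordinates.
    us = np.linspace(-1, 1, W, dtype=np.float32)
    vs = np.linspace(-1, 1, H, dtype=np.float32)
    v, u = np.meshgrid(vs, us, indexing='ij')
    grid = np.stack([u, v])[None].astype(np.float32)  # shape (1, 2, H, W)
    y = F.spatial_transformer_sampler(x, grid)
    assert np.allclose(y.array, x, atol=1e-5)
    return y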
| 11,635
| 35.939683
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/array/as_strided.py
|
import numpy as np
import six
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
index_dtype = {t().itemsize: t for t in np.sctypes['int']}
def _byte2step(iterable, itemsize):
for i in iterable:
assert i % itemsize == 0
return tuple([i // itemsize for i in iterable])
def _step2byte(iterable, itemsize):
return tuple([i * itemsize for i in iterable])
def _maybe_overlapping_memory(shape, strides):
"""Returns bool value indicating the array with such shape and strides
might have overlapping memory.
Args:
shape (tuple of int): The shape of output.
strides (tuple of int): The strides of output, given in the unit of steps.
storage_offset (int):
The offset between the head of allocated memory and the pointer of
first element, given in the unit of steps.
Returns:
bool: Existence of the overlapping memory
"""
max_ptr_in_slice = 0
for stride, size in sorted(zip([abs(s) for s in strides], shape)):
if stride <= max_ptr_in_slice:
return True
max_ptr_in_slice += stride * (size - 1)
return False
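# Hedged illustrative check (added commentary, not part of the original
# source): a (3, 2) view with unit strides on both axes revisits the same
# elements, while a contiguous (3, 2) view with strides (2, 1) does not.
def _maybe_overlapping_memory_example():
    assert _maybe_overlapping_memory((3, 2), (1, 1))
    assert not _maybe_overlapping_memory((3, 2), (2, 1))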
def _min_index(shape, strides, storage_offset):
"""Returns the leftest index in the array (in the unit-steps)
Args:
shape (tuple of int): The shape of output.
strides (tuple of int):
The strides of output, given in the unit of steps.
storage_offset (int):
The offset between the head of allocated memory and the pointer of
first element, given in the unit of steps.
Returns:
        int: The smallest pointer in the array
"""
sh_st_neg = [sh_st for sh_st in zip(shape, strides) if sh_st[1] < 0]
if not sh_st_neg:
return storage_offset
else:
return storage_offset + six.moves.reduce(
lambda base, sh_st: base + (sh_st[0] - 1) * sh_st[1], sh_st_neg, 0)
def _max_index(shape, strides, storage_offset):
"""Returns the rightest index in the array
Args:
shape (tuple of int): The shape of output.
strides (tuple of int): The strides of output, given in unit-steps.
storage_offset (int):
The offset between the head of allocated memory and the pointer of
first element, given in the unit of steps.
Returns:
        int: The largest pointer in the array
"""
sh_st_pos = [sh_st for sh_st in zip(shape, strides) if sh_st[1] > 0]
if not sh_st_pos:
return storage_offset
else:
return storage_offset + six.moves.reduce(
lambda base, sh_st: base + (sh_st[0] - 1) * sh_st[1], sh_st_pos, 0)
def _index_add(augend, indices, addend):
"""Wrapper of :func:`cupyx.scatter_add` and :func:`numpy.add.at`
Args:
augend (:class:`numpy.ndarray` or :class:`cupy.ndarray`):
The array modified in-place.
indices (:class:`numpy.ndarray` or :class:`cupy.ndarray`):
The indices of ``augend``. The shape is the same to the ``addend``.
addend (:class:`numpy.ndarray` or :class:`cupy.ndarray`):
The array to be added.
Returns:
None
"""
if isinstance(augend, cuda.ndarray):
cuda.cupyx.scatter_add(augend, indices, addend)
elif isinstance(augend, np.ndarray):
np.add.at(augend, indices, addend)
def _get_base_array(array):
"""Get the founder of :class:`numpy.ndarray`.
Args:
array (:class:`numpy.ndarray`):
The view of the base array.
Returns:
:class:`numpy.ndarray`:
The base array.
"""
base_array_candidate = array
while base_array_candidate.base is not None:
base_array_candidate = base_array_candidate.base
return base_array_candidate
def _stride_array(array, shape, strides, storage_offset):
"""Wrapper of :func:`numpy.lib.stride_tricks.as_strided`.
.. note:
``strides`` and ``storage_offset`` is given in the unit of steps
instead the unit of bytes. This specification differs from that of
:func:`numpy.lib.stride_tricks.as_strided`.
Args:
array (:class:`numpy.ndarray` of :class:`cupy.ndarray`):
The base array for the returned view.
shape (tuple of int):
The shape of the returned view.
strides (tuple of int):
The strides of the returned view, given in the unit of steps.
storage_offset (int):
The offset from the leftest pointer of allocated memory to
the first element of returned view, given in the unit of steps.
Returns:
:class:`numpy.ndarray` or :class:`cupy.ndarray`:
The new view for the base array.
"""
min_index = _min_index(shape, strides, storage_offset)
max_index = _max_index(shape, strides, storage_offset)
strides = _step2byte(strides, array.itemsize)
storage_offset, = _step2byte((storage_offset,), array.itemsize)
if min_index < 0:
raise ValueError('Out of buffer: too small index was specified')
if isinstance(array, cuda.ndarray):
pooled_memory = array.data.mem
if (max_index + 1) * array.itemsize > pooled_memory.size:
raise ValueError('Out of buffer: too large index was specified')
memptr = cuda.cupy.cuda.memory.MemoryPointer(pooled_memory,
storage_offset)
return cuda.cupy.ndarray(shape, array.dtype, memptr, strides)
elif isinstance(array, np.ndarray):
base_array = _get_base_array(array)
if (max_index + 1) * base_array.itemsize > base_array.nbytes:
raise ValueError('Out of buffer: too large index was specified')
return np.ndarray(shape, base_array.dtype, base_array.data,
storage_offset, strides)
else:
raise TypeError('Only (np|cp).ndarray is accepted')
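# Hedged illustrative sketch (added commentary, not part of the original
# source): builds a (3, 2) sliding-window view over a length-4 vector, with
# strides given in element steps rather than bytes.
def _stride_array_example():
    import numpy as np
    base = np.arange(4, dtype=np.float32)
    view = _stride_array(base, (3, 2), (1, 1), 0)
    # view is [[0., 1.], [1., 2.], [2., 3.]] and shares memory with ``base``
    return view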
class TensorGeometry(object):
def __init__(self, array):
self.shape = array.shape
self.strides = _byte2step(array.strides, array.itemsize)
if isinstance(array, np.ndarray):
base_array = _get_base_array(array)
array_ptr = array.__array_interface__['data'][0]
base_array_ptr = base_array.__array_interface__['data'][0]
offset_bytes = array_ptr - base_array_ptr
elif isinstance(array, cuda.ndarray):
offset_bytes = array.data.ptr - array.data.mem.ptr
else:
raise ValueError('only (np|cp).ndarray is supported')
self.storage_offset, = _byte2step((offset_bytes,), array.itemsize)
self.itemsize = array.itemsize
@property
def ndim(self):
return len(self.shape)
class AsStrided(function_node.FunctionNode):
"""Transportation of :func:`torch.Tensor.as_strided`.
While :func:`torch.Tensor.as_strided` does not support nagative strides,
this implementation does support it.
"""
def __init__(self, shape, strides, storage_offset=None):
self.shape = shape
self.strides = strides
self.storage_offset = storage_offset
self.input_geometry = None
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
def forward(self, inputs):
assert len(inputs) > 0
x = inputs[0]
self.input_geometry = TensorGeometry(x)
if self.storage_offset is None:
self.storage_offset = self.input_geometry.storage_offset
return _stride_array(x, self.shape, self.strides, self.storage_offset),
def backward(self, _, grad_outputs):
"""Backward computation which calls :class:`AsStridedGrad`.
.. note:
While this implementation is based on *New-Style Function
Implementation*, the backward computation does not support
double-backpropagation due to *layout agnostic* algorithm (
originally named in the note of pytorch).
"""
return AsStridedGrad(self.input_geometry, self.shape, self.strides,
self.storage_offset).apply(grad_outputs)
class AsStridedGrad(function_node.FunctionNode):
"""Backward of :func:`~chainer.functions.as_strided`.
"""
def __init__(self, input_geometry, shape, strides, storage_offset):
self.input_geometry = input_geometry
self.shape = shape
self.strides = strides
self.storage_offset = storage_offset
def forward(self, grads):
assert len(grads) > 0
gy = grads[0]
if gy.dtype not in np.sctypes['float']:
raise TypeError('Only float is supported for back propagation')
xp = backend.get_array_module(gy)
input_geometry = self.input_geometry
itemsize = input_geometry.itemsize
if 0 in input_geometry.shape:
            return xp.zeros(input_geometry.shape),
# 1. remove redundant axis from input/output
# [redundant axis]
# axis with shape==0, shape==1 or strides==0
if 0 in gy.shape:
            return xp.zeros(input_geometry.shape),
else:
out_shape = tuple([
self.shape[i] for i in six.moves.range(gy.ndim)
if self.shape[i] != 1 and self.strides[i] != 0])
out_strides = tuple([
self.strides[i] for i in six.moves.range(gy.ndim)
if self.shape[i] != 1 and self.strides[i] != 0])
gy = gy.sum(
tuple([i for i in six.moves.range(gy.ndim)
if self.strides[i] == 0]))
gy = gy.squeeze()
out_storage_offset = self.storage_offset
inp_shape = tuple([input_geometry.shape[i]
for i in six.moves.range(input_geometry.ndim)
if input_geometry.shape[i] != 1])
inp_strides = tuple([input_geometry.strides[i]
for i in six.moves.range(input_geometry.ndim)
if input_geometry.shape[i] != 1])
inp_storage_offset = input_geometry.storage_offset
# 2. calculate minimum required storage for gradient computation
inp_min_ptr = _min_index(inp_shape, inp_strides,
input_geometry.storage_offset)
out_min_ptr = _min_index(out_shape, out_strides, self.storage_offset)
common_min_ptr = min(inp_min_ptr, out_min_ptr)
inp_max_ptr = _max_index(inp_shape, inp_strides,
input_geometry.storage_offset)
out_max_ptr = _max_index(out_shape, out_strides, self.storage_offset)
common_max_ptr = max(inp_max_ptr, out_max_ptr)
base_size = (common_max_ptr - common_min_ptr) + 1
storage = xp.zeros(base_size, dtype=gy.dtype)
flatten_full_indices = xp.arange(base_size,
dtype=index_dtype[itemsize])
out_maybe_overlap = _maybe_overlapping_memory(out_shape, out_strides)
if out_maybe_overlap:
out_indices = _stride_array(flatten_full_indices, out_shape,
out_strides,
out_storage_offset - common_min_ptr)
_index_add(storage, out_indices, gy)
else:
storage_view = _stride_array(storage, out_shape, out_strides,
out_storage_offset - common_min_ptr)
storage_view[:] = gy[:]
inp_maybe_overlap = _maybe_overlapping_memory(inp_shape, inp_strides)
if inp_maybe_overlap:
count = xp.zeros_like(storage)
inp_indices = _stride_array(flatten_full_indices, inp_shape,
inp_strides,
inp_storage_offset - common_min_ptr)
_index_add(count, inp_indices, xp.ones(1))
with np.errstate(divide='ignore', invalid='ignore'):
storage /= count
return _stride_array(storage, inp_shape, inp_strides,
inp_storage_offset - common_min_ptr),
def backward(self, target_input_indexes, grad_outputs):
raise NotImplementedError
def as_strided(x, shape, strides, storage_offset=None):
"""Create a new view of array with the given shape, strides, and offset.
Args:
x (tuple of :class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
The array pointing a memory buffer. Its view is totally ignored.
shape (tuple of int):
The shape of output.
strides (tuple of int):
The strides of output, given in the unit of steps.
storage_offset (int):
The offset between the head of allocated memory and the pointer of
first element, given in the unit of steps.
Returns:
~chainer.Variable: The strided variable.
.. warning::
Users should be aware that this function potentially causes unintended
side effects. See `numpy.lib.stride_tricks.as_strided`_ for the detail.
.. note::
The backward algorithm is borrowed from `torch.Tensor.as_strided`.
Therefore, the returned gradient of ``backward`` is *layout-agnostic*
when ``x`` contains memory overlap. See notes in pytorch's source
code (as_strided Backward and layout-aware/agnostic autograd) too.
.. note::
In this function ``strides`` and ``storage_offset`` are given in the
unit of steps instead of bytes. This specification differs from
:func:`numpy.lib.stride_tricks.as_strided`.
.. admonition:: Example
>>> from chainer import functions as F, Variable
>>> x = Variable(np.arange(4, dtype=np.float32))
>>> x
variable([0., 1., 2., 3.])
>>> y = F.as_strided(x, (3, 2), (1, 1), 0)
>>> y
variable([[0., 1.],
[1., 2.],
[2., 3.]])
>>> y.grad = np.ones((3, 2), dtype=np.float32)
>>> y.backward()
>>> x.grad
array([1., 2., 2., 1.], dtype=float32)
.. _numpy.lib.stride_tricks.as_strided:
https://docs.scipy.org/doc/numpy/reference/generated/\
numpy.lib.stride_tricks.as_strided.html
"""
return AsStrided(shape, strides, storage_offset).apply((x,))[0]
| 14,427
| 35.994872
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/array/diagonal.py
|
import numpy
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class Diagonal(function_node.FunctionNode):
def __init__(self, offset, axis1, axis2):
self.offset = offset
self.axis1 = axis1
self.axis2 = axis2
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
in_type = in_types[0]
type_check.expect(max(self.axis1, self.axis2) < in_type.ndim)
type_check.expect(-in_type.ndim <= min(self.axis1, self.axis2))
def forward(self, inputs):
x, = inputs
self._in_shape = x.shape
y = x.diagonal(offset=self.offset, axis1=self.axis1, axis2=self.axis2)
return y,
def backward(self, indexes, grad_outputs):
return DiagonalGrad(
self._in_shape, self.offset, self.axis1, self.axis2
).apply(grad_outputs)
class DiagonalGrad(function_node.FunctionNode):
def __init__(self, out_shape, offset, axis1, axis2):
self.out_shape = out_shape
self.offset = offset
self.axis1 = axis1
self.axis2 = axis2
def forward(self, inputs):
x, = inputs
xp = backend.get_array_module(x)
y = xp.zeros(self.out_shape, x.dtype)
y_diag = y.diagonal(
offset=self.offset, axis1=self.axis1, axis2=self.axis2)
if xp is numpy:
y_diag.setflags(write=True)
y_diag[...] = x
return y,
def backward(self, indexes, grad_outputs):
return Diagonal(self.offset, self.axis1, self.axis2).apply(
grad_outputs)
def diagonal(x, offset=0, axis1=0, axis2=1):
"""Take diagonal
Axes other than ``axis1`` and ``axis2`` are regarded as batch dimensions.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
A variable to be sliced.
offset (int): Offset from the principal diagonal. An upper diagonal
matrix can have nonzero diagonals with nonnegative offsets.
axis1 (int): First axis (that has row indices) of matrix
axis2 (int): Second axis (that has column indices) of matrix
Returns:
~chainer.Variable: (Batched) diagonal vectors
.. admonition:: Example
>>> x = chainer.Variable(np.arange(9).reshape(3, 3).astype(np.float32))
>>> x
variable([[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]])
>>> chainer.functions.diagonal(x, offset=1)
variable([1., 5.])
"""
return Diagonal(offset, axis1, axis2).apply((x,))[0]
| 2,584
| 29.411765
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/array/fliplr.py
|
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class FlipLR(function_node.FunctionNode):
"""Flip array in the left/right direction."""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('a',))
a_type = in_types[0]
type_check.expect(
a_type.dtype.kind == 'f',
a_type.ndim >= 2
)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
return xp.fliplr(inputs[0]),
def backward(self, indexes, grad_outputs):
return FlipLR().apply(grad_outputs)
def fliplr(a):
"""Flip array in the left/right direction.
Args:
a (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return FlipLR().apply((a,))[0]
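# Hedged usage sketch (added commentary, not part of the original source).
def _fliplr_usage_example():
    import numpy as np
    import chainer.functions as F
    a = np.arange(6, dtype=np.float32).reshape(2, 3)
    y = F.fliplr(a)  # reverses the order of columns (axis 1)
    # y.array is [[2., 1., 0.], [5., 4., 3.]]
    return y.array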
| 878
| 22.131579
| 73
|
py
|
chainer
|
chainer-master/chainer/functions/array/moveaxis.py
|
import six
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
def _normalize_axis_tuple(axis, ndim):
ret = []
for ax in axis:
ret.append(ax % ndim)
return ret
def _moveaxis(a, source, destination, xp):
if hasattr(xp, 'moveaxis'):
return xp.moveaxis(a, source, destination)
if not all(isinstance(axis, six.integer_types) for axis in source):
raise TypeError('int or tuple of int are required.')
if not all(isinstance(axis, six.integer_types) for axis in destination):
raise TypeError('int or tuple of int are required.')
if len(source) != len(destination):
raise ValueError('Length of source and destination are '
'different.')
source = _normalize_axis_tuple(source, a.ndim)
destination = _normalize_axis_tuple(destination, a.ndim)
if len(set(source)) != len(source):
raise ValueError('duplicate value in source axis: ({})'.format(
', '.join(map(str, source))))
if len(set(destination)) != len(destination):
raise ValueError('duplicate value in destination axis: ({})'
.format(', '.join(map(str, destination))))
order = [n for n in six.moves.range(a.ndim) if n not in source]
for dest, src in sorted(six.moves.zip(destination, source)):
order.insert(dest, src)
result = a.transpose(order)
return result
class Moveaxis(function_node.FunctionNode):
"""Move axis of an array."""
def __init__(self, source, destination):
if isinstance(source, int):
self.source = (source,)
else:
self.source = source
if isinstance(destination, int):
self.destination = (destination,)
else:
self.destination = destination
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(
in_types[0].dtype.kind == 'f',
)
if self.source is not None:
for axis in self.source:
if axis >= 0:
type_check.expect(
axis < in_types[0].ndim,
)
else:
type_check.expect(
-axis - 1 < in_types[0].ndim,
)
if self.destination is not None:
for axis in self.destination:
if axis >= 0:
type_check.expect(
axis < in_types[0].ndim,
)
else:
type_check.expect(
-axis - 1 < in_types[0].ndim,
)
def forward(self, inputs):
self.retain_inputs(())
self._in_ndim = inputs[0].ndim
xp = backend.get_array_module(*inputs)
return _moveaxis(inputs[0], self.source, self.destination, xp),
def backward(self, indexes, gy):
return Moveaxis(self.destination, self.source).apply(gy)
def moveaxis(x, source, destination):
"""Move the source axes to the destination.
    This function transposes the input ``x`` by moving
    the axes ``source`` to the axes ``destination``.
Other axes remain in their original order.
See also :func:`chainer.functions.transpose`,
:func:`chainer.functions.swapaxes`.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
source (int or tuple of int):
Original positions of the axes to move. These must be unique.
destination (int or tuple of int):
Destination positions for each of the original axes.
These must also be unique.
Returns:
~chainer.Variable: Variable whose axis is moved.
.. admonition:: Example
>>> x = np.zeros((2, 3, 4, 5), np.float32)
>>> chainer.functions.moveaxis(x, 0, -1).shape
(3, 4, 5, 2)
>>> chainer.functions.moveaxis(x, (0, 3), (2, 0)).shape
(5, 3, 2, 4)
"""
return Moveaxis(source, destination).apply((x,))[0]
| 4,098
| 31.023438
| 76
|
py
|
chainer
|
chainer-master/chainer/functions/array/scatter_add.py
|
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
import chainerx
class ScatterAdd(function_node.FunctionNode):
def __init__(self, slices):
if isinstance(slices, list):
if all([isinstance(s, int) for s in slices]):
slices = slices,
slices = tuple(slices)
elif not isinstance(slices, tuple):
slices = slices,
if chainer.is_debug():
n_ellipses = 0
for s in slices:
if s is Ellipsis:
n_ellipses += 1
if n_ellipses > 1:
raise ValueError('Only one Ellipsis is allowed')
self.slices = slices
def check_type_forward(self, in_types):
type_check._argname(in_types, ('a', 'b'))
n_nones = len([item for item in self.slices if item is None])
valid_slice = len(self.slices) - n_nones
type_check.expect(in_types[0].ndim >= valid_slice)
def forward(self, xs):
a = xs[0]
b = xs[1]
y = a.copy()
xp = backend.get_array_module(a)
slices = tuple([
backend.from_chx(s) if isinstance(s, chainerx.ndarray) else s
for s in self.slices])
if y[slices].shape != b.shape:
raise ValueError(
'Chainer does not support automatic broadcasting '
'of variables.')
        if xp is numpy:
            numpy.add.at(y, slices, b)
        else:
            cuda.cupyx.scatter_add(y, slices, b)
return y,
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
ret = []
if 0 in indexes:
ret.append(gy)
if 1 in indexes:
ret.append(gy[self.slices])
return ret
def scatter_add(a, slices, b):
"""Adds given values to specified elements of an array.
This function adds ``b`` to the specified elements of the copy of
``a``, and returns the copy.
The value of the original ``a`` is not changed.
Args:
a (:class:`~chainer.Variable` or :ref:`ndarray`): A variable.
slices (int, slice, Ellipsis, None, integer array-like, boolean\
array-like or tuple of them):
It is an integer, a slice, an ellipsis,
a numpy.newaxis, an integer array-like, a boolean array-like
or tuple of them.
b (:class:`~chainer.Variable` or :ref:`ndarray`):
A variable that is scatter added to ``a``.
Its shape has to equal ``a[slices]`` because broadcasting
of variables is not supported.
Returns:
A :class:`~chainer.Variable` object which is the result of
scatter addition.
.. note::
It only supports types that are supported by CUDA's atomicAdd when
an integer array is included in ``slices``.
The supported types are ``numpy.float32``, ``numpy.int32``,
``numpy.uint32``, ``numpy.uint64`` and ``numpy.ulonglong``.
.. note::
It does not support ``slices`` that contains multiple boolean arrays.
.. seealso::
:func:`numpy.add.at` and
:func:`cupyx.scatter_add`.
"""
y, = ScatterAdd(slices).apply((a, b))
return y
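# Hedged usage sketch (added commentary, not part of the original source):
# duplicate indices accumulate their contributions, unlike plain fancy-index
# assignment, and the original array is left untouched.
def _scatter_add_usage_example():
    import numpy as np
    import chainer.functions as F
    a = np.zeros(4, dtype=np.float32)
    b = np.ones(3, dtype=np.float32)
    idx = np.array([0, 0, 2], dtype=np.int32)
    y = F.scatter_add(a, idx, b)
    # y.array is [2., 0., 1., 0.]; ``a`` itself still contains zeros.
    return y.array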
| 3,309
| 29.648148
| 77
|
py
|
chainer
|
chainer-master/chainer/functions/array/tile.py
|
import six
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class Tile(function_node.FunctionNode):
"""Tiling of an array."""
def __init__(self, reps):
if isinstance(reps, six.integer_types):
self.reps = (reps,)
elif isinstance(reps, tuple) and all(
isinstance(x, six.integer_types) for x in reps):
self.reps = reps
else:
msg = 'reps must be int or tuple of ints.\n' \
'Actual: {0}'.format(type(reps))
raise TypeError(msg)
if not all(x >= 0 for x in self.reps):
raise ValueError('All elements in reps must be zero or larger')
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
def forward(self, inputs):
self._in_shape = inputs[0].shape
xp = backend.get_array_module(*inputs)
return xp.tile(inputs[0], self.reps),
def backward(self, indexes, grad_outputs):
reps = self.reps
shape = tuple(self._in_shape)
ndim = len(shape)
# Ensure input and reps have the same length.
if ndim > len(reps):
reps = (1,) * (ndim - len(reps)) + reps
elif ndim < len(reps):
shape = (1,) * (len(reps) - ndim) + shape
gy, = grad_outputs
# Reshape so that base axis and reps axis can be distinguished.
new_shape = []
for i in range(gy.ndim):
new_shape.append(reps[i])
new_shape.append(shape[i])
new_shape = tuple(new_shape)
# Sum along reps axis
reps_axis = tuple(range(0, 2 * gy.ndim, 2))
gy = gy.reshape(new_shape)
gy = chainer.functions.sum(gy, axis=reps_axis)
if ndim < len(reps):
return gy.reshape(self._in_shape),
else:
return gy,
def tile(x, reps):
"""Construct an array by tiling a given array.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. Let the length of ``reps`` be ``d``. If
``x.ndim < d``, ``x`` is treated as ``d``-dimensional array by
prepending new axes. For example, when the shape of ``x`` is
``(2,)`` and tiled with 2-dim repetitions, ``x`` is treated as the
shape ``(1, 2)``. If ``x.ndim > d``, ``reps`` is treated as
``x.ndim``-dimensional by pre-pending 1's. For example, when the
shape of ``x`` is ``(2, 3, 2, 3)``, the 2-dim ``reps`` of
``(2, 2)`` is treated as ``(1, 1, 2, 2)``.
reps (:class:`int` or :class:`tuple` of :class:`int` s):
The number of times which ``x`` is replicated along each axis.
Returns:
~chainer.Variable: The tiled output Variable.
Let the length of ``reps`` be ``d``, the output has the dimension of
``max(d, x.ndim)``.
.. admonition:: Example
>>> x = np.array([0, 1, 2])
>>> x.shape
(3,)
>>> y = F.tile(x, 2)
>>> y.shape
(6,)
>>> y.array
array([0, 1, 2, 0, 1, 2])
>>> y = F.tile(x, (2, 2))
>>> y.shape
(2, 6)
>>> y.array
array([[0, 1, 2, 0, 1, 2],
[0, 1, 2, 0, 1, 2]])
>>> y = F.tile(x, (2, 1, 2))
>>> y.shape
(2, 1, 6)
>>> y.array
array([[[0, 1, 2, 0, 1, 2]],
<BLANKLINE>
[[0, 1, 2, 0, 1, 2]]])
>>> x = np.array([[1, 2], [3, 4]])
>>> x.shape
(2, 2)
>>> y = F.tile(x, 2)
>>> y.shape
(2, 4)
>>> y.array
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> y = F.tile(x, (2, 2))
>>> y.shape
(4, 4)
>>> y.array
array([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> y = F.tile(x, (2, 1, 2))
>>> y.shape
(2, 2, 4)
>>> y.array
array([[[1, 2, 1, 2],
[3, 4, 3, 4]],
<BLANKLINE>
[[1, 2, 1, 2],
[3, 4, 3, 4]]])
"""
return Tile(reps).apply((x,))[0]
| 4,207
| 28.843972
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/array/cast.py
|
import numpy
import chainer
from chainer import function_node
from chainer.utils import type_check
class Cast(function_node.FunctionNode):
"""Cast function."""
def __init__(self, typ):
self.type = typ
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
def forward(self, x):
self._in_type = x[0].dtype.type
return x[0].astype(self.type, copy=False),
def backward(self, indexes, g):
if numpy.dtype(self._in_type).kind != 'f':
gx = None
else:
gx = cast(g[0], self._in_type)
return gx,
def cast(x, typ):
"""Cast an input variable to a given type.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable to be casted. A \
:math:`(s_1, s_2, ..., s_N)`-shaped array.
typ (:class:`str` of dtype or :class:`numpy.dtype`):
Typecode or data type to cast.
Returns:
~chainer.Variable: Variable holding a casted array.
.. admonition:: Example
>>> x = np.arange(0, 3, dtype=np.float64)
>>> x.dtype
dtype('float64')
>>> y = F.cast(x, np.float32)
>>> y.dtype
dtype('float32')
>>> y = F.cast(x, 'float16')
>>> y.dtype
dtype('float16')
"""
if x.dtype == typ:
if not chainer.config.enable_backprop:
return chainer.as_variable(x)
return Cast(typ).apply((x,))[0]
| 1,478
| 23.65
| 60
|
py
|
chainer
|
chainer-master/chainer/functions/array/swapaxes.py
|
from chainer import function_node
from chainer.utils import type_check
class Swapaxes(function_node.FunctionNode):
"""Swap two axes of an array."""
def __init__(self, axis1, axis2):
self.axis1 = axis1
self.axis2 = axis2
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1,)
@property
def label(self):
return 'Swapaxes'
def forward(self, inputs):
x, = inputs
return x.swapaxes(self.axis1, self.axis2),
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
return Swapaxes(self.axis1, self.axis2).apply((gy,))
def swapaxes(x, axis1, axis2):
"""Swap two axes of a variable.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
axis1 (int): The first axis to swap.
axis2 (int): The second axis to swap.
Returns:
~chainer.Variable: Variable whose axes are swapped.
.. admonition:: Example
>>> x = np.array([[[0, 1, 2], [3, 4, 5]]], np.float32)
>>> x.shape
(1, 2, 3)
>>> y = F.swapaxes(x, axis1=0, axis2=1)
>>> y.shape
(2, 1, 3)
>>> y.array
array([[[0., 1., 2.]],
<BLANKLINE>
[[3., 4., 5.]]], dtype=float32)
"""
y, = Swapaxes(axis1, axis2).apply((x,))
return y
| 1,426
| 24.482143
| 73
|
py
|
chainer
|
chainer-master/chainer/functions/array/broadcast.py
|
import six
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
import chainerx
class Broadcast(function_node.FunctionNode):
"""Function that broadcasts given arrays."""
def check_type_forward(self, in_types):
type_check.expect(in_types.size() > 0)
shapes = [t.shape for t in in_types]
type_check.expect_broadcast_shapes(*shapes)
def forward(self, inputs):
self._xp = backend.get_array_module(*inputs)
self._in_shapes = [x.shape for x in inputs]
self._in_dtypes = [x.dtype for x in inputs]
return tuple(self._xp.broadcast_arrays(*inputs))
def backward(self, indexes, grad_outputs):
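        # Reduce each output gradient back to the shape of the corresponding
        # input with ``sum_to``; entries without an upstream gradient stay
        # ``None``.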
return tuple([None if grad_outputs[i] is None else
chainer.functions.sum_to(
grad_outputs[i], self.inputs[i].shape)
for i in indexes])
def broadcast(*args):
"""Broadcast given variables.
Args:
args (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variables to be broadcasted. Each dimension of the shapes \
of the input variables must have the same size.
Returns:
~chainer.Variable: :class:`~chainer.Variable` or tuple of \
:class:`~chainer.Variable` objects which are broadcasted \
from the given arguments.
.. admonition:: Example
>>> x = np.random.uniform(0, 1, (3, 2)).astype(np.float32)
>>> y = F.broadcast(x)
>>> np.all(x == y.array)
True
>>> z = np.random.uniform(0, 1, (3, 2)).astype(np.float32)
>>> y, w = F.broadcast(x, z)
>>> np.all(x == y.array) & np.all(z == w.array)
True
"""
if len(args) == 1:
return chainer.as_variable(args[0])
return Broadcast().apply(args)
class BroadcastTo(function_node.FunctionNode):
"""Function that broadcasts an array to a new shape."""
def __init__(self, shape):
self._shape = tuple(shape)
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
ndim = type_check.make_variable(len(self._shape), 'len(shape)')
type_check.expect(in_types[0].ndim <= ndim)
shape = type_check.eval(in_types[0].shape)
# check the shape in inverse order
for i in six.moves.range(-1, -len(shape) - 1, -1):
if shape[i] == self._shape[i] or shape[i] == 1:
continue
expect = 'in_type[0].shape[%d] == %d' % (i, self._shape[i])
if self._shape[i] != 1:
expect += ' or in_type[0].shape[%d] == 1' % i
actual = 'in_type[0].shape: %s' % str(shape)
raise type_check.InvalidType(expect, actual)
    def forward_chainerx(self, inputs):
        x, = inputs
        return chainerx.broadcast_to(x, self._shape),
def forward(self, inputs):
x, = inputs
xp = backend.get_array_module(x)
if hasattr(xp, 'broadcast_to'):
return xp.broadcast_to(x, self._shape),
else:
# numpy 1.9 doesn't support broadcast_to method
dummy = xp.empty(self._shape)
bx, _ = xp.broadcast_arrays(x, dummy)
return bx,
def backward(self, indexes, grad_outputs):
gx, = grad_outputs
x_node, = self.inputs
return chainer.functions.sum_to(gx, x_node.shape),
def broadcast_to(x, shape):
"""Broadcast a given variable to a given shape.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable to be broadcasted. A \
:math:`(s_1, s_2, ..., s_N)`-shaped float array.
shape (tuple): Tuple of :class:`int` of the shape of the \
output variable.
Returns:
~chainer.Variable: Output variable broadcasted to the given shape.
.. admonition:: Example
>>> x = np.arange(0, 3)
>>> x
array([0, 1, 2])
>>> y = F.broadcast_to(x, (3, 3))
>>> y.array
array([[0, 1, 2],
[0, 1, 2],
[0, 1, 2]])
"""
if x.shape == shape:
return chainer.as_variable(x)
y, = BroadcastTo(shape).apply((x,))
return y
| 4,232
| 29.673913
| 77
|
py
|
chainer
|
chainer-master/chainer/functions/array/space2depth.py
|
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class Space2Depth(function_node.FunctionNode):
"""Space to depth transformation."""
def __init__(self, r):
self.r = r
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].ndim == 4
)
def forward(self, inputs):
X, = inputs
xp = backend.get_array_module(X)
bsize, c, a, b = X.shape
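        # Split each spatial axis into blocks of size ``r``, move the in-block
        # offsets in front of the channel axis, and fold them into the
        # channels: (B, C, H, W) -> (B, r*r*C, H//r, W//r).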
X = xp.reshape(
X, (bsize, c, a // self.r, self.r, b // self.r, self.r))
X = xp.transpose(X, (0, 3, 5, 1, 2, 4))
X = xp.reshape(
X, (bsize, self.r ** 2 * c, a // self.r, b // self.r))
return X,
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
gy = chainer.functions.depth2space(gy, self.r)
return gy,
def space2depth(X, r):
"""Computes the space2depth transformation for subpixel calculations.
Args:
X (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable holding a 4d array of shape
``(batch, channel, dim1 * r, dim2 * r)``.
r (int): the downscaling factor.
Returns:
~chainer.Variable:
A variable holding the downscaled layer array from subpixel array
sampling. The shape is ``(batch, channel * r * r, dim1, dim2)``.
.. note::
This can be used to compute inverse super-resolution transformations.
See https://arxiv.org/abs/1609.05158 for details.
.. seealso:: :func:`depth2space`
.. admonition:: Example
>>> X = np.arange(24).reshape(1, 1, 4, 6).astype(np.float32)
>>> X.shape
(1, 1, 4, 6)
>>> X
array([[[[ 0., 1., 2., 3., 4., 5.],
[ 6., 7., 8., 9., 10., 11.],
[12., 13., 14., 15., 16., 17.],
[18., 19., 20., 21., 22., 23.]]]], dtype=float32)
>>> y = F.space2depth(X, 2)
>>> y.shape
(1, 4, 2, 3)
>>> y.array
array([[[[ 0., 2., 4.],
[12., 14., 16.]],
<BLANKLINE>
[[ 1., 3., 5.],
[13., 15., 17.]],
<BLANKLINE>
[[ 6., 8., 10.],
[18., 20., 22.]],
<BLANKLINE>
[[ 7., 9., 11.],
[19., 21., 23.]]]], dtype=float32)
"""
return Space2Depth(r).apply((X,))[0]
| 2,545
| 28.604651
| 77
|
py
|
chainer
|
chainer-master/chainer/functions/array/reshape.py
|
import chainer
from chainer import function_node
from chainer.utils import type_check
def _count_unknown_dims(shape):
cnt = 0
for dim in shape:
cnt += dim < 0
return cnt
class Reshape(function_node.FunctionNode):
"""Reshapes an input array without copy."""
def __init__(self, shape):
self.shape = shape
self._cnt = _count_unknown_dims(shape)
assert self._cnt <= 1
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type, = in_types
if self._cnt == 0:
type_check.expect(
type_check.prod(x_type.shape) == type_check.prod(self.shape))
else:
known_size = 1
for s in self.shape:
if s > 0:
known_size *= s
size_var = type_check.make_variable(
known_size, 'known_size(=%d)' % known_size)
type_check.expect(
type_check.prod(x_type.shape) % size_var == 0)
def forward_chainerx(self, inputs):
x, = inputs
return x.reshape(self.shape),
def forward(self, inputs):
x, = inputs
return x.reshape(self.shape),
def backward(self, indexes, grad_outputs):
gx, = grad_outputs
return reshape(gx, self.inputs[0].shape),
def reshape(x, shape):
"""Reshapes an input variable without copy.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
shape (:class:`tuple` of :class:`int` s):
Expected shape of the output array. The number of elements which
the array of ``shape`` contains must be equal to that of input
array. One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
Returns:
~chainer.Variable:
Variable that holds a reshaped version of the input variable.
.. seealso:: :func:`numpy.reshape`, :func:`cupy.reshape`
.. admonition:: Example
>>> x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
>>> y = F.reshape(x, (8,))
>>> y.shape
(8,)
>>> y.array
array([1, 2, 3, 4, 5, 6, 7, 8])
>>> y = F.reshape(x, (4, -1)) # the shape of output is inferred
>>> y.shape
(4, 2)
>>> y.array
array([[1, 2],
[3, 4],
[5, 6],
[7, 8]])
>>> y = F.reshape(x, (4, 3)) \
# the shape of input and output are not consistent
Traceback (most recent call last):
...
chainer.utils.type_check.InvalidType:
Invalid operation is performed in: Reshape (Forward)
<BLANKLINE>
Expect: prod(x.shape) == prod((4, 3))
Actual: 8 != 12
"""
if x.shape == shape:
return chainer.as_variable(x)
y, = Reshape(shape).apply((x,))
return y
| 2,918
| 28.19
| 77
|
py
|
chainer
|
chainer-master/chainer/functions/array/where.py
|
import numpy
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class Where(function_node.FunctionNode):
"""Choose elements depending on condition."""
def __init__(self, condition):
self.condition = condition
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 'y'))
x_type, y_type = in_types
condition = type_check._make_variable_from_array(
# allow scalar `condition`
chainer.utils.force_array(self.condition),
'condition')
type_check.expect(
condition.dtype == numpy.bool_,
x_type.dtype == y_type.dtype,
)
type_check.expect_broadcast_shapes(
condition.shape, x_type.shape, y_type.shape)
def forward(self, inputs):
# may broadcast
xp = backend.get_array_module(*inputs)
x, y = inputs
condition = self.condition
return xp.where(condition, x, y),
def backward(self, indexes, grad_outputs):
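        # The gradient w.r.t. ``x`` is the upstream gradient where the
        # condition is True and zero elsewhere (and vice versa for ``y``);
        # ``sum_to`` then reduces away any broadcast dimensions.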
condition = self.condition
xp = backend.get_array_module(condition)
g, = grad_outputs
zero = xp.zeros((), dtype=g.dtype)
ret = []
if 0 in indexes:
gx, = Where(condition).apply((g, zero))
ret.append(chainer.functions.sum_to(gx, self.inputs[0].shape))
if 1 in indexes:
gy, = Where(condition).apply((zero, g))
ret.append(chainer.functions.sum_to(gy, self.inputs[1].shape))
return ret
def where(condition, x, y):
"""Choose elements depending on condition.
This function choose values depending on a given ``condition``.
All ``condition``, ``x``, and ``y`` must have the same shape.
Args:
condition (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable containing the condition.
A :math:`(s_1, s_2, ..., s_N)` -shaped boolean array.
Only boolean array is permitted.
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable chosen when ``condition`` is ``True``.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
y (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable chosen when ``condition`` is ``False``.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
Returns:
~chainer.Variable: Variable containing chosen values.
.. admonition:: Example
>>> cond = np.array([[1, 0], [0, 1]], dtype=np.bool)
>>> cond
array([[ True, False],
[False, True]])
>>> x = np.array([[1, 2], [3, 4]], np.float32)
>>> y = np.zeros((2, 2), np.float32)
>>> F.where(cond, x, y).array
array([[1., 0.],
[0., 4.]], dtype=float32)
"""
if isinstance(condition, chainer.Variable):
condition = condition.array
z, = Where(condition).apply((x, y))
return z
| 2,995
| 31.565217
| 74
|
py
|
chainer
|
chainer-master/chainer/functions/array/expand_dims.py
|
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class ExpandDims(function_node.FunctionNode):
"""Expands dimensions of an input array without copy."""
def __init__(self, axis):
self.axis = int(axis)
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type, = in_types
if self.axis >= 0:
type_check.expect(x_type.ndim >= self.axis)
else:
type_check.expect(x_type.ndim >= -self.axis - 1)
def forward(self, inputs):
x, = inputs
xp = backend.get_array_module(x)
return xp.expand_dims(x, self.axis),
def backward(self, indexes, grad_outputs):
gx, = grad_outputs
return chainer.functions.reshape(gx, self.inputs[0].shape),
def expand_dims(x, axis):
"""Expands dimensions of an input variable without copy.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
axis (int):
Position where new axis is to be inserted. The ``axis`` parameter
is acceptable when :math:`-ndim - 1 \\leq axis \\leq ndim`.
(``ndim`` is the dimension of input variables). When
:math:`axis < 0`, the result is the same with
:math:`ndim + 1 - |axis|`.
Returns:
~chainer.Variable: Variable that holds an expanded input. The ``ndim``
of output is one greater than that of ``x``.
.. admonition:: Example
>>> x = np.array([1, 2, 3])
>>> x.shape
(3,)
>>> y = F.expand_dims(x, axis=0)
>>> y.shape
(1, 3)
>>> y.array
array([[1, 2, 3]])
>>> y = F.expand_dims(x, axis=1)
>>> y.shape
(3, 1)
>>> y.array
array([[1],
[2],
[3]])
>>> y = F.expand_dims(x, axis=-2)
>>> y.shape
(1, 3)
>>> y.array
array([[1, 2, 3]])
"""
y, = ExpandDims(axis).apply((x,))
return y
| 2,057
| 26.810811
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/array/flatten.py
|
import chainer
def flatten(x):
"""Flatten a given array into one dimension.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable flatten to one dimension.
.. note::
When you input a scalar array (i.e. the shape is ``()``),
you can also get the one dimension array whose shape is ``(1,)``.
.. admonition:: Example
>>> x = np.array([[1, 2], [3, 4]])
>>> x.shape
(2, 2)
>>> y = F.flatten(x)
>>> y.shape
(4,)
>>> y.array
array([1, 2, 3, 4])
>>> x = np.arange(8).reshape(2, 2, 2)
>>> x.shape
(2, 2, 2)
>>> y = F.flatten(x)
>>> y.shape
(8,)
>>> y.array
array([0, 1, 2, 3, 4, 5, 6, 7])
"""
return chainer.functions.reshape(x, (x.size,))
| 891
| 21.3
| 73
|
py
|
chainer
|
chainer-master/chainer/functions/array/hstack.py
|
import numpy
import six
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class Hstack(function_node.FunctionNode):
"""Concatenate multiple tensors horizontally (column wise)."""
def check_type_forward(self, in_types):
type_check.expect(in_types.size() > 0)
type_check._argname((in_types[0],), ('x0',))
ndim = type_check.eval(in_types[0].ndim)
for i in six.moves.range(1, type_check.eval(in_types.size())):
type_check._argname((in_types[i],), ('x{}'.format(i),))
type_check.expect(
in_types[0].dtype == in_types[i].dtype,
in_types[0].ndim == in_types[i].ndim,
)
if ndim <= 1:
continue
for d in six.moves.range(0, ndim):
if d == 1:
continue
type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])
def forward(self, xs):
xp = backend.get_array_module(*xs)
return xp.hstack(xs),
def backward(self, indexes, grad_outputs):
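        # The forward concatenates along axis 1 (axis 0 for 1-D inputs), so
        # the gradient is split back at the cumulative widths of the inputs
        # along that axis; 0-d inputs are restored by reshaping.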
gy, = grad_outputs
ndim = len(self.inputs[0].shape)
if len(self.inputs) == 1:
if ndim == 0:
return gy.reshape(()),
return gy,
if ndim == 0:
gx = chainer.functions.split_axis(gy, len(self.inputs), 0)
return [g.reshape(()) for g in gx]
axis = 0 if ndim == 1 else 1
sizes = numpy.array([x.shape[axis] for x in self.inputs[:-1]]).cumsum()
return chainer.functions.split_axis(gy, sizes, axis)
def hstack(xs):
"""Concatenate variables horizontally (column wise).
Args:
xs (list of :class:`~chainer.Variable` or :ref:`ndarray`):
Input variables to be concatenated. The variables must have the
same ``ndim``. When the variables have the second axis (i.e.
:math:`ndim \\geq 2`), the variables must have the same shape
along all but the second axis. When the variables do not have the
            second axis (i.e. :math:`ndim < 2`), the variables need not have
the same shape.
Returns:
~chainer.Variable:
Output variable. When the input variables have the second axis
(i.e. :math:`ndim \\geq 2`), the shapes of inputs and output are
the same along all but the second axis. The length of second axis
is the sum of the lengths of inputs' second axis.
When the variables do not have the second axis (i.e.
:math:`ndim < 2`), the shape of output is ``(N, )`` (``N`` is the
sum of the input variables' size).
.. admonition:: Example
>>> x1 = np.array((1, 2, 3))
>>> x1.shape
(3,)
>>> x2 = np.array((2, 3, 4))
>>> x2.shape
(3,)
>>> y = F.hstack((x1, x2))
>>> y.shape
(6,)
>>> y.array
array([1, 2, 3, 2, 3, 4])
>>> x1 = np.arange(0, 12).reshape(3, 4)
>>> x1.shape
(3, 4)
>>> x1
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x2 = np.arange(12, 18).reshape(3, 2)
>>> x2.shape
(3, 2)
>>> x2
array([[12, 13],
[14, 15],
[16, 17]])
>>> y = F.hstack([x1, x2])
>>> y.shape
(3, 6)
>>> y.array
array([[ 0, 1, 2, 3, 12, 13],
[ 4, 5, 6, 7, 14, 15],
[ 8, 9, 10, 11, 16, 17]])
"""
y, = Hstack().apply(xs)
return y
| 3,651
| 31.035088
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/array/transpose.py
|
import numpy
from chainer import function_node
from chainer.utils import type_check
class Transpose(function_node.FunctionNode):
"""Permute the dimensions of an array."""
def __init__(self, axes=None):
self.axes = axes
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1,)
@property
def label(self):
return 'Transpose'
def forward_chainerx(self, inputs):
x = inputs[0]
return x.transpose(self.axes),
def forward(self, inputs):
x = inputs[0]
return x.transpose(self.axes),
def backward(self, indexes, grad_outputs):
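        # The gradient of a transpose is the transpose by the inverse
        # permutation, which is obtained by argsorting ``axes`` (normalized to
        # non-negative indices).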
inv_axes = self.axes
if inv_axes:
axes_len = len(inv_axes)
inv_axes = tuple(numpy.argsort([ax % axes_len for ax in inv_axes]))
return Transpose(inv_axes).apply(grad_outputs)
def transpose(x, axes=None):
"""Permute the dimensions of an input variable without copy.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable to be transposed.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
axes (tuple of ints): By default, reverse the dimensions,
otherwise permute the axes according to the values given.
Returns:
~chainer.Variable: Variable whose axes are permuted.
.. admonition:: Example
>>> x = np.array([[[0, 1, 2], [3, 4, 5]]], np.float32)
>>> x.shape
(1, 2, 3)
>>> y = F.transpose(x) # reverse the dimensions
>>> y.shape
(3, 2, 1)
>>> y.array
array([[[0.],
[3.]],
<BLANKLINE>
[[1.],
[4.]],
<BLANKLINE>
[[2.],
[5.]]], dtype=float32)
>>> y = F.transpose(x, axes=(1, 0, 2)) # swap 1st and 2nd axis
>>> y.shape
(2, 1, 3)
>>> y.array
array([[[0., 1., 2.]],
<BLANKLINE>
[[3., 4., 5.]]], dtype=float32)
"""
return Transpose(axes).apply((x,))[0]
| 2,061
| 26.131579
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/array/spatial_transformer_grid.py
|
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function
from chainer.utils import argument
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cuda.libcudnn
_sampler_type = cuda.libcudnn.CUDNN_SAMPLER_BILINEAR
class SpatialTransformerGrid(function.Function):
def __init__(self, output_shape):
self.output_shape = output_shape
def check_type_forward(self, in_types):
type_check._argname(in_types, ('theta',))
theta_type = in_types[0]
type_check.expect(
theta_type.dtype.kind == 'f',
theta_type.ndim == 3,
theta_type.shape[1] == 2,
theta_type.shape[2] == 3,
)
def forward_cpu(self, inputs):
return self._forward(inputs)
def forward_gpu(self, inputs):
if not chainer.should_use_cudnn('>=auto', 5000):
return self._forward(inputs)
theta, = inputs
B, _, _ = theta.shape
H, W = self.output_shape
grid_t = cuda.cupy.empty((B, H, W, 2), dtype=theta.dtype)
# Unlike spatial_transformer_sampler,
# channel size can be anything in this case.
shape = numpy.array((B, 1, H, W), dtype=numpy.int32)
theta = cuda.cupy.ascontiguousarray(theta)
handle = cudnn.get_handle()
self.st_desc =\
cuda.cupy.cudnn.create_spatial_transformer_descriptor(
_sampler_type, grid_t.dtype, len(shape), shape.ctypes.data)
libcudnn.spatialTfGridGeneratorForward(
handle, self.st_desc.value, theta.data.ptr, grid_t.data.ptr)
grid = cuda.cupy.transpose(grid_t, (0, 3, 1, 2))
return grid,
def _forward(self, inputs):
theta, = inputs
H, W = self.output_shape
B, _, _ = theta.shape
xp = backend.get_array_module(theta)
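        # Build normalized target coordinates (x, y, 1) covering [-1, 1] and
        # map them with the batch of 2x3 affine matrices ``theta`` to obtain
        # the source sampling grid.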
ys, xs = xp.meshgrid(
xp.linspace(-1, 1, H, dtype=theta.dtype),
xp.linspace(-1, 1, W, dtype=theta.dtype), indexing='ij',
copy=False
)
coords = xp.concatenate(
[xs[None], ys[None], xp.ones((1, H, W), dtype=theta.dtype)],
axis=0)
grid = theta.dot(coords.reshape(3, H * W)).reshape(B, 2, H, W)
return grid,
def backward_cpu(self, inputs, grad_outputs):
return self._backward(inputs, grad_outputs)
def backward_gpu(self, inputs, grad_outputs):
if not chainer.should_use_cudnn('>=auto', 5000):
return self._backward(inputs, grad_outputs)
theta, = inputs
ggrid, = grad_outputs
ggrid_t = cuda.cupy.transpose(ggrid, (0, 2, 3, 1))
gtheta = cuda.cupy.empty_like(theta)
handle = cudnn.get_handle()
ggrid_t = cuda.cupy.ascontiguousarray(ggrid_t)
libcudnn.spatialTfGridGeneratorBackward(
handle, self.st_desc.value, ggrid_t.data.ptr, gtheta.data.ptr)
return gtheta,
def _backward(self, inputs, grad_outputs):
theta, = inputs
ggrid, = grad_outputs
H, W = self.output_shape
B, _, _ = theta.shape
xp = backend.get_array_module(theta)
ys, xs = xp.meshgrid(
xp.linspace(-1, 1, H, dtype=theta.dtype),
xp.linspace(-1, 1, W, dtype=theta.dtype), indexing='ij',
copy=False
)
coords = xp.concatenate(
[xs[None], ys[None], xp.ones((1, H, W), dtype=theta.dtype)],
axis=0)
coords_T = coords.reshape(3, H * W).transpose(1, 0)
ggrid = ggrid.reshape(B, 2, H * W)
gtheta = ggrid.dot(coords_T).reshape(B, 2, 3)
return gtheta,
def spatial_transformer_grid(theta, output_shape, **kwargs):
"""2D Spatial Transformer grid.
This function generates coordinates of the points sampled from an image
to perform warping described in `Spatial Transformer Networks
<https://arxiv.org/abs/1506.02025>`_.
Given a coordinate in the warped image :math:`(x_i^t, y_i^t)`, the point
sampled from the source image :math:`(x_i^s, y_i^s)` are calculated
by the following equation.
.. note::
cuDNN supports SpatialTransformerGrid from version 5.0.0.
.. math::
\\left(\\begin{matrix} x_i^s \\\\
y_i^s \\end{matrix}\\right)
=
\\left(\\begin{matrix} \\theta_{11} & \\theta_{12} & \\theta_{13} \\\\
\\theta_{21} & \\theta_{22} & \\theta_{23} \\end{matrix}\\right)
\\left(\\begin{matrix} x_i^t \\\\
y_i^t \\\\
1 \\end{matrix}\\right)
Notation: here is a notation for dimensionalities.
- :math:`n` is the batch size.
- :math:`h_O` and :math:`w_O` are the height and the width of the output
image.
Args:
theta (:class:`~chainer.Variable` or :ref:`ndarray`):
An array of shape :math:`(n, 2, 3)`.
This is a batch of :math:`2 \\times 3` matrix used for
the warping described above.
output_shape (tuple): A tuple of 2 elements: :math:`h_O, w_O`.
Returns:
~chainer.Variable: A variable of shape :math:`(n, 2, h_O, w_O)`.
In the 2nd dimension, the first element is the coordinate along the
x axis, and the second element is the coordinate along the y axis.
All the coordinates in the image are scaled to fit range
:math:`[-1, 1]`.
This means that the coordinate :math:`(-1, -1)` corresponds to
the upper-left corner of the input image.
"""
if kwargs:
argument.check_unexpected_kwargs(
kwargs, use_cudnn='The argument "use_cudnn" is not '
'supported anymore. '
'Use chainer.using_config(\'use_cudnn\', value) '
'context where value can be `always`, `never`, or `auto`.')
argument.assert_kwargs_empty(kwargs)
return SpatialTransformerGrid(output_shape)(theta)
| 5,933
| 33.5
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/array/repeat.py
|
import six
from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class Repeat(function_node.FunctionNode):
"""Repeat elements of an array."""
def __init__(self, repeats, axis=None):
if isinstance(repeats, six.integer_types):
self.repeats = (repeats,)
elif isinstance(repeats, tuple) and all(
isinstance(x, six.integer_types) for x in repeats):
# Although it is not explicitly documented, NumPy/CuPy allows
# specifying bool or tuple of bools as `repeats`.
# Thus we just check type against `six.integer_types`, without
# excluding `bool`.
self.repeats = repeats
else:
raise TypeError('repeats must be int or tuple of ints')
if not all(x >= 0 for x in self.repeats):
raise ValueError('all elements in repeats must be zero or larger')
if axis is not None and (
not isinstance(axis, six.integer_types) or
isinstance(axis, bool)):
# `axis` cannot be bool, in contrast to `repeats`.
raise TypeError('axis must be int or None')
self.axis = axis
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
def forward(self, inputs):
self.retain_inputs((0,))
x, = inputs
xp = backend.get_array_module(x)
repeats = self.repeats
# Workaround for bug in NumPy 1.9 that specifying one element list to
# `repeats` fails to broadcast.
if len(repeats) == 1:
repeats = repeats[0]
return xp.repeat(x, repeats, self.axis),
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
return RepeatGrad(self.repeats, self.axis, x.shape, x.dtype).apply(
grad_outputs)
class RepeatGrad(function_node.FunctionNode):
def __init__(self, repeats, axis, in_shape, in_dtype):
self.repeats = repeats
self.axis = axis
if axis is not None and axis < 0:
self.axis += len(in_shape)
self.in_shape = in_shape
self.in_dtype = in_dtype
def forward(self, inputs):
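        # Sum the upstream gradient over all copies of each original element:
        # when every element is repeated the same number of times this is done
        # with a reshape and a sum, otherwise the per-element slices given by
        # ``repeats`` are summed individually.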
gy, = inputs
xp = backend.get_array_module(gy)
repeats = self.repeats
axis = self.axis
shape = list(self.in_shape)
dtype = self.in_dtype
if len(gy) == 0:
gx = xp.zeros(shape, dtype)
return gx,
if len(repeats) == 1:
repeats = int(repeats[0])
if axis is None:
gx = gy.reshape(-1, repeats).sum(axis=1).reshape(shape)
else:
shape[axis:axis + 1] = [-1, repeats]
gx = gy.reshape(shape).sum(axis=axis + 1)
return gx,
if axis is None:
pos = 0
gx = xp.zeros(utils.size_of_shape(shape), dtype)
for (i, r) in enumerate(repeats):
gx[i] = xp.sum(gy[pos:pos + r])
pos += r
gx = gx.reshape(shape)
else:
gx = xp.zeros(shape, dtype)
pos = 0
src = [slice(None)] * axis + [None]
dst = [slice(None)] * axis + [None]
for (i, r) in enumerate(repeats):
src[-1] = slice(pos, pos + r)
dst[-1] = slice(i, i + 1)
gx[tuple(dst)] = gy[tuple(src)].sum(axis=axis, keepdims=True)
pos += r
return gx,
def backward(self, indexes, grad_outputs):
return Repeat(self.repeats, self.axis).apply(grad_outputs)
def repeat(x, repeats, axis=None):
"""Construct an array by repeating a given array.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable.
repeats (:class:`int` or :class:`tuple` of :class:`int` s):
The number of times which each element of ``x`` is repeated.
axis (:class:`int`):
The axis along which to repeat values.
Returns:
~chainer.Variable: The repeated output Variable.
.. admonition:: Example
>>> x = np.array([0, 1, 2])
>>> x.shape
(3,)
>>> y = F.repeat(x, 2)
>>> y.shape
(6,)
>>> y.array
array([0, 0, 1, 1, 2, 2])
>>> x = np.array([[1,2], [3,4]])
>>> x.shape
(2, 2)
>>> y = F.repeat(x, 3, axis=1)
>>> y.shape
(2, 6)
>>> y.array
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> y = F.repeat(x, (1, 2), axis=0)
>>> y.shape
(3, 2)
>>> y.array
array([[1, 2],
[3, 4],
[3, 4]])
"""
return Repeat(repeats, axis).apply((x,))[0]
| 4,817
| 29.884615
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/array/select_item.py
|
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
class SelectItem(function_node.FunctionNode):
"""Select elements stored in given indices."""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 't'))
x_type, t_type = in_types
type_check.expect(
t_type.dtype.kind == 'i',
x_type.ndim == 2,
t_type.ndim == 1,
x_type.shape[0] == t_type.shape[0],
)
def forward(self, inputs):
self.retain_inputs((1,))
x, t = inputs
self._in_shape = x.shape
self._in_dtype = x.dtype
if chainer.is_debug():
if not ((0 <= t).all() and
(t < x.shape[1]).all()):
                msg = 'Each label `t` needs to satisfy `0 <= t < x.shape[1]`'
raise ValueError(msg)
xp = backend.get_array_module(x)
if xp is numpy:
# This code is equivalent to `t.choose(x.T)`, but `numpy.choose`
# does not work when `x.shape[1] > 32`.
return x[six.moves.range(t.size), t],
else:
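            # On GPU, gather x[i, t[i]] for each row i with a raw-indexing
            # elementwise kernel.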
y = cuda.elementwise(
'S t, raw T x',
'T y',
'int ind[] = {i, t}; y = x[ind];',
'getitem_fwd'
)(t, x)
return y,
def backward(self, indexes, gy):
t = self.get_retained_inputs()[0]
ret = []
if 0 in indexes:
gx = Assign(self._in_shape, self._in_dtype, t).apply(gy)[0]
ret.append(gx)
if 1 in indexes:
ret.append(None)
return ret
class Assign(function_node.FunctionNode):
def __init__(self, shape, dtype, t):
self.shape = shape
self.dtype = dtype
self.t = t.data
def forward_cpu(self, inputs):
t = backend.from_chx(self.t) # Workaround for ChainerX.
gx = numpy.zeros(self.shape, self.dtype)
gx[six.moves.range(self.t.size), t] = inputs[0]
return gx,
def forward_gpu(self, inputs):
t = backend.from_chx(self.t) # Workaround for ChainerX.
gx = cuda.cupy.zeros(self.shape, self.dtype)
gx = cuda.elementwise(
'S t, T gloss',
'raw T gx',
'int ind[] = {i, t}; gx[ind] = gloss;',
'getitem_bwd'
)(t, inputs[0], gx)
return gx,
def backward(self, indexes, gy):
return SelectItem().apply((gy[0], self.t))
def select_item(x, t):
"""Select elements stored in given indices.
    This function returns ``t.choose(x.T)``, which means
``y[i] == x[i, t[i]]`` for all ``i``.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable storing arrays. A two-dimensional float array.
t (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable storing index numbers. A one-dimensional int array.
Length of the ``t`` should be equal to ``x.shape[0]``.
Returns:
~chainer.Variable: Variable that holds ``t``-th element of ``x``.
.. admonition:: Example
>>> x = np.array([[0, 1, 2], [3, 4, 5]], np.float32)
>>> t = np.array([0, 2], np.int32)
>>> y = F.select_item(x, t)
>>> y.shape
(2,)
>>> y.array
array([0., 5.], dtype=float32)
"""
return SelectItem().apply((x, t))[0]
| 3,484
| 28.041667
| 77
|
py
|
chainer
|
chainer-master/chainer/functions/array/im2col.py
|
import numpy
from chainer import function_node
from chainer.utils.conv import col2im_cpu
from chainer.utils.conv import col2im_gpu
from chainer.utils.conv import im2col_cpu
from chainer.utils.conv import im2col_gpu
from chainer.utils import type_check
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
def _col2im(x, *args, **kwargs):
if isinstance(x, numpy.ndarray):
return col2im_cpu(x, *args, **kwargs)
return col2im_gpu(x, *args, **kwargs)
def _im2col(x, *args, **kwargs):
if isinstance(x, numpy.ndarray):
return im2col_cpu(x, *args, **kwargs)
return im2col_gpu(x, *args, **kwargs)
class Im2Col(function_node.FunctionNode):
"""Im2Col function."""
def __init__(self, ksize, stride, pad, cover_all, dilate):
self.kh, self.kw = _pair(ksize)
self.sy, self.sx = _pair(stride)
self.ph, self.pw = _pair(pad)
self.dy, self.dx = _pair(dilate)
self.cover_all = cover_all
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 4
)
def forward(self, inputs):
x, = inputs
y = _im2col(
x, self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all, dy=self.dy, dx=self.dx)
n, c, kh, kw, out_h, out_w = y.shape
return y.reshape(n, c * kh * kw, out_h, out_w),
def backward(self, indexes, grad_outputs):
return Im2ColGrad((self.kh, self.kw), (self.sy, self.sx),
(self.ph, self.pw), self.cover_all,
(self.dy, self.dx), self.inputs[0].shape) \
.apply(grad_outputs)
class Im2ColGrad(function_node.FunctionNode):
"""Im2Col gradient function."""
def __init__(self, ksize, stride, pad, cover_all, dilate, in_shape):
self.kh, self.kw = _pair(ksize)
self.sy, self.sx = _pair(stride)
self.ph, self.pw = _pair(pad)
self.dy, self.dx = _pair(dilate)
self.cover_all = cover_all
self.in_shape = in_shape
def check_type_forward(self, in_types):
type_check._argname(in_types, ('gy',))
gy_type = in_types[0]
type_check.expect(
gy_type.dtype.kind == 'f',
gy_type.ndim == 4
)
def forward(self, inputs):
_, c, h, w = self.in_shape
gy, = inputs
n, _, out_h, out_w = gy.shape
gy = gy.reshape(n, c, self.kh, self.kw, out_h, out_w)
gx = _col2im(
gy, self.sy, self.sx, self.ph, self.pw, h, w, self.dy, self.dx)
return gx,
def backward(self, indexes, grad_outputs):
return Im2Col(
(self.kh, self.kw), (self.sy, self.sx),
(self.ph, self.pw), self.cover_all,
(self.dy, self.dx)).apply(grad_outputs)
def im2col(x, ksize, stride=1, pad=0, cover_all=False, dilate=1):
"""Extract patches from an image based on the filter.
This function rearranges patches of an image and puts them in the channel
dimension of the output.
Patches are extracted at positions shifted by multiples of ``stride`` from
the first position ``-pad`` for each spatial axis.
The right-most (or bottom-most) patches do not run over the padded spatial
size.
Notation: here is a notation.
- :math:`n` is the batch size.
- :math:`c` is the number of the input channels.
- :math:`h` and :math:`w` are the height and width of the input image,
respectively.
- :math:`k_H` and :math:`k_W` are the height and width of the filters,
respectively.
- :math:`s_Y` and :math:`s_X` are the strides of the filter.
- :math:`p_H` and :math:`p_W` are the spatial padding sizes.
- :math:`d_Y` and :math:`d_X` are the dilation factors of filter \
application.
The output size :math:`(h_O, w_O)` is determined by the following
equations when ``cover_all = False``:
.. math::
h_O &= (h + 2p_H - k_H - (k_H - 1) * (d_Y - 1)) / s_Y + 1,\\\\
w_O &= (w + 2p_W - k_W - (k_W - 1) * (d_X - 1)) / s_X + 1.
When ``cover_all = True``, the output size is determined by
the following equations:
.. math::
h_O &= (h + 2p_H - k_H - (k_H - 1) * (d_Y - 1) + s_Y - 1) / s_Y + 1,\\\\
w_O &= (w + 2p_W - k_W - (k_W - 1) * (d_X - 1) + s_X - 1) / s_X + 1.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable of shape :math:`(n, c, h, w)`.
ksize (int or pair of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k)`` are equivalent.
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
cover_all (bool): If ``True``, all spatial locations are rearranged
into some output pixels. It may make the output size larger.
dilate (int or pair of ints): Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
Returns:
~chainer.Variable:
Output variable whose shape is
:math:`(n, c \\cdot k_H \\cdot k_W, h_O, w_O)`
"""
return Im2Col(ksize, stride, pad, cover_all, dilate).apply((x,))[0]
| 5,495
| 32.717791
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/array/vstack.py
|
import numpy
import six
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class Vstack(function_node.FunctionNode):
"""Concatenate multiple tensors vertically (row wise)."""
def check_type_forward(self, in_types):
type_check.expect(in_types.size() > 0)
ndim = type_check.eval(in_types[0].ndim)
for i in six.moves.range(1, type_check.eval(in_types.size())):
type_check.expect(
in_types[0].dtype == in_types[i].dtype,
in_types[0].ndim == in_types[i].ndim,
)
if ndim <= 1:
type_check.expect(in_types[0].shape == in_types[i].shape)
continue
for d in six.moves.range(1, ndim):
type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])
def forward(self, xs):
xp = backend.get_array_module(*xs)
return xp.vstack(xs),
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
ndim = len(self.inputs[0].shape)
if len(self.inputs) == 1:
if ndim <= 1:
return gy.reshape(self.inputs[0].shape),
return gy,
if ndim <= 1:
gxs = chainer.functions.split_axis(gy, len(self.inputs), 0)
return [gx.reshape(self.inputs[0].shape) for gx in gxs]
sizes = numpy.array([x.shape[0] for x in self.inputs[:-1]]).cumsum()
return chainer.functions.split_axis(gy, sizes, 0)
def vstack(xs):
"""Concatenate variables vertically (row wise).
Args:
xs (list of :class:`~chainer.Variable` or :ref:`ndarray`):
Input variables to be concatenated. The variables must have the
same ``ndim``. When the variables have the second axis (i.e.
:math:`ndim \\geq 2`), the variables must have the same shape
along all but the first axis. When the variables do not have the
            second axis (i.e. :math:`ndim < 2`), the variables must have the
same shape.
Returns:
~chainer.Variable:
Output variable. When the input variables have the second axis
(i.e. :math:`ndim \\geq 2`), the shapes of inputs and output are
the same along all but the first axis. The length of first axis
is the sum of the lengths of inputs' first axis.
When the variables do not have the second axis (i.e.
:math:`ndim < 2`), the shape of output is ``(2, N)`` (``N`` is the
size of the input variable).
.. admonition:: Example
>>> x1 = np.array((1, 2, 3))
>>> x1.shape
(3,)
>>> x2 = np.array((2, 3, 4))
>>> x2.shape
(3,)
>>> y = F.vstack((x1, x2))
>>> y.shape
(2, 3)
>>> y.array
array([[1, 2, 3],
[2, 3, 4]])
>>> x1 = np.arange(0, 12).reshape(3, 4)
>>> x1.shape
(3, 4)
>>> x1
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x2 = np.arange(12, 20).reshape(2, 4)
>>> x2.shape
(2, 4)
>>> x2
array([[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> y = F.vstack([x1, x2])
>>> y.shape
(5, 4)
>>> y.array
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
"""
return Vstack().apply((xs))[0]
| 3,567
| 31.144144
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/array/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainer/functions/array/stack.py
|
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
import chainerx
class Stack(function_node.FunctionNode):
"""Concatenate variables along a new axis."""
def __init__(self, axis):
self.axis = axis
def check_type_forward(self, in_types):
type_check.expect(in_types.size() > 0)
type_check.expect(
-in_types[0].ndim - 1 <= self.axis,
self.axis <= in_types[0].ndim
)
dtype = in_types[0].dtype
shape = in_types[0].shape
for x_type in in_types[1:]:
type_check.expect(
x_type.dtype == dtype,
x_type.shape == shape,
)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
if hasattr(xp, 'stack'):
return xp.stack(inputs, axis=self.axis),
else:
# Old numpy does not have numpy.stack.
return xp.concatenate(
[xp.expand_dims(x, self.axis) for x in inputs], self.axis),
def forward_chainerx(self, xs):
return chainerx.stack(xs, self.axis),
def backward(self, inputs, grads):
return chainer.functions.separate(grads[0], self.axis)
def stack(xs, axis=0):
"""Concatenate variables along a new axis.
Args:
xs (list of :class:`~chainer.Variable` or :ref:`ndarray`):
Input variables to be concatenated. The variables must have the
same shape.
axis (int): The axis along which the arrays will be stacked. The
``axis`` parameter is acceptable when
:math:`-ndim - 1 \\leq axis \\leq ndim`. (``ndim`` is the
dimension of input variables). When :math:`axis < 0`, the result
is the same with :math:`ndim + 1 - |axis|`.
Returns:
~chainer.Variable:
Output variable. Let ``x_1, x_2, ..., x_n`` and ``y`` be the input
variables and the output variable,
``y[:, ..., 0, ..., :]`` is ``x_1``,
``y[:, ..., 1, ..., :]`` is ``x_2``
and ``y[:, ..., n-1, ..., :]`` is ``x_n`` (The indexed axis
indicates the ``axis``).
.. admonition:: Example
>>> x1 = np.arange(0, 12).reshape(3, 4)
>>> x1.shape
(3, 4)
>>> x1
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x2 = np.arange(12, 24).reshape(3, 4)
>>> x2.shape
(3, 4)
>>> x2
array([[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]])
>>> y = F.stack([x1, x2], axis=0)
>>> y.shape
(2, 3, 4)
>>> y.array
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
<BLANKLINE>
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y = F.stack([x1, x2], axis=1)
>>> y.shape
(3, 2, 4)
>>> y.array
array([[[ 0, 1, 2, 3],
[12, 13, 14, 15]],
<BLANKLINE>
[[ 4, 5, 6, 7],
[16, 17, 18, 19]],
<BLANKLINE>
[[ 8, 9, 10, 11],
[20, 21, 22, 23]]])
>>> y = F.stack([x1, x2], axis=2)
>>> y.shape
(3, 4, 2)
>>> y.array
array([[[ 0, 12],
[ 1, 13],
[ 2, 14],
[ 3, 15]],
<BLANKLINE>
[[ 4, 16],
[ 5, 17],
[ 6, 18],
[ 7, 19]],
<BLANKLINE>
[[ 8, 20],
[ 9, 21],
[10, 22],
[11, 23]]])
>>> y = F.stack([x1, x2], axis=-1)
>>> y.shape
(3, 4, 2)
"""
return Stack(axis).apply(xs)[0]
| 3,888
| 28.915385
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/array/dstack.py
|
import numpy
import six
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class Dstack(function_node.FunctionNode):
"""Concatenate multiple tensors along third axis (depth wise)."""
def check_type_forward(self, in_types):
type_check.expect(in_types.size() > 0)
type_check._argname((in_types[0],), ('x0',))
ndim = type_check.eval(in_types[0].ndim)
for i in six.moves.range(1, type_check.eval(in_types.size())):
type_check._argname((in_types[i],), ('x{}'.format(i),))
type_check.expect(
in_types[0].dtype == in_types[i].dtype,
in_types[0].ndim == in_types[i].ndim,
)
if ndim <= 2:
type_check.expect(in_types[0].shape == in_types[i].shape)
continue
for d in six.moves.range(0, ndim):
if d == 2:
continue
type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])
def forward(self, xs):
xp = backend.get_array_module(*xs)
return xp.dstack(xs),
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
ndim = len(self.inputs[0].shape)
if len(self.inputs) == 1:
if ndim <= 2:
return gy.reshape(self.inputs[0].shape),
return gy,
if ndim <= 2:
gxs = chainer.functions.split_axis(gy, len(self.inputs), axis=2)
return [gx.reshape(self.inputs[0].shape) for gx in gxs]
sizes = numpy.array([x.shape[2] for x in self.inputs[:-1]]).cumsum()
return chainer.functions.split_axis(gy, sizes, axis=2)
def dstack(xs):
"""Concatenate variables along third axis (depth wise).
Args:
xs (list of :class:`~chainer.Variable` or :ref:`ndarray`):
Input variables to be concatenated. The variables must have the
same ``ndim``. When the variables have the third axis (i.e.
:math:`ndim \\geq 3`), the variables must have the same shape
along all but the third axis. When the variables do not have the
            third axis (i.e. :math:`ndim < 3`), the variables must have the
same shape.
Returns:
~chainer.Variable:
Output variable. When the input variables have the third axis
(i.e. :math:`ndim \\geq 3`), the shapes of inputs and output are
the same along all but the third axis. The length of third axis
is the sum of the lengths of inputs' third axis.
When the shape of variables are ``(N1, N2)`` (i.e.
:math:`ndim = 2`), the shape of output is ``(N1, N2, 2)``. When
the shape of variables are ``(N1,)`` (i.e. :math:`ndim = 1`), the
shape of output is ``(1, N1, 2)``. When the shape of variables are
``()`` (i.e. :math:`ndim = 0`), the shape of output is
``(1, 1, 2)``.
.. admonition:: Example
>>> x1 = np.array((1, 2, 3))
>>> x1.shape
(3,)
>>> x2 = np.array((2, 3, 4))
>>> x2.shape
(3,)
>>> y = F.dstack((x1, x2))
>>> y.shape
(1, 3, 2)
>>> y.array
array([[[1, 2],
[2, 3],
[3, 4]]])
>>> x1 = np.arange(0, 6).reshape(3, 2)
>>> x1.shape
(3, 2)
>>> x1
array([[0, 1],
[2, 3],
[4, 5]])
>>> x2 = np.arange(6, 12).reshape(3, 2)
>>> x2.shape
(3, 2)
>>> x2
array([[ 6, 7],
[ 8, 9],
[10, 11]])
>>> y = F.dstack([x1, x2])
>>> y.shape
(3, 2, 2)
>>> y.array
array([[[ 0, 6],
[ 1, 7]],
<BLANKLINE>
[[ 2, 8],
[ 3, 9]],
<BLANKLINE>
[[ 4, 10],
[ 5, 11]]])
>>> x1 = np.arange(0, 12).reshape(3, 2, 2)
>>> x2 = np.arange(12, 18).reshape(3, 2, 1)
>>> y = F.dstack([x1, x2])
>>> y.shape
(3, 2, 3)
>>> y.array
array([[[ 0, 1, 12],
[ 2, 3, 13]],
<BLANKLINE>
[[ 4, 5, 14],
[ 6, 7, 15]],
<BLANKLINE>
[[ 8, 9, 16],
[10, 11, 17]]])
"""
return Dstack().apply((xs))[0]
| 4,433
| 30.671429
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/array/flip.py
|
import six
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
def _flip(array, axis):
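    # Take full slices on every axis except ``axis``, where a reversed slice
    # (``::-1``) flips the element order.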
indices = [slice(None)] * array.ndim
indices[axis] = slice(None, None, -1)
return array[tuple(indices)]
class Flip(function_node.FunctionNode):
"""Flips an input variable in reverse order along the given axis."""
def __init__(self, axis):
if not isinstance(axis, six.integer_types):
raise TypeError('axis must be int')
self.axis = axis
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type = in_types[0]
type_check.expect(x_type.ndim > 0)
if self.axis >= 0:
type_check.expect(x_type.ndim > self.axis)
else:
type_check.expect(x_type.ndim >= -self.axis)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
if hasattr(xp, 'flip'): # numpy.flip is supported from version 1.12.0
return xp.flip(inputs[0], self.axis),
else:
return _flip(inputs[0], self.axis),
def backward(self, indexes, grad_outputs):
return flip(grad_outputs[0], self.axis),
def flip(x, axis):
"""Flips an input variable in reverse order along the given axis.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable.
axis (int): Axis along which the input variable is reversed.
Returns:
~chainer.Variable: Output variable.
"""
return Flip(axis).apply((x,))[0]
| 1,569
| 27.035714
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/array/pad_sequence.py
|
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class PadSequence(function_node.FunctionNode):
"""Padding arrays to create a matrix."""
def __init__(self, length, padding):
self.length = length
self.padding = padding
def check_type_forward(self, in_types):
type_check.expect(in_types.size() > 0)
for i, in_type in enumerate(in_types):
type_check._argname((in_type,), ('x{}'.format(i),))
type_check.expect(
in_type.ndim > 0,
in_type.shape[1:] == in_types[0].shape[1:],
in_type.dtype == in_types[0].dtype
)
if self.length is not None:
for in_type in in_types:
type_check.expect(in_type.shape[0] <= self.length)
def forward(self, xs):
xp = backend.get_array_module(*xs)
if self.length is None:
length = max(len(x) for x in xs)
else:
length = self.length
shape = (len(xs), length) + xs[0].shape[1:]
y = xp.empty(shape, xs[0].dtype)
if length == 0:
return y, # y is an empty array
if xp is numpy or any(not x._c_contiguous for x in xs):
for i, x in enumerate(xs):
l = len(x)
if l == length:
y[i] = x
else:
y[i, 0:l] = x
y[i, l:] = self.padding
else:
# This code assumes that all arrays are c_contiguous
ptr_shape = (Ellipsis,) + (None,) * xs[0].ndim
ptrs = cuda.cupy.array(
[x.data for x in xs], numpy.uintp)[ptr_shape]
lengths = cuda.cupy.array(
[len(x) for x in xs], numpy.int32)[ptr_shape]
base = utils.size_of_shape(xs[0].shape[1:])
cuda.elementwise(
'P ptr, int32 length, T pad, int32 base, int32 max_length',
'T y',
'''
int d = i / base % max_length;
if (d < length) {
y = reinterpret_cast<const T*>(ptr)[i % (base * max_length)];
} else {
y = pad;
}
''',
'pad_sequence_fwd'
)(ptrs, lengths, self.padding, base, length, y)
return y,
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
inputs = self.inputs
if gy.size == 0:
# `split` in NumPy 1.9 behaves inconsistently when size is zero.
gy = [gy]
else:
gy = chainer.functions.split_axis(gy, len(inputs), axis=0)
return tuple(g[0, :x.shape[0]] for g, x in six.moves.zip(gy, inputs))
def pad_sequence(xs, length=None, padding=0):
"""Pad given arrays to make a matrix.
Args:
xs (list of ~chainer.Variable or :ref:`ndarray`):
Variables you want to concatenate.
length (None or int): Size of the first dimension of a padded array.
If it is ``None``, the longest size of the first dimension of
``xs`` is used.
padding (int or float): Value to fill.
Returns:
~chainer.Variable: A padded matrix. Its shape is
``(n, length, ...)``, where ``n == len(xs)``.
"""
return PadSequence(length, padding).apply((xs))[0]
| 3,502
| 31.435185
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/array/split_axis.py
|
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import intel64
from chainer import function_node
from chainer.utils import collections_abc
from chainer.utils import type_check
import chainerx
_numpy_split_ok = numpy.lib.NumpyVersion(numpy.__version__) >= '1.11.0'
def _fix_numpy_split(ys, x, indices_or_sections, axis):
"""Make the output of np.split compatible with numpy >= 1.11"""
if all(y.ndim == x.ndim for y in ys):
return ys
tmp = [len(t) for t in numpy.split(
numpy.empty(x.shape[axis], dtype=numpy.int8), indices_or_sections, 0)]
shape = list(x.shape)
for i, t in enumerate(tmp):
y = ys[i]
if y.ndim != x.ndim:
assert y.size == 0
shape[axis] = t
ys[i] = y.reshape(shape)
return ys
def _get_indices_or_sections(indices_or_sections):
"""Checks and convert ``indices_or_sections`` argument
Converted value is one of: 1-D numpy.ndarray, list, int, and
NumPy int scalar.
Returns:
A binary tuple in which the 1st element is indices (sequence) and
the 2nd element is sections (scalar).
Only one of the two is not ``None`` and the other is ``None``.
"""
ios = indices_or_sections
is_seq = False
if isinstance(ios, numpy.ndarray):
# numpy.ndarray
if ios.dtype.kind != 'i' and ios.size > 0:
# Note: numpy.array([]) (dtype is float64) should be accepted.
raise TypeError('indices_or_sections must be integers')
if ios.ndim >= 2:
raise TypeError('indices_or_sections must be 1-D sequence')
is_seq = ios.ndim != 0
elif isinstance(ios, collections_abc.Sequence):
# Any sequence except numpy.ndarray
ios = list(ios)
is_seq = True
elif isinstance(indices_or_sections, six.integer_types):
# int
pass
else:
raise TypeError(
'indices_or_sections must be integer or 1-D array.\n'
'Actual: {}'.format(type(indices_or_sections)))
if is_seq and chainer.is_debug():
for p, n in six.moves.zip(ios, ios[1:]):
if p > n:
raise ValueError('indices_or_sections must be sorted')
if is_seq:
return ios, None
else:
return None, ios
class SplitAxis(function_node.FunctionNode):
"""Function that splits multiple arrays along the specified axis."""
def __init__(self, indices_or_sections, axis):
indices, sections = _get_indices_or_sections(indices_or_sections)
assert (indices is None) != (sections is None)
self.indices = indices
self.sections = sections
self.axis = axis
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].ndim > self.axis)
if self.indices is not None:
indices = self.indices
if len(indices) > 0:
max_index = type_check.make_variable(indices[-1], 'max_index')
type_check.expect(in_types[0].shape[self.axis] >= max_index)
else:
assert self.sections is not None
sections = type_check.make_variable(self.sections, 'sections')
type_check.expect(in_types[0].shape[self.axis] % sections == 0)
@property
def indices_or_sections(self):
return self.indices if self.indices is not None else self.sections
def forward_chainerx(self, inputs):
x, = inputs
return tuple(chainerx.split(x, self.indices_or_sections, self.axis))
def forward(self, inputs):
x, = inputs
self._xp = backend.get_array_module(x)
# Currently iDeep only supports 4 dims
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs, (4,))
and self._ideep_is_supported(inputs)):
return self._forward_ideep(inputs)
indices_or_sections = self.indices_or_sections
ret = self._xp.split(x, indices_or_sections, self.axis)
if self._xp == numpy and not _numpy_split_ok:
ret = _fix_numpy_split(ret, x, indices_or_sections, self.axis)
self._shapes = [r.shape for r in ret]
return tuple(ret)
def _ideep_is_supported(self, inputs):
# Returns True if iDeep supports current configuration of inputs and
# arguments. This is workaround for limitation in iDeep internal
# implementation.
if self.indices is not None:
indices = self.indices
if len(indices) == 0:
return False # Empty sequence
if indices[0] == 0:
return False # Sequence starting with 0
for i in six.moves.range(1, len(indices)):
if indices[i-1] == indices[i]:
return False # Sequence with duplicate index
else:
if self.sections == 1:
return False # 1
# Workaround for iDeep segfault issue
# See:
# https://github.com/chainer/chainer/pull/4281#issuecomment-365830630
# TODO(niboshi): Remove this after iDeep is fixed.
# Note: inputs[0].ndim is always 4.
if (self.axis == 1 or self.axis == -3) and inputs[0].shape[1] == 8:
return False
return True
def _forward_ideep(self, inputs):
x, = inputs
offsets = intel64.ideep.intVector()
# TODO(iDeep)
# bypass python3 issue when transfer array to std::vector<>
# https://github.com/SimpleITK/SimpleITK/issues/106
axis = self.axis % x.ndim
if self.indices is not None:
for i in self.indices:
offsets.push_back(int(i))
else:
d = x.shape[self.axis]
step = d // self.sections
for i in six.moves.range(step, d, step):
offsets.push_back(i)
ret = intel64.ideep.concat.Backward(
intel64.ideep.array(x), offsets, axis)
self._shapes = [r.shape for r in ret]
return ret
def backward(self, indexes, grad_outputs):
dtype = self.inputs[0].dtype
grads = [
self._xp.zeros(shape, dtype=dtype) if gy is None else gy
for gy, shape in six.moves.zip(grad_outputs, self._shapes)]
return chainer.functions.concat(grads, self.axis),
def split_axis(x, indices_or_sections, axis, force_tuple=True):
"""Splits given variables along an axis.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
A variable to be split.
indices_or_sections (int or 1-D array): If this argument is an integer,
N, the array will be divided into N equal arrays along axis.
If it is a 1-D array of sorted integers, it
indicates the positions where the array is split.
axis (int): Axis that the input array is split along.
force_tuple (bool): If ``True`` (the default) this method returns a
tuple even when the number of outputs is one. Otherwise, if
``False`` a Variable will be returned when the number of outputs
is one.
Returns:
tuple or ~chainer.Variable: Tuple of :class:`~chainer.Variable` objects
if the number of outputs is more than 1 or
:class:`~chainer.Variable` otherwise.
When ``force_tuple`` is ``True``, returned value is always a tuple
regardless of the number of outputs.
"""
res = SplitAxis(indices_or_sections, axis).apply((x,))
if force_tuple or len(res) != 1:
return res
return res[0]
| 7,645
| 35.409524
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/array/transpose_sequence.py
|
import numpy
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
def _transpose(xs, length):
if length == 0:
return ()
xp = backend.get_array_module(*xs)
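    # ``xs`` is assumed to be sorted by non-increasing length; ``lengths[j]``
    # ends up holding the number of input sequences that have an element at
    # position ``j``, i.e. the length of the ``j``-th output.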
lengths = numpy.empty(length, dtype=numpy.int32)
end = length
for i, x in enumerate(xs):
len_x = len(x)
if len_x == end:
continue
lengths[len_x:end] = i
end = len_x
lengths[0:end] = len(xs)
if xp is numpy:
dtype = xs[0].dtype
unit = xs[0].shape[1:]
outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths])
for i, x in enumerate(xs):
for p, xi in enumerate(x):
outs[p][i] = xi
else:
offsets1 = numpy.empty(len(xs) + 1, dtype=numpy.int32)
offsets1[0] = 0
numpy.cumsum([len(x) for x in xs], out=offsets1[1:])
offsets2 = numpy.empty(length + 1, dtype=numpy.int32)
offsets2[0] = 0
numpy.cumsum(lengths, dtype=numpy.int32, out=offsets2[1:])
x = xp.concatenate(xs, axis=0)
o = xp.empty_like(x)
unit = xs[0].size // len(xs[0])
size = length * len(xs) * unit
cuda.elementwise(
'int32 len, int32 unit, raw int32 off1, raw int32 off2, raw T vs',
'raw T hs',
'''
int ind = i / unit;
int off = i - ind * unit;
int y = ind / len;
int x = ind - y * len;
if (off2[x] + y < off2[x + 1]) {
hs[(off2[x] + y) * unit + off] = vs[(off1[y] + x) * unit + off];
}
''',
'transpose_sequence'
)(length, unit, cuda.to_gpu(offsets1), cuda.to_gpu(offsets2), x, o,
size=size)
outs = tuple(xp.split(o, offsets2[1:-1]))
return outs
class TransposeSequence(function_node.FunctionNode):
"""Function that transposes a list of Variables."""
def __init__(self, length):
self._length = length
def check_type_forward(self, xs_type):
for p, n in zip(xs_type, xs_type[1:]):
type_check.expect(
p.shape[0] >= n.shape[0],
p.shape[1:] == n.shape[1:],
)
def forward(self, xs):
if not xs:
return ()
return _transpose(xs, self._length)
def backward(self, indexes, grad_outputs):
return TransposeSequence(len(self.inputs)).apply(grad_outputs)
def transpose_sequence(xs):
"""Transpose a list of Variables.
This function transposes a list of :class:`~chainer.Variable`\\ s and
returns a list of :class:`Variable`\\ s.
    For example, when a user gives ``[(0, 1, 2, 3), (4, 5), (6)]``, the function
returns ``[(0, 4, 6), (1, 5), (2), (3)]``.
Note that a given list needs to be sorted by each length of
:class:`~chainer.Variable`.
Args:
xs (list of :class:`~chainer.Variable` or :ref:`ndarray`):
Variables to transpose.
Returns:
tuple of :class:`~chainer.Variable`: Transposed list.
.. admonition:: Example
>>> lst = [chainer.Variable(np.array([1, 1, 1])),
... chainer.Variable(np.array([2, 2])),
... chainer.Variable(np.array([3]))]
>>> lst
[variable([1, 1, 1]), variable([2, 2]), variable([3])]
>>> transposed = F.transpose_sequence(lst)
>>> transposed
(variable([1, 2, 3]), variable([1, 2]), variable([1]))
"""
if not xs:
return ()
return TransposeSequence(len(xs[0])).apply(xs)
| 3,591
| 28.68595
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/array/pad.py
|
import numpy
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class Pad(function_node.FunctionNode):
"""Padding of an array."""
def __init__(self, pad_width, mode, **keywords):
self.mode = mode
self.keywords = keywords
self.pad_width = pad_width
self.pad_bw = numpy.asarray(pad_width)
if self.pad_bw.size == 1:
self.pad_bw = numpy.repeat(self.pad_bw, 2)
def check_type_forward(self, in_types):
        # Depending on the arguments, pad_width and keywords, the input value
        # may be inappropriate. In that case, numpy.pad or cupy.pad will raise
        # errors, so this function only checks the size and the dtype.
type_check._argname(in_types, ('x',))
x_type = in_types[0]
type_check.expect(x_type.dtype.kind == 'f')
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
return xp.pad(inputs[0], self.pad_width, mode=self.mode,
**self.keywords),
def backward(self, inputs, grad_outputs):
gy, = grad_outputs
in_shape = self.inputs[0].shape
if self.pad_bw.ndim == 1:
self.pad_bw = numpy.tile(self.pad_bw, (len(in_shape), 1))
input_idxs = tuple(
slice(p[0], p[0] + dim) for dim, p in zip(in_shape, self.pad_bw))
return gy[input_idxs],
def pad(x, pad_width, mode, **keywords):
"""Pad an input variable.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input data.
pad_width (int or array-like):
Number of values padded to the edges of each axis.
mode (str):
Specifies how the function fills the periphery of the array.
The mode is passed to :func:`numpy.pad` or :func:`cupy.pad`.
If it is ``'constant'``, the input is padded by a constant value
specified by ``constant_values``.
constant_values (int or array-like):
Constant values to fill the periphery in the ``'constant'`` mode.
Returns:
~chainer.Variable: Output variable.
"""
return Pad(pad_width, mode, **keywords).apply((x,))[0]
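# A minimal usage sketch of ``pad`` (assuming ``numpy`` is imported as ``np``
# and ``chainer.functions`` as ``F``, as in the doctest examples elsewhere in
# this package):
#
#     >>> x = np.zeros((2, 3), dtype=np.float32)
#     >>> y = F.pad(x, 1, mode='constant', constant_values=0)
#     >>> y.shape
#     (4, 5)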
| 2,221
| 33.71875
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/array/resize_images.py
|
from __future__ import division
import numpy
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
def _infer_lines(B, C, H, W, out_H, out_W, kH, kW):
target_size = 2 ** 17
line_size = B * C * (H * W // out_H + kH * kW * out_W)
target_lines = target_size // line_size
if target_lines < out_H:
lines = 1
while True:
next_lines = lines * 2
if next_lines > target_lines:
break
lines = next_lines
else:
lines = out_H
return lines
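# ``_infer_lines`` chooses how many output rows to process per panel so that
# one panel's working set (counted in array elements) stays roughly within
# ``target_size``. For instance, with B=1, C=3, H=W=256, out_H=out_W=512 and
# kH=kW=2, ``line_size`` is 3 * (65536 // 512 + 4 * 512) = 6528,
# ``target_lines`` is 131072 // 6528 = 20, and ``lines`` ends up as 16, the
# largest power of two not exceeding 20.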
def interpolate_bilinear_cpu(x, v, u, vw, uw):
B, C, H, W = x.shape
out_H, out_W = v.shape
    # Interpolation is done panel by panel (i.e. multiple output lines at a
    # time) in order to better utilize CPU cache memory.
lines = _infer_lines(B, C, H, W, out_H, out_W, 2, 2)
vcol = numpy.empty((2, lines, out_W), dtype=v.dtype)
ucol = numpy.empty((2, lines, out_W), dtype=u.dtype)
wcol = numpy.empty((2, 2, lines, out_W), dtype=x.dtype)
y = numpy.empty((B * C, out_H * out_W), dtype=x.dtype)
for i in range(0, out_H, lines):
l = min(lines, out_H - i)
vcol = vcol[:, :l]
ucol = ucol[:, :l]
wcol = wcol[:, :, :l]
i_end = i + l
# indices
vcol[0] = v[i:i_end]
ucol[0] = u[i:i_end]
numpy.add(vcol[0], 1, out=vcol[1])
numpy.add(ucol[0], 1, out=ucol[1])
numpy.minimum(vcol[1], H - 1, out=vcol[1])
numpy.minimum(ucol[1], W - 1, out=ucol[1])
# weights
# wcol[0, 0] = (1 - uw) * (1 - vw)
# wcol[0, 1] = uw * (1 - vw)
# wcol[1, 0] = (1 - uw) * vw
# wcol[1, 1] = uw * vw
wcol[0, 1] = uw[i:i_end]
numpy.subtract(1, wcol[0, 1], out=wcol[0, 0])
numpy.multiply(wcol[0], vw[i:i_end], out=wcol[1])
wcol[0] -= wcol[1]
# packing to the panel whose shape is (B, C, 2, 2, l, out_W)
panel = x[:, :, vcol[:, None], ucol[None, :]]
# interpolation
panel = panel.reshape((B * C, 4, l * out_W))
weights = wcol.reshape((4, l * out_W))
iout = i * out_W
iout_end = i_end * out_W
numpy.einsum('ijk,jk->ik', panel, weights, out=y[:, iout:iout_end])
del panel, weights
return y.reshape((B, C, out_H, out_W))
def interpolate_bilinear_gpu(x, v, u, vw, uw):
B, C, H, W = x.shape
out_H, out_W = v.shape
y = cuda.cupy.empty((B, C, out_H, out_W), dtype=x.dtype)
cuda.elementwise(
'raw T x, S v, S u, T vw, T uw, S H, S W, S outsize', 'T y', '''
// indices
S v0 = v;
S v1 = min(v + 1, (S)(H - 1));
S u0 = u;
S u1 = min(u + 1, (S)(W - 1));
// weights
T w0 = (1 - vw) * (1 - uw);
T w1 = (1 - vw) * uw;
T w2 = vw * (1 - uw);
T w3 = vw * uw;
// fetch
S offset = i / outsize * H * W;
T px0 = x[offset + v0 * W + u0];
T px1 = x[offset + v0 * W + u1];
T px2 = x[offset + v1 * W + u0];
T px3 = x[offset + v1 * W + u1];
// interpolate
y = (w0 * px0 + w1 * px1) + (w2 * px2 + w3 * px3);
''', 'resize_images_interpolate_bilinear'
)(x, v, u, vw, uw, H, W, out_H * out_W, y)
return y
def interpolate_grad_bilinear_cpu(gy, v, u, vw, uw, H, W):
B, C, out_H, out_W = gy.shape
# indices
vcol = numpy.empty((2, out_H, out_W), dtype=v.dtype)
ucol = numpy.empty((2, out_H, out_W), dtype=u.dtype)
vcol[0] = v
ucol[0] = u
numpy.add(vcol[0], 1, out=vcol[1])
numpy.add(ucol[0], 1, out=ucol[1])
numpy.minimum(vcol[1], H - 1, out=vcol[1])
numpy.minimum(ucol[1], W - 1, out=ucol[1])
# weights
wcol = numpy.empty((2, 2, out_H, out_W), dtype=gy.dtype)
wcol[0, 1] = uw
numpy.subtract(1, wcol[0, 1], out=wcol[0, 0])
numpy.multiply(wcol[0], vw, out=wcol[1])
wcol[0] -= wcol[1]
# grad
gycol = gy.reshape((B * C, 1, 1, out_H, out_W)) * wcol
# ravel everything and use `bincount`
indices = (vcol[:, None] * W + ucol[None, :]).ravel()
offsets = numpy.arange(0, B * C * H * W, H * W, dtype=v.dtype)
indices = (offsets[:, None] + indices).ravel()
gx = numpy.bincount(indices, weights=gycol.ravel(),
minlength=(B * C * H * W))
gx = gx.astype(gy.dtype, copy=False)
return gx.reshape((B, C, H, W))
def interpolate_grad_bilinear_gpu(gy, v, u, vw, uw, H, W):
B, C, out_H, out_W = gy.shape
gx = cuda.cupy.zeros((B * C, H, W), dtype=gy.dtype)
cuda.elementwise(
'T gy, S v, S u, T vw, T uw, S H, S W, S outsize', 'raw T gx', '''
// indices
S v0 = v;
S v1 = min(v + 1, (S)(H - 1));
S u0 = u;
S u1 = min(u + 1, (S)(W - 1));
// weights
T w0 = (1 - vw) * (1 - uw);
T w1 = (1 - vw) * uw;
T w2 = vw * (1 - uw);
T w3 = vw * uw;
// scatter
S offset = i / outsize * H * W;
atomicAdd(&gx[offset + v0 * W + u0], w0 * gy);
atomicAdd(&gx[offset + v0 * W + u1], w1 * gy);
atomicAdd(&gx[offset + v1 * W + u0], w2 * gy);
atomicAdd(&gx[offset + v1 * W + u1], w3 * gy);
''', 'resize_images_interpolate_grad_bilinear'
)(gy, v, u, vw, uw, H, W, out_H * out_W, gx)
return gx.reshape((B, C, H, W))
def compute_indices_and_weights(out_size, in_size, mode, align_corners, xp):
out_H, out_W = out_size
H, W = in_size
if mode == 'bilinear':
if align_corners:
            v = xp.linspace(0, H - 1, num=out_H, dtype=numpy.float64)
            u = xp.linspace(0, W - 1, num=out_W, dtype=numpy.float64)
else:
y_scale = H / out_H
x_scale = W / out_W
            v = (xp.arange(out_H, dtype=numpy.float64) + 0.5) * y_scale - 0.5
v = xp.maximum(v, 0)
            u = (xp.arange(out_W, dtype=numpy.float64) + 0.5) * x_scale - 0.5
u = xp.maximum(u, 0)
vw, v = xp.modf(v)
uw, u = xp.modf(u)
elif mode == 'nearest':
y_scale = H / out_H
x_scale = W / out_W
        v = xp.minimum(xp.floor(
            xp.arange(out_H, dtype=numpy.float64) * y_scale), H - 1)
        u = xp.minimum(xp.floor(
            xp.arange(out_W, dtype=numpy.float64) * x_scale), W - 1)
vw = xp.zeros_like(v)
uw = xp.zeros_like(u)
return v, u, vw, uw
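# A small worked example of the bilinear case with ``align_corners=True``:
# for H = 3 and out_H = 5, ``linspace`` gives [0, 0.5, 1, 1.5, 2], so after
# ``modf`` the integer parts are v = [0, 0, 1, 1, 2] and the fractional
# weights are vw = [0, 0.5, 0, 0.5, 0]. The ``nearest`` mode instead floors
# the scaled indices and sets the weights to zero.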
class ResizeImages(function_node.FunctionNode):
def __init__(self, output_shape, mode, align_corners):
self.out_H = output_shape[0]
self.out_W = output_shape[1]
assert mode in ['bilinear', 'nearest']
self.mode = mode
self.align_corners = align_corners
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type = in_types[0]
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim == 4
)
def forward(self, inputs):
x, = inputs
xp = backend.get_array_module(x)
v, u, vw, uw = compute_indices_and_weights(
(self.out_H, self.out_W), x.shape[2:],
self.mode, self.align_corners, xp)
v = v.astype(numpy.intp)
u = u.astype(numpy.intp)
vw = vw.astype(x.dtype)
uw = uw.astype(x.dtype)
# Meshgrid-like operation. Meshgrid can cause
# performance loss due to memory consumption.
# Note that numpy 1.9 doesn't support broadcast_to method.
v, u, vw, uw = xp.broadcast_arrays(
v[:, None], u[None, :], vw[:, None], uw[None, :])
if xp is numpy:
y = interpolate_bilinear_cpu(x, v, u, vw, uw)
else:
y = interpolate_bilinear_gpu(x, v, u, vw, uw)
return y,
def backward(self, indexes, grad_outputs):
return ResizeImagesGrad(
self.inputs[0].shape,
(self.out_H, self.out_W),
self.mode, self.align_corners).apply(grad_outputs)
class ResizeImagesGrad(function_node.FunctionNode):
def __init__(self, input_shape, output_shape, mode, align_corners):
self.out_H = output_shape[0]
self.out_W = output_shape[1]
self.input_shape = input_shape
assert mode in ['bilinear', 'nearest']
self.mode = mode
self.align_corners = align_corners
def check_type_forward(self, in_types):
type_check._argname(in_types, ('gy',))
gy_type = in_types[0]
type_check.expect(
gy_type.dtype == numpy.float32,
gy_type.ndim == 4
)
def forward(self, inputs):
gy, = inputs
xp = backend.get_array_module(gy)
_, C, H, W = self.input_shape
v, u, vw, uw = compute_indices_and_weights(
(self.out_H, self.out_W), (H, W),
self.mode, self.align_corners, xp)
v = v.astype(numpy.intp)
u = u.astype(numpy.intp)
vw = vw.astype(gy.dtype)
uw = uw.astype(gy.dtype)
# Meshgrid-like operation. Meshgrid can cause
# performance loss due to memory consumption.
# Note that numpy 1.9 doesn't support broadcast_to method.
v, u, vw, uw = xp.broadcast_arrays(
v[:, None], u[None, :], vw[:, None], uw[None, :])
if xp is numpy:
gx = interpolate_grad_bilinear_cpu(gy, v, u, vw, uw, H, W)
else:
gx = interpolate_grad_bilinear_gpu(gy, v, u, vw, uw, H, W)
return gx,
def backward(self, indexes, grad_outputs):
return ResizeImages(
(self.out_H, self.out_W),
self.mode, self.align_corners).apply(grad_outputs)
def resize_images(x, output_shape, *, mode='bilinear', align_corners=True):
"""Resize images to the given shape.
This function resizes 2D data to :obj:`output_shape`.
Notation: here is a notation for dimensionalities.
- :math:`n` is the batch size.
- :math:`c_I` is the number of the input channels.
- :math:`h` and :math:`w` are the height and width of the input image,
respectively.
- :math:`h_O` and :math:`w_O` are the height and width of the output
image.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable of shape :math:`(n, c_I, h, w)`.
output_shape (tuple): This is a tuple of length 2 whose values are
:obj:`(h_O, w_O)`. Note that the order of height and width is
opposite of the one in OpenCV.
        mode ({'bilinear', 'nearest'}): Defines the sampling rule.
align_corners (bool): When this value is :obj:`True`,
the corners of the input are mapped to the corners of
the output. When :obj:`False`, the behavior is the same as
OpenCV.
Returns:
~chainer.Variable: Resized image whose shape is \
:math:`(n, c_I, h_O, w_O)`.
"""
return ResizeImages(output_shape, mode, align_corners).apply((x,))[0]
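# A minimal usage sketch of ``resize_images`` (assuming ``numpy`` as ``np``
# and ``chainer.functions`` as ``F``):
#
#     >>> x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
#     >>> y = F.resize_images(x, (8, 8))
#     >>> y.shape
#     (1, 1, 8, 8)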
| 10,949
| 31.784431
| 76
|
py
|
chainer
|
chainer-master/chainer/functions/activation/softmax.py
|
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
import chainer.functions
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
_algorithm = cuda.libcudnn.CUDNN_SOFTMAX_ACCURATE
class Softmax(function_node.FunctionNode):
"""Softmax activation function."""
def __init__(self, axis=1):
self.axis = axis
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type, = in_types
type_check.expect(
x_type.dtype.kind == 'f',
-x_type.ndim <= self.axis < x_type.ndim,
)
def forward(self, x):
xp = backend.get_array_module(*x)
if xp is cuda.cupy and chainer.should_use_cudnn('>=auto'):
y = cudnn.softmax_forward(x[0], self.axis, _algorithm)
else:
y = x[0] - x[0].max(axis=self.axis, keepdims=True)
xp.exp(y, out=y)
y /= y.sum(axis=self.axis, keepdims=True)
self.retain_outputs((0,))
return y,
def backward(self, indexes, grad_outputs):
y = self.get_retained_outputs()[0]
gy, = grad_outputs
return _SoftmaxGrad(self.axis).apply((y, gy))
class _SoftmaxGrad(function_node.FunctionNode):
def __init__(self, axis):
self.axis = axis
def forward(self, inputs):
self.retain_inputs((0, 1))
y, gy = inputs
xp = backend.get_array_module(*y)
if xp is cuda.cupy and chainer.should_use_cudnn('>=auto'):
gx = cudnn.softmax_backward(y, gy, self.axis, _algorithm)
else:
gx = y * gy
sumdx = gx.sum(axis=self.axis, keepdims=True)
gx -= y * sumdx
return gx,
def backward(self, indexes, grad_outputs):
y, gy = self.get_retained_inputs()
ggx, = grad_outputs
gs = chainer.functions.sum(ggx * y, axis=self.axis, keepdims=True)
ga = ggx - chainer.functions.broadcast_to(gs, gy.shape)
ret = []
if 0 in indexes:
s = chainer.functions.broadcast_to(chainer.functions.sum(
y * gy, axis=self.axis, keepdims=True), gy.shape)
gy2 = ga * gy - ggx * s
ret.append(gy2)
if 1 in indexes:
ggy = ga * y
ret.append(ggy)
return tuple(ret)
def softmax(x, axis=1):
"""Softmax function.
    This function computes the softmax along an axis. Let
    :math:`c = (c_1, c_2, \\dots, c_D)` be a slice of ``x`` along
the axis. For each slice :math:`c`, it computes the function :math:`f(c)`
defined as :math:`f(c)={\\exp(c) \\over \\sum_{d} \\exp(c_d)}`.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable.
A :math:`n`-dimensional (:math:`n \\geq 2`) float array.
axis (int): The axis along which the softmax is to be computed.
Returns:
~chainer.Variable: Output variable.
        A :math:`n`-dimensional (:math:`n \\geq 2`) float array, which has
        the same shape as ``x``.
.. admonition:: Example
>>> x = np.array([[0, 1, 2], [0, 2, 4]], np.float32)
>>> x
array([[0., 1., 2.],
[0., 2., 4.]], dtype=float32)
>>> y = F.softmax(x, axis=1)
>>> y.array
array([[0.09003057, 0.24472848, 0.66524094],
[0.01587624, 0.11731043, 0.86681336]], dtype=float32)
>>> F.sum(y, axis=1).array
array([1., 1.], dtype=float32)
"""
return Softmax(axis=axis).apply((x,))[0]
| 3,591
| 29.965517
| 77
|
py
|
chainer
|
chainer-master/chainer/functions/activation/rrelu.py
|
import numpy as np
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import argument
from chainer.utils import type_check
def _kern():
return cuda.elementwise(
'T cond, T x, T slope', 'T y',
'y = cond >= 0 ? x : (T)(slope * x)', 'rrelu')
class RReLU(function_node.FunctionNode):
"""Randomized Leaky rectifier unit."""
def __init__(self, lower=1. / 8, upper=1. / 3, r=None):
if not 0.0 <= lower < 1.0:
raise ValueError('lower must be in the range [0, 1)')
if not 0.0 <= upper < 1.0:
raise ValueError('upper must be in the range [0, 1)')
if not lower < upper:
raise ValueError('lower must be less than upper')
self.lower = lower
self.upper = upper
self.r = r
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type, = in_types
type_check.expect(x_type.dtype.kind == 'f')
if self.r is not None:
type_check.expect(x_type.dtype == self.r.dtype)
type_check.expect(x_type.shape == self.r.shape)
def forward_cpu(self, inputs):
x, = inputs
if chainer.config.train:
if self.r is None:
self.r = np.random.uniform(
self.lower, self.upper, x.shape
).astype(x.dtype, copy=False)
else:
self.r = np.full(
x.shape, (self.lower + self.upper) / 2, dtype=x.dtype)
y = np.where(x >= 0, x, x * self.r)
self.retain_outputs((0,))
return y,
def forward_gpu(self, inputs):
x, = inputs
xp = cuda.cupy
if chainer.config.train:
if self.r is None:
self.r = xp.random.uniform(
self.lower, self.upper, x.shape
).astype(x.dtype, copy=False)
else:
self.r = xp.full(
x.shape, (self.lower + self.upper) / 2, dtype=x.dtype)
y = _kern()(x, x, self.r)
self.retain_outputs((0,))
return y,
def backward(self, indexes, grad_outputs):
y = self.get_retained_outputs()[0].data
return _RReLUGrad(y, self.r).apply(grad_outputs)
class _RReLUGrad(function_node.FunctionNode):
def __init__(self, y, r):
self.r = r
self.y = y
def forward_cpu(self, inputs):
gy, = inputs
gy = np.where(self.y >= 0, gy, gy * self.r)
return gy,
def forward_gpu(self, inputs):
gy, = inputs
gy = _kern()(self.y, gy, self.r)
return gy,
def backward(self, indexes, grad_outputs):
return _RReLUGrad(self.y, self.r).apply(grad_outputs)
def rrelu(x, l=1. / 8, u=1. / 3, **kwargs):
"""rrelu(x, l=1. / 8, u=1. / 3, *, r=None, return_r=False)
    Randomized Leaky Rectified Linear Unit function.
This function is expressed as
.. math:: f(x)=\\max(x, rx),
where :math:`r` is a random number sampled from a uniform distribution
:math:`U(l, u)`.
.. note::
The :math:`r` corresponds to :math:`a` in the original
paper (https://arxiv.org/pdf/1505.00853.pdf).
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
l (float): The lower bound of the uniform distribution.
u (float): The upper bound of the uniform distribution.
r (:ref:`ndarray` or None):
The r to be used for rrelu.
            The shape and dtype must be the same as those of ``x`` and it
            should be on the same device.
If ``r`` is not specified or set to ``None``, an ``r`` will be
generated randomly according to the given ``l`` and ``u``.
If ``r`` is specified, ``l`` and ``u`` will be ignored.
return_r (bool):
            If ``True``, the ``r`` used for rrelu is returned together with
            the output variable.
            The returned ``r`` can later be reused by passing it to the ``r``
            argument.
Returns:
~chainer.Variable or tuple:
            When ``return_r`` is ``False`` (default), returns the output
            variable. Otherwise, returns a tuple of the output variable and
            ``r`` (:ref:`ndarray`). The ``r`` will be on the same device as
the input.
A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
.. admonition:: Example
>>> x = np.array([[-1, 0], [2, -3], [-2, 1]], np.float32)
>>> x
array([[-1., 0.],
[ 2., -3.],
[-2., 1.]], dtype=float32)
>>> F.rrelu(x).array # doctest: +SKIP
array([[-0.24850948, 0. ],
[ 2. , -0.50844127],
[-0.598535 , 1. ]], dtype=float32)
"""
r = None
return_r = False
if kwargs:
r, return_r = argument.parse_kwargs(
            kwargs, ('r', r), ('return_r', return_r),
train='train argument is not supported anymore. '
'Use chainer.using_config')
func = RReLU(l, u, r)
out, = func.apply((x,))
r = func.r
if return_r:
return out, r
return out
| 5,240
| 31.351852
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/activation/selu.py
|
from chainer.functions.activation import elu
def selu(x,
alpha=1.6732632423543772848170429916717,
scale=1.0507009873554804934193349852946):
"""Scaled Exponential Linear Unit function.
For parameters :math:`\\alpha` and :math:`\\lambda`, it is expressed as
.. math::
f(x) = \\lambda \\left \\{ \\begin{array}{ll}
x & {\\rm if}~ x \\ge 0 \\\\
\\alpha (\\exp(x) - 1) & {\\rm if}~ x < 0,
\\end{array} \\right.
See: https://arxiv.org/abs/1706.02515
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
alpha (float): Parameter :math:`\\alpha`.
scale (float): Parameter :math:`\\lambda`.
Returns:
~chainer.Variable: Output variable. A
:math:`(s_1, s_2, ..., s_N)`-shaped float array.
"""
return scale * elu.elu(x, alpha=alpha)
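# A minimal usage sketch of ``selu`` (assuming ``numpy`` as ``np`` and
# ``chainer.functions`` as ``F``): positive inputs are scaled by ``scale``
# and negative inputs follow the scaled ELU curve.
#
#     >>> x = np.array([-1., 0., 1.], dtype=np.float32)
#     >>> y = F.selu(x)
#     >>> y.shape
#     (3,)
#
# For these inputs ``y.array`` is approximately [-1.1113, 0.0, 1.0507].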
| 933
| 29.129032
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/activation/softplus.py
|
import numpy
from chainer.backends import cuda
from chainer import function_node
import chainer.functions
from chainer import utils
from chainer.utils import type_check
class Softplus(function_node.FunctionNode):
"""Softplus function."""
def __init__(self, beta=1.0):
self.beta = float(beta)
self.beta_inv = float(1.0 / beta)
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type, = in_types
type_check.expect(x_type.dtype.kind == 'f')
def forward_cpu(self, inputs):
self.retain_inputs((0,))
x = inputs[0]
# y = log(1 + exp(beta * x)) / beta
bx = self.beta * x
y = (numpy.fmax(bx, 0) +
numpy.log1p(numpy.exp(-numpy.fabs(bx)))) * self.beta_inv
return utils.force_array(y, x.dtype),
def forward_gpu(self, inputs):
self.retain_inputs((0,))
x = inputs[0]
y = cuda.elementwise(
'T x, T beta, T beta_inv', 'T y',
'''
T bx = beta * x;
y = (max(bx, (T)0) + log1p(exp(-fabs(bx)))) * beta_inv;
''',
'softplus_fwd'
)(x, self.beta, self.beta_inv)
return y,
def backward(self, indexes, grad_outputs):
x = self.get_retained_inputs()[0]
gy, = grad_outputs
return SoftplusGrad((self.beta,)).apply((x, gy))
class SoftplusGrad(function_node.FunctionNode):
"""Softplus gradient function."""
def __init__(self, inputs):
super(SoftplusGrad, self).__init__()
self.beta = inputs[0]
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
x_type, gy_type = in_types
type_check.expect(x_type.dtype.kind == 'f')
type_check.expect(gy_type.dtype.kind == 'f')
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
gx = (1 - 1 / (1 + numpy.exp(self.beta * x))) * gy
return utils.force_array(gx, x.dtype),
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
gx = cuda.elementwise(
'T x, T gy, T beta', 'T gx',
'gx = (1 - 1 / (1 + exp(beta * x))) * gy',
'softplus_bwd')(x, gy, self.beta)
return gx,
def backward(self, indexes, grad_outputs):
x, gy = self.get_retained_inputs()
ggx, = grad_outputs
e = chainer.functions.exp(self.beta * x)
gx = ggx * gy * self.beta * e / (1 + e) ** 2
ggy = SoftplusGrad((self.beta,)).apply((x, ggx))[0]
return gx, ggy
def softplus(x, beta=1.0):
"""Element-wise softplus function.
    The softplus function is a smooth approximation of ReLU.
.. math:: f(x)=\\frac{1}{\\beta}\\log(1 + \\exp(\\beta x)),
    where :math:`\\beta` is a parameter. The function approaches ReLU
    as :math:`\\beta` increases.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
beta (float): Parameter :math:`\\beta`.
Returns:
~chainer.Variable: Output variable. A
:math:`(s_1, s_2, ..., s_N)`-shaped float array.
.. admonition:: Example
>>> x = np.arange(-2, 3, 2).astype(np.float32)
>>> x
array([-2., 0., 2.], dtype=float32)
>>> F.softplus(x, beta=1.0).array
array([0.126928 , 0.6931472, 2.126928 ], dtype=float32)
"""
y, = Softplus(beta=beta).apply((x,))
return y
| 3,567
| 28.983193
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/activation/log_softmax.py
|
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
import chainer.functions
from chainer.utils import type_check
import chainerx
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
_algorithm = cuda.cuda.cudnn.CUDNN_SOFTMAX_LOG # type: ignore
def logsumexp(x, axis):
xp = backend.get_array_module(x)
m = x.max(axis=axis, keepdims=True)
y = x - m
xp.exp(y, out=y)
s = y.sum(axis=axis, keepdims=True)
xp.log(s, out=s)
m += s
return m
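# ``logsumexp`` computes log(sum(exp(x))) along ``axis`` while avoiding
# overflow by subtracting the per-slice maximum first. For example, for
# x = [1000., 1000.] a naive ``np.log(np.exp(x).sum())`` overflows to ``inf``,
# whereas this routine returns 1000 + log(2) (about 1000.6931).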
def _log_softmax(x, axis=1):
if chainer.should_use_cudnn('>=auto'):
xp = backend.get_array_module(x)
if xp is cuda.cupy:
return cudnn.softmax_forward(x, axis, _algorithm)
log_z = logsumexp(x, axis)
y = x - log_z
return y
class LogSoftmax(function_node.FunctionNode):
"""Log-softmax activation function."""
def __init__(self, axis=1):
self.axis = axis
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type, = in_types
type_check.expect(
x_type.dtype.kind == 'f',
-x_type.ndim <= self.axis < x_type.ndim,
)
def forward_chainerx(self, xs):
return chainerx.log_softmax(xs[0], axis=self.axis),
def forward(self, xs):
y = _log_softmax(xs[0], axis=self.axis)
self._x_xp = backend.get_array_module(*xs)
self._x_shape = xs[0].shape
self._x_dtype = xs[0].dtype
self.retain_outputs((0,))
return y,
def backward(self, indexes, gy):
y = self.get_retained_outputs()[0]
return LogSoftmaxGrad(
self._x_xp, self._x_shape, self._x_dtype, self.axis).apply(
(y, gy[0]))
class LogSoftmaxGrad(function_node.FunctionNode):
def __init__(self, x_xp, x_shape, x_dtype, axis):
self._x_xp = x_xp
self._x_shape = x_shape
self._x_dtype = x_dtype
self.axis = axis
def forward(self, inputs):
self.retain_inputs((0, 1))
y, gy = inputs
xp = self._x_xp
if xp is cuda.cupy and chainer.should_use_cudnn('>=auto'):
gx = cudnn.softmax_backward(y, gy, self.axis, _algorithm)
else:
gx = gy - xp.exp(y) * gy.sum(axis=self.axis, keepdims=True)
return gx,
def backward(self, indexes, ggx):
y, gy = self.get_retained_inputs()
ret = []
exp_y = chainer.functions.exp(y)
if 0 in indexes:
gy_sum = chainer.functions.sum(gy, self.axis, True)
gy_sum = chainer.functions.broadcast_to(gy_sum, gy.shape)
g0 = -ggx[0] * exp_y * gy_sum
ret.append(g0)
if 1 in indexes:
# TODO(Kenta Oono): implement it with double-backpropable F.matmul
a = chainer.functions.sum(ggx[0] * exp_y, self.axis, True)
a = chainer.functions.broadcast_to(a, gy.shape)
g1 = ggx[0] - a
ret.append(g1)
return ret
def log_softmax(x, axis=1):
"""Channel-wise log-softmax function.
    This function computes the logarithm of the softmax along an axis
    (the second axis by default).
    Let :math:`c = (c_1, c_2, \\dots, c_D)` be a slice of ``x`` along
    the axis. For each slice :math:`c`, it computes the logarithm of
the function :math:`f(c)` defined as
.. math::
f(c) = {\\exp(c) \\over \\sum_{d} \\exp(c_d)}.
This method is theoretically equivalent to ``log(softmax(x))`` but is more
stable.
.. note::
``log(softmax(x))`` may cause underflow when ``x`` is too small,
        because ``softmax(x)`` may return ``0``.
        The ``log_softmax`` method is more stable.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable.
A :math:`n`-dimensional (:math:`n \\geq 2`) float array.
axis (int): The axis along which the softmax is to be computed.
Returns:
~chainer.Variable: Output variable.
        A :math:`n`-dimensional (:math:`n \\geq 2`) float array, which has
        the same shape as ``x``.
.. seealso:: :func:`~chainer.functions.softmax`
.. admonition:: Example
>>> x = np.array([[0, 1, 2], [0, 2, 4]], np.float32)
>>> x
array([[0., 1., 2.],
[0., 2., 4.]], dtype=float32)
>>> F.log_softmax(x).array
array([[-2.407606 , -1.4076059 , -0.4076059 ],
[-4.1429315 , -2.1429315 , -0.14293146]], dtype=float32)
>>> np.allclose(F.log_softmax(x).data, F.log(F.softmax(x)).data)
True
"""
return LogSoftmax(axis=axis).apply((x,))[0]
| 4,633
| 29.688742
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/activation/maxout.py
|
from chainer.functions.array import reshape
from chainer.functions.math import minmax
from chainer.utils import type_check
def maxout(x, pool_size, axis=1):
"""Maxout activation function.
    It accepts an input tensor ``x``, reshapes the ``axis`` dimension
    (say its size is ``M * pool_size``) into two dimensions
    ``(M, pool_size)``, and takes the maximum along the ``pool_size``
    dimension.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`n`-dimensional (:math:`n \\ge` ``axis``)
float array. In general, its first dimension is assumed to be the
*minibatch dimension*. The other dimensions are treated as one
concatenated dimension.
pool_size (int):
The size used for downsampling of pooling layer.
axis (int):
The ``axis`` dimension to be reshaped. The size of ``axis``
dimension should be ``M * pool_size``.
Returns:
~chainer.Variable:
        Output variable. The shape of the output is the same as that of
        ``x`` except that the ``axis`` dimension is reduced from
        ``M * pool_size`` to ``M``.
.. seealso:: :class:`~chainer.links.Maxout`
.. admonition:: Example
Typically, ``x`` is the output of a linear layer or a convolution
layer. The following is the example where we use :func:`maxout` in
combination with a Linear link.
>>> in_size, out_size, pool_size = 10, 10, 10
>>> bias = np.arange(out_size * pool_size).astype(np.float32)
>>> l = L.Linear(in_size, out_size * pool_size, initial_bias=bias)
>>> x = np.zeros((1, in_size), np.float32) # prepare data
>>> x = l(x)
>>> y = F.maxout(x, pool_size)
>>> x.shape
(1, 100)
>>> y.shape
(1, 10)
>>> x.reshape((out_size, pool_size)).array
array([[ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.],
[10., 11., 12., 13., 14., 15., 16., 17., 18., 19.],
[20., 21., 22., 23., 24., 25., 26., 27., 28., 29.],
[30., 31., 32., 33., 34., 35., 36., 37., 38., 39.],
[40., 41., 42., 43., 44., 45., 46., 47., 48., 49.],
[50., 51., 52., 53., 54., 55., 56., 57., 58., 59.],
[60., 61., 62., 63., 64., 65., 66., 67., 68., 69.],
[70., 71., 72., 73., 74., 75., 76., 77., 78., 79.],
[80., 81., 82., 83., 84., 85., 86., 87., 88., 89.],
[90., 91., 92., 93., 94., 95., 96., 97., 98., 99.]], \
dtype=float32)
>>> y.array
array([[ 9., 19., 29., 39., 49., 59., 69., 79., 89., 99.]], \
dtype=float32)
"""
if pool_size <= 0:
raise ValueError('pool_size must be a positive integer.')
x_shape = x.shape
if x_shape[axis] % pool_size != 0:
expect = 'x.shape[axis] % pool_size == 0'
actual = 'x.shape[axis]={}, pool_size={}'.format(
x_shape[axis], pool_size)
        msg = 'axis dimension must be divisible by pool_size'
raise type_check.InvalidType(expect, actual, msg)
shape = (x_shape[:axis] +
(x_shape[axis] // pool_size, pool_size) +
x_shape[axis + 1:])
x = reshape.reshape(x, shape)
return minmax.max(x, axis=axis + 1)
| 3,309
| 38.879518
| 77
|
py
|
chainer
|
chainer-master/chainer/functions/activation/leaky_relu.py
|
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function_node
from chainer.utils import type_check
_kern = None
def _get_kern():
global _kern
if _kern is None:
_kern = cuda.elementwise(
'T cond, T x, T slope', 'T y',
'y = cond <= 0 ? (T)(slope * x) : x', 'lrelu')
return _kern
class LeakyReLU(function_node.FunctionNode):
"""Leaky rectifier unit."""
def __init__(self, slope=0.2):
self.slope = slope
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type, = in_types
type_check.expect(x_type.dtype.kind == 'f')
def forward_cpu(self, inputs):
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs)):
return self.forward_ideep(inputs)
x, = inputs
y = x.copy()
y[x <= 0] *= self.slope
if self.slope >= 0:
self.retain_outputs((0,))
else:
self.retain_inputs((0,))
return y,
def forward_ideep(self, inputs):
x, = inputs
y = intel64.ideep.relu.Forward(
intel64.ideep.array(x), self.slope)
if self.slope >= 0:
self.retain_outputs((0,))
else:
self.retain_inputs((0,))
return y,
def forward_gpu(self, inputs):
x, = inputs
y = _get_kern()(x, x, self.slope)
if self.slope >= 0:
self.retain_outputs((0,))
else:
self.retain_inputs((0,))
return y,
def backward(self, indexes, grad_outputs):
if self.slope >= 0:
cond = self.get_retained_outputs()[0].array
else:
cond = self.get_retained_inputs()[0].array
return _LeakyReLUGrad(cond, self.slope).apply(grad_outputs)
class _LeakyReLUGrad(function_node.FunctionNode):
def __init__(self, cond, slope):
self.cond = cond
self.slope = slope
def forward_cpu(self, inputs):
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs)):
return self.forward_ideep(inputs)
gy, = inputs
gy = gy.copy()
gy[self.cond <= 0] *= self.slope
return gy,
def forward_ideep(self, inputs):
gy, = inputs
gy = intel64.ideep.relu.Backward(
intel64.ideep.array(self.cond),
intel64.ideep.array(gy), self.slope)
return gy,
def forward_gpu(self, inputs):
gy, = inputs
gy = _get_kern()(self.cond, gy, self.slope)
return gy,
def backward(self, indexes, grad_outputs):
return _LeakyReLUGrad(self.cond, self.slope).apply(grad_outputs)
def leaky_relu(x, slope=0.2):
"""Leaky Rectified Linear Unit function.
This function is expressed as
.. math::
f(x) = \\left \\{ \\begin{array}{ll}
x & {\\rm if}~ x \\ge 0 \\\\
ax & {\\rm if}~ x < 0,
\\end{array} \\right.
where :math:`a` is a configurable slope value.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
slope (float): Slope value :math:`a`.
Returns:
~chainer.Variable: Output variable. A
:math:`(s_1, s_2, ..., s_N)`-shaped float array.
.. admonition:: Example
>>> x = np.array([[-1, 0], [2, -3], [-2, 1]], np.float32)
>>> x
array([[-1., 0.],
[ 2., -3.],
[-2., 1.]], dtype=float32)
>>> F.leaky_relu(x, slope=0.2).array
array([[-0.2, 0. ],
[ 2. , -0.6],
[-0.4, 1. ]], dtype=float32)
"""
return LeakyReLU(slope).apply((x,))[0]
| 3,792
| 25.900709
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/activation/hard_sigmoid.py
|
import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class HardSigmoid(function_node.FunctionNode):
"""Hard-sigmoid function."""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type, = in_types
type_check.expect(x_type.dtype.kind == 'f')
def forward_cpu(self, inputs):
x = inputs[0]
y = numpy.clip(x * 0.2 + 0.5, 0.0, 1.0)
self.retain_inputs((0,))
return utils.force_array(y, x.dtype),
def forward_gpu(self, inputs):
x = inputs[0]
self.retain_inputs((0,))
return cuda.elementwise(
'T x', 'T y',
'y = min(1.0, max(0.0, x * 0.2 + 0.5))',
'hard_sigmoid_fwd'
)(x),
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
return HardSigmoidGrad(x.data).apply(grad_outputs)
class HardSigmoidGrad(function_node.FunctionNode):
"""Hard-sigmoid gradient function."""
def __init__(self, x):
self.x = x
def check_type_forward(self, in_types):
type_check._argname(in_types, ('gy',))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == self.x.dtype
)
def forward_cpu(self, inputs):
gy, = inputs
gx = ((-2.5 < self.x) & (self.x < 2.5)) * gy * 0.2
return utils.force_array(gx, self.x.dtype),
def forward_gpu(self, inputs):
gy, = inputs
return cuda.elementwise(
'T x, T g', 'T gx',
'gx = fabs(x) < 2.5 ? 0.2 * g : 0',
'hard_sigmoid_bwd'
)(self.x, gy),
def backward(self, indexes, grad_outputs):
return HardSigmoidGrad(self.x).apply(grad_outputs)
def hard_sigmoid(x):
"""Element-wise hard-sigmoid function.
This function is defined as
.. math::
f(x) = \\left \\{ \\begin{array}{ll}
0 & {\\rm if}~ x < -2.5 \\\\
0.2 x + 0.5 & {\\rm if}~ -2.5 < x < 2.5 \\\\
1 & {\\rm if}~ 2.5 < x.
\\end{array} \\right.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
Returns:
~chainer.Variable: Output variable. A
:math:`(s_1, s_2, ..., s_N)`-shaped float array.
.. admonition:: Example
It maps the input values into the range of :math:`[0, 1]`.
>>> x = np.array([-2.6, -1, 0, 1, 2.6])
>>> x
array([-2.6, -1. , 0. , 1. , 2.6])
>>> F.hard_sigmoid(x).array
array([0. , 0.3, 0.5, 0.7, 1. ])
"""
return HardSigmoid().apply((x,))[0]
| 2,749
| 25.442308
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/activation/elu.py
|
import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class ELU(function_node.FunctionNode):
"""Exponential Linear Unit."""
def __init__(self, alpha=1.0):
self.alpha = float(alpha)
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type, = in_types
type_check.expect(x_type.dtype.kind == 'f')
def forward_cpu(self, inputs):
if self.alpha < 0:
self.retain_inputs((0,))
x, = inputs
y = x.copy()
negzero_indices = y <= 0
y[negzero_indices] = self.alpha * numpy.expm1(y[negzero_indices])
self.retain_outputs((0,))
return y,
def forward_gpu(self, inputs):
if self.alpha < 0:
self.retain_inputs((0,))
x, = inputs
y = cuda.elementwise(
'T x, T alpha', 'T y',
'y = x > 0 ? x : (T)(alpha * expm1(x))',
'elu_fwd')(x, self.alpha)
self.retain_outputs((0,))
return y,
def backward(self, indexes, grad_outputs):
y, = self.get_retained_outputs()
if self.alpha < 0:
cond, = self.get_retained_inputs()
else:
cond = y
gy, = grad_outputs
return ELUGrad(self.alpha, cond.array).apply((y,))[0] * gy,
class ELUGrad(function_node.FunctionNode):
"""Exponential Linear Unit gradient function."""
def __init__(self, alpha, cond):
self.alpha = alpha
self.cond = cond
def forward_cpu(self, inputs):
y, = inputs
gx = utils.force_array(y + y.dtype.type(self.alpha))
gx[self.cond > 0] = 1
return gx,
def forward_gpu(self, inputs):
y, = inputs
gx = cuda.elementwise(
'T y, T alpha, T cond', 'T gx',
'gx = cond > 0 ? (T)1 : (T)(y + alpha)',
'elu_bwd')(y, self.alpha, self.cond)
return gx,
def backward(self, indexes, grad_outputs):
ggx, = grad_outputs
gy2 = ggx * (self.cond <= 0)
return gy2,
def elu(x, alpha=1.0):
"""Exponential Linear Unit function.
For a parameter :math:`\\alpha`, it is expressed as
.. math::
f(x) = \\left \\{ \\begin{array}{ll}
x & {\\rm if}~ x \\ge 0 \\\\
\\alpha (\\exp(x) - 1) & {\\rm if}~ x < 0,
\\end{array} \\right.
See: https://arxiv.org/abs/1511.07289
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
alpha (float): Parameter :math:`\\alpha`. Default is 1.0.
Returns:
~chainer.Variable: Output variable. A
:math:`(s_1, s_2, ..., s_N)`-shaped float array.
.. admonition:: Example
>>> x = np.array([[-1, 0], [2, -3]], np.float32)
>>> x
array([[-1., 0.],
[ 2., -3.]], dtype=float32)
>>> y = F.elu(x, alpha=1.)
>>> y.array
array([[-0.63212055, 0. ],
[ 2. , -0.95021296]], dtype=float32)
"""
return ELU(alpha=alpha).apply((x,))[0]
| 3,179
| 26.413793
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/activation/swish.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
def _get_extended_shape(beta, x):
return (1,) + beta.shape + (1,) * (x.ndim - beta.ndim - 1)
def _get_reduction_axes(beta, x):
return (0,) + tuple(range(1 + beta.ndim, x.ndim))
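# For example, with ``x`` of shape (B, s1, s2, s3) and ``beta`` of shape
# (s1,), ``_get_extended_shape`` gives (1, s1, 1, 1) so that ``beta``
# broadcasts against ``x``, and ``_get_reduction_axes`` gives (0, 2, 3),
# the axes summed over when reducing a gradient back to ``beta``'s shape.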
def _sigmoid(x):
half = x.dtype.type(0.5)
return numpy.tanh(x * half) * half + half
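# ``_sigmoid`` uses the identity sigmoid(x) = 0.5 * tanh(0.5 * x) + 0.5;
# since tanh is bounded, this avoids the overflow warnings that a direct
# 1 / (1 + exp(-x)) can produce for large negative inputs.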
_preamble = '''
template <typename T> __device__ T sigmoid(T x) {
const T half = 0.5;
return tanh(x * half) * half + half;
}
'''
class Swish(function_node.FunctionNode):
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 'beta'))
x_type, beta_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
beta_type.dtype == x_type.dtype,
beta_type.ndim <= x_type.ndim - 1,
beta_type.shape == x_type.shape[1:1 +
type_check.eval(beta_type.ndim)]
)
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
x, beta = inputs
beta = beta.reshape(_get_extended_shape(beta, x))
y = x * _sigmoid(beta * x)
return y,
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
x, beta = inputs
beta = beta.reshape(_get_extended_shape(beta, x))
# Eliminating intermediate variable `bx` somehow degrades the
# precision.
y = cuda.elementwise(
'T x, T beta', 'T y',
'''
T bx = beta * x;
y = x * sigmoid(bx);
''',
'swish_fwd', preamble=_preamble
)(x, beta)
return y,
def backward(self, indexes, grad_outputs):
x, beta = self.get_retained_inputs()
gy, = grad_outputs
shape = _get_extended_shape(beta, x)
reduction_axes = _get_reduction_axes(beta, x)
return SwishGrad(shape, reduction_axes).apply((x, beta, gy))
class SwishGrad(function_node.FunctionNode):
def __init__(self, extended_shape, reduction_axes):
super(SwishGrad, self).__init__()
self.extended_shape = extended_shape
self.reduction_axes = reduction_axes
def forward_cpu(self, inputs):
self.retain_inputs((0, 1, 2))
x, beta, gy = inputs
beta = beta.reshape(self.extended_shape)
sig = _sigmoid(beta * x)
y = x * sig
by = beta * y
one = x.dtype.type(1)
gx = gy * (by + sig * (one - by))
gb = gy * y * (x - y)
gb = utils.force_array(gb.sum(axis=self.reduction_axes))
return gx, gb
def forward_gpu(self, inputs):
self.retain_inputs((0, 1, 2))
x, beta, gy = inputs
beta = beta.reshape(self.extended_shape)
gx, gb = cuda.elementwise(
'T x, T beta, T gy', 'T gx, T gb',
'''
T bx = beta * x;
T sig = sigmoid(bx);
T y = x * sig;
T by = beta * y;
gx = gy * (by + sig * (1 - by));
gb = gy * y * (x - y);
''',
'swish_bwd', preamble=_preamble
)(x, beta, gy)
gb = utils.force_array(gb.sum(axis=self.reduction_axes))
return gx, gb
def backward(self, indexes, grad_outputs):
x, beta, gy = self.get_retained_inputs()
beta = chainer.functions.broadcast_to(
beta.reshape(self.extended_shape), gy.shape)
ggx, ggb = grad_outputs
ggb = chainer.functions.broadcast_to(
ggb.reshape(self.extended_shape), gy.shape)
sig = chainer.functions.sigmoid(beta * x)
y = x * sig
by = beta * y
one_minus_sig = 1 - sig
sig_one_minus_by = sig * (1 - by)
y_x_minus_y = y * (x - y)
x_minus_2y = x - 2 * y
ret = []
common = gy * y * (2 + beta * x_minus_2y) * one_minus_sig
if 0 in indexes:
gx = ggx * gy * beta * one_minus_sig * \
(by + 2 * sig_one_minus_by) + ggb * common
ret.append(chainer.functions.cast(gx, x.dtype))
if 1 in indexes:
gb = ggx * common + ggb * gy * y_x_minus_y * x_minus_2y
gb = chainer.functions.sum(gb, axis=self.reduction_axes)
ret.append(chainer.functions.cast(gb, beta.dtype))
if 2 in indexes:
ggy = ggx * (by + sig_one_minus_by) + ggb * y_x_minus_y
ret.append(chainer.functions.cast(ggy, gy.dtype))
return ret
def swish(x, beta):
"""Swish activation function.
.. math:: f(x, \\beta) = x \\cdot \\sigma(\\beta x),
where :math:`\\sigma(\\cdot)` is the sigmoid function. It has the
following properties:
.. math::
f(x, 0) &= \\frac{x}{2}, \\\\
\\lim_{\\beta \\to \\infty} f(x, \\beta) &= \\max(0, x).
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable of
shape :math:`(s_B, s_1, s_2, ..., s_N)`, where :math:`s_B` is
assumed to be the *minibatch dimension*.
beta (:class:`~chainer.Variable` or :ref:`ndarray`): Parameter variable
:math:`\\beta` of shape :math:`(s_1, s_2, ..., s_M)`, where
            :math:`M` is an arbitrary integer satisfying
            :math:`0 \\leq M \\leq N`. The number of dimensions of ``beta``
will be matched with ``x`` by reshaping it as
            :math:`(1, s_1, ..., s_M, 1, ..., 1)`, then ``beta`` and ``x``
are multiplied together in an element-wise manner.
Returns:
~chainer.Variable: Output variable of the same shape as ``x``.
.. warning::
:math:`\\beta` is a trainable parameter in the original paper
(https://arxiv.org/abs/1710.05941). To train :math:`\\beta`, use
:class:`chainer.links.Swish` instead.
.. seealso::
:class:`chainer.links.Swish` to manage the model parameter ``beta``.
"""
y, = Swish().apply((x, beta))
return y
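# A minimal usage sketch of ``swish`` (assuming ``numpy`` as ``np`` and
# ``chainer.functions`` as ``F``). With ``beta`` fixed to zero the output
# reduces to ``x / 2``:
#
#     >>> x = np.arange(6, dtype=np.float32).reshape(2, 3)
#     >>> beta = np.zeros(3, dtype=np.float32)
#     >>> y = F.swish(x, beta)
#     >>> np.allclose(y.array, x / 2)
#     True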
| 6,024
| 30.217617
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/activation/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainer/functions/activation/tanh.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
import chainerx
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
_mode = cuda.libcudnn.CUDNN_ACTIVATION_TANH
class Tanh(function_node.FunctionNode):
"""Hyperbolic tangent function."""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_chainerx(self, x):
return chainerx.tanh(x[0]),
def forward_cpu(self, x):
y = utils.force_array(numpy.tanh(x[0]))
self.retain_outputs((0,))
self._use_cudnn = False
return y,
def forward_gpu(self, x):
if chainer.should_use_cudnn('==always') and x[0].flags.c_contiguous:
y = cudnn.activation_forward(x[0], _mode)
self.retain_inputs((0,))
self._use_cudnn = True
else:
y = cuda.cupy.empty_like(x[0])
cuda.cupy.tanh(x[0], out=y)
self._use_cudnn = False
self.retain_outputs((0,))
return y,
def backward(self, indexes, grad_outputs):
if self._use_cudnn:
x = self.get_retained_inputs()[0].data
else:
x = None
y = self.get_retained_outputs()[0]
gy = grad_outputs[0]
return TanhGrad(x).apply((y, gy))
class TanhGrad(function_node.FunctionNode):
def __init__(self, x):
super(TanhGrad, self).__init__()
# The original input `x` is only required for cuDNN.
# If it is None, this class does not use cuDNN.
# Note that x must be c-contiguous and it is checked
# in Tanh.forward_gpu.
self.x = x
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
y, gy = inputs
one = y.dtype.type(1)
return utils.force_array(gy * (one - y * y)),
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
y, gy = inputs
if (chainer.should_use_cudnn('==always') and
self.x is not None and gy.flags.c_contiguous):
gx = cudnn.activation_backward(self.x, y, gy, _mode)
else:
gx = cuda.elementwise(
'T y, T gy', 'T gx',
'gx = gy * (1 - y * y)',
'tanh_bwd')(y, gy)
return gx,
def backward(self, indexes, grad_outputs):
y, gy = self.get_retained_inputs()
ggx = grad_outputs[0]
y_mul_ggx = y * ggx
grad_y = -2 * gy * y_mul_ggx
ggy = ggx - y * y_mul_ggx
return grad_y, ggy
def tanh(x):
"""Elementwise hyperbolic tangent function.
.. math:: f(x)=\\tanh(x).
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
Returns:
~chainer.Variable: Output variable. A
:math:`(s_1, s_2, ..., s_N)`-shaped float array.
.. admonition:: Example
>>> x = np.arange(-1, 4, 2).astype(np.float32)
>>> x
array([-1., 1., 3.], dtype=float32)
>>> F.tanh(x).array
array([-0.7615942, 0.7615942, 0.9950548], dtype=float32)
"""
return Tanh().apply((x,))[0]
| 3,299
| 27.205128
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/activation/crelu.py
|
import six
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class CReLU(function_node.FunctionNode):
"""Concatenated Rectified Linear Unit."""
def __init__(self, axis=1):
if not isinstance(axis, six.integer_types):
raise TypeError('axis must be an integer value')
self.axis = axis
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].ndim > self.axis,
in_types[0].ndim >= -self.axis
)
def get_output_shape(self, input_shape):
output_shape = list(input_shape)
output_shape[self.axis] *= 2
return tuple(output_shape)
def forward(self, inputs):
x, = inputs
xp = backend.get_array_module(x)
y = xp.empty(self.get_output_shape(x.shape), dtype=x.dtype)
y_former, y_latter = xp.split(y, 2, axis=self.axis)
zero = x.dtype.type(0)
xp.maximum(zero, x, out=y_former)
xp.maximum(zero, -x, out=y_latter)
self.retain_inputs((0,))
return y,
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
gy, = grad_outputs
gy_former, gy_latter = chainer.functions.split_axis(
gy, 2, axis=self.axis)
return gy_former * (x.data > 0) - gy_latter * (x.data < 0),
def crelu(x, axis=1):
"""Concatenated Rectified Linear Unit function.
This function is expressed as follows
.. math:: f(x) = (\\max(0, x), \\max(0, -x)).
Here, two output values are concatenated along an axis.
See: https://arxiv.org/abs/1603.05201
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
axis (int): Axis that the output values are concatenated along.
Default is 1.
Returns:
~chainer.Variable: Output variable of concatenated array.
If the axis is 1, A :math:`(s_1, s_2 \\times 2, ..., s_N)`-shaped float
array.
.. admonition:: Example
>>> x = np.array([[-1, 0], [2, -3]], np.float32)
>>> x
array([[-1., 0.],
[ 2., -3.]], dtype=float32)
>>> y = F.crelu(x, axis=1)
>>> y.array
array([[0., 0., 1., 0.],
[2., 0., 0., 3.]], dtype=float32)
"""
return CReLU(axis=axis).apply((x,))[0]
| 2,525
| 28.717647
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/activation/relu.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
import chainerx
if cuda.available:
_relu_grad2_kernel = cuda.elementwise(
'T y, T gy', 'T gx',
'gx = y > 0 ? gy : (T)0', 'relu_bwd')
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
_mode = cuda.cuda.cudnn.CUDNN_ACTIVATION_RELU # type: ignore
class ReLU(function_node.FunctionNode):
"""Rectified Linear Unit."""
is_elementwise = True
_use_cudnn = False
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_chainerx(self, inputs):
x, = inputs
return chainerx.maximum(x, 0),
def forward_cpu(self, inputs):
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs)):
return self.forward_ideep(inputs)
x, = inputs
y = numpy.maximum(x, 0, dtype=x.dtype)
self.retain_outputs((0,))
return utils.force_array(y),
def forward_ideep(self, inputs):
x, = inputs
y = intel64.ideep.relu.Forward(intel64.ideep.array(x))
self.retain_outputs((0,))
return y,
def forward_gpu(self, inputs):
x, = inputs
if chainer.should_use_cudnn('>=auto') and x.flags.c_contiguous:
self._use_cudnn = True
y = cudnn.activation_forward(x, _mode)
else:
y = cuda.cupy.maximum(x, 0, dtype=x.dtype)
self.retain_outputs((0,))
return y,
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
y, = self.get_retained_outputs()
y_arr = y.raw_array
if self._use_cudnn and chainer.should_use_cudnn('>=auto'):
# cuDNN implementation
return ReLUGradCudnn(y_arr).apply((gy,))
# Generic implementation
return ReLUGrad2(y_arr).apply((gy,))
class ReLUGrad2(function_node.FunctionNode):
"""Computes the gradient of the ReLU function.
This function takes 2 variables b and c, and
computes f(b, c) = sign(b) * c with backpropagation
    where operations are done in an elementwise manner
    and sign(x) = 1 when x is positive and 0 otherwise.
As the gradient of f with respect to b is 0,
we do not backpropagate errors toward b for computational efficiency.
"""
def __init__(self, b):
super(ReLUGrad2, self).__init__()
self.b = b
def forward_cpu(self, inputs):
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs)):
return self.forward_ideep(inputs)
gy, = inputs
gx = gy * (self.b > 0)
return utils.force_array(gx, dtype=gy.dtype),
def forward_ideep(self, inputs):
gy, = inputs
gx = intel64.ideep.relu.Backward(
intel64.ideep.array(self.b),
intel64.ideep.array(gy))
return gx,
def forward_gpu(self, inputs):
gx = _relu_grad2_kernel(self.b, inputs[0])
return gx,
def backward(self, indexes, grad_outputs):
return ReLUGrad2(self.b).apply(grad_outputs)
class ReLUGradCudnn(function_node.FunctionNode):
"""Computes the gradient of the ReLU function.
This function takes 3 variables a, b, and c, and
computes f(a, b, c) = sign(b) * c with backpropagation
    where operations are done in an elementwise manner
    and sign(x) = 1 if x is positive and 0 otherwise.
    As the gradients of f with respect to a and b are 0,
we do not backpropagate errors toward them for computational efficiency.
"""
is_elementwise = True
def __init__(self, y):
super(ReLUGradCudnn, self).__init__()
self.y = y
def forward(self, inputs):
gy, = inputs
return cudnn.activation_backward(self.y, self.y, gy, _mode),
def backward(self, indexes, grad_outputs):
return ReLUGrad2(self.y).apply(grad_outputs)
def relu(x):
"""Rectified Linear Unit function.
.. math:: f(x)=\\max(0, x).
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
Returns:
~chainer.Variable: Output variable. A
:math:`(s_1, s_2, ..., s_N)`-shaped float array.
.. admonition:: Example
>>> x = np.array([[-1, 0], [2, -3], [-2, 1]], np.float32)
>>> np.any(x < 0)
True
>>> y = F.relu(x)
>>> np.any(y.array < 0)
False
>>> y.shape
(3, 2)
"""
y, = ReLU().apply((x,))
return y
| 4,736
| 27.196429
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/activation/prelu.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
def _fwd_kern():
return cuda.elementwise(
'T x, T cond, T W', 'T y',
'y = cond >= 0 ? x : (T)(x * W)', 'prelu')
def _get_extended_shape(W, x):
return (1,) + W.shape + (1,) * (x.ndim - W.ndim - 1)
def _get_reduce_axes(W, x):
return (0,) + tuple(range(1 + W.ndim, x.ndim))
class PReLUFunction(function_node.FunctionNode):
"""Parametric Rectified Linear Unit function."""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 'W'))
x_type, W_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
W_type.dtype == x_type.dtype,
x_type.ndim >= W_type.ndim + 1,
x_type.shape[1:1 + type_check.eval(W_type.ndim)] == W_type.shape
)
def forward_cpu(self, inputs):
x, W = inputs
y = x.copy()
masked = numpy.ma.masked_greater_equal(y, 0, copy=False)
shape = _get_extended_shape(W, y)
masked *= W.reshape(shape)
self.retain_inputs((0, 1))
return y,
def forward_gpu(self, inputs):
x, W = inputs
shape = _get_extended_shape(W, x)
y = _fwd_kern()(x, x, W.reshape(shape))
self.retain_inputs((0, 1))
return y,
def backward(self, indexes, grad_outputs):
x, W = self.get_retained_inputs()
gy, = grad_outputs
return PReLUFunctionGrad(
x.data, _get_reduce_axes(W, x),
_get_extended_shape(W, x)).apply((x, W, gy))
class PReLUFunctionGrad(function_node.FunctionNode):
"""Parametric Rectified Linear Unit gradient function."""
def __init__(self, cond, reduce_axes, extended_shape):
self.cond = cond
self.reduce_axes = reduce_axes
self.extended_shape = extended_shape
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 'W', 'gy'))
x_type, W_type, gy_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
W_type.dtype == x_type.dtype,
gy_type.dtype == x_type.dtype,
x_type.ndim >= W_type.ndim + 1,
x_type.shape[1:1 + type_check.eval(W_type.ndim)] == W_type.shape,
gy_type.shape == x_type.shape
)
def forward_cpu(self, inputs):
x, W, gy = inputs
mask = self.cond >= 0
masked = numpy.where(mask, 0, x * gy)
if self.reduce_axes is None:
# Reached from backward() of PReLUFunctionGrad i.e. this class, to
# compute higher order derivatives
gW = masked
else:
# Reached from backward() of PReLUFunction, to compute first
# derivatives
gW = masked.sum(axis=self.reduce_axes)
if numpy.isscalar(gW):
gW = numpy.array(gW)
gx = gy.copy()
masked = numpy.ma.array(gx, mask=mask)
masked *= W.reshape(self.extended_shape)
self.retain_inputs((0, 1, 2))
return gx, gW
def forward_gpu(self, inputs):
x, W, gy = inputs
masked = cuda.elementwise(
'T x, T cond, T gy', 'T masked',
'masked = cond >= 0 ? (T)0 : (T)(x * gy)',
'prelu_masked')(x, self.cond, gy)
if self.reduce_axes is None:
gW = masked.copy()
else:
gW = masked.sum(axis=self.reduce_axes)
gx = masked # reuse buffer
_fwd_kern()(gy, self.cond, W.reshape(self.extended_shape), gx)
self.retain_inputs((0, 1, 2))
return gx, gW
def backward(self, indexes, grad_outputs):
x, W, gy = self.get_retained_inputs()
ggx, ggW = grad_outputs
ggW = chainer.functions.broadcast_to(
chainer.functions.reshape(ggW, self.extended_shape), x.shape)
ggW *= self.cond < 0
gxgy, gxW = (
PReLUFunctionGrad(self.cond, None, self.extended_shape)
.apply((gy, W, ggx))
)
ret = []
if 0 in indexes:
ret.append(gy * ggW)
if 1 in indexes:
ret.append(chainer.functions.sum(gxW, axis=self.reduce_axes))
if 2 in indexes:
ret.append(x * ggW + gxgy)
return ret
def prelu(x, W):
"""Parametric ReLU function.
It accepts two arguments: an input ``x`` and a weight array ``W``
and computes the output as
.. math::
PReLU(x_i) = \\begin{cases}
x_i & (x_i>0) \\\\ W_i * x_i & (otherwise)\\end{cases}
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Its first axis is assumed to be the minibatch dimension.
W (:class:`~chainer.Variable` or :ref:`ndarray`): Weight variable.
Returns:
~chainer.Variable: Output variable
.. admonition:: Example
>>> x = np.arange(-3, 3, dtype=np.float32).reshape((2, 3))
>>> x
array([[-3., -2., -1.],
[ 0., 1., 2.]], dtype=float32)
>>> W = np.array([0.01, 0.1, 1], dtype=np.float32)
>>> W
array([0.01, 0.1 , 1. ], dtype=float32)
>>> F.prelu(x, W)
variable([[-0.03, -0.2 , -1. ],
[ 0. , 1. , 2. ]])
.. note::
When the PReLU function is combined with two-dimensional convolution,
        the elements of parameter :math:`W` are typically shared across
        different pixels within the same filter. In order to support such
        usage, this function accepts a parameter array whose shape matches
        the leading dimensions of the input array, excluding the batch
        dimension.
For example, if :math:`W` has the shape of :math:`(2, 3, 4)`,
:math:`x` must have the shape of :math:`(B, 2, 3, 4, S_1, ..., S_N)`
where :math:`B` is the batch size and the number of trailing
:math:`S`'s :math:`N` is an arbitrary non-negative integer.
.. warning::
:math:`W` is a trainable parameter in the original paper
(https://arxiv.org/abs/1502.01852). To train :math:`W`, use
:class:`chainer.links.PReLU` instead.
.. seealso::
:class:`chainer.links.PReLU` to manage the model parameter ``W``.
"""
return PReLUFunction().apply((x, W))[0]
| 6,332
| 30.984848
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/activation/clipped_relu.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
import chainerx
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
_mode = cuda.cuda.cudnn.CUDNN_ACTIVATION_CLIPPED_RELU # type: ignore
class ClippedReLU(function_node.FunctionNode):
"""Clipped Rectifier Unit function.
Clipped ReLU is written as
:math:`ClippedReLU(x, z) = \\min(\\max(0, x), z)`,
    where :math:`z(>0)` is a parameter that caps the return value of ReLU.
"""
_use_cudnn = False
def __init__(self, z):
if not isinstance(z, float):
raise TypeError('z must be float value')
# z must be positive.
assert z > 0
self.cap = z
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type = in_types[0]
type_check.expect(x_type.dtype.kind == 'f')
def forward_chainerx(self, inputs):
x, = inputs
return chainerx.clipped_relu(x, self.cap),
def forward_cpu(self, inputs):
self.retain_inputs((0,))
x, = inputs
return utils.force_array(numpy.minimum(numpy.maximum(0, x), self.cap),
x.dtype),
def forward_gpu(self, inputs):
self.retain_inputs((0,))
x, = inputs
if chainer.should_use_cudnn('==always') and x.flags.c_contiguous:
self._use_cudnn = True
y = cudnn.activation_forward(x, _mode, self.cap)
self.retain_outputs((0,))
else:
return cuda.elementwise(
'T x, T cap', 'T y', 'y = min(max(x, (T)0), cap)',
'clipped_relu_fwd')(x, self.cap),
return y,
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
if chainer.should_use_cudnn('==always') and self._use_cudnn:
y = self.get_retained_outputs()[0]
return ClippedReLUGrad3(x.data, y.data, self.cap).apply(
grad_outputs)
else:
return ClippedReLUGrad2(x.data, self.cap).apply(grad_outputs)
class ClippedReLUGrad2(function_node.FunctionNode):
"""Clipped Rectifier Unit gradient function."""
def __init__(self, x, z):
self.x = x
self.cap = z
def check_type_forward(self, in_types):
type_check._argname(in_types, ('gy',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, inputs):
gy, = inputs
x = self.x
return utils.force_array(
gy * (0 < x) * (x < self.cap), x.dtype),
def forward_gpu(self, inputs):
gy, = inputs
gx = cuda.elementwise(
'T x, T gy, T z', 'T gx',
'gx = ((x > 0) & (x < z)) ? gy : (T)0',
'clipped_relu_bwd')(self.x, gy, self.cap)
return gx,
def backward(self, indexes, grad_outputs):
return ClippedReLUGrad2(self.x, self.cap).apply(grad_outputs)
class ClippedReLUGrad3(function_node.FunctionNode):
"""Clipped Rectifier Unit gradient function."""
def __init__(self, x, y, z):
self.x = x
self.y = y
self.cap = z
def check_type_forward(self, in_types):
type_check._argname(in_types, ('gy',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, inputs):
gy, = inputs
return utils.force_array(
gy * (0 < self.x) * (self.x < self.cap), self.x.dtype),
def forward_gpu(self, inputs):
assert chainer.should_use_cudnn('==always')
return cudnn.activation_backward(self.x, self.y, inputs[0], _mode,
self.cap),
def backward(self, indexes, grad_outputs):
return ClippedReLUGrad3(self.x, self.y, self.cap).apply(grad_outputs)
def clipped_relu(x, z=20.0):
"""Clipped Rectifier Unit function.
For a clipping value :math:`z(>0)`, it computes
.. math:: \\text{ClippedReLU}(x, z) = \\min(\\max(0, x), z).
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_n)`-shaped float array.
z (float): Clipping value. (default = 20.0)
Returns:
~chainer.Variable: Output variable. A
:math:`(s_1, s_2, ..., s_n)`-shaped float array.
.. admonition:: Example
>>> x = np.random.uniform(-100, 100, (10, 20)).astype(np.float32)
>>> z = 10.0
>>> np.any(x < 0)
True
>>> np.any(x > z)
True
>>> y = F.clipped_relu(x, z=z)
>>> np.any(y.array < 0)
False
>>> np.any(y.array > z)
False
"""
y, = ClippedReLU(z).apply((x,))
return y
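# Editor's note: an illustrative sketch, not part of the original module;
# the helper name ``_clipped_relu_grad_example`` is hypothetical.  It shows
# the gradient behaviour implemented by ``ClippedReLUGrad2`` above: the
# gradient of ``clipped_relu`` is 1 inside (0, z) and 0 elsewhere.
def _clipped_relu_grad_example():
    x = chainer.Variable(
        numpy.array([-1.0, 0.5, 5.0, 25.0], dtype=numpy.float32))
    y = clipped_relu(x, z=20.0)
    y.grad = numpy.ones_like(y.array)
    y.backward()
    # Expected gradient: [0, 1, 1, 0]
    assert numpy.allclose(x.grad, [0.0, 1.0, 1.0, 0.0])
    return x.grad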
def relu6(x):
"""Rectifier Unit function clipped at 6.
It computes
.. math:: \\text{ReLU6}(x) = \\min(\\max(0, x), 6).
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_n)`-shaped float array.
Returns:
~chainer.Variable: Output variable. A
:math:`(s_1, s_2, ..., s_n)`-shaped float array.
.. seealso:: :func:`chainer.functions.clipped_relu`
.. admonition:: Example
>>> x = np.array([-20, -2, 0, 2, 4, 10, 100]).astype(np.float32)
>>> x
array([-20., -2., 0., 2., 4., 10., 100.], dtype=float32)
>>> F.relu6(x)
variable([0., 0., 0., 2., 4., 6., 6.])
"""
y, = ClippedReLU(6.0).apply((x,))
return y
| 5,519
| 27.601036
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/activation/sigmoid.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
_mode = cuda.libcudnn.CUDNN_ACTIVATION_SIGMOID
class Sigmoid(function_node.FunctionNode):
"""Logistic sigmoid function."""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, inputs):
x = inputs[0]
half = x.dtype.type(0.5)
y = utils.force_array(numpy.tanh(x * half) * half + half)
self.retain_outputs((0,))
self._use_cudnn = False
return y,
def forward_gpu(self, inputs):
x = inputs[0]
if chainer.should_use_cudnn('==always') and x.flags.c_contiguous:
y = cudnn.activation_forward(x, _mode)
self.retain_inputs((0,))
self._use_cudnn = True
else:
y = cuda.elementwise(
'T x', 'T y', 'y = tanh(x * 0.5) * 0.5 + 0.5',
'sigmoid_fwd')(x)
self._use_cudnn = False
self.retain_outputs((0,))
return y,
def backward(self, indexes, grad_outputs):
if self._use_cudnn:
x = self.get_retained_inputs()[0].data
else:
x = None
y = self.get_retained_outputs()[0]
gy, = grad_outputs
return SigmoidGrad((x,)).apply((y, gy))
class SigmoidGrad(function_node.FunctionNode):
"""Logistic sigmoid gradient function."""
def __init__(self, inputs):
super(SigmoidGrad, self).__init__()
self.x = inputs[0]
def check_type_forward(self, in_types):
type_check._argname(in_types, ('y', 'gy'))
type_check.expect(in_types[0].dtype.kind == 'f')
type_check.expect(in_types[1].dtype.kind == 'f')
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
y, gy = inputs
one = y.dtype.type(1)
return utils.force_array(gy * y * (one - y)),
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
y, gy = inputs
if (chainer.should_use_cudnn('==always') and gy.flags.c_contiguous and
self.x is not None and self.x.flags.c_contiguous):
gx = cudnn.activation_backward(self.x, y, gy, _mode)
else:
gx = cuda.elementwise(
'T y, T gy', 'T gx',
'gx = gy * y * (1 - y)',
'sigmoid_bwd')(y, gy)
return gx,
def backward(self, indexes, grad_outputs):
y, gy = self.get_retained_inputs()
ggx, = grad_outputs
return ggx * gy * (1 - 2 * y), ggx * y * (1 - y)
def sigmoid(x):
"""Element-wise sigmoid logistic function.
.. math:: f(x)=(1 + \\exp(-x))^{-1}.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
Returns:
~chainer.Variable: Output variable. A
:math:`(s_1, s_2, ..., s_N)`-shaped float array.
.. admonition:: Example
It maps the input values into the range of :math:`[0, 1]`.
>>> x = np.arange(-2, 3, 2).astype(np.float32)
>>> x
array([-2., 0., 2.], dtype=float32)
>>> F.sigmoid(x).array
array([0.11920291, 0.5 , 0.8807971 ], dtype=float32)
"""
y, = Sigmoid().apply((x,))
return y
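# Editor's note: an illustrative sketch, not part of the original module;
# the helper name ``_sigmoid_grad_example`` is hypothetical.  It checks the
# identity used by ``SigmoidGrad`` above: d sigmoid(x) / dx = y * (1 - y),
# where y = sigmoid(x).
def _sigmoid_grad_example():
    x = chainer.Variable(numpy.linspace(-3, 3, 7).astype(numpy.float32))
    y = sigmoid(x)
    y.grad = numpy.ones_like(y.array)
    y.backward()
    expected = y.array * (1 - y.array)
    assert numpy.allclose(x.grad, expected, atol=1e-6)
    return x.grad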
| 3,489
| 28.327731
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/connection/shift.py
|
import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
class Shift(function_node.FunctionNode):
def __init__(self, ksize=3, dilate=1):
super(Shift, self).__init__()
self.kh, self.kw = _pair(ksize)
if self.kh % 2 != 1:
raise ValueError('kh must be odd')
if self.kw % 2 != 1:
raise ValueError('kw must be odd')
self.dy, self.dx = _pair(dilate)
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 4,
x_type.shape[1] >= self.kh * self.kw,
)
def forward_cpu(self, inputs):
x = inputs[0]
b, c, h, w = x.shape
py = self.kh // 2 * abs(self.dy)
px = self.kw // 2 * abs(self.dx)
x = numpy.pad(x, ((0, 0), (0, 0), (py, py), (px, px)),
'constant')
n_groups = self.kh * self.kw
group_size = c // n_groups
ret = []
for i, group_idx in enumerate(range(n_groups)):
# Make sure that center group is last
if group_idx == (n_groups - 1) // 2:
group_idx = n_groups - 1
elif group_idx == (n_groups - 1):
group_idx = (n_groups - 1) // 2
ky = (group_idx // self.kw) - py // abs(self.dy)
kx = (group_idx % self.kw) - px // abs(self.dx)
hs = py + -ky * self.dy
ws = px + -kx * self.dx
he = hs + h
we = ws + w
cs = i * group_size
ce = (i + 1) * group_size if i < n_groups - 1 else None
ret.append(x[:, cs:ce, hs:he, ws:we])
return numpy.concatenate(ret, axis=1),
def forward_gpu(self, inputs):
x = inputs[0]
b, c, h, w = x.shape
y = cuda.cupy.empty_like(x)
cuda.elementwise(
'raw T x, int32 c, int32 h, int32 w,'
'int32 kh, int32 kw,'
'int32 dy, int32 dx',
'T y',
'''
int b0 = i / (c * h * w);
int rest = i % (c * h * w);
int c0 = rest / (h * w);
rest %= h * w;
int out_row = rest / w;
int out_col = rest % w;
int n_groups = kh * kw;
int group_size = c / n_groups;
int group_idx = c0 / group_size;
// Make sure that center group is last
if (group_idx == (n_groups - 1) / 2) {
group_idx = n_groups - 1;
} else if (group_idx == n_groups - 1) {
group_idx = (n_groups - 1) / 2;
}
int ky = (group_idx / kw) - kh / 2;
int kx = (group_idx % kw) - kw / 2;
if (group_idx >= n_groups) {
ky = 0;
kx = 0;
}
int in_row = -ky * dy + out_row;
int in_col = -kx * dx + out_col;
if (in_row >= 0 && in_row < h && in_col >= 0 && in_col < w) {
y = x[b0 * c * h * w + c0 * h * w + in_row * w + in_col];
} else {
y = 0;
}
''',
'shift_gpu')(x, c, h, w, self.kh, self.kw, self.dy, self.dx, y)
return y,
def backward(self, indexes, grad_outputs):
return shift(grad_outputs[0], ksize=(self.kh, self.kw),
dilate=(-self.dy, -self.dx)),
def shift(x, ksize=3, dilate=1):
"""Shift function.
See: `Shift: A Zero FLOP, Zero Parameter Alternative to Spatial
Convolutions <https://arxiv.org/abs/1711.08141>`_
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable of shape :math:`(n, c, h, w)`.
ksize (int or pair of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k)`` are equivalent.
dilate (int or pair of ints): Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
Returns:
~chainer.Variable:
Output variable of same shape as ``x``.
"""
fnode = Shift(ksize, dilate)
y, = fnode.apply((x,))
return y
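# Editor's note: an illustrative sketch, not part of the original module;
# the helper name ``_shift_example`` is hypothetical.  ``shift`` keeps the
# input shape and requires at least ``kh * kw`` channels (see
# ``check_type_forward`` above), so 9 channels suffice for the default
# 3x3 kernel.
def _shift_example():
    x = numpy.arange(2 * 9 * 4 * 5, dtype=numpy.float32).reshape(2, 9, 4, 5)
    y = shift(x, ksize=3, dilate=1)
    assert y.shape == x.shape
    return y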
| 4,425
| 31.072464
| 77
|
py
|
chainer
|
chainer-master/chainer/functions/connection/deformable_convolution_2d_sampler.py
|
import numpy
from chainer import backend
from chainer.functions.array import broadcast
from chainer.functions.array import concat
from chainer.functions.array import pad as pad_module
from chainer.functions.array import spatial_transformer_sampler
from chainer.functions.math import matmul
def deformable_convolution_2d_sampler(x, offset, W, b=None, stride=1, pad=0):
"""Two-dimensional deformable convolution function using computed offset.
This is an implementation of two-dimensional deformable convolution from
`Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.
It takes four variables: the input image ``x``, the offset image
``offset``, the filter weight ``W``, and the bias vector ``b``.
Notation: here is the notation for the dimensionalities.
- :math:`n` is the batch size.
    - :math:`c_I` and :math:`c_O` are the number of the input and output
      channels, respectively.
- :math:`h` and :math:`w` are the height and width of the input image,
respectively.
- :math:`k_H` and :math:`k_W` are the height and width of the filters,
respectively.
- :math:`s_Y` and :math:`s_X` are the strides of the filter.
- :math:`p_H` and :math:`p_W` are the spatial padding sizes.
The output size :math:`(h_O, w_O)` is determined by the following
equations:
.. math::
h_O &= (h + 2p_H - k_H) / s_Y + 1,\\\\
w_O &= (w + 2p_W - k_W) / s_X + 1.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable of shape :math:`(n, c_I, h, w)`.
offset (:class:`~chainer.Variable` or :ref:`ndarray`):
Offset variable of shape
            :math:`(n, 2 \\cdot k_H \\cdot k_W, h_O, w_O)`. The first
            :math:`k_H \\cdot k_W` indices of the second axis correspond to
            the offsets in the horizontal direction. The last
            :math:`k_H \\cdot k_W` indices of the second axis correspond to
            the offsets in the vertical direction.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Weight variable of shape :math:`(c_O, c_I, k_H, k_W)`.
b (:class:`~chainer.Variable` or :ref:`ndarray`):
Bias variable of length :math:`c_O` (optional).
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
Returns:
~chainer.Variable: Output variable.
Deformable convolution adds 2D offsets to the regular grid sampling
locations in the standard convolution. It enables free form deformation of
the sampling grid.
See `Jifeng Dai, Haozhi Qi, Yuwen Xiong, Yi Li, Guodong Zhang, Han Hu,
Yichen Wei. Deformable Convolutional Networks
<https://arxiv.org/abs/1703.06211>`_
If the bias vector is given, then it is added to all spatial locations of
the output of convolution.
.. seealso::
:class:`~chainer.links.DeformableConvolution2D` to manage the model
parameters ``W`` and ``b``.
.. admonition:: Example
>>> x = np.random.uniform(0, 1, (2, 3, 4, 7)).astype(np.float32)
>>> offset = np.random.uniform(
... 0, 1, (2, 2 * 3 * 3, 2, 5)).astype(np.float32)
>>> W = np.random.uniform(0, 1, (4, 3, 3, 3)).astype(np.float32)
>>> b = np.random.uniform(0, 1, (4,)).astype(np.float32)
>>> y = F.deformable_convolution_2d_sampler(x, offset, W, b)
>>> y.shape
(2, 4, 2, 5)
"""
sy, sx = _pair(stride)
ph, pw = _pair(pad)
out_c, _, kh, kw = W.shape
n, c, h, w = x.shape
_, khkw2, out_h, out_w = offset.shape
if khkw2 != 2 * kh * kw:
raise ValueError(
'The shape of the offset does not match the kernel size')
grid = _offset2grid(offset, kh, kw, sy, sx, ph, pw, h, w)
grid = grid.reshape(n, 2, kh * kw, out_h * out_w)
x_pad = pad_module.pad(x, ((0, 0), (0, 0), (ph, ph), (pw, pw)), 'constant')
x_st = spatial_transformer_sampler.spatial_transformer_sampler(
x_pad, grid)
x_st = x_st.transpose(0, 3, 1, 2).reshape(n * out_h * out_w, c * kh * kw)
W = W.transpose(1, 2, 3, 0).reshape(c * kh * kw, out_c)
y = matmul.matmul(x_st, W)
y = y.reshape(n, out_h, out_w, out_c).transpose(0, 3, 1, 2)
if b is not None:
b = broadcast.broadcast_to(b[None, :, None, None], y.shape)
y += b
return y
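# Editor's note: an illustrative sketch, not part of the original module;
# the helper name ``_zero_offset_example`` is hypothetical.  With an
# all-zero offset the sampler reads the regular convolution grid, so the
# result is expected to closely match a plain convolution with the same
# weights (up to floating-point error).  This is only a sanity check under
# that assumption.
def _zero_offset_example():
    import chainer.functions as F
    kh = kw = 3
    x = numpy.random.uniform(0, 1, (1, 2, 6, 7)).astype(numpy.float32)
    W = numpy.random.uniform(0, 1, (4, 2, kh, kw)).astype(numpy.float32)
    offset = numpy.zeros((1, 2 * kh * kw, 4, 5), dtype=numpy.float32)
    y_deform = deformable_convolution_2d_sampler(x, offset, W)
    y_plain = F.convolution_2d(x, W)
    # Expected to be True.
    return numpy.allclose(y_deform.array, y_plain.array, atol=1e-4)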
def _offset2grid(offset, kh, kw, sy, sx, ph, pw, h, w):
n, khkw2, out_h, out_w = offset.shape
khkw = int(khkw2 / 2)
xp = backend.get_array_module(offset)
ys, xs = xp.meshgrid(
xp.arange(0, sy * out_h, sy, dtype=numpy.float32),
xp.arange(0, sx * out_w, sx, dtype=numpy.float32), indexing='ij',
copy=False
)
filter_offset_x = xp.tile(xp.arange(kw, dtype=numpy.float32), kh)
filter_offset_y = xp.repeat(xp.arange(kh, dtype=numpy.float32), kw)
x_coord = (offset[:, :khkw] + xs[None, None] +
filter_offset_x[None, :, None, None])
y_coord = (offset[:, khkw:] + ys[None, None] +
filter_offset_y[None, :, None, None])
    # The values of this variable are clipped to the range [-1, 1].
# The coordinate (-1, -1) corresponds to the upper-left
# corner of the input image.
x_coord = (x_coord / (w + 2 * pw - 1) - 0.5) * 2
y_coord = (y_coord / (h + 2 * ph - 1) - 0.5) * 2
# Shape of `coord` is (n, 2 * kh * kw, out_h, out_w)
coord = concat.concat([x_coord, y_coord], axis=1)
return coord
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
| 5,710
| 37.073333
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/connection/bilinear.py
|
import numpy
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
def _as_mat(x):
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
def _ij_ik_il_to_jkl(a, b, c):
ab = chainer.functions.matmul(a[:, :, None], b[:, None, :]) # ijk
return chainer.functions.matmul(_as_mat(ab).T, c).reshape(
a.shape[1], b.shape[1], c.shape[1])
def _ij_ik_jkl_to_il(a, b, c):
ab = chainer.functions.matmul(a[:, :, None], b[:, None, :]) # ijk
c = c.reshape(-1, c.shape[-1]) # [jk]l
return chainer.functions.matmul(_as_mat(ab), c)
def _ij_il_jkl_to_ik(a, b, c):
return _ij_ik_jkl_to_il(a, b, chainer.functions.swapaxes(c, 1, 2))
def _ik_il_jkl_to_ij(a, b, c):
return _ij_ik_jkl_to_il(a, b, chainer.functions.rollaxis(c, 0, c.ndim))
class BilinearFunction(function_node.FunctionNode):
def check_type_forward(self, in_types):
n_in = type_check.eval(in_types.size())
if n_in != 3 and n_in != 6:
raise type_check.InvalidType(
'{0} or {1}'.format(
in_types.size() == 3, in_types.size() == 6),
'{0} == {1}'.format(in_types.size(), n_in))
e1_type, e2_type, W_type = in_types[:3]
type_check_prod = type_check.make_variable(numpy.prod, 'prod')
type_check.expect(
e1_type.dtype == numpy.float32,
e1_type.ndim >= 2,
e2_type.dtype == numpy.float32,
e2_type.ndim >= 2,
e1_type.shape[0] == e2_type.shape[0],
W_type.dtype == numpy.float32,
W_type.ndim == 3,
type_check_prod(e1_type.shape[1:]) == W_type.shape[0],
type_check_prod(e2_type.shape[1:]) == W_type.shape[1],
)
if n_in == 6:
out_size = W_type.shape[2]
V1_type, V2_type, b_type = in_types[3:]
type_check.expect(
V1_type.dtype == numpy.float32,
V1_type.ndim == 2,
V1_type.shape[0] == W_type.shape[0],
V1_type.shape[1] == out_size,
V2_type.dtype == numpy.float32,
V2_type.ndim == 2,
V2_type.shape[0] == W_type.shape[1],
V2_type.shape[1] == out_size,
b_type.dtype == numpy.float32,
b_type.ndim == 1,
b_type.shape[0] == out_size,
)
def forward(self, inputs):
self.retain_inputs(tuple(range(len(inputs))))
e1 = _as_mat(inputs[0])
e2 = _as_mat(inputs[1])
W = inputs[2]
xp = backend.get_array_module(*inputs)
# optimize: y = xp.einsum('ij,ik,jkl->il', e1, e2, W)
y = xp.tensordot(xp.einsum('ij,ik->ijk', e1, e2), W, axes=2)
if len(inputs) == 6:
V1, V2, b = inputs[3:]
y += e1.dot(V1)
y += e2.dot(V2)
y += b
return y,
def backward(self, indexes, grad_outputs):
inputs = self.get_retained_inputs()
e1, e2, W = inputs[:3]
gy, = grad_outputs
if len(inputs) == 6:
V1, V2 = inputs[3], inputs[4]
return BilinearFunctionGrad().apply((e1, e2, W, V1, V2, gy))
return BilinearFunctionGrad().apply((e1, e2, W, gy))
class BilinearFunctionGrad(function_node.FunctionNode):
def forward(self, inputs):
self.retain_inputs(tuple(range(len(inputs))))
e1 = _as_mat(inputs[0])
e2 = _as_mat(inputs[1])
W, gy = inputs[2], inputs[-1]
xp = backend.get_array_module(*inputs)
# optimize: gW = xp.einsum('ij,ik,il->jkl', e1, e2, gy)
gW = xp.einsum('ij,ik->jki', e1, e2).dot(gy)
gy_W = xp.tensordot(gy, W, axes=(1, 2)) # 'il,jkl->ijk'
# optimize: ge1 = xp.einsum('ik,jkl,il->ij', e2, W, gy)
ge1 = xp.einsum('ik,ijk->ij', e2, gy_W)
# optimize: ge2 = xp.einsum('ij,jkl,il->ik', e1, W, gy)
ge2 = xp.einsum('ij,ijk->ik', e1, gy_W)
ret = ge1.reshape(inputs[0].shape), ge2.reshape(inputs[1].shape), gW
if len(inputs) == 6:
V1, V2 = inputs[3], inputs[4]
gV1 = e1.T.dot(gy)
gV2 = e2.T.dot(gy)
gb = gy.sum(0)
ge1 += gy.dot(V1.T)
ge2 += gy.dot(V2.T)
ret += gV1, gV2, gb
return ret
def backward(self, indexes, grad_outputs):
inputs = self.get_retained_inputs()
e1 = _as_mat(inputs[0])
e2 = _as_mat(inputs[1])
W, gy = inputs[2], inputs[-1]
gge1 = _as_mat(grad_outputs[0])
gge2 = _as_mat(grad_outputs[1])
ggW = grad_outputs[2]
dge1_de2 = _ij_il_jkl_to_ik(gge1, gy, W)
dge1_dW = _ij_ik_il_to_jkl(gge1, e2, gy)
dge1_dgy = _ij_ik_jkl_to_il(gge1, e2, W)
dge2_de1 = _ik_il_jkl_to_ij(gge2, gy, W)
dge2_dW = _ij_ik_il_to_jkl(e1, gge2, gy)
dge2_dgy = _ij_ik_jkl_to_il(e1, gge2, W)
dgW_de1 = _ik_il_jkl_to_ij(e2, gy, ggW)
dgW_de2 = _ij_il_jkl_to_ik(e1, gy, ggW)
dgW_dgy = _ij_ik_jkl_to_il(e1, e2, ggW)
ge1 = dgW_de1 + dge2_de1
ge2 = dgW_de2 + dge1_de2
gW = dge1_dW + dge2_dW
ggy = dgW_dgy + dge1_dgy + dge2_dgy
if len(inputs) == 6:
V1, V2 = inputs[3], inputs[4]
ggV1, ggV2, ggb = grad_outputs[3:]
gV1 = chainer.functions.matmul(gge1, gy, transa=True)
gV2 = chainer.functions.matmul(gge2, gy, transa=True)
ge1 += chainer.functions.matmul(gy, ggV1, transb=True)
ge2 += chainer.functions.matmul(gy, ggV2, transb=True)
ggy += chainer.functions.matmul(gge1, V1)
ggy += chainer.functions.matmul(gge2, V2)
ggy += chainer.functions.matmul(e1, ggV1)
ggy += chainer.functions.matmul(e2, ggV2)
ggy += chainer.functions.broadcast_to(ggb, ggy.shape)
ge1 = ge1.reshape(inputs[0].shape)
ge2 = ge2.reshape(inputs[1].shape)
if len(inputs) == 6:
return ge1, ge2, gW, gV1, gV2, ggy
return ge1, ge2, gW, ggy
def bilinear(e1, e2, W, V1=None, V2=None, b=None):
"""Applies a bilinear function based on given parameters.
This is a building block of Neural Tensor Network (see the reference paper
below). It takes two input variables and one or four parameters, and
outputs one variable.
To be precise, denote six input arrays mathematically by
:math:`e^1\\in \\mathbb{R}^{I\\cdot J}`,
:math:`e^2\\in \\mathbb{R}^{I\\cdot K}`,
:math:`W\\in \\mathbb{R}^{J \\cdot K \\cdot L}`,
:math:`V^1\\in \\mathbb{R}^{J \\cdot L}`,
:math:`V^2\\in \\mathbb{R}^{K \\cdot L}`, and
:math:`b\\in \\mathbb{R}^{L}`,
where :math:`I` is mini-batch size.
In this document, we call :math:`V^1`, :math:`V^2`, and :math:`b` linear
parameters.
The output of forward propagation is calculated as
.. math::
y_{il} = \\sum_{jk} e^1_{ij} e^2_{ik} W_{jkl} + \\
\\sum_{j} e^1_{ij} V^1_{jl} + \\sum_{k} e^2_{ik} V^2_{kl} + b_{l}.
Note that V1, V2, b are optional. If these are not given, then this
function omits the last three terms in the above equation.
.. note::
       This function accepts input variables ``e1`` and ``e2`` that are not
       matrices. In this case, the leading dimension is treated as the batch
       dimension, and the other dimensions are reduced to one dimension.
.. note::
In the original paper, :math:`J` and :math:`K`
must be equal and the author denotes :math:`[V^1 V^2]`
(concatenation of matrices) by :math:`V`.
Args:
e1 (:class:`~chainer.Variable` or :ref:`ndarray`):
Left input variable.
e2 (:class:`~chainer.Variable` or :ref:`ndarray`):
Right input variable.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Quadratic weight variable.
V1 (:class:`~chainer.Variable` or :ref:`ndarray`):
Left coefficient variable.
V2 (:class:`~chainer.Variable` or :ref:`ndarray`):
Right coefficient variable.
b (:class:`~chainer.Variable` or :ref:`ndarray`):
Bias variable.
Returns:
~chainer.Variable: Output variable.
See:
`Reasoning With Neural Tensor Networks for Knowledge Base Completion
<https://papers.nips.cc/paper/5028-reasoning-with-neural-tensor-
networks-for-knowledge-base-completion>`_ [Socher+, NIPS2013].
.. seealso::
:class:`~chainer.links.Bilinear` to manage the model parameters
``W``, ``V1``, ``V2``, and ``b``.
"""
flags = [V1 is None, V2 is None, b is None]
if any(flags):
if not all(flags):
raise ValueError('All coefficients and bias for bilinear() must '
'be None, if at least one of them is None.')
return BilinearFunction().apply((e1, e2, W))[0]
return BilinearFunction().apply((e1, e2, W, V1, V2, b))[0]
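# Editor's note: an illustrative sketch, not part of the original module;
# the helper name ``_bilinear_einsum_example`` is hypothetical.  It checks
# ``bilinear`` against the einsum form noted in ``BilinearFunction.forward``
# above: y = einsum('ij,ik,jkl->il', e1, e2, W).
def _bilinear_einsum_example():
    nb, nj, nk, nl = 5, 3, 4, 2
    e1 = numpy.random.uniform(-1, 1, (nb, nj)).astype(numpy.float32)
    e2 = numpy.random.uniform(-1, 1, (nb, nk)).astype(numpy.float32)
    W = numpy.random.uniform(-1, 1, (nj, nk, nl)).astype(numpy.float32)
    y = bilinear(e1, e2, W)
    expected = numpy.einsum('ij,ik,jkl->il', e1, e2, W)
    assert numpy.allclose(y.array, expected, atol=1e-5)
    return y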
| 9,015
| 33.412214
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/connection/deconvolution_2d.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import configuration
from chainer import function_node
import chainer.functions
from chainer.functions.connection import convolution_2d
from chainer import memory_layouts
from chainer.utils import argument
from chainer.utils import conv
from chainer.utils import type_check
import chainerx
if cuda.cudnn_enabled:
_cudnn_version = cuda.cuda.cudnn.getVersion() # type: ignore
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
class Deconvolution2DFunction(function_node.FunctionNode):
cover_all = None
_use_ideep = False
def __init__(self, stride=1, pad=0, outsize=None, **kwargs):
dilate, groups = argument.parse_kwargs(
kwargs, ('dilate', 1), ('groups', 1),
deterministic='deterministic argument is not supported anymore. '
'Use chainer.using_config(\'cudnn_deterministic\', value) context '
'where value is either `True` or `False`.',
requires_x_grad='requires_x_grad argument is not supported '
'anymore. Just remove the argument. Note that whether to compute '
'the gradient w.r.t. x is automatically decided during '
'backpropagation.')
self.sy, self.sx = _pair(stride)
self.ph, self.pw = _pair(pad)
self.outh, self.outw = (None, None) if outsize is None else outsize
self.dy, self.dx = _pair(dilate)
self.groups = groups
if self.dx < 1 or self.dy < 1:
raise ValueError('Dilate should be positive, but {} is '
'supplied.'.format(dilate))
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type, w_type = in_types[:2]
type_check.expect(
x_type.dtype.kind == 'f',
w_type.dtype.kind == 'f',
x_type.ndim == 4,
w_type.ndim == 4,
x_type.shape[1] == w_type.shape[0]
)
if self.outh is not None:
lower_bound = conv.get_conv_outsize(
self.outh, w_type.shape[2], self.sy, self.ph,
d=self.dy)
upper_bound = conv.get_conv_outsize(
self.outh, w_type.shape[2], self.sy, self.ph, cover_all=True,
d=self.dy)
type_check.expect(
lower_bound <= x_type.shape[2],
x_type.shape[2] <= upper_bound)
if self.outw is not None:
lower_bound = conv.get_conv_outsize(
self.outw, w_type.shape[3], self.sx, self.pw,
d=self.dx)
upper_bound = conv.get_conv_outsize(
self.outw, w_type.shape[3], self.sx, self.pw, cover_all=True,
d=self.dx)
type_check.expect(
lower_bound <= x_type.shape[3],
x_type.shape[3] <= upper_bound)
if type_check.eval(n_in) == 3:
b_type = in_types[2]
type_check.expect(
b_type.dtype == x_type.dtype,
b_type.ndim == 1,
# Need to consider the case that group count > 1.
# b_type.shape[0] == w_type.shape[1],
)
def check_layout_forward(self, inputs):
# TODO(niboshi): Write input layout check
pass
def _calc_out_size(self, x_shape, w_shape):
"""Calculates and stores `outh` and `outw`."""
_, _, kh, kw = w_shape
_, _, in_h, in_w = x_shape
# - k, m, n: shape of out_channel
# - b: number of inputs
# - h, w: height and width of kernels
# k, m, n, b, h, w -> b, k, m, n, h, w
if self.outh is None:
self.outh = conv.get_deconv_outsize(
in_h, kh, self.sy, self.ph, d=self.dy)
if self.outh <= 0:
raise RuntimeError('Height in the output must be positive.')
if self.outw is None:
self.outw = conv.get_deconv_outsize(
in_w, kw, self.sx, self.pw, d=self.dx)
if self.outw <= 0:
raise RuntimeError('Width in the output must be positive.')
def forward_cpu(self, inputs):
if ((self.dy == 1 and self.dx == 1)
and intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs)):
self._use_ideep = True
self.retain_inputs((0, 1)) # only retain x and W
if len(inputs) == 2:
(x, W), b = inputs, None
x_layout, w_layout = self.input_layouts
else:
x, W, b = inputs
x_layout, w_layout, _ = self.input_layouts
x_shape = memory_layouts._transpose_shape(x.shape, x_layout, None)
w_shape = memory_layouts._transpose_shape(W.shape, w_layout, None)
self._calc_out_size(x_shape, w_shape)
if self.groups > 1:
# Grouped convolution implementation
return self._forward_grouped_convolution(x, W, b)
elif (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs)):
# iDeep implementation
self._use_ideep = True
return self._forward_ideep(x, W, b)
else:
return self._forward_cpu_core(x, W, b)
def _forward_cpu_core(self, x, W, b):
if self._use_ideep:
return self._forward_ideep(x, W, b)
gcol = numpy.tensordot(W, x, (0, 1)).astype(x.dtype, copy=False)
gcol = numpy.rollaxis(gcol, 3)
y = conv.col2im_cpu(
gcol, self.sy, self.sx, self.ph, self.pw, self.outh, self.outw,
dy=self.dy, dx=self.dx)
# b, k, h, w
if b is not None:
y += b.reshape((1, b.size, 1, 1))
return y,
def _forward_ideep(self, x, W, b):
_, in_c, kh, kw = W.shape
n, _, in_h, in_w = x.shape
pd = (self.sy * (in_h - 1)
+ (kh + (kh - 1) * (self.dy - 1))
- self.outh - self.ph)
pr = (self.sx * (in_w - 1)
+ (kw + (kw - 1) * (self.dx - 1))
- self.outw - self.pw)
param = intel64.ideep.convolution2DParam(
(n, in_c, self.outh, self.outw),
self.dy, self.dx,
self.sy, self.sx,
self.ph, self.pw,
pd, pr)
y = intel64.ideep.convolution2D.BackwardData(
intel64.ideep.array(W),
intel64.ideep.array(x),
param)
if b is not None:
y += b.reshape((1, b.size, 1, 1))
return y,
def forward_gpu(self, inputs):
self.retain_inputs((0, 1)) # only retain x and W
if len(inputs) == 2:
(x, W), b = inputs, None
x_layout, w_layout = self.input_layouts
else:
x, W, b = inputs
x_layout, w_layout, _ = self.input_layouts
x_shape = memory_layouts._transpose_shape(x.shape, x_layout, None)
w_shape = memory_layouts._transpose_shape(W.shape, w_layout, None)
self._calc_out_size(x_shape, w_shape)
self._set_cover_all(x_shape, w_shape)
use_cudnn = (
chainer.should_use_cudnn('>=auto')
and not self.cover_all
and x.dtype == W.dtype
and ((self.dy == 1 and self.dx == 1)
or (_cudnn_version >= 6000
and not configuration.config.cudnn_deterministic))
and (self.groups <= 1 or _cudnn_version >= 7000)
)
if use_cudnn:
# cuDNN implementation
return self._forward_cudnn(x, W, b, (x_layout, w_layout))
elif self.groups > 1:
return self._forward_grouped_convolution(x, W, b)
else:
return self._forward_gpu_core(x, W, b)
def _forward_gpu_core(self, x, W, b):
# Implementation using col2im
gcol = cuda.cupy.tensordot(W, x, (0, 1)).astype(x.dtype,
copy=False)
# - k, m, n: shape of out_channel
# - b: number of inputs
# - h, w: height and width of kernels
# k, m, n, b, h, w -> b, k, m, n, h, w
gcol = cuda.cupy.rollaxis(gcol, 3)
y = conv.col2im_gpu(
gcol, self.sy, self.sx, self.ph, self.pw, self.outh, self.outw,
dy=self.dy, dx=self.dx)
if b is not None:
y += b.reshape(1, b.size, 1, 1)
return y,
def _forward_grouped_convolution(self, x, W, b):
# G: group count
# N: batch size
# kH, kW: kernel height, kernel width
# xC, xH, xW: x channels, x height, x width
# yC, yH, yW: y channels, y height, y width
G = self.groups
N, xC, xH, xW = x.shape
xCg = xC // G
_, yCg, kH, kW = W.shape # _ == xC
yC = yCg * G
x = x.transpose(1, 0, 2, 3) # (xC, N, xH, xW)
x = x.reshape(G, xCg, N * xH * xW)
W = W.reshape(G, xCg, yCg * kH * kW)
W = W.transpose(0, 2, 1) # (G, yCg*kH*kW, xCg)
# (G, yCg*kH*kW, N*xH*xW) = (G, yCg*kH*kW, xCg) @ (G, xCg, N*xH*xW)
col = convolution_2d._matmul(W, x).astype(x.dtype, copy=False)
col = col.reshape(yC, kH, kW, N, xH, xW)
col = col.transpose(3, 0, 1, 2, 4, 5) # (N, yC, kH, kW, xH, xW)
y = conv.col2im(col, self.sy, self.sx, self.ph, self.pw,
self.outh, self.outw, dy=self.dy, dx=self.dx)
if b is not None:
y += b.reshape(1, b.size, 1, 1)
return y,
def _forward_cudnn(self, x, W, b, input_layouts):
x_layout, w_layout = input_layouts
self.output_layouts = (x_layout,)
n = len(x)
_, c, _, _ = memory_layouts._transpose_shape(W.shape, w_layout, None)
y_raw_shape = memory_layouts._transpose_shape(
(n, c*self.groups, self.outh, self.outw), None, x_layout)
y = cuda.cupy.empty(y_raw_shape, dtype=x.dtype)
pad = (self.ph, self.pw)
stride = (self.sy, self.sx)
dilation = (self.dy, self.dx)
deterministic = configuration.config.cudnn_deterministic
auto_tune = configuration.config.autotune
tensor_core = configuration.config.use_cudnn_tensor_core
cudnn_x_layout = cuda._get_cudnn_tensor_layout_x(x_layout)
cudnn_w_layout = cuda._get_cudnn_tensor_layout_w(w_layout)
cuda.cudnn.convolution_backward_data(
W, x, b, y, pad, stride, dilation, self.groups,
deterministic=deterministic, auto_tune=auto_tune,
tensor_core=tensor_core,
d_layout=cudnn_x_layout, w_layout=cudnn_w_layout)
return y,
def forward_chainerx(self, inputs):
# TODO(imanishi): Support it
if self.dy != 1 or self.dx != 1:
return chainer.Fallback
# TODO(imanishi): Support it
if self.groups != 1:
return chainer.Fallback
# TODO(imanishi): Support it
if any(a.dtype != inputs[0].dtype for a in inputs):
return chainer.Fallback
# TODO(imanishi): Support it
self._calc_out_size(inputs[0].shape, inputs[1].shape)
self._set_cover_all(inputs[0].shape, inputs[1].shape)
if self.cover_all:
return chainer.Fallback
stride = (self.sy, self.sx)
pad = (self.ph, self.pw)
outsize = None if self.outh is None else (self.outh, self.outw)
return chainerx.conv_transpose(
*inputs, stride=stride, pad=pad, outsize=outsize),
def backward(self, indexes, grad_outputs):
x, W = self.get_retained_inputs()
if len(self.input_layouts) == 2:
x_layout, w_layout = self.input_layouts
else:
x_layout, w_layout, _ = self.input_layouts
gy, = grad_outputs
ret = []
if 0 in indexes:
if self.cover_all is None:
self._set_cover_all(x.shape, W.shape)
gx = chainer.functions.convolution_2d(
gy, W, stride=(self.sy, self.sx), pad=(self.ph, self.pw),
cover_all=self.cover_all, dilate=(self.dy, self.dx),
groups=self.groups)
ret.append(gx)
if 1 in indexes:
if self.cover_all is None:
self._set_cover_all(x.shape, W.shape)
gW, = convolution_2d.Convolution2DGradW(
self, W.shape, W.dtype, w_layout).apply((gy, x))
ret.append(gW)
if 2 in indexes:
gb = chainer.functions.sum(gy, axis=(0, 2, 3))
ret.append(gb)
return ret
def _set_cover_all(self, x_shape, w_shape):
_, _, kh, kw = w_shape
_, _, in_h, in_w = x_shape
self.cover_all = (
in_h != conv.get_conv_outsize(self.outh, kh, self.sy,
self.ph, d=self.dy) or
in_w != conv.get_conv_outsize(self.outw, kw, self.sx,
self.pw, d=self.dx))
def deconvolution_2d(x, W, b=None, stride=1, pad=0, outsize=None, **kwargs):
"""deconvolution_2d(x, W, b=None, stride=1, pad=0, outsize=None, *, \
dilate=1, groups=1)
Two dimensional deconvolution function.
    This is an implementation of two-dimensional deconvolution. In most deep
    learning frameworks and papers, this function is called
    **transposed convolution**. However, for historical reasons (e.g. the
    paper by Zeiler, `Deconvolutional Networks`_) and backward compatibility,
    this function is called **deconvolution** in Chainer.
.. _Deconvolutional Networks: \
http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf
It takes three variables: input image ``x``,
the filter weight ``W``, and the bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output
channels, respectively.
- :math:`h_I` and :math:`w_I` are the height and width of the input image,
respectively.
- :math:`h_K` and :math:`w_K` are the height and width of the filters,
respectively.
- :math:`h_P` and :math:`w_P` are the height and width of the spatial
padding size, respectively.
Let :math:`(s_Y, s_X)` be the stride of filter application. Then, the
output size :math:`(h_O, w_O)` is estimated by the following equations:
.. math::
h_O &= s_Y (h_I - 1) + h_K - 2h_P,\\\\
w_O &= s_X (w_I - 1) + w_K - 2w_P.
    The output of this function can be non-deterministic when it uses cuDNN.
    If ``chainer.configuration.config.cudnn_deterministic`` is ``True`` and
    the cuDNN version is >= v3, it forces cuDNN to use a deterministic
    algorithm.
    Deconvolution links can use a feature of cuDNN called autotuning, which
    selects the most efficient CNN algorithm for fixed-size images and can
    provide a significant performance boost for fixed neural nets.
    To enable it, use `chainer.using_config('autotune', True)`.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable of shape :math:`(n, c_I, h_I, w_I)`.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Weight variable of shape :math:`(c_I, c_O, h_K, w_K)`.
b (None or :class:`~chainer.Variable` or :ref:`ndarray`):
Bias variable of length :math:`c_O` (optional).
stride (:class:`int` or pair of :class:`int` s):
Stride of filter applications. ``stride=s`` and ``stride=(s, s)``
are equivalent.
pad (:class:`int` or pair of :class:`int` s):
Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
outsize (None or :class:`tuple` of :class:`int` s):
Expected output size of deconvolutional operation.
It should be pair of height and width :math:`(h_O, w_O)`.
Default value is ``None`` and the outsize is estimated by
input size, stride and pad.
dilate (:class:`int` or pair of :class:`int` s):
Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
groups (:class:`int`):
The number of groups to use grouped deconvolution.
The default is one, where grouped deconvolution is not used.
Returns:
~chainer.Variable:
Output variable of shape :math:`(n, c_O, h_O, w_O)`.
.. seealso::
:class:`~chainer.links.Deconvolution2D` to manage the model parameters
``W`` and ``b``.
.. admonition:: Example
>>> n = 10
>>> c_i, c_o = 1, 3
>>> h_i, w_i = 5, 10
>>> h_k, w_k = 10, 10
>>> h_p, w_p = 5, 5
>>> x = np.random.uniform(0, 1, (n, c_i, h_i, w_i)).astype(np.float32)
>>> x.shape
(10, 1, 5, 10)
>>> W = np.random.uniform(0, 1, (c_i, c_o, h_k, w_k)).\
astype(np.float32)
>>> W.shape
(1, 3, 10, 10)
>>> b = np.random.uniform(0, 1, c_o).astype(np.float32)
>>> b.shape
(3,)
>>> s_y, s_x = 5, 5
>>> y = F.deconvolution_2d(x, W, b, stride=(s_y, s_x), pad=(h_p, w_p))
>>> y.shape
(10, 3, 20, 45)
>>> h_o = s_y * (h_i - 1) + h_k - 2 * h_p
>>> w_o = s_x * (w_i - 1) + w_k - 2 * w_p
>>> y.shape == (n, c_o, h_o, w_o)
True
"""
argument.check_unexpected_kwargs(
kwargs, deterministic='deterministic argument is not '
'supported anymore. '
'Use chainer.using_config(\'cudnn_deterministic\', value) '
'context where value is either `True` or `False`.')
dilate, groups = argument.parse_kwargs(kwargs,
('dilate', 1), ('groups', 1))
func = Deconvolution2DFunction(stride, pad, outsize, dilate=dilate,
groups=groups)
if b is None:
args = x, W
else:
args = x, W, b
y, = func.apply(args)
return y
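# Editor's note: an illustrative sketch, not part of the original module;
# the helper name ``_deconv_outsize_example`` is hypothetical.  With a 3x3
# kernel and stride 2, inputs of spatial size 7 and 8 both convolve to 3,
# so the inverse mapping is ambiguous; ``outsize`` selects the intended
# output size.
def _deconv_outsize_example():
    x = numpy.random.uniform(0, 1, (1, 2, 3, 3)).astype(numpy.float32)
    W = numpy.random.uniform(0, 1, (2, 4, 3, 3)).astype(numpy.float32)
    y_default = deconvolution_2d(x, W, stride=2)
    y_explicit = deconvolution_2d(x, W, stride=2, outsize=(8, 8))
    assert y_default.shape == (1, 4, 7, 7)
    assert y_explicit.shape == (1, 4, 8, 8)
    return y_default, y_explicit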
| 18,105
| 36.255144
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/connection/convolution_nd.py
|
import numpy
from six import moves
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import configuration
from chainer import function_node
from chainer.functions.connection import convolution_2d
from chainer import utils
from chainer.utils import conv
from chainer.utils import conv_nd
from chainer.utils import type_check
import chainerx
class ConvolutionND(function_node.FunctionNode):
def __init__(self, ndim, stride=1, pad=0, cover_all=False,
dilate=1, groups=1):
self.ndim = ndim
self.stride = conv_nd.as_tuple(stride, ndim)
self.pad = conv_nd.as_tuple(pad, ndim)
self.cover_all = cover_all
self.dilate = conv_nd.as_tuple(dilate, ndim)
self.groups = groups
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type = in_types[0]
w_type = in_types[1]
type_check.expect(
x_type.dtype.kind == 'f',
w_type.dtype.kind == 'f',
x_type.ndim == self.ndim + 2,
w_type.ndim == self.ndim + 2,
# Need to consider the case that group count > 1.
# x_type.shape[1] == w_type.shape[1],
)
if type_check.eval(n_in) == 3:
b_type = in_types[2]
type_check.expect(
b_type.dtype.kind == 'f',
b_type.ndim == 1,
b_type.shape[0] == w_type.shape[0],
)
def forward_chainerx(self, inputs):
# TODO(hvy): Support mixed precision.
if any([arr.dtype != inputs[0].dtype for arr in inputs[1:]]):
return chainer.Fallback
# TODO(hvy): Support dilate > 1.
if any(d != 1 for d in self.dilate):
return chainer.Fallback
# TODO(hvy): Support groups > 1.
if self.groups > 1:
return chainer.Fallback
if inputs[0].device.backend.name == 'cuda' and (
self.cover_all or self.ndim < 2):
return chainer.Fallback
return chainerx.conv(
*inputs, stride=self.stride, pad=self.pad,
cover_all=self.cover_all),
def _use_cudnn(self, x, W):
if cuda._cudnn_version < 6000 and any(d != 1 for d in self.dilate):
# cuDNN < 6.0 does not support dilated convolutions
return False
if cuda._cudnn_version < 7000 and 1 < self.groups:
# cuDNN < 7.0 does not support grouped convolutions
return False
return (
chainer.should_use_cudnn('>=auto')
and not self.cover_all
and x.dtype == W.dtype
and self.ndim > 1)
def _forward_xp(self, x, W, b, xp):
if 1 < self.groups:
return self._forward_grouped_convolution_xp(x, W, b, xp)
else:
return self._forward_xp_core(x, W, b, xp)
def _forward_grouped_convolution_xp(self, x, W, b, xp):
# G: group count
# N: batch size
# iC: input channels
# oC: output channels
G = self.groups
N, iC = x.shape[:2]
oC = W.shape[0]
k_size = W.shape[2:]
iCg = iC // G
oCg = oC // G
dims = len(k_size)
if iC % G != 0:
raise TypeError('The number of groups must be '
'a divisor of that of input channels')
if oC % G != 0:
raise TypeError('The number of groups must be '
'a divisor of that of output channels')
xp = backend.get_array_module(x)
# (N, iC, k_size..., o_size...)
x = conv_nd.im2col_nd(x, k_size, self.stride, self.pad,
cover_all=self.cover_all, dilate=self.dilate)
o_size = x.shape[-dims:]
x = xp.rollaxis(x, 0, dims + 2) # (iC, k_size..., N, o_size...)
mul_len = iCg * utils.size_of_shape(k_size)
x = x.reshape(G, mul_len, N * utils.size_of_shape(o_size))
W = W.reshape(G, oCg, mul_len)
# (G, oCg, N*o_size) = (G, oCg, iCg*k_size) @ (G, iCg*k_size, N*o_size)
y = convolution_2d._matmul(W, x).astype(x.dtype, copy=False)
y = y.reshape(oC, N, *o_size)
y = xp.rollaxis(y, 1) # (N, oC, o_size...)
if b is not None:
y += b.reshape(1, b.size, *((1,) * dims))
return y,
def _forward_xp_core(self, x, W, b, xp):
ndim = self.ndim
ksize = W.shape[2:]
stride = self.stride
pad = self.pad
dilate = self.dilate
# Make patch array.
if xp is numpy:
col = conv_nd.im2col_nd_cpu(
x, ksize, stride, pad, cover_all=self.cover_all, dilate=dilate)
else:
col = conv_nd.im2col_nd_gpu(
x, ksize, stride, pad, cover_all=self.cover_all, dilate=dilate)
# Compute correlation.
axes = tuple(moves.range(1, ndim + 2)) # (1, 2, ..., N+1)
y = xp.tensordot(col, W, (axes, axes)).astype(x.dtype, copy=False)
# Apply bias if given.
if b is not None:
y += b
# Roll c_O before the second in (n, y_1, y_2, ..., y_N, c_O).
return xp.rollaxis(y, ndim + 1, 1),
def _forward_cudnn(self, x, W, b):
out_c = W.shape[0] # (c_O, _, k_1, k_2, ..., k_N)
ksize = W.shape[2:]
n, c = x.shape[:2] # (n, c_I, d_1, d_2, ..., d_N)
dims = x.shape[2:]
stride = self.stride
pad = self.pad
dilate = self.dilate
groups = self.groups
# Make empty array for result.
outs = tuple(
conv.get_conv_outsize(d, k, s, p, cover_all=self.cover_all, d=di)
for (d, k, s, p, di) in zip(dims, ksize, stride, pad, dilate))
assert all(out > 0 for out in outs), 'Output sizes should be positive.'
y_shape = (n, out_c) + outs # (n, c_O, out_1, out_2, ..., out_N)
y = cuda.cupy.empty(y_shape, dtype=x.dtype)
auto_tune = configuration.config.autotune
tensor_core = configuration.config.use_cudnn_tensor_core
cuda.cudnn.convolution_forward(
x, W, b, y, pad, stride, dilate, groups,
auto_tune=auto_tune, tensor_core=tensor_core)
return y,
def forward(self, inputs):
self.retain_inputs((0, 1)) # retain only x and W
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
xp = backend.get_array_module(*inputs)
if xp is numpy:
return self._forward_xp(x, W, b, numpy)
elif not self._use_cudnn(x, W):
return self._forward_xp(x, W, b, cuda.cupy)
else:
return self._forward_cudnn(x, W, b)
def backward(self, indexes, grad_outputs):
x, W = self.get_retained_inputs()
gy, = grad_outputs
ret = []
if 0 in indexes:
x_shape = x.shape[2:]
gx = chainer.functions.deconvolution_nd(
gy, W, stride=self.stride, pad=self.pad, outsize=x_shape,
dilate=self.dilate, groups=self.groups)
ret.append(gx)
if 1 in indexes:
gW, = ConvolutionNDGradW(self).apply((x, gy))
ret.append(gW)
if 2 in indexes:
axis = (0,) + tuple(moves.range(2, gy.ndim))
gb = chainer.functions.sum(gy, axis=axis)
if gb.dtype != self.inputs[2].dtype:
gb = chainer.functions.cast(gb, self.inputs[2].dtype)
ret.append(gb)
return ret
class ConvolutionNDGradW(function_node.FunctionNode):
def __init__(self, convnd):
W_node = convnd.inputs[1]
self.ndim = convnd.ndim
self.ksize = W_node.shape[2:]
self.stride = convnd.stride
self.pad = convnd.pad
self.cover_all = convnd.cover_all
self.dilate = convnd.dilate
self.groups = convnd.groups
self.W_dtype = W_node.dtype
def _use_cudnn(self, x, gy):
if cuda._cudnn_version < 6000 and any(d != 1 for d in self.dilate):
# cuDNN < 6.0 does not support dilated convolutions
return False
if cuda._cudnn_version < 7000 and 1 < self.groups:
# cuDNN < 7.0 does not support grouped convolutions
return False
return (
chainer.should_use_cudnn('>=auto')
and not self.cover_all
and x.dtype == self.W_dtype
and gy.dtype == self.W_dtype
and self.ndim > 1)
def forward(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
xp = backend.get_array_module(*inputs)
if xp is numpy:
return self._forward_xp(x, gy, numpy)
elif not self._use_cudnn(x, gy):
return self._forward_xp(x, gy, cuda.cupy)
else:
return self._forward_cudnn(x, gy)
def _forward_xp(self, x, gy, xp):
if 1 < self.groups:
return self._forward_grouped_convolution_xp(x, gy, xp)
else:
return self._forward_xp_core(x, gy, xp)
def _forward_grouped_convolution_xp(self, x, gy, xp):
G = self.groups
N, iC = x.shape[:2]
oC = gy.shape[1]
o_size = gy.shape[2:]
o_size_prod = utils.size_of_shape(o_size)
k_size = self.ksize
dims = len(o_size)
iCg = iC // G
oCg = oC // G
# Do not check iCg and oCg because this class is rarely used alone
# (N, iC, k_size..., o_size...)
x = conv_nd.im2col_nd(x, k_size, self.stride, self.pad,
cover_all=self.cover_all, dilate=self.dilate)
x = xp.rollaxis(x, 0, dims + 2) # (iC, k_size..., N, o_size...)
mul_len = iCg * utils.size_of_shape(k_size)
x = x.reshape(G, mul_len, N * o_size_prod)
x = x.transpose(0, 2, 1) # (G, N*o_size, iCg*k_size)
gy = xp.rollaxis(gy, 1) # (oC, N, o_size...)
gy = gy.reshape(G, oCg, N * o_size_prod)
# (G, oCg, iCg*k_size) = (G, oCg, N*o_size) @ (G, N*o_size, iCg*k_size)
gW = convolution_2d._matmul(gy, x).astype(self.W_dtype, copy=False)
gW = gW.reshape(oC, iCg, *k_size)
return gW,
def _forward_xp_core(self, x, gy, xp):
# Compute filter weight gradient.
# (n, _, out_1, out_2, ..., out_N)
out_axes = (0,) + tuple(moves.range(2, self.ndim + 2))
# (n, _, _, ..., _, out_1, out_2, ..., out_N)
col_axes = (0,) + tuple(moves.range(self.ndim + 2, self.ndim * 2 + 2))
# NumPy raises an error when the array is not contiguous.
# See: https://github.com/chainer/chainer/issues/2744
# TODO(niboshi): Remove this code when NumPy is fixed.
if (xp is numpy and
not (gy.flags.c_contiguous or gy.flags.f_contiguous) and
1 in gy.shape):
gy = numpy.ascontiguousarray(gy)
if xp is numpy:
col = conv_nd.im2col_nd_cpu(
x, self.ksize, self.stride, self.pad,
cover_all=self.cover_all, dilate=self.dilate)
else:
col = conv_nd.im2col_nd_gpu(
x, self.ksize, self.stride, self.pad,
cover_all=self.cover_all, dilate=self.dilate)
gW = xp.tensordot(gy, col, (out_axes, col_axes)).astype(
self.W_dtype, copy=False)
return gW,
def _forward_cudnn(self, x, gy):
# Make empty arrays for result.
out_c = gy.shape[1]
in_c = x.shape[1] // self.groups
gW = cuda.cupy.empty(
(out_c, in_c) + self.ksize, dtype=self.W_dtype)
# Compute
pad = self.pad
stride = self.stride
dilate = self.dilate
groups = self.groups
deterministic = configuration.config.cudnn_deterministic
auto_tune = configuration.config.autotune
tensor_core = configuration.config.use_cudnn_tensor_core
cuda.cudnn.convolution_backward_filter(
x, gy, gW, pad, stride, dilate, groups,
deterministic=deterministic, auto_tune=auto_tune,
tensor_core=tensor_core)
return gW,
def backward(self, indexes, grad_outputs):
x, gy = self.get_retained_inputs()
ggW, = grad_outputs
ret = []
if 0 in indexes:
x_shape = x.shape[2:]
gx = chainer.functions.deconvolution_nd(
gy, ggW, stride=self.stride, pad=self.pad, outsize=x_shape,
groups=self.groups, dilate=self.dilate)
ret.append(gx)
if 1 in indexes:
ggy = convolution_nd(
x, ggW, stride=self.stride, pad=self.pad,
cover_all=self.cover_all, groups=self.groups,
dilate=self.dilate)
ret.append(ggy)
return ret
def convolution_nd(x, W, b=None, stride=1, pad=0, cover_all=False,
dilate=1, groups=1):
"""N-dimensional convolution function.
    This is an implementation of N-dimensional convolution, which generalizes
    the two-dimensional convolution used in ConvNets. It takes three variables:
    the input ``x``, the filter weight ``W`` and the bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`N` is the number of spatial dimensions.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output
channels, respectively.
- :math:`d_1, d_2, ..., d_N` are the size of each axis of the input's
spatial dimensions, respectively.
- :math:`k_1, k_2, ..., k_N` are the size of each axis of the filters,
respectively.
- :math:`l_1, l_2, ..., l_N` are the size of each axis of the output's
spatial dimensions, respectively.
- :math:`p_1, p_2, ..., p_N` are the size of each axis of the spatial
padding size, respectively.
Then the ``convolution_nd`` function computes correlations between filters
and patches of size :math:`(k_1, k_2, ..., k_N)` in ``x``.
Note that correlation here is equivalent to the inner product between
expanded tensors.
Patches are extracted at positions shifted by multiples of ``stride`` from
the first position ``(-p_1, -p_2, ..., -p_N)`` for each spatial axis.
Let :math:`(s_1, s_2, ..., s_N)` be the stride of filter application.
Then, the output size :math:`(l_1, l_2, ..., l_N)` is determined by the
following equations:
.. math::
l_n = (d_n + 2p_n - k_n) / s_n + 1 \\ \\ (n = 1, ..., N)
    If the ``cover_all`` option is ``True``, the filter will cover all
    spatial locations. So, if the last stride of the filter does not reach
    the end of the spatial locations, an additional stride is applied to the
    end part of the spatial locations. In this case, the output size is
    determined by the following equations:
.. math::
l_n = (d_n + 2p_n - k_n + s_n - 1) / s_n + 1 \\ \\ (n = 1, ..., N)
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable of shape :math:`(n, c_I, d_1, d_2, ..., d_N)`.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Weight variable of shape :math:`(c_O, c_I, k_1, k_2, ..., k_N)`.
b (None or :class:`~chainer.Variable` or :ref:`ndarray`):
One-dimensional bias variable with length :math:`c_O` (optional).
stride (:class:`int` or :class:`tuple` of :class:`int` s):
Stride of filter applications :math:`(s_1, s_2, ..., s_N)`.
``stride=s`` is equivalent to ``(s, s, ..., s)``.
pad (:class:`int` or :class:`tuple` of :class:`int` s):
Spatial padding width for input arrays
:math:`(p_1, p_2, ..., p_N)`. ``pad=p`` is equivalent to
``(p, p, ..., p)``.
        cover_all (bool): If ``True``, all spatial locations are convolved
            into some output pixels. It may make the output size larger.
            ``cover_all`` needs to be ``False`` if you want to use cuDNN.
dilate (:class:`int` or :class:`tuple` of :class:`int` s):
Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d, ..., d)`` are equivalent.
groups (:class:`int`):
The number of groups to use grouped convolution.
The default is one, where grouped convolution is not used.
Returns:
~chainer.Variable:
Output variable of shape :math:`(n, c_O, l_1, l_2, ..., l_N)`.
.. note::
This function uses cuDNN implementation for its forward and backward
computation if ALL of the following conditions are satisfied:
- ``cuda.cudnn_enabled`` is ``True``
- ``chainer.config.use_cudnn`` is ``'always'`` or ``'auto'``
- The number of spatial dimensions is more than one.
- ``cover_all`` is ``False``
- The input's ``dtype`` is equal to the filter weight's.
- The ``dtype`` is FP16, FP32 or FP64. (FP16 is only available when
cuDNN version :math:`\\geq` v3.)
    Convolution links can use a feature of cuDNN called autotuning, which
    selects the most efficient CNN algorithm for fixed-size images and can
    provide a significant performance boost for fixed neural nets.
    To enable it, use `chainer.using_config('autotune', True)`.
.. seealso::
:class:`~chainer.links.ConvolutionND` to manage the model parameters
``W`` and ``b``.
.. seealso:: :func:`convolution_2d`
.. admonition:: Example
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 30, 40, 50
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = np.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32)
>>> x.shape
(10, 3, 30, 40, 50)
>>> W = np.random.uniform(0, 1, (c_o, c_i, k1, k2, k3)).\
astype(np.float32)
>>> W.shape
(1, 3, 10, 10, 10)
>>> b = np.random.uniform(0, 1, (c_o)).astype(np.float32)
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> y = F.convolution_nd(x, W, b, stride=(s1, s2, s3),\
pad=(p1, p2, p3))
>>> y.shape
(10, 1, 16, 11, 9)
>>> l1 = int((d1 + 2 * p1 - k1) / s1 + 1)
>>> l2 = int((d2 + 2 * p2 - k2) / s2 + 1)
>>> l3 = int((d3 + 2 * p3 - k3) / s3 + 1)
>>> y.shape == (n, c_o, l1, l2, l3)
True
>>> y = F.convolution_nd(x, W, b, stride=(s1, s2, s3),\
pad=(p1, p2, p3), cover_all=True)
>>> y.shape == (n, c_o, l1, l2, l3 + 1)
True
"""
ndim = len(x.shape[2:])
fnode = ConvolutionND(
ndim, stride, pad, cover_all, dilate=dilate, groups=groups)
args = (x, W) if b is None else (x, W, b)
y, = fnode.apply(args)
return y
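# Editor's note: an illustrative sketch, not part of the original module;
# the helper name ``_grouped_convolution_nd_example`` is hypothetical.  It
# checks the grouped-convolution semantics used above: with ``groups=G``
# the input and output channels are split into G groups and each group is
# convolved independently.
def _grouped_convolution_nd_example():
    x = numpy.random.uniform(-1, 1, (1, 4, 8, 8)).astype(numpy.float32)
    W = numpy.random.uniform(-1, 1, (6, 2, 3, 3)).astype(numpy.float32)
    y = convolution_nd(x, W, groups=2)
    # Equivalent computation: convolve each channel group separately.
    y0 = convolution_nd(x[:, :2], W[:3])
    y1 = convolution_nd(x[:, 2:], W[3:])
    expected = numpy.concatenate([y0.array, y1.array], axis=1)
    assert numpy.allclose(y.array, expected, atol=1e-5)
    return y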
def convolution_1d(x, W, b=None, stride=1, pad=0, cover_all=False,
dilate=1, groups=1):
"""1-dimensional convolution function.
.. note::
This function calls :func:`~chainer.functions.convolution_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.convolution_nd`.
"""
if len(x.shape[2:]) != 1:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 1. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return convolution_nd(x, W, b, stride, pad, cover_all, dilate, groups)
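# Editor's note: an illustrative sketch, not part of the original module;
# the helper name ``_convolution_1d_example`` is hypothetical.  It shows a
# minimal ``convolution_1d`` call and the resulting output length.
def _convolution_1d_example():
    x = numpy.random.uniform(-1, 1, (2, 3, 10)).astype(numpy.float32)
    W = numpy.random.uniform(-1, 1, (5, 3, 4)).astype(numpy.float32)
    y = convolution_1d(x, W, stride=2, pad=1)
    # Output length: (10 + 2 * 1 - 4) / 2 + 1 = 5
    assert y.shape == (2, 5, 5)
    return y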
def convolution_3d(x, W, b=None, stride=1, pad=0, cover_all=False,
dilate=1, groups=1):
"""3-dimensional convolution function.
.. note::
This function calls :func:`~chainer.functions.convolution_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.convolution_nd`.
"""
if len(x.shape[2:]) != 3:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 3. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return convolution_nd(x, W, b, stride, pad, cover_all, dilate, groups)
| 20,150
| 36.247689
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/connection/linear.py
|
import numpy
from chainer import backend
from chainer.backends import intel64
from chainer import function_node
import chainer.functions
from chainer.graph_optimizations import static_code
from chainer import utils
from chainer.utils import type_check
import chainerx
class LinearFunction(function_node.FunctionNode):
_config_use_ideep = None
_supports_static_optimizations = True
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type, w_type = in_types[:2]
type_check._argname((x_type, w_type), ('x', 'W'))
type_check.expect(
x_type.dtype.kind == 'f',
w_type.dtype.kind == 'f',
x_type.ndim == 2,
w_type.ndim == 2,
x_type.shape[1] == w_type.shape[1],
)
if type_check.eval(n_in) == 3:
b_type = in_types[2]
type_check._argname((b_type,), ('b',))
type_check.expect(
b_type.dtype == x_type.dtype,
b_type.ndim == 1,
b_type.shape[0] == w_type.shape[0],
)
@static_code
def static_linear_no_bias(self, xp, optimized, inputs, outputs):
x, W = inputs
y = outputs[0]
# NumPy raises an error when the array is not contiguous.
# See: https://github.com/chainer/chainer/issues/2744
# TODO(niboshi): Remove this code when NumPy is fixed.
if (isinstance(x, numpy.ndarray) and
not (x.flags.c_contiguous or x.flags.f_contiguous) and
1 in x.shape):
x = numpy.ascontiguousarray(x)
if optimized:
# Note: We can only call this function when both x and W
# have the same dtype. Otherwise, the output type (for y)
# may not be as expected (i.e., not the same dtype as x).
xp.dot(x, W.T, out=y)
else:
y[:] = x.dot(W.T).astype(x.dtype, copy=False)
@static_code
def static_add_bias(self, inputs, outputs):
bias = inputs[0]
y = outputs[0]
y += bias
def forward_chainerx(self, inputs):
# TODO(niboshi): Support dtype casting in ChainerX
if inputs[0].dtype != inputs[1].dtype:
return chainer.Fallback
# Generic implementation
if len(inputs) == 3:
x, W, b = inputs
if x.dtype != b.dtype:
return chainer.Fallback
return chainerx.linear(x, W, b),
else:
x, W = inputs
return chainerx.linear(x, W),
def forward(self, inputs):
self._config_use_ideep = chainer.config.use_ideep
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs)):
# iDeep implementation
return self._forward_ideep(inputs)
# Generic implementation
if len(inputs) == 3:
x, W, b = inputs
else:
(x, W), b = inputs, None
# NumPy raises an error when the array is not contiguous.
# See: https://github.com/chainer/chainer/issues/2744
# TODO(niboshi): Remove this code when NumPy is fixed.
if (isinstance(x, numpy.ndarray) and
not (x.flags.c_contiguous or x.flags.f_contiguous) and
1 in x.shape):
x = numpy.ascontiguousarray(x)
# In order to be compatible with the "static graph" feature, it is
# required that all output arrays of this forward
# function be allocated explicitly:
xp = backend.get_array_module(x)
y = xp.empty((x.shape[0], W.shape[0]), dtype=x.dtype)
# This is required because all of the "static_*()" functions
# use the convention that any output arrays are supplied
# as input arguments to the function. That is because it is
# not allowed for a "static_*()" function to return anything
# other than `None`. The reason is to prevent dynamic allocation
# of output arrays during execution of the static schedule
# because it would break the model.
self.static_linear_no_bias(xp, x.dtype == W.dtype, inputs=[x, W],
outputs=[y])
if len(inputs) == 3:
self.static_add_bias(inputs=[b], outputs=[y])
self.retain_inputs((0, 1)) # b is not retained
return y,
def _forward_ideep(self, inputs):
if len(inputs) == 3:
x, W, b = inputs
else:
(x, W), b = inputs, None
y = intel64.ideep.linear.Forward(
intel64.ideep.array(x),
intel64.ideep.array(W),
intel64.ideep.array(b) if b is not None else None)
self.retain_inputs((0, 1))
return y,
def backward(self, indexes, grad_outputs):
x, W = self.get_retained_inputs()
gy, = grad_outputs
ret = []
with chainer.using_config('use_ideep', self._config_use_ideep):
if 0 in indexes:
gx, = LinearGradData().apply((W, gy))
ret.append(chainer.functions.cast(gx, x.dtype))
if 1 in indexes:
gW, = LinearGradWeight(W.dtype).apply((x, gy))
ret.append(chainer.functions.cast(gW, W.dtype))
if 2 in indexes:
gb = chainer.functions.sum(gy, axis=0)
ret.append(gb)
return ret
class LinearGradData(function_node.FunctionNode):
_config_use_ideep = None
def forward(self, inputs):
self._config_use_ideep = chainer.config.use_ideep
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs)):
# iDeep implementation
return self._forward_ideep(inputs)
# Generic implementation
self.retain_inputs((0, 1))
W, gy = inputs
if (isinstance(gy, numpy.ndarray) and
not (gy.flags.c_contiguous or gy.flags.f_contiguous) and
1 in gy.shape):
gy = numpy.ascontiguousarray(gy)
gx = gy.dot(W).astype(gy.dtype, copy=False)
return gx,
def _forward_ideep(self, inputs):
self.retain_inputs((0, 1))
W, gy = inputs
gx = intel64.ideep.linear.BackwardData(
intel64.ideep.array(W),
intel64.ideep.array(gy))
return gx,
def backward(self, indexes, grad_outputs):
W, gy = self.get_retained_inputs()
ggx, = grad_outputs
ret = []
with chainer.using_config('use_ideep', self._config_use_ideep):
if 0 in indexes:
gw, = LinearGradWeight(W.dtype).apply((ggx, gy))
ret.append(chainer.functions.cast(gw, W.dtype))
if 1 in indexes:
ggy = linear(ggx, W)
ret.append(chainer.functions.cast(ggy, gy.dtype))
return ret
class LinearGradWeight(function_node.FunctionNode):
_config_use_ideep = None
def __init__(self, w_dtype):
self._w_dtype = w_dtype
def forward(self, inputs):
self._config_use_ideep = chainer.config.use_ideep
if (intel64.should_use_ideep('>=auto')
and self._w_dtype == numpy.float32
and intel64.inputs_all_ready(inputs)):
# iDeep implementation
return self._forward_ideep(inputs)
# Generic implementation
self.retain_inputs((0, 1))
x, gy = inputs
if (isinstance(gy, numpy.ndarray) and
not (gy.flags.c_contiguous or gy.flags.f_contiguous) and
1 in gy.shape):
gy = numpy.ascontiguousarray(gy)
gW = gy.T.dot(x).astype(self._w_dtype, copy=False)
return gW,
def _forward_ideep(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
gW = intel64.ideep.linear.BackwardWeights(
intel64.ideep.array(x),
intel64.ideep.array(gy))
return gW,
def backward(self, indexes, grad_outputs):
x, gy = self.get_retained_inputs()
ggW, = grad_outputs
ret = []
with chainer.using_config('use_ideep', self._config_use_ideep):
if 0 in indexes:
gx, = LinearGradData().apply((ggW, gy))
ret.append(chainer.functions.cast(gx, x.dtype))
if 1 in indexes:
ggy = linear(x, ggW)
ret.append(chainer.functions.cast(ggy, gy.dtype))
return ret
def linear(x, W, b=None, n_batch_axes=1):
"""Linear function, or affine transformation.
It accepts two or three arguments: an input minibatch ``x``, a weight
matrix ``W``, and optionally a bias vector ``b``. It computes
.. math:: y_i = W x_i + b.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable,
which is a :math:`(s_1, s_2, ..., s_n)`-shaped float array.
Its first ``n_batch_axes`` dimensions are handled as
*minibatch dimensions*. The other dimensions are handled as
concatenated one dimension whose size must be
:math:`(s_{\\rm n\\_batch\\_axes} * ... * s_n = N)`.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Weight variable of shape :math:`(M, N)`,
where :math:`(N = s_{\\rm n\\_batch\\_axes} * ... * s_n)`.
b (:class:`~chainer.Variable` or :ref:`ndarray`):
Bias variable (optional) of shape :math:`(M,)`.
n_batch_axes (int): The number of batch axes. The default is 1. The
input variable is reshaped into
(:math:`{\\rm n\\_batch\\_axes} + 1`)-dimensional tensor.
This should be greater than 0.
Returns:
~chainer.Variable: Output variable. A float array with shape
of :math:`(s_1, ..., s_{\\rm n\\_batch\\_axes}, M)`.
.. seealso::
:class:`~chainer.links.Linear` to manage the model parameters
``W`` and ``b``.
.. admonition:: Example
>>> x = np.random.uniform(0, 1, (3, 4)).astype(np.float32)
>>> W = np.random.uniform(0, 1, (5, 4)).astype(np.float32)
>>> b = np.random.uniform(0, 1, (5,)).astype(np.float32)
>>> y = F.linear(x, W, b)
>>> y.shape
(3, 5)
"""
if n_batch_axes <= 0:
raise ValueError('n_batch_axes should be greater than 0.')
if n_batch_axes > 1:
batch_shape = x.shape[:n_batch_axes]
batch_size = utils.size_of_shape(batch_shape)
x = x.reshape(batch_size, -1)
elif x.ndim > 2:
x = x.reshape(x.shape[0], -1)
if b is None:
args = x, W
else:
args = x, W, b
y, = LinearFunction().apply(args)
if n_batch_axes > 1:
y = y.reshape(batch_shape + (-1,))
return y
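# Hedged usage sketch (not part of the original Chainer source): how
# ``n_batch_axes`` reshapes the input before the affine transform, assuming
# numpy is importable. With two batch axes, the trailing axes are flattened
# into the feature dimension.
if __name__ == '__main__':
import numpy as _np
_x = _np.random.rand(2, 3, 4, 5).astype(_np.float32) # (s_1, s_2, s_3, s_4)
_W = _np.random.rand(7, 20).astype(_np.float32) # (M, N) with N = 4 * 5
_y = linear(_x, _W, n_batch_axes=2)
assert _y.shape == (2, 3, 7) # (s_1, s_2, M)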
| 10,790
| 33.586538
| 75
|
py
|
chainer
|
chainer-master/chainer/functions/connection/deconvolution_nd.py
|
import numpy
from six import moves
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import configuration
from chainer import function_node
from chainer.functions.connection import convolution_2d
from chainer.functions.connection import convolution_nd
from chainer import utils
from chainer.utils import conv
from chainer.utils import conv_nd
from chainer.utils import type_check
import chainerx
class DeconvolutionND(function_node.FunctionNode):
cover_all = None
def __init__(self, ndim, stride=1, pad=0, outsize=None,
dilate=1, groups=1):
self.ndim = ndim
self.stride = conv_nd.as_tuple(stride, ndim)
self.pad = conv_nd.as_tuple(pad, ndim)
if outsize is not None:
assert len(outsize) == ndim
self.outs = outsize
self.dilate = conv_nd.as_tuple(dilate, ndim)
self.groups = groups
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type, w_type = in_types[:2]
type_check.expect(
x_type.dtype.kind == 'f',
w_type.dtype.kind == 'f',
x_type.ndim == self.ndim + 2,
w_type.ndim == self.ndim + 2,
x_type.shape[1] == w_type.shape[0]
)
if self.outs is not None:
for i, (out, s, p, di) in enumerate(zip(
self.outs, self.stride, self.pad, self.dilate)):
lower_bound = conv.get_conv_outsize(
out, w_type.shape[i + 2], s, p, d=di)
upper_bound = conv.get_conv_outsize(
out, w_type.shape[i + 2], s, p, cover_all=True, d=di)
type_check.expect(
lower_bound <= x_type.shape[i + 2],
x_type.shape[i + 2] <= upper_bound)
if type_check.eval(n_in) == 3:
b_type = in_types[2]
type_check.expect(
b_type.dtype.kind == 'f',
b_type.ndim == 1,
# Need to consider the case that group count > 1.
# b_type.shape[0] == w_type.shape[1]
)
def _use_cudnn(self, x, W, b):
if ((cuda._cudnn_version < 6000
or configuration.config.cudnn_deterministic)
and any(d != 1 for d in self.dilate)):
# cuDNN < 6.0 and deterministic algorithms do not
# support dilated convolutions
return False
if cuda._cudnn_version < 7000 and 1 < self.groups:
# cuDNN < 7.0 does not support grouped convolutions
return False
return (
chainer.should_use_cudnn('>=auto')
and not self.cover_all
and self.ndim > 1
and x.dtype == W.dtype
and (b is None or x.dtype == b.dtype))
def _forward_xp(self, x, W, b, xp):
if 1 < self.groups:
return self._forward_grouped_convolution_xp(x, W, b, xp)
else:
return self._forward_xp_core(x, W, b, xp)
def _forward_grouped_convolution_xp(self, x, W, b, xp):
# G: group count
# N: batch size
# xC: input channels
# yC: output channels
G = self.groups
N, xC = x.shape[:2]
x_size = x.shape[2:]
yCg = W.shape[1]
yC = yCg * G
xCg = xC // G
k_size = W.shape[2:]
dims = len(k_size)
if xC % G != 0:
raise TypeError('The number of groups must be '
'a divisor of that of input channels')
x = xp.rollaxis(x, 1) # (xC, N, x_size...)
x = x.reshape(G, xCg, N * utils.size_of_shape(x_size))
W = W.reshape(G, xCg, yCg * utils.size_of_shape(k_size))
W = W.transpose(0, 2, 1) # (G, yCg*k_size, xCg)
# (G, yCg*k_size, N*x_size) = (G, yCg*k_size, xCg) @ (G, xCg, N*x_size)
col = convolution_2d._matmul(W, x).astype(x.dtype, copy=False)
col = col.reshape((yC,) + k_size + (N,) + x_size)
col = xp.rollaxis(col, dims + 1) # (N, yC, k_size..., x_size...)
y = conv_nd.col2im_nd(col, self.stride, self.pad, self.outs,
dilate=self.dilate)
if b is not None:
y += b.reshape(1, yC, *((1,) * dims))
return y,
def _forward_xp_core(self, x, W, b, xp):
ndim = self.ndim
stride = self.stride
pad = self.pad
dilate = self.dilate
# gcol: C_O, k_1, ..., k_N, n, d_1, ..., d_N
gcol = xp.tensordot(W, x, (0, 1)).astype(x.dtype, copy=False)
# Roll n, which is batch size, before the first.
gcol = xp.rollaxis(gcol, ndim + 1)
# y: n, C_O, d_1, d_2, ..., d_N
if xp is numpy:
y = conv_nd.col2im_nd_cpu(
gcol, stride, pad, self.outs, dilate=dilate)
else:
y = conv_nd.col2im_nd_gpu(
gcol, stride, pad, self.outs, dilate=dilate)
if b is not None:
b_shape = (1, -1) + (1,) * ndim
y += b.reshape(b_shape)
return y,
def _forward_cudnn(self, x, W, b):
c = W.shape[1] * self.groups
n, in_c = x.shape[:2] # x: n, C_I, d_1, d_2, ..., d_N
# Make empty array for output.
y_shape = (n, c) + self.outs # (n, c_O, out_1, out_2, ..., out_N)
y = cuda.cupy.empty(y_shape, dtype=x.dtype)
pad = self.pad
stride = self.stride
dilate = self.dilate
groups = self.groups
deterministic = configuration.config.cudnn_deterministic
auto_tune = configuration.config.autotune
tensor_core = configuration.config.use_cudnn_tensor_core
cuda.cudnn.convolution_backward_data(
W, x, b, y, pad, stride, dilate, groups,
deterministic=deterministic, auto_tune=auto_tune,
tensor_core=tensor_core)
return y,
def forward_chainerx(self, inputs):
# TODO(imanishi): Support it
if any(d != 1 for d in self.dilate):
return chainer.Fallback
# TODO(imanishi): Support it
if self.groups != 1:
return chainer.Fallback
# TODO(imanishi): Support it
if any(a.dtype != inputs[0].dtype for a in inputs):
return chainer.Fallback
# TODO(imanishi): Support it
if inputs[0].device.backend.name == 'cuda' and self.ndim < 2:
return chainer.Fallback
stride = self.stride
pad = self.pad
return chainerx.conv_transpose(*inputs, stride=stride, pad=pad),
def forward(self, inputs):
self.retain_inputs((0, 1)) # only retain x and W
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
if self.outs is None:
dims = x.shape[2:]
ksize = W.shape[2:]
self.outs = tuple(
conv.get_deconv_outsize(d, k, s, p, d=di)
for d, k, s, p, di
in zip(dims, ksize, self.stride, self.pad, self.dilate))
assert all(out > 0 for out in self.outs), \
'Output sizes should be positive.'
self._set_cover_all(x, W)
xp = backend.get_array_module(*inputs)
if xp is numpy:
return self._forward_xp(x, W, b, numpy)
elif not self._use_cudnn(x, W, b):
return self._forward_xp(x, W, b, cuda.cupy)
else:
return self._forward_cudnn(x, W, b)
def backward(self, indexes, grad_outputs):
x, W = self.get_retained_inputs()
gy, = grad_outputs
ret = []
if 0 in indexes:
gx = chainer.functions.convolution_nd(
gy, W, stride=self.stride, pad=self.pad,
cover_all=self.cover_all, dilate=self.dilate,
groups=self.groups)
ret.append(gx)
if 1 in indexes:
gW, = convolution_nd.ConvolutionNDGradW(self).apply((gy, x))
ret.append(gW)
if 2 in indexes:
axis = (0,) + tuple(moves.range(2, gy.ndim))
gb = chainer.functions.sum(gy, axis=axis)
if gb.dtype != self.inputs[2].dtype:
gb = chainer.functions.cast(gb, self.inputs[2].dtype)
ret.append(gb)
return ret
def _set_cover_all(self, x, W):
x_shape = x.shape[2:]
k_shape = W.shape[2:]
self.cover_all = any(
ix != conv.get_conv_outsize(oy, k, s, p, d=di)
for (ix, oy, k, s, p, di)
in zip(x_shape, self.outs, k_shape, self.stride, self.pad,
self.dilate))
def deconvolution_nd(x, W, b=None, stride=1, pad=0, outsize=None,
dilate=1, groups=1):
"""N-dimensional deconvolution function.
This is an implementation of N-dimensional deconvolution which generalizes
the two-dimensional one. In most deep learning frameworks and papers, this
function is called **transposed convolution**. But because of historical
reasons (e.g. the paper by Zeiler `Deconvolutional Networks`_) and backward
compatibility, this function is called **deconvolution** in Chainer.
.. _Deconvolutional Networks: \
http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf
It takes three variables: the input ``x``, the filter weight ``W``, and the
bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`N` is the number of spatial dimensions.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output
channels, respectively.
- :math:`d_1, d_2, ..., d_N` are the size of each axis of the input's
spatial dimensions, respectively.
- :math:`k_1, k_2, ..., k_N` are the size of each axis of the filters,
respectively.
- :math:`p_1, p_2, ..., p_N` are the size of each axis of the spatial
padding size, respectively.
- :math:`s_1, s_2, ..., s_N` are the stride of each axis of filter
application, respectively.
If ``outsize`` option is ``None``, the output size
:math:`(l_1, l_2, ..., l_N)` is determined by the following equations with
the items in the above list:
.. math::
l_n = s_n (d_n - 1) + k_n - 2 p_n \\ \\ (n = 1, ..., N)
If ``outsize`` option is given, the output size is determined by
``outsize``. In this case, the ``outsize`` :math:`(l_1, l_2, ..., l_N)`
must satisfy the following equations:
.. math::
d_n = \\lfloor (l_n + 2p_n - k_n) / s_n \\rfloor + 1 \\ \\ \
(n = 1, ..., N)
Deconvolution links can use a feature of cuDNN called autotuning, which
selects the most efficient CNN algorithm for images of a fixed size and
can provide a significant performance boost for fixed neural nets.
To enable it, set `chainer.using_config('autotune', True)`.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable of shape :math:`(n, c_I, d_1, d_2, ..., d_N)`.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Weight variable of shape :math:`(c_I, c_O, k_1, k_2, ..., k_N)`.
b (None or :class:`~chainer.Variable` or :ref:`ndarray`):
One-dimensional bias variable with length :math:`c_O` (optional).
stride (:class:`int` or :class:`tuple` of :class:`int` s):
Stride of filter applications :math:`(s_1, s_2, ..., s_N)`.
``stride=s`` is equivalent to ``(s, s, ..., s)``.
pad (:class:`int` or :class:`tuple` of :class:`int` s):
Spatial padding width for input arrays
:math:`(p_1, p_2, ..., p_N)`. ``pad=p`` is equivalent to
``(p, p, ..., p)``.
outsize (None or :class:`tuple` of :class:`int` s):
Expected output size of deconvolutional operation. It should be a
tuple of ints :math:`(l_1, l_2, ..., l_N)`. Default value is
``None`` and the outsize is estimated by input size, stride and
pad.
dilate (:class:`int` or :class:`tuple` of :class:`int` s):
Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d, ..., d)`` are equivalent.
groups (:class:`int`):
The number of groups to use grouped convolution.
The default is one, where grouped convolution is not used.
Returns:
~chainer.Variable:
Output variable of shape :math:`(n, c_O, l_1, l_2, ..., l_N)`.
.. seealso::
:class:`~chainer.links.DeconvolutionND` to manage the model parameters
``W`` and ``b``.
.. seealso:: :func:`deconvolution_2d`
.. admonition:: Example
**Example1**: the case when ``outsize`` is not given.
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 5, 10, 15
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = np.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32)
>>> x.shape
(10, 3, 5, 10, 15)
>>> W = np.random.uniform(0, 1, (c_i, c_o, k1, k2, k3)).\
astype(np.float32)
>>> W.shape
(3, 1, 10, 10, 10)
>>> b = np.random.uniform(0, 1, (c_o)).astype(np.float32)
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> y = F.deconvolution_nd(x, W, b, stride=(s1, s2, s3), \
pad=(p1, p2, p3))
>>> y.shape
(10, 1, 8, 36, 84)
>>> l1 = s1 * (d1 - 1) + k1 - 2 * p1
>>> l2 = s2 * (d2 - 1) + k2 - 2 * p2
>>> l3 = s3 * (d3 - 1) + k3 - 2 * p3
>>> y.shape == (n, c_o, l1, l2, l3)
True
**Example2**: the case when ``outsize`` is given.
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 5, 10, 15
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = np.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32)
>>> x.shape
(10, 3, 5, 10, 15)
>>> W = np.random.uniform(0, 1, (c_i, c_o, k1, k2, k3)).\
astype(np.float32)
>>> W.shape
(3, 1, 10, 10, 10)
>>> b = np.random.uniform(0, 1, (c_o)).astype(np.float32)
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> l1, l2, l3 = 9, 38, 87
>>> d1 == int((l1 + 2 * p1 - k1) / s1) + 1
True
>>> d2 == int((l2 + 2 * p2 - k2) / s2) + 1
True
>>> d3 == int((l3 + 2 * p3 - k3) / s3) + 1
True
>>> y = F.deconvolution_nd(x, W, b, stride=(s1, s2, s3), \
pad=(p1, p2, p3), outsize=(l1, l2, l3))
>>> y.shape
(10, 1, 9, 38, 87)
>>> y.shape == (n, c_o, l1, l2, l3)
True
"""
ndim = len(x.shape[2:])
func = DeconvolutionND(
ndim, stride, pad, outsize, dilate=dilate, groups=groups)
args = (x, W) if b is None else (x, W, b)
y, = func.apply(args)
return y
def deconvolution_1d(x, W, b=None, stride=1, pad=0, outsize=None,
dilate=1, groups=1):
"""1-dimensional deconvolution function.
.. note::
This function calls :func:`~chainer.functions.deconvolution_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.deconvolution_nd`.
"""
if len(x.shape[2:]) != 1:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 1. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return deconvolution_nd(x, W, b, stride, pad, outsize, dilate, groups)
def deconvolution_3d(x, W, b=None, stride=1, pad=0, outsize=None,
dilate=1, groups=1):
"""3-dimensional deconvolution function.
.. note::
This function calls :func:`~chainer.functions.deconvolution_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.deconvolution_nd`.
"""
if len(x.shape[2:]) != 3:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 3. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return deconvolution_nd(x, W, b, stride, pad, outsize, dilate, groups)
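# Hedged sketch (not part of the original Chainer source): deconvolution as
# the "transpose" of convolution, assuming chainer and numpy are importable.
# A forward convolution with the same stride/pad maps the deconvolution
# output back to the original spatial size.
if __name__ == '__main__':
import numpy as _np
import chainer.functions as _F
_x = _np.random.rand(1, 2, 7).astype(_np.float32) # (n, c_I, d_1)
_W = _np.random.rand(2, 3, 4).astype(_np.float32) # (c_I, c_O, k_1)
_y = deconvolution_nd(_x, _W, stride=2, pad=1)
# l_1 = s_1 * (d_1 - 1) + k_1 - 2 * p_1 = 2 * 6 + 4 - 2 = 14
assert _y.shape == (1, 3, 14)
_V = _np.random.rand(2, 3, 4).astype(_np.float32) # filters for the check
assert _F.convolution_nd(_y.array, _V, stride=2, pad=1).shape == (1, 2, 7)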
| 16,246
| 35.428251
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/connection/depthwise_convolution_2d.py
|
import chainer
def depthwise_convolution_2d(x, W, b=None, stride=1, pad=0):
"""Two-dimensional depthwise convolution function.
This is an implementation of two-dimensional depthwise convolution.
It takes two or three variables: the input image ``x``, the filter weight
``W``, and optionally, the bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`n` is the batch size.
- :math:`c_I` is the number of input channels.
- :math:`c_M` is the channel multiplier.
- :math:`h` and :math:`w` are the height and width of the input image,
respectively.
- :math:`h_O` and :math:`w_O` are the height and width of the output image,
respectively.
- :math:`k_H` and :math:`k_W` are the height and width of the filters,
respectively.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable of shape :math:`(n, c_I, h, w)`.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Weight variable of shape :math:`(c_M, c_I, k_H, k_W)`.
b (:class:`~chainer.Variable` or :ref:`ndarray`):
Bias variable of length :math:`c_M * c_I` (optional).
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
Returns:
~chainer.Variable:
Output variable. Its shape is :math:`(n, c_I * c_M, h_O, w_O)`.
Like ``Convolution2D``, ``DepthwiseConvolution2D`` function computes
correlations between filters and patches of size :math:`(k_H, k_W)` in
``x``.
But unlike ``Convolution2D``, ``DepthwiseConvolution2D`` does not add up
input channels of filters but concatenates them.
For that reason, the shape of the output of depthwise convolution is
:math:`(n, c_I * c_M, h_O, w_O)`, where :math:`c_M` is called the
channel multiplier.
:math:`(h_O, w_O)` is determined by the equivalent equation of
``Convolution2D``.
If the bias vector is given, then it is added to all spatial locations of
the output of convolution.
See: `L. Sifre. Rigid-motion scattering for image classification
<https://www.di.ens.fr/data/publications/papers/phd_sifre.pdf>`_
.. seealso::
:class:`~chainer.links.DepthwiseConvolution2D`
to manage the model parameters ``W`` and ``b``.
.. admonition:: Example
>>> x = np.random.uniform(0, 1, (2, 3, 4, 7))
>>> W = np.random.uniform(0, 1, (2, 3, 3, 3))
>>> b = np.random.uniform(0, 1, (6,))
>>> y = F.depthwise_convolution_2d(x, W, b)
>>> y.shape
(2, 6, 2, 5)
"""
multiplier, in_channels, kh, kw = W.shape
F = chainer.functions
W = F.transpose(W, (1, 0, 2, 3))
W = F.reshape(W, (multiplier * in_channels, 1, kh, kw))
return F.convolution_2d(x, W, b, stride, pad, groups=in_channels)
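# Hedged sketch (not part of the original Chainer source): because of the
# grouped-convolution rewrite above, output channel ``i * c_M + m`` depends
# only on input channel ``i``. A minimal check of that property, assuming
# numpy and chainer.functions are importable.
if __name__ == '__main__':
import numpy as _np
import chainer.functions as _F
_x = _np.random.rand(1, 3, 5, 5).astype(_np.float32)
_W = _np.random.rand(2, 3, 3, 3).astype(_np.float32) # (c_M, c_I, kH, kW)
_y = depthwise_convolution_2d(_x, _W)
assert _y.shape == (1, 6, 3, 3) # (n, c_I * c_M, h_O, w_O)
# Output channel 0 uses only input channel 0 and filter W[0, 0].
_ref = _F.convolution_2d(_x[:, 0:1], _W[0:1, 0:1])
assert _np.allclose(_y.array[:, 0], _ref.array[:, 0], atol=1e-4)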
| 3,003
| 38.012987
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/connection/local_convolution_2d.py
|
from six import moves
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
from chainer import variable
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
class LocalConvolution2DFunction(function_node.FunctionNode):
def __init__(self, stride=1):
self.sy, self.sx = _pair(stride)
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type, w_type = in_types[:2]
type_check.expect(
x_type.dtype.kind == 'f',
w_type.dtype.kind == 'f',
x_type.ndim == 4,
w_type.ndim == 6,
x_type.shape[1] == w_type.shape[3],
)
if type_check.eval(n_in) == 3:
b_type = in_types[2]
type_check.expect(
b_type.dtype == x_type.dtype,
b_type.ndim == 3,
b_type.shape == w_type.shape[:3]
)
def forward(self, inputs):
# Channels-first is Chainer's tensor format
# W is 6-dimensional
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
stride_row, stride_col = self.sy, self.sx
output_row, output_col = W.shape[1], W.shape[2]
feature_dim = W.shape[3] * W.shape[4] * W.shape[5]
xp = backend.get_array_module(*inputs)
output = xp.empty((x.shape[0], W.shape[0], output_row, output_col,),
dtype=x.dtype)
for i in moves.range(output_row):
for j in moves.range(output_col):
slice_row = slice(i * stride_row,
i * stride_row + W.shape[4])
slice_col = slice(j * stride_col,
j * stride_col + W.shape[5])
x_flatten = xp.reshape(x[..., slice_row, slice_col],
(-1, feature_dim))
W_flatten = xp.reshape(W[:, i, j, ...],
(-1, feature_dim))
output[..., i, j] = xp.dot(x_flatten, W_flatten.T)
if b is not None:
output += b[None, :, :, :]
self.retain_inputs((0, 1)) # only retain x and W
return output,
def backward(self, indices, grad_outputs):
xvar, Wvar = self.get_retained_inputs()
x = xvar.data
W = Wvar.data
gyvar, = grad_outputs
gy = gyvar.data
xp = backend.get_array_module(x, W)
stride_row, stride_col = self.sy, self.sx
output_row, output_col = W.shape[1], W.shape[2]
ret = []
if 0 in indices:
gx = xp.zeros_like(x)
for i in moves.range(output_row):
for j in moves.range(output_col):
slice_row = slice(i * stride_row,
i * stride_row + W.shape[4])
slice_col = slice(j * stride_col,
j * stride_col + W.shape[5])
# ochans * ichans * krows * kcols
W_slice = W[:, i, j, ...]
# nsamps * ochans
gy_slice = gy[..., i, j]
# -> nsamps * ichans * krows * kcols
gx[:, :, slice_row, slice_col] += xp.tensordot(
gy_slice, W_slice, axes=[(1,), (0,)]
)
ret.append(chainer.functions.cast(variable.as_variable(gx),
x.dtype))
if 1 in indices:
gW = xp.empty_like(W)
for i in moves.range(output_row):
for j in moves.range(output_col):
slice_row = slice(i * stride_row,
i * stride_row + W.shape[4])
slice_col = slice(j * stride_col,
j * stride_col + W.shape[5])
# nsamps * inchans * krows * kcols
x_slice = x[:, :, slice_row, slice_col]
# nsamps * outchans * 1 * 1
gy_slice = gy[:, :, i, j]
gW[:, i, j, :, :, :] = xp.tensordot(
gy_slice, x_slice, axes=[(0,), (0,)]
)
ret.append(chainer.functions.cast(variable.as_variable(gW),
W.dtype))
if 2 in indices:
gb = chainer.functions.sum(gyvar, axis=0)
ret.append(gb)
return ret
def local_convolution_2d(x, W, b=None, stride=1):
"""Two-dimensional local convolution function.
Locally-connected function for 2D inputs. Works similarly to
convolution_2d, except that weights are unshared, that is, a different set
of filters is applied at each different patch of the input.
It takes two or three variables: the input image ``x``, the filter weight
``W``, and optionally, the bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`n` is the batch size.
- :math:`c_I` is the number of input channels.
- :math:`c_O` is the number of output channels.
- :math:`h` and :math:`w` are the height and width of the input image,
respectively.
- :math:`h_O` and :math:`w_O` are the height and width of the output image,
respectively.
- :math:`k_H` and :math:`k_W` are the height and width of the filters,
respectively.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable of shape :math:`(n, c_I, h, w)`.
W (:class:`~chainer.Variable` or :ref:`ndarray`): Weight variable of
shape :math:`(c_O, h_O, w_O, c_I, k_H, k_W)`.
b (:class:`~chainer.Variable` or :ref:`ndarray`):
Bias variable of shape :math:`(c_O,h_O,w_O)` (optional).
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
Returns:
~chainer.Variable:
Output variable. Its shape is :math:`(n, c_O, h_O, w_O)`.
Like ``Convolution2D``, ``LocalConvolution2D`` function computes
correlations between filters and patches of size :math:`(k_H, k_W)` in
``x``.
But unlike ``Convolution2D``, ``LocalConvolution2D`` has a separate filter
for each patch of the input.
:math:`(h_O, w_O)` is determined by the equivalent equation of
``Convolution2D``, without any padding.
If the bias vector is given, then it is added to all spatial locations of
the output of convolution.
.. seealso::
:class:`~chainer.links.LocalConvolution2D` to manage the model
parameters ``W`` and ``b``.
.. admonition:: Example
>>> x = np.random.uniform(0, 1, (2, 3, 7, 7))
>>> W = np.random.uniform(0, 1, (2, 5, 5, 3, 3, 3))
>>> b = np.random.uniform(0, 1, (2, 5, 5))
>>> y = F.local_convolution_2d(x, W, b)
>>> y.shape
(2, 2, 5, 5)
"""
fnode = LocalConvolution2DFunction(stride)
if b is None:
args = (x, W)
else:
args = (x, W, b)
y, = fnode.apply(args)
return y
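# Hedged sketch (not part of the original Chainer source): every output
# location (i, j) is a plain dot product between the (i, j)-th filter bank
# and the matching input patch. A minimal numpy check for one location,
# assuming stride 1.
if __name__ == '__main__':
import numpy as _np
_x = _np.random.rand(1, 3, 7, 7).astype(_np.float32)
_W = _np.random.rand(2, 5, 5, 3, 3, 3).astype(_np.float32)
_y = local_convolution_2d(_x, _W)
_i, _j = 1, 2 # arbitrary output location
_patch = _x[0, :, _i:_i + 3, _j:_j + 3].ravel()
_expect = _W[:, _i, _j].reshape(2, -1).dot(_patch)
assert _np.allclose(_y.array[0, :, _i, _j], _expect, atol=1e-4)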
| 7,212
| 36.373057
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/connection/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainer/functions/connection/convolution_2d.py
|
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import configuration
from chainer import function_node
import chainer.functions
from chainer import memory_layouts
from chainer.utils import argument
from chainer.utils import conv
from chainer.utils import type_check
import chainerx
if cuda.cudnn_enabled:
_cudnn_version = cuda.cuda.cudnn.getVersion() # type: ignore
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
# Used by deconvolution_2d.py.
# TODO(beam2d): Unify matmul implementations
def _matmul(a, b):
xp = backend.get_array_module(a)
if not hasattr(xp, 'matmul'):
# NumPy 1.9 does not support matmul. We use einsum instead.
return xp.einsum('ijl,ilk->ijk', a, b)
return xp.matmul(a, b)
class Convolution2DFunction(function_node.FunctionNode):
_use_ideep = False
def __init__(self, stride=1, pad=0, cover_all=False, **kwargs):
dilate, groups, cudnn_fast = argument.parse_kwargs(
kwargs, ('dilate', 1), ('groups', 1), ('cudnn_fast', False),
deterministic='deterministic argument is not supported anymore. '
'Use chainer.using_config(\'cudnn_deterministic\', value) context '
'where value is either `True` or `False`.',
requires_x_grad='requires_x_grad argument is not supported '
'anymore. Just remove the argument. Note that whether to compute '
'the gradient w.r.t. x is automatically decided during '
'backpropagation.')
self.sy, self.sx = _pair(stride)
self.ph, self.pw = _pair(pad)
self.cover_all = cover_all
self.dy, self.dx = _pair(dilate)
self.groups = groups
self.cudnn_fast = cudnn_fast
if self.dx < 1 or self.dy < 1:
raise ValueError('Dilate should be positive, but {} is '
'supplied.'.format(dilate))
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type = in_types[0]
w_type = in_types[1]
type_check.expect(
x_type.dtype.kind == 'f',
w_type.dtype.kind == 'f',
x_type.ndim == 4,
w_type.ndim == 4,
x_type.shape[1] == w_type.shape[1] * self.groups,
)
if type_check.eval(n_in) == 3:
b_type = in_types[2]
type_check.expect(
b_type.dtype == x_type.dtype,
b_type.ndim == 1,
b_type.shape[0] == w_type.shape[0],
)
def check_layout_forward(self, inputs):
input_layouts = self.input_layouts
n = len(inputs)
layouts = (
(memory_layouts.CUDNN_CHANNEL_FIRST_X,
memory_layouts.CUDNN_CHANNEL_LAST_X),
(memory_layouts.CUDNN_CHANNEL_FIRST_W,
memory_layouts.CUDNN_CHANNEL_LAST_W),
(None,))
for i, (input_layout, expected_layouts) in (
enumerate(zip(input_layouts, layouts[:n]))):
if input_layout not in expected_layouts:
raise RuntimeError(
'Invalid layout for input {}: {}'.format(i, input_layout))
def _get_out_size(self, x_shape, w_shape):
_, _, kh, kw = w_shape
_, _, h, w = x_shape
out_h = conv.get_conv_outsize(
h, kh, self.sy, self.ph, cover_all=self.cover_all, d=self.dy)
if out_h <= 0:
raise RuntimeError('Height in the output should be positive.')
out_w = conv.get_conv_outsize(
w, kw, self.sx, self.pw, cover_all=self.cover_all, d=self.dx)
if out_w <= 0:
raise RuntimeError('Width in the output should be positive.')
return out_h, out_w
def _check_input_layouts_all_standard(self):
if not all([layout is None for layout in self.input_layouts]):
raise RuntimeError(
'Non-standard memory layouts are only supported with cupy '
'arrays in {}. Input layouts: {}'.format(
self.label,
self.input_layouts))
def forward_chainerx(self, inputs):
# TODO(hvy): Support mixed precision.
if any([arr.dtype != inputs[0].dtype for arr in inputs[1:]]):
return chainer.Fallback
# TODO(hvy): Support dilate > 1.
if self.dy > 1 or self.dx > 1:
return chainer.Fallback
# TODO(hvy): Support groups > 1.
if self.groups > 1:
return chainer.Fallback
if inputs[0].device.backend.name == 'cuda' and self.cover_all:
return chainer.Fallback
return chainerx.conv(
*inputs, stride=(self.sy, self.sx), pad=(self.ph, self.pw),
cover_all=self.cover_all),
def forward_cpu(self, inputs):
if self.cudnn_fast:
raise RuntimeError(
'\'cudnn_fast\' can\'t be used in the CPU backend')
self._check_input_layouts_all_standard()
self.retain_inputs((0, 1)) # retain only x and W
if len(inputs) == 2:
(x, W), b = inputs, None
else:
x, W, b = inputs
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs)):
self._use_ideep = True
if self.groups > 1:
return self._forward_grouped_convolution(x, W, b)
else:
return self._forward_cpu_core(x, W, b)
def _forward_cpu_core(self, x, W, b):
if self._use_ideep:
return self._forward_ideep(x, W, b)
kh, kw = W.shape[2:]
col = conv.im2col_cpu(
x, kh, kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all, dy=self.dy, dx=self.dx)
y = numpy.tensordot(
col, W, ((1, 2, 3), (1, 2, 3))).astype(x.dtype, copy=False)
if b is not None:
y += b
y = numpy.rollaxis(y, 3, 1)
return y,
def _forward_ideep(self, x, W, b):
out_c, input_c, kh, kw = W.shape
n, c, h, w = x.shape
out_h, out_w = self._get_out_size(x.shape, W.shape)
pd = (self.sy * (out_h - 1)
+ (kh + (kh - 1) * (self.dy - 1)) - h - self.ph)
pr = (self.sx * (out_w - 1)
+ (kw + (kw - 1) * (self.dx - 1)) - w - self.pw)
param = intel64.ideep.convolution2DParam(
(n, out_c, out_h, out_w),
self.dy, self.dx,
self.sy, self.sx,
self.ph, self.pw,
pd, pr)
y = intel64.ideep.convolution2D.Forward(
intel64.ideep.array(x),
intel64.ideep.array(W),
intel64.ideep.array(b) if b is not None else None,
param)
return y,
def forward_gpu(self, inputs):
self.retain_inputs((0, 1)) # retain only x and W
if len(inputs) == 2:
(x, W), b = inputs, None
x_layout, w_layout = self.input_layouts
else:
x, W, b = inputs
x_layout, w_layout, _ = self.input_layouts
x_shape = memory_layouts._transpose_shape(x.shape, x_layout, None)
w_shape = memory_layouts._transpose_shape(W.shape, w_layout, None)
n, _, h, w = x_shape
out_c, _, kh, kw = w_shape
out_h, out_w = self._get_out_size(x_shape, w_shape)
y_raw_shape = memory_layouts._transpose_shape(
(n, out_c, out_h, out_w), None, x_layout)
y = cuda.cupy.empty(y_raw_shape, dtype=x.dtype)
use_cudnn = (
chainer.should_use_cudnn('>=auto')
and not self.cover_all
and x.dtype == W.dtype
and ((self.dy == 1 and self.dx == 1) or _cudnn_version >= 6000)
and (self.groups <= 1 or _cudnn_version >= 7000)
)
if self.cudnn_fast and not use_cudnn:
raise RuntimeError('\'cudnn_fast\' requires cuDNN to work')
if use_cudnn:
# cuDNN implementation
return self._forward_cudnn(x, W, b, y, (x_layout, w_layout))
elif self.groups > 1:
return self._forward_grouped_convolution(x, W, b)
else:
return self._forward_gpu_core(x, W, b)
def _forward_gpu_core(self, x, W, b):
kh, kw = W.shape[2:]
# Implementation using im2col
col = conv.im2col_gpu(
x, kh, kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all, dy=self.dy, dx=self.dx)
y = cuda.cupy.tensordot(
col, W, ((1, 2, 3), (1, 2, 3))).astype(x.dtype, copy=False)
# TODO(beam2d): Support unshared bias
if b is not None:
y += b
y = cuda.cupy.rollaxis(y, 3, 1)
return y,
def _forward_grouped_convolution(self, x, W, b):
# G: group count
# N: batch size
# kH, kW: kernel height, kernel width
# iC, iH, iW: input channels, input height, input width
# oC, oH, oW: output channels, output height, output width
G = self.groups
N, iC, iH, iW = x.shape
oC, _, kH, kW = W.shape # _ == iCg
iCg = iC // G
oCg = oC // G
# (N, iC, kW, kW, oH, oW)
x = conv.im2col(x, kH, kW, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all, dy=self.dy, dx=self.dx)
oH, oW = x.shape[-2:]
x = x.transpose(1, 2, 3, 0, 4, 5) # (iC, kH, kW, N, oH, oW)
x = x.reshape(G, iCg * kH * kW, N * oH * oW)
W = W.reshape(G, oCg, iCg * kH * kW)
# (G, oCg, N*oH*oW) = (G, oCg, iCg*kH*kW) @ (G, iCg*kH*kW, N*oH*oW)
y = _matmul(W, x).astype(x.dtype, copy=False)
y = y.reshape(oC, N, oH, oW)
y = y.transpose(1, 0, 2, 3) # (N, oC, oH, oW)
if b is not None:
y += b.reshape(1, b.size, 1, 1)
return y,
def _forward_cudnn(self, x, W, b, y, input_layouts):
x_layout, w_layout = input_layouts
self.output_layouts = (x_layout,)
pad = (self.ph, self.pw)
stride = (self.sy, self.sx)
dilation = (self.dy, self.dx)
auto_tune = configuration.config.autotune
tensor_core = configuration.config.use_cudnn_tensor_core
cudnn_x_layout = cuda._get_cudnn_tensor_layout_x(x_layout)
cudnn_w_layout = cuda._get_cudnn_tensor_layout_w(w_layout)
cuda.cudnn.convolution_forward(
x, W, b, y, pad, stride, dilation, self.groups,
auto_tune=auto_tune, tensor_core=tensor_core,
d_layout=cudnn_x_layout, w_layout=cudnn_w_layout)
return y,
def backward(self, indexes, grad_outputs):
x, W = self.get_retained_inputs()
if len(self.input_layouts) == 2:
x_layout, _ = self.input_layouts
else:
x_layout, _, _ = self.input_layouts
gy, = grad_outputs
ret = []
if 0 in indexes:
_, _, xh, xw = x.shape
gx = chainer.functions.deconvolution_2d(
gy, W, stride=(self.sy, self.sx), pad=(self.ph, self.pw),
outsize=(xh, xw), dilate=(self.dy, self.dx),
groups=self.groups)
assert gx.shape == x.shape
ret.append(gx)
if 1 in indexes:
gW, = Convolution2DGradW(
self, W.shape, W.dtype, W.layout).apply((x, gy))
ret.append(gW)
if 2 in indexes:
axis = (0, 2, 3)
inv_trans = memory_layouts._get_layout_transpose_axes(
gy.ndim, None, x_layout)
if inv_trans is None:
raw_axis = axis
else:
raw_axis = tuple([inv_trans[i] for i in axis])
gb = chainer.functions.sum(gy, axis=raw_axis)
ret.append(gb)
return ret
class Convolution2DGradW(function_node.FunctionNode):
def __init__(self, conv2d, w_shape, w_dtype, w_layout):
self.kh, self.kw = w_shape[2::]
self.sy = conv2d.sy
self.sx = conv2d.sx
self.ph = conv2d.ph
self.pw = conv2d.pw
self.dy = conv2d.dy
self.dx = conv2d.dx
self.cover_all = conv2d.cover_all
self.W_shape = w_shape
self.W_dtype = w_dtype
self.w_layout = w_layout
self.groups = conv2d.groups
self._use_ideep = conv2d._use_ideep
def check_layout_forward(self, inputs):
pass
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
if self.groups > 1:
return self._forward_grouped_convolution(x, gy)
else:
return self._forward_cpu_core(x, gy)
def _forward_cpu_core(self, x, gy):
if self._use_ideep:
return self._forward_ideep(x, gy)
# NumPy raises an error when the array is not contiguous.
# See: https://github.com/chainer/chainer/issues/2744
# TODO(niboshi): Remove this code when NumPy is fixed.
if (not (gy.flags.c_contiguous or gy.flags.f_contiguous) and
1 in gy.shape):
gy = numpy.ascontiguousarray(gy)
col = conv.im2col_cpu(
x, self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all, dy=self.dy, dx=self.dx)
gW = numpy.tensordot(gy, col, ((0, 2, 3), (0, 4, 5))
).astype(self.W_dtype, copy=False)
return gW,
def _forward_ideep(self, x, gy):
n, input_c, h, w = x.shape
n, out_c, out_h, out_w = gy.shape
pd = (self.sy * (out_h - 1)
+ (self.kh + (self.kh - 1) * (self.dy - 1))
- h - self.ph)
pr = (self.sx * (out_w - 1)
+ (self.kw + (self.kw - 1) * (self.dx - 1))
- w - self.pw)
param = intel64.ideep.convolution2DParam(
(out_c, input_c, self.kh, self.kw),
self.dy, self.dx,
self.sy, self.sx,
self.ph, self.pw,
pd, pr)
gW = intel64.ideep.convolution2D.BackwardWeights(
intel64.ideep.array(x),
intel64.ideep.array(gy),
param)
return gW,
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
use_cudnn = (
chainer.should_use_cudnn('>=auto')
and not self.cover_all
and x.dtype == self.W_dtype
and ((self.dy == 1 and self.dx == 1)
or (_cudnn_version >= 6000
and not configuration.config.cudnn_deterministic))
and (self.groups <= 1 or _cudnn_version >= 7000)
)
if use_cudnn:
# cuDNN implementation
return self._forward_cudnn(x, gy)
elif self.groups > 1:
return self._forward_grouped_convolution(x, gy)
else:
return self._forward_gpu_core(x, gy)
def _forward_gpu_core(self, x, gy):
col = conv.im2col_gpu(
x, self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all, dy=self.dy, dx=self.dx)
gW = cuda.cupy.tensordot(gy, col, ((0, 2, 3), (0, 4, 5))
).astype(self.W_dtype, copy=False)
return gW,
def _forward_grouped_convolution(self, x, gy):
# G: group count
# N: batch size
# kH, kW: kernel height, kernel width
# iC, iH, iW: input channels, input height, input width
# oC, oH, oW: output channels, output height, output width
G = self.groups
N, iC, iH, iW = x.shape
_, oC, oH, oW = gy.shape # _ == N
kH = self.kh
kW = self.kw
iCg = iC // G
oCg = oC // G
# (N, iC, kH, kW, oH, oW)
x = conv.im2col(x, kH, kW, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all, dy=self.dy, dx=self.dx)
x = x.transpose(1, 2, 3, 0, 4, 5) # (iC, kH, kW, N, oH, oW)
x = x.reshape(G, iCg * kH * kW, N * oH * oW)
x = x.transpose(0, 2, 1) # (G, N*oH*oW, iCg*kH*kW)
gy = gy.transpose(1, 0, 2, 3) # (oC, N, oH, oW)
gy = gy.reshape(G, oCg, N * oH * oW)
# (G, oCg, iCg*kH*kW) = (G, oCg, N*oH*oW) @ (G, N*oH*oW, iCg*kH*kW)
gW = _matmul(gy, x).astype(self.W_dtype, copy=False)
gW = gW.reshape(oC, iCg, kH, kW)
return gW,
def _forward_cudnn(self, x, gy):
x_layout, gy_layout = self.input_layouts
w_layout = self.w_layout
w_raw_shape = memory_layouts._transpose_shape(
self.W_shape, None, w_layout)
gW = cuda.cupy.empty(w_raw_shape, dtype=self.W_dtype)
pad = (self.ph, self.pw)
stride = (self.sy, self.sx)
dilation = (self.dy, self.dx)
deterministic = configuration.config.cudnn_deterministic
auto_tune = configuration.config.autotune
tensor_core = configuration.config.use_cudnn_tensor_core
cudnn_x_layout = cuda._get_cudnn_tensor_layout_x(x_layout)
cudnn_w_layout = cuda._get_cudnn_tensor_layout_w(w_layout)
cuda.cudnn.convolution_backward_filter(
x, gy, gW, pad, stride, dilation, self.groups,
deterministic=deterministic, auto_tune=auto_tune,
tensor_core=tensor_core,
d_layout=cudnn_x_layout, w_layout=cudnn_w_layout)
return gW,
def backward(self, indexes, grad_outputs):
x, gy = self.get_retained_inputs()
ggW, = grad_outputs
ret = []
if 0 in indexes:
xh, xw = x.shape[2:]
gx = chainer.functions.deconvolution_2d(
gy, ggW, stride=(self.sy, self.sx), pad=(self.ph, self.pw),
outsize=(xh, xw), dilate=(self.dy, self.dx),
groups=self.groups)
ret.append(gx)
if 1 in indexes:
ggy = convolution_2d(
x, ggW, stride=(self.sy, self.sx), pad=(self.ph, self.pw),
cover_all=self.cover_all, dilate=(self.dy, self.dx),
groups=self.groups)
ret.append(ggy)
return ret
def convolution_2d(x, W, b=None, stride=1, pad=0, cover_all=False, **kwargs):
"""convolution_2d(x, W, b=None, stride=1, pad=0, cover_all=False, *, \
dilate=1, groups=1)
Two-dimensional convolution function.
This is an implementation of two-dimensional convolution in ConvNets.
It takes three variables: the input image ``x``, the filter weight ``W``,
and the bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output
channels, respectively.
- :math:`h_I` and :math:`w_I` are the height and width of the input image,
respectively.
- :math:`h_K` and :math:`w_K` are the height and width of the filters,
respectively.
- :math:`h_P` and :math:`w_P` are the height and width of the spatial
padding size, respectively.
Then the ``Convolution2D`` function computes correlations between filters
and patches of size :math:`(h_K, w_K)` in ``x``.
Note that correlation here is equivalent to the inner product between
expanded vectors.
Patches are extracted at positions shifted by multiples of ``stride`` from
the first position ``(-h_P, -w_P)`` for each spatial axis.
The right-most (or bottom-most) patches do not run over the padded spatial
size.
Let :math:`(s_Y, s_X)` be the stride of filter application. Then, the
output size :math:`(h_O, w_O)` is determined by the following equations:
.. math::
h_O &= (h_I + 2h_P - h_K) / s_Y + 1,\\\\
w_O &= (w_I + 2w_P - w_K) / s_X + 1.
If the ``cover_all`` option is ``True``, the filter will cover all the
spatial locations. So, if the last stride of filter does not cover the
end of spatial locations, an additional stride will be applied to the end
part of spatial locations. In this case, the output size :math:`(h_O, w_O)`
is determined by the following equations:
.. math::
h_O &= (h_I + 2h_P - h_K + s_Y - 1) / s_Y + 1,\\\\
w_O &= (w_I + 2w_P - w_K + s_X - 1) / s_X + 1.
If the bias vector is given, then it is added to all spatial locations of
the output of convolution.
The output of this function can be non-deterministic when it uses cuDNN.
If ``chainer.configuration.config.cudnn_deterministic`` is ``True`` and
cuDNN version is >= v3, it forces cuDNN to use a deterministic algorithm.
Convolution links can use a feature of cuDNN called autotuning, which
selects the most efficient CNN algorithm for images of a fixed size and
can provide a significant performance boost for fixed neural nets.
To enable it, set `chainer.using_config('autotune', True)`.
When the dilation factor is greater than one, cuDNN is not used unless
the version is 6.0 or higher.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable of shape :math:`(n, c_I, h_I, w_I)`.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Weight variable of shape :math:`(c_O, c_I, h_K, w_K)`.
b (None or :class:`~chainer.Variable` or :ref:`ndarray`):
Bias variable of length :math:`c_O` (optional).
stride (:class:`int` or pair of :class:`int` s):
Stride of filter applications. ``stride=s`` and ``stride=(s, s)``
are equivalent.
pad (:class:`int` or pair of :class:`int` s):
Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
cover_all (:class:`bool`):
If ``True``, all spatial locations are convoluted into some output
pixels.
dilate (:class:`int` or pair of :class:`int` s):
Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
groups (:class:`int`): Number of groups of channels. If the number
is greater than 1, input tensor :math:`W` is divided into some
blocks by this value. For each tensor blocks, convolution
operation will be executed independently. Input channel size
:math:`c_I` and output channel size :math:`c_O` must be exactly
divisible by this value.
Returns:
~chainer.Variable:
Output variable of shape :math:`(n, c_O, h_O, w_O)`.
.. seealso::
:class:`~chainer.links.Convolution2D` to manage the model parameters
``W`` and ``b``.
.. admonition:: Example
>>> n = 10
>>> c_i, c_o = 3, 1
>>> h_i, w_i = 30, 40
>>> h_k, w_k = 10, 10
>>> h_p, w_p = 5, 5
>>> x = np.random.uniform(0, 1, (n, c_i, h_i, w_i)).astype(np.float32)
>>> x.shape
(10, 3, 30, 40)
>>> W = np.random.uniform(0, 1, (c_o, c_i, h_k, w_k)).\
astype(np.float32)
>>> W.shape
(1, 3, 10, 10)
>>> b = np.random.uniform(0, 1, (c_o,)).astype(np.float32)
>>> b.shape
(1,)
>>> s_y, s_x = 5, 7
>>> y = F.convolution_2d(x, W, b, stride=(s_y, s_x), pad=(h_p, w_p))
>>> y.shape
(10, 1, 7, 6)
>>> h_o = int((h_i + 2 * h_p - h_k) / s_y + 1)
>>> w_o = int((w_i + 2 * w_p - w_k) / s_x + 1)
>>> y.shape == (n, c_o, h_o, w_o)
True
>>> y = F.convolution_2d(x, W, b, stride=(s_y, s_x), pad=(h_p, w_p), \
cover_all=True)
>>> y.shape == (n, c_o, h_o, w_o + 1)
True
"""
dilate, groups, cudnn_fast = argument.parse_kwargs(
kwargs, ('dilate', 1), ('groups', 1), ('cudnn_fast', False),
deterministic='deterministic argument is not supported anymore. '
'Use chainer.using_config(\'cudnn_deterministic\', value) '
'context where value is either `True` or `False`.')
fnode = Convolution2DFunction(stride, pad, cover_all, dilate=dilate,
groups=groups, cudnn_fast=cudnn_fast)
if b is None:
args = x, W
else:
args = x, W, b
y, = fnode.apply(args)
return y
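# Hedged sketch (not part of the original Chainer source): with ``groups=2``
# the input channels and the filters are split into two independent halves,
# so the result equals two separate convolutions concatenated along the
# channel axis. Minimal numpy check:
if __name__ == '__main__':
import numpy as _np
_x = _np.random.rand(1, 4, 6, 6).astype(_np.float32)
_W = _np.random.rand(6, 2, 3, 3).astype(_np.float32) # (c_O, c_I / groups, kH, kW)
_y = convolution_2d(_x, _W, groups=2)
_y0 = convolution_2d(_x[:, :2], _W[:3])
_y1 = convolution_2d(_x[:, 2:], _W[3:])
_ref = _np.concatenate([_y0.array, _y1.array], axis=1)
assert _np.allclose(_y.array, _ref, atol=1e-4)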
| 24,207
| 35.678788
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/connection/embed_id.py
|
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class EmbedIDFunction(function_node.FunctionNode):
def __init__(self, ignore_label=None):
self.ignore_label = ignore_label
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
x_type, w_type = in_types
type_check.expect(
x_type.dtype.kind == 'i',
x_type.ndim >= 1,
)
type_check.expect(
w_type.dtype.kind == 'f',
w_type.ndim == 2
)
def forward(self, inputs):
self.retain_inputs((0,))
x, W = inputs
self._w_shape = W.shape
xp = backend.get_array_module(*inputs)
if chainer.is_debug():
valid_x = xp.logical_and(0 <= x, x < len(W))
if self.ignore_label is not None:
valid_x = xp.logical_or(valid_x, x == self.ignore_label)
if not valid_x.all():
raise ValueError('Each non-ignored `x` value needs to satisfy '
'`0 <= x < len(W)`')
if self.ignore_label is not None:
mask = (x == self.ignore_label)
return xp.where(mask[..., None], 0, W[xp.where(mask, 0, x)]),
return W[x],
def backward(self, indexes, grad_outputs):
inputs = self.get_retained_inputs()
gW = EmbedIDGrad(
self._w_shape, self.ignore_label).apply(inputs + grad_outputs)[0]
return None, gW
class EmbedIDGrad(function_node.FunctionNode):
def __init__(self, w_shape, ignore_label=None):
self.w_shape = w_shape
self.ignore_label = ignore_label
def forward(self, inputs):
self.retain_inputs((0,))
xp = backend.get_array_module(*inputs)
x, gy = inputs
self._gy_shape = gy.shape
gW = xp.zeros(self.w_shape, dtype=gy.dtype)
if xp is numpy:
# It is equivalent to `numpy.add.at(gW, x, gy)` but ufunc.at is
# too slow.
for ix, igy in six.moves.zip(x.ravel(),
gy.reshape(x.size, -1)):
if ix == self.ignore_label:
continue
gW[ix] += igy
else:
utils.nondeterministic('atomicAdd')
if self.ignore_label is None:
cuda.elementwise(
'T gy, S x, S n_out', 'raw T gW',
'ptrdiff_t w_ind[] = {x, i % n_out};'
'atomicAdd(&gW[w_ind], gy)',
'embed_id_bwd')(
gy, xp.expand_dims(x, -1), gW.shape[1], gW)
else:
cuda.elementwise(
'T gy, S x, S n_out, S ignore', 'raw T gW',
'''
if (x != ignore) {
ptrdiff_t w_ind[] = {x, i % n_out};
atomicAdd(&gW[w_ind], gy);
}
''',
'embed_id_bwd_ignore_label')(
gy, xp.expand_dims(x, -1), gW.shape[1],
self.ignore_label, gW)
return gW,
def backward(self, indexes, grads):
xp = backend.get_array_module(*grads)
x = self.get_retained_inputs()[0].data
ggW = grads[0]
if self.ignore_label is not None:
mask = x == self.ignore_label
# To prevent index out of bounds, we need to check if ignore_label
# is inside of W.
if not (0 <= self.ignore_label < self.w_shape[1]):
x = xp.where(mask, 0, x)
ggy = ggW[x]
if self.ignore_label is not None:
mask, zero, _ = xp.broadcast_arrays(
mask[..., None], xp.zeros((), ggy.dtype), ggy.data)
ggy = chainer.functions.where(mask, zero, ggy)
return None, ggy
def embed_id(x, W, ignore_label=None):
"""Efficient linear function for one-hot input.
This function implements so called *word embeddings*. It takes two
arguments: a set of IDs (words) ``x`` in :math:`B` dimensional integer
vector, and a set of all ID (word) embeddings ``W`` in :math:`V \\times d`
float matrix. It outputs :math:`B \\times d` matrix whose ``i``-th
row is the ``x[i]``-th row of ``W``.
This function is only differentiable on the input ``W``.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Batch vectors of IDs. Each element must be signed integer.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Distributed representation of each ID (a.k.a. word embeddings).
ignore_label (:class:`int` or :class:`None`):
If ``ignore_label`` is an int value, the rows of the return
value that correspond to ``x`` elements equal to ``ignore_label``
are filled with ``0``.
Returns:
~chainer.Variable: Output variable.
.. seealso::
:class:`~chainer.links.EmbedID` to manage the model parameter ``W``.
.. admonition:: Example
>>> x = np.array([2, 1]).astype(np.int32)
>>> x
array([2, 1], dtype=int32)
>>> W = np.array([[0, 0, 0],
... [1, 1, 1],
... [2, 2, 2]]).astype(np.float32)
>>> W
array([[0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.]], dtype=float32)
>>> F.embed_id(x, W).array
array([[2., 2., 2.],
[1., 1., 1.]], dtype=float32)
>>> F.embed_id(x, W, ignore_label=1).array
array([[2., 2., 2.],
[0., 0., 0.]], dtype=float32)
"""
return EmbedIDFunction(ignore_label=ignore_label).apply((x, W))[0]
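# Hedged sketch (not part of the original Chainer source): gradients flow
# only into ``W`` (the gradient w.r.t. ``x`` is ``None``), and each row of
# ``W.grad`` accumulates the upstream gradients of the IDs that selected it.
if __name__ == '__main__':
import numpy as _np
_x = _np.array([2, 1, 2], dtype=_np.int32)
_W = chainer.Variable(_np.eye(3, dtype=_np.float32))
_y = embed_id(_x, _W)
chainer.functions.sum(_y).backward()
# ID 0 is never used, ID 1 once, ID 2 twice.
assert _np.allclose(_W.grad.sum(axis=1), [0., 3., 6.])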
| 5,774
| 33.171598
| 78
|
py
|
chainer
|
chainer-master/chainer/functions/connection/dilated_convolution_2d.py
|
from chainer.functions.connection import convolution_2d
def dilated_convolution_2d(x, W, b=None, stride=1, pad=0, dilate=1,
cover_all=False):
"""Two-dimensional dilated convolution function.
This is an implementation of two-dimensional dilated convolution
in ConvNets.
It takes three variables: the input image ``x``, the filter weight ``W``,
and the bias vector ``b``.
.. note::
You can also perform dilated convolution by passing ``dilate``
argument to :class:`chainer.functions.convolution_2d`.
The functionality is the same.
Notation: here is a notation for dimensionalities.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of input and output
  channels, respectively.
- :math:`h` and :math:`w` are the height and width of the input image,
respectively.
- :math:`k_H` and :math:`k_W` are the height and width of the filters,
respectively.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable of shape :math:`(n, c_I, h, w)`.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Weight variable of shape :math:`(c_O, c_I, k_H, k_W)`.
b (:class:`~chainer.Variable` or :ref:`ndarray`):
Bias variable of length :math:`c_O` (optional).
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
dilate (int or pair of ints): Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
cover_all (bool): If ``True``, all spatial locations are convoluted
into some output pixels. It may make the output size larger.
Returns:
~chainer.Variable: Output variable.
The two-dimensional dilated convolution function is defined as follows.
The ``DilatedConvolution2D`` function computes correlations
between filters and patches of size :math:`(k_H, k_W)` in ``x``.
Patches here are extracted at intervals of the dilation factor.
Note that correlation here is equivalent to the inner product between
expanded vectors.
Patches are extracted at intervals of the dilation factor and at positions
shifted by multiples of ``stride`` from the first position ``-pad`` for
each spatial axis. The right-most (or bottom-most) patches do not run over
the padded spatial size.
Let :math:`(s_Y, s_X)` be the stride of filter application,
:math:`(p_H, p_W)` the spatial padding size, and :math:`(d_Y, d_X)`
the dilation factor of filter application. Then, the output size
:math:`(h_O, w_O)` is determined by the following equations:
.. math::
h_O &= (h + 2p_H - k_H - (k_H - 1) * (d_Y - 1)) / s_Y + 1,\\\\
w_O &= (w + 2p_W - k_W - (k_W - 1) * (d_X - 1)) / s_X + 1.
If the bias vector is given, then it is added to all spatial locations of
the output of convolution.
"""
return convolution_2d.convolution_2d(x, W, b,
stride, pad, cover_all, dilate=dilate)
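# Hedged sketch (not part of the original Chainer source): the output size
# with dilation follows the equations above. Minimal numpy check:
if __name__ == '__main__':
import numpy as _np
_x = _np.random.rand(1, 1, 10, 10).astype(_np.float32)
_W = _np.random.rand(1, 1, 3, 3).astype(_np.float32)
_y = dilated_convolution_2d(_x, _W, stride=1, pad=0, dilate=2)
# h_O = (10 + 2 * 0 - 3 - (3 - 1) * (2 - 1)) / 1 + 1 = 6
assert _y.shape == (1, 1, 6, 6)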
| 3,279
| 43.324324
| 79
|
py
|
chainer
|
chainer-master/chainer/functions/loss/mean_absolute_error.py
|
import numpy
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
def _get_intermediate_dtype(dtype):
# Returns the dtype for intermediate calculation.
# For float16 input, float32 is used.
# Otherwise the same dtype as the parameter is used.
if dtype == numpy.float16:
return numpy.float32
return dtype
class MeanAbsoluteError(function_node.FunctionNode):
"""Mean absolute error function."""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x0', 'x1'))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
in_types[0].shape == in_types[1].shape
)
def forward_cpu(self, inputs):
x0, x1 = inputs
self.diff = x0 - x1
orig_dtype = self.diff.dtype
dtype = _get_intermediate_dtype(orig_dtype)
diff = self.diff.ravel().astype(dtype, copy=False)
return numpy.array(abs(diff).sum() / diff.size, dtype=orig_dtype),
def forward_gpu(self, inputs):
x0, x1 = inputs
self.diff = x0 - x1
orig_dtype = self.diff.dtype
dtype = _get_intermediate_dtype(orig_dtype)
diff = self.diff.ravel().astype(dtype, copy=False)
return (abs(diff).sum() / diff.dtype.type(diff.size)).astype(
orig_dtype, copy=False),
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
coeff = gy * gy.data.dtype.type(1. / self.diff.size)
coeff = chainer.functions.broadcast_to(coeff, self.diff.shape)
gx0 = coeff * backend.get_array_module(gy.data).sign(self.diff)
return gx0, -gx0
def mean_absolute_error(x0, x1):
"""Mean absolute error function.
The function computes the mean absolute error between two variables. The
mean is taken over the minibatch. Args ``x0`` and ``x1`` must have the
same dimensions. This function first calculates the absolute value
differences between the corresponding elements in x0 and x1, and then
returns the mean of those differences.
Args:
x0 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
x1 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable:
A variable holding an array representing the mean absolute
error of two inputs.
.. admonition:: Example
1D array examples:
>>> x = np.array([1, 2, 3]).astype(np.float32)
>>> y = np.array([0, 0, 0]).astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(2.)
>>> x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
>>> y = np.array([7, 8, 9, 10, 11, 12]).astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(6.)
2D array example:
In this example, there are 4 elements, and thus 4 errors
>>> x = np.array([[1, 2], [3, 4]]).astype(np.float32)
>>> y = np.array([[8, 8], [8, 8]]).astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(5.5)
3D array example:
In this example, there are 8 elements, and thus 8 errors
>>> x = np.reshape(np.array([1, 2, 3, 4, 5, 6, 7, 8]), (2, 2, 2))
>>> y = np.reshape(np.array([8, 8, 8, 8, 8, 8, 8, 8]), (2, 2, 2))
>>> x = x.astype(np.float32)
>>> y = y.astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(3.5)
"""
return MeanAbsoluteError().apply((x0, x1))[0]
| 3,585
| 32.830189
| 76
|
py
|
chainer
|
chainer-master/chainer/functions/loss/ctc.py
|
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function
from chainer import utils
from chainer.utils import collections_abc
from chainer.utils import type_check
def _logsumexp(a, xp, axis=None):
vmax = xp.amax(a, axis=axis, keepdims=True)
if xp is numpy:
vmax += xp.log(xp.sum(xp.exp(a - vmax),
axis=axis, keepdims=True, dtype=a.dtype))
else:
_logsumexp_impl = cuda.reduce(
'T x, T vmax', 'T y',
'exp(x - vmax)', 'a + b', 'y += log(a)', '0',
'logsumexp_impl')
_logsumexp_impl(a, vmax, vmax, axis=axis, keepdims=True)
return xp.squeeze(vmax, axis=axis)
def _softmax(x, xp):
val = xp.exp(x - xp.amax(x, axis=2, keepdims=True))
val /= xp.sum(val, axis=2, keepdims=True)
return val
def _label_to_path(labels, blank_symbol, xp):
path = xp.full((len(labels), labels.shape[1] * 2 + 1),
blank_symbol, dtype=numpy.int32)
path[:, 1::2] = labels
return path
def _flip_path(path, path_length, xp):
"""Flips label sequence.
This function rotates a label sequence and flips it.
``path[b, t]`` stores a label at time ``t`` in ``b``-th batch.
The rotated matrix ``r`` is defined as
``r[b, t] = path[b, t + path_length[b]]``
.. ::
a b c d .     . a b c d    d c b a .
e f . . .  -> . . . e f -> f e . . .
g h i j k     g h i j k    k j i h g
"""
n_batch, n_label = path.shape
rotate = (xp.arange(n_label) + path_length[:, None]) % n_label
return path[xp.arange(n_batch, dtype=xp.int32)[:, None],
rotate][:, ::-1]
def _flip_label_probability(y, input_length, xp):
"""Flips a label probability matrix.
This function rotates a label probability matrix and flips it.
``y[i, b, l]`` stores log probability of label ``l`` at ``i``-th
input in ``b``-th batch.
The rotated matrix ``r`` is defined as
``r[i, b, l] = y[i + input_length[b], b, l]``
"""
seq, n_batch, n_vocab = y.shape
rotate = (xp.arange(seq, dtype=xp.int32)[:, None] + input_length) % seq
return y[
rotate[:, :, None],
xp.arange(n_batch, dtype=xp.int32)[None, :, None],
xp.arange(n_vocab, dtype=xp.int32)[None, None, :]][::-1]
def _flip_path_probability(prob, input_length, path_length, xp):
"""Flips a path probability matrix.
    This function rotates a path probability matrix and flips it.
``prob[i, b, t]`` stores log probability at ``i``-th input and
at time ``t`` in a output sequence in ``b``-th batch.
The rotated matrix ``r`` is defined as
``r[i, j, k] = prob[i + input_length[j], j, k + path_length[j]]``
"""
seq, n_batch, n_label = prob.shape
rotate_input = ((xp.arange(seq, dtype=xp.int32)[:, None] + input_length)
% seq)
rotate_label = ((xp.arange(n_label, dtype=xp.int32) + path_length[:, None])
% n_label)
return prob[
rotate_input[:, :, None],
xp.arange(n_batch, dtype=xp.int32)[None, :, None],
rotate_label][::-1, :, ::-1]
class ConnectionistTemporalClassification(function.Function):
"""The implementation of Connectionist Temporal Classfication loss functions.
To make it usable for real-world cases, this class has two policies below.
1. This class computes forward and backward variables in the log domain.
2. This class applies the softmax function to inputs. The Backward
values of CTC loss is often overflows. This is avoided by computing
backward values before the activation function is applied.
"""
def __init__(self, blank_symbol, reduce='mean'):
self.blank_symbol = blank_symbol
# Lazily initialized in the first forward computation for dtype
self.zero_padding = None
if reduce not in ('mean', 'no'):
raise ValueError(
'only \'mean\' and \'no\' are valid '
'for \'reduce\', but \'%s\' is given' % reduce)
self.reduce = reduce
def check_type_forward(self, in_types):
type_check._argname(
in_types, ('input_length', 'label_length', 't', 'x'))
input_length_type, label_length_type, t_type, x_type = in_types
type_check.expect(
input_length_type.dtype == numpy.int32,
input_length_type.ndim == 1,
label_length_type.dtype == numpy.int32,
label_length_type.ndim == 1,
t_type.ndim == 2,
t_type.dtype == numpy.int32,
x_type.ndim == 3,
x_type.dtype.kind == 'f',
)
n_batch = x_type.shape[1]
type_check.expect(
t_type.shape[0] == n_batch,
input_length_type.shape[0] == n_batch,
label_length_type.shape[0] == n_batch,
)
def log_matrix(self, x, xp):
if xp == numpy:
res = numpy.ma.log(x).filled(fill_value=self.zero_padding)
else:
create_recurrence_relation = cuda.elementwise(
'T x, T e', 'T y',
'y = x == 0 ? e : (T)log(x)',
'create_recurrence_relation')
res = create_recurrence_relation(x, self.zero_padding)
return res.astype(x.dtype, copy=False)
# path probability to label probability
def label_probability(self, label_size, path, path_length,
multiply_seq, xp):
seq_length = len(multiply_seq)
n_batch = len(path)
dtype = multiply_seq.dtype
ret = xp.zeros((seq_length, n_batch, label_size), dtype)
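        # For every batch element, the probabilities of all path positions
        # that carry the same label are summed, giving a
        # (time, batch, label) tensor of label probabilities.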
if xp == numpy:
for b in six.moves.range(len(path)):
target_path = path[b, :path_length[b]]
chars = {c for c in target_path}
for c in chars:
ret[:, b, c] = xp.sum(
multiply_seq[:, b, 0:path_length[b]]
[:, target_path == c], axis=1)
else:
utils.nondeterministic('atomicAdd')
cuda.elementwise(
'T prob, I path, I path_length, I max_path_length',
'raw T cum_prob',
'''
I t = i % max_path_length;
if (t < path_length) {
int n_batch = cum_prob.shape()[1];
I s = i / (max_path_length * n_batch);
I b = (i - s * (max_path_length * n_batch))
/ max_path_length;
int ind[] = {s, b, path};
atomicAdd(&cum_prob[ind], prob);
}
''', 'ctc_label_prob_sum'
)(multiply_seq, path, path_length[:, None], path.shape[1], ret)
return ret
def _computes_transition(
self, prev_prob, path, path_length, cum_prob, y):
xp = backend.get_array_module(prev_prob)
if xp == numpy:
n_batch, max_path_length = path.shape
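            # CTC allows three transitions into a path position: staying at
            # the same position, advancing by one, or advancing by two; the
            # last one is disabled when the symbol two steps back equals the
            # current symbol (this covers blank-to-blank moves and repeated
            # labels).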
mat = xp.full(
(3, n_batch, max_path_length), self.zero_padding, y.dtype)
mat[0, :, :] = prev_prob
mat[1, :, 1:] = prev_prob[:, :-1]
mat[2, :, 2:] = prev_prob[:, :-2]
# disable transition between the same symbols
# (including blank-to-blank)
same_transition = (path[:, :-2] == path[:, 2:])
mat[2, :, 2:][same_transition] = self.zero_padding
prob = _logsumexp(mat, xp, axis=0)
outside = xp.arange(max_path_length) >= path_length[:, None]
prob[outside] = self.zero_padding
cum_prob += prob
batch_index = xp.arange(n_batch, dtype=xp.int32)
prob += y[batch_index[:, None], path]
else:
prob = xp.empty_like(prev_prob)
cuda.elementwise(
'raw T prob, raw I path, I path_length, T zero, raw T y',
'T z, T cum_prob',
'''
int length = prob.shape()[1];
int b = i / length;
int t = i - b * length;
if (t >= path_length) {
z = zero;
cum_prob += zero;
return;
}
int ind1[] = {b, t};
int ind2[] = {b, t - 1};
int ind3[] = {b, t - 2};
T f1 = prob[ind1];
T f2 = (0 <= t - 1) ? prob[ind2] : zero;
T f3 = (0 <= t - 2 && path[ind3] != path[ind1]) ?
prob[ind3] : zero;
// calculates log-sum-exp
T m = max(f1, max(f2, f3));
z = m + log(exp(f1 - m) + exp(f2 - m) + exp(f3 - m));
cum_prob += z;
int y_ind[] = {b, path[ind1]};
z += y[y_ind];
''', 'ctc_transition'
)(prev_prob, path, path_length[:, None], self.zero_padding, y,
prob, cum_prob)
return prob
def calc_trans(self, yseq, input_length,
label, label_length, path, path_length, xp):
max_input_length, n_batch, n_unit = yseq.shape
max_label_length = label.shape[1]
max_path_length = path.shape[1]
assert label.shape == (n_batch, max_label_length), label.shape
assert path.shape == (n_batch, max_label_length * 2 + 1)
forward_prob = xp.full(
(n_batch, max_path_length), self.zero_padding, dtype=yseq.dtype)
forward_prob[:, 0] = 0
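        # In the log domain, probability one is 0 and probability zero is
        # approximated by ``zero_padding``; the recursion therefore starts
        # with all mass on the first path position.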
backward_prob = forward_prob
batch_index = xp.arange(n_batch, dtype=xp.int32)
seq_index = xp.arange(len(yseq), dtype=xp.int32)
prob = yseq[seq_index[:, None, None], batch_index[:, None], path]
# forward computation.
for i, y in enumerate(yseq):
forward_prob = self._computes_transition(
forward_prob, path, path_length, prob[i], y)
r_path = _flip_path(path, path_length, xp)
yseq_inv = _flip_label_probability(yseq, input_length, xp)
prob = _flip_path_probability(prob, input_length, path_length, xp)
for i, y_inv in enumerate(yseq_inv):
backward_prob = self._computes_transition(
backward_prob, r_path, path_length, prob[i], y_inv)
return _flip_path_probability(prob, input_length, path_length, xp)
def forward(self, inputs):
xp = backend.get_array_module(inputs[0])
self.input_length, label_length, t, xs = inputs
if self.zero_padding is None:
if xs.dtype == numpy.float16:
self.zero_padding = -10000.0
else:
self.zero_padding = -10000000000.0
if chainer.is_debug():
assert len(xs) >= xp.max(self.input_length)
assert t.shape[1] >= xp.max(label_length)
self.path_length = 2 * label_length + 1
self.yseq = _softmax(xs, xp)
log_yseq = self.log_matrix(self.yseq, xp)
self.path = _label_to_path(t, self.blank_symbol, xp)
self.prob_trans = self.calc_trans(
log_yseq, self.input_length, t,
label_length, self.path, self.path_length, xp)
loss = -_logsumexp(self.prob_trans[0], xp, axis=1)
if self.reduce == 'mean':
loss = utils.force_array(xp.mean(loss))
return loss,
def backward(self, inputs, grad_output):
xp = backend.get_array_module(inputs[0])
batch_size = len(inputs[2])
total_probability = _logsumexp(self.prob_trans[0], xp, axis=1)
label_prob = self.label_probability(
self.yseq.shape[2], self.path, self.path_length,
xp.exp(self.prob_trans - total_probability[:, None]), xp)
self.yseq -= label_prob
if self.reduce == 'mean':
self.yseq *= grad_output[0] / batch_size
else:
self.yseq *= grad_output[0][..., None]
# mask
self.yseq *= (
xp.arange(len(self.yseq))[:, None] < self.input_length)[..., None]
return None, None, None, self.yseq
def connectionist_temporal_classification(
x, t, blank_symbol, input_length=None, label_length=None,
reduce='mean'):
"""Connectionist Temporal Classification loss function.
    Connectionist Temporal Classification (CTC) [Graves2006]_ is a loss
    function for sequence labeling where the alignment between the inputs and
    the targets is unknown. See also [Graves2012]_.
The output is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds the samplewise
loss values. If it is ``'mean'``, it takes the mean of loss values.
Args:
x (list or tuple of :class:`~chainer.Variable`):
A list of unnormalized probabilities for labels.
Each element of ``x``, ``x[i]`` is a :class:`~chainer.Variable`
object, which has shape ``(B, V)``, where ``B``
is the batch size and ``V`` is the number of labels.
The softmax of ``x[i]`` represents the probabilities of the labels
at time ``i``.
t (:class:`~chainer.Variable` or :ref:`ndarray`):
A matrix including expected label sequences.
Its shape is ``(B, M)``, where ``B`` is the batch size and ``M`` is
the maximum length of the label sequences.
All elements in ``t`` must be less than ``V``, the number of
labels.
        blank_symbol (int): Index of the blank symbol.
This value must be non-negative.
input_length (:class:`~chainer.Variable` or :ref:`ndarray`):
            Length of the sequence for each element of the mini-batch ``x``
            (optional).
Its shape must be ``(B,)``.
If the ``input_length`` is omitted or ``None``, it assumes that
all of ``x`` is valid input.
label_length (:class:`~chainer.Variable` or :ref:`ndarray`):
            Length of the sequence for each element of the mini-batch ``t``
            (optional).
Its shape must be ``(B,)``.
If the ``label_length`` is omitted or ``None``, it assumes that
all of ``t`` is valid input.
reduce (str): Reduction option. Its value must be either
``'mean'`` or ``'no'``. Otherwise,
:class:`ValueError` is raised.
Returns:
~chainer.Variable:
A variable holding a scalar value of the CTC loss.
        If ``reduce`` is ``'no'``, the output variable holds an array
        whose shape is ``(B,)``, where ``B`` is the number of samples.
If it is ``'mean'``, it holds a scalar.
.. note::
        You need to input ``x`` without applying an activation function (e.g.
        the softmax function), because this function applies the softmax
        function to ``x`` before calculating the CTC loss in order to avoid
        numerical limitations. You also need to apply the softmax function to
        the forwarded values before you decode them.
.. note::
This function is differentiable only by ``x``.
.. note::
This function supports (batch, sequence, 1-dimensional input)-data.
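    .. admonition:: Example
        A minimal usage sketch; the shapes and values below (4 time steps,
        batch size 2, 5 label classes) are illustrative assumptions, not
        part of the API.
        >>> x = [chainer.Variable(np.zeros((2, 5), dtype=np.float32))
        ...      for _ in range(4)]
        >>> t = np.array([[1, 2], [3, 4]], dtype=np.int32)
        >>> loss = F.connectionist_temporal_classification(
        ...     x, t, blank_symbol=0)
        >>> loss.shape
        ()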
.. [Graves2006] Alex Graves, Santiago Fernandez,
Faustino Gomez, Jurgen Schmidhuber,
`Connectionist Temporal Classification: Labelling Unsegmented
Sequence Data with Recurrent Neural Networks
<ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf>`_
.. [Graves2012] Alex Graves,
`Supervised Sequence Labelling with Recurrent Neural Networks
<https://www.cs.toronto.edu/~graves/preprint.pdf>`_
"""
if not isinstance(x, collections_abc.Sequence):
raise TypeError('x must be a list of Variables')
if not isinstance(blank_symbol, int):
        raise TypeError('blank_symbol must be a non-negative integer.')
assert 0 <= blank_symbol < x[0].shape[1]
# This implementation only supports 1-dimensional data.
# TODO(jnishi): Support d(>1)-dimensional inputs.
assert x[0].ndim == 2
xp = backend.get_array_module(x[0])
if input_length is None:
input_length = xp.full(len(x[0]), len(x), dtype=numpy.int32)
if label_length is None:
label_length = xp.full(len(t), t.shape[1], dtype=numpy.int32)
return ConnectionistTemporalClassification(blank_symbol, reduce)(
input_length, label_length, t, chainer.functions.stack(x))
file_length: 16314 | avg_line_length: 38.21875 | max_line_length: 81 | extension_type: py
repo: chainer | file: chainer-master/chainer/functions/loss/absolute_error.py
from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
import chainerx
class AbsoluteError(function_node.FunctionNode):
"""Element-wise absolute error function."""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x0', 'x1'))
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
in_types[0].shape == in_types[1].shape
)
def forward_chainerx(self, inputs):
x0, x1 = inputs
self.diff = x0 - x1
return chainerx.abs(self.diff),
def forward(self, inputs):
x0, x1 = inputs
self.diff = x0 - x1
return utils.force_array(abs(self.diff), dtype=x0.dtype),
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
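        # d|x0 - x1| / dx0 = sign(x0 - x1); the gradient w.r.t. x1 is its
        # negation.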
gx = gy * backend.get_array_module(gy).sign(self.diff)
return gx, -gx
def absolute_error(x0, x1):
"""Element-wise absolute error function.
Computes the element-wise absolute error :math:`L` between two inputs
:math:`x_0` and :math:`x_1` defined as follows.
.. math::
L = |x_0 - x_1|
Args:
x0 (:class:`~chainer.Variable` or :ref:`ndarray`):
First input variable.
x1 (:class:`~chainer.Variable` or :ref:`ndarray`):
Second input variable.
Returns:
~chainer.Variable:
An array representing the element-wise absolute error between the
two inputs.
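    .. admonition:: Example
        A minimal usage sketch; the input values are illustrative
        assumptions.
        >>> x0 = np.array([1.0, -2.0, 3.0], dtype=np.float32)
        >>> x1 = np.array([2.0, 1.0, 3.0], dtype=np.float32)
        >>> F.absolute_error(x0, x1).array.tolist()
        [1.0, 3.0, 0.0]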
"""
return AbsoluteError().apply((x0, x1))[0]
file_length: 1603 | avg_line_length: 26.186441 | max_line_length: 77 | extension_type: py
repo: chainer | file: chainer-master/chainer/functions/loss/sigmoid_cross_entropy.py
import chainer
from chainer import backend
from chainer import function_node
from chainer.functions.activation import sigmoid
from chainer import utils
from chainer.utils import type_check
class SigmoidCrossEntropy(function_node.FunctionNode):
"""Sigmoid activation followed by a sigmoid cross entropy loss."""
ignore_label = -1
def __init__(self, normalize=True, reduce='mean'):
self.normalize = normalize
if reduce not in ('mean', 'no'):
raise ValueError(
'only \'mean\' and \'no\' are valid for \'reduce\', but '
'\'%s\' is given' % reduce)
self.reduce = reduce
self.count = None
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 't'))
x_type, t_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
t_type.dtype.kind == 'i',
x_type.shape == t_type.shape
)
def forward(self, inputs):
self.retain_inputs((0, 1))
xp = backend.get_array_module(*inputs)
x, t = inputs
self.ignore_mask = (t != self.ignore_label)
# stable computation of the cross entropy.
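        # Using log(sigmoid(x)) = -log(1 + exp(-x)) and
        # log(1 - sigmoid(x)) = -x - log(1 + exp(-x)), the loss
        # -(t * log(sigmoid(x)) + (1 - t) * log(1 - sigmoid(x))) can be
        # rewritten as -(x * (t - (x >= 0)) - log1p(exp(-|x|))), which avoids
        # overflow of exp() for large |x|.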
loss = -(
self.ignore_mask *
(x * (t - (x >= 0)) - xp.log1p(xp.exp(-xp.abs(x)))))
if not self.reduce == 'mean':
return utils.force_array(loss.astype(x.dtype)),
if self.normalize:
count = xp.maximum(1, self.ignore_mask.sum())
else:
count = max(1, len(x))
self.count = count
# TODO(takagi): Fix to perform division in a specific dtype. See
# cupy/cupy#1534.
return utils.force_array(
xp.divide(xp.sum(loss), self.count), dtype=x.dtype),
def backward(self, inputs, grad_outputs):
x, t = self.get_retained_inputs()
gy, = grad_outputs
gx, = SigmoidCrossEntropyGrad(
self.reduce, self.count, self.ignore_mask, t.data).apply((x, gy))
return gx, None
class SigmoidCrossEntropyGrad(function_node.FunctionNode):
"""Sigmoid cross entropy gradient function."""
def __init__(self, reduce, count, ignore_mask, t):
self.reduce = reduce
self.count = count
self.ignore_mask = ignore_mask
self.t = t
def forward(self, inputs):
self.retain_inputs((0, 1))
xp = backend.get_array_module(*inputs)
x, gy = inputs
y, = sigmoid.Sigmoid().forward((x,))
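        # The gradient of the sigmoid cross entropy w.r.t. x is
        # sigmoid(x) - t, masked for ignored labels and, in 'mean' mode,
        # divided by the normalization count.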
if self.reduce == 'mean':
# TODO(takagi): Fix to perform division in a specific dtype. See
# cupy/cupy#1534.
gx = xp.divide(
gy * self.ignore_mask * (y - self.t), self.count).astype(
y.dtype)
else:
gx = (gy * self.ignore_mask * (y - self.t)).astype(y.dtype)
return gx,
def backward(self, indexes, grad_outputs):
ggx, = grad_outputs
x, gy = self.get_retained_inputs()
y = chainer.functions.sigmoid(x)
yp = y * (1 - y)
gx = yp * chainer.functions.broadcast_to(gy, yp.shape)
ggy = y - self.t.astype(y.dtype)
gx *= self.ignore_mask * ggx
ggy *= self.ignore_mask * ggx
if self.reduce == 'mean':
gx /= self.count
ggy = chainer.functions.sum(ggy) / self.count
return gx, ggy
def sigmoid_cross_entropy(x, t, normalize=True, reduce='mean'):
"""Computes cross entropy loss for pre-sigmoid activations.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
A variable object holding a matrix whose
(i, j)-th element indicates the unnormalized log probability of
the j-th unit at the i-th example.
t (:class:`~chainer.Variable` or :ref:`ndarray`):
A variable object holding a matrix whose
(i, j)-th element indicates a signed integer vector of
ground truth labels 0 or 1.
If ``t[i, j] == -1``, corresponding ``x[i, j]`` is ignored.
Loss is zero if all ground truth labels are ``-1``.
        normalize (bool): A boolean value that determines
            the normalization constant. If true, this function
            normalizes the cross entropy loss across all instances. Otherwise,
            it normalizes the loss only by the batch size.
        reduce (str): A string that determines
            whether to reduce the shape of the output.
            If it is ``'mean'``, it computes the sum of the cross entropy
            and normalizes it according to the ``normalize`` option.
            If it is ``'no'``, this function computes the cross entropy for
            each instance and does not normalize it (the ``normalize`` option
            is ignored). In this case, the loss value of an ignored instance,
            which has ``-1`` as its target value, is set to ``0``.
Returns:
~chainer.Variable: A variable object holding an array of the cross
entropy.
If ``reduce`` is ``'mean'``, it is a scalar array.
If ``reduce`` is ``'no'``, the shape is same as those of ``x`` and
``t``.
.. note::
This function is differentiable only by ``x``.
.. admonition:: Example
>>> x = np.array([[-2.0, 3.0, 0.5], [5.0, 2.0, -0.5]]).\
astype(np.float32)
>>> x
array([[-2. , 3. , 0.5],
[ 5. , 2. , -0.5]], dtype=float32)
>>> t = np.array([[0, 1, 0], [1, 1, -1]]).astype(np.int32)
>>> t
array([[ 0, 1, 0],
[ 1, 1, -1]], dtype=int32)
>>> F.sigmoid_cross_entropy(x, t)
variable(0.25664714)
>>> F.sigmoid_cross_entropy(x, t, normalize=False)
variable(0.64161783)
>>> y = F.sigmoid_cross_entropy(x, t, reduce='no')
>>> y.shape
(2, 3)
>>> y.array
array([[ 0.126928 , 0.04858735, 0.974077 ],
[ 0.00671535, 0.126928 , -0. ]], dtype=float32)
"""
return SigmoidCrossEntropy(normalize, reduce).apply((x, t))[0]
file_length: 6098 | avg_line_length: 33.851429 | max_line_length: 77 | extension_type: py